diff --git "a/4223.jsonl" "b/4223.jsonl" new file mode 100644--- /dev/null +++ "b/4223.jsonl" @@ -0,0 +1,776 @@ +{"seq_id":"210213537","text":"import tornadobase.application\nfrom . import handlers\nfrom . import modules\n\nimport tornado\nfrom tornado.web import URLSpec\nfrom tornado.options import define, options\n\n\ndefine('dbname', type=str)\ndefine('dbuser', type=str)\ndefine('dbpass', type=str)\ndefine('session_timeout', type=int, default=86400)\ndefine('u2f_app_id', type=str)\n\n\nclass Application(tornadobase.application.Application):\n\n def init_settings(self):\n settings = super().init_settings()\n settings['ui_modules'] = modules\n settings['default_handler_class'] = handlers.NotFoundHandler\n\n return settings\n\n def init_handlers(self):\n\n self.handlers = [\n URLSpec(r'/',\n handlers.IndexHandler,\n name='Home'),\n URLSpec(r'/articles',\n handlers.ArticleHandler,\n name='Articles'),\n URLSpec(r'/articles/new',\n handlers.NewArticleHandler,\n name='NewArticle'),\n URLSpec(r'/articles/([0-9a-z-]*)',\n handlers.ArticleHandler,\n name='Article'),\n URLSpec(r'/articles/edit/([0-9a-z-]+)',\n handlers.EditArticleHandler,\n name='EditArticle'),\n URLSpec(r'/assets/',\n handlers.StaticHandler,\n name='Assets'),\n URLSpec(r'/login',\n handlers.AuthHandler,\n name='Login'),\n URLSpec(r'/logout',\n handlers.AuthHandler,\n {'action': 'logout'},\n name='Logout'),\n URLSpec(r'/register',\n handlers.U2FRegisterHandler,\n name='Register'),\n URLSpec(r'/sign',\n handlers.U2FAuthHandler,\n name='Sign'),\n URLSpec(r'/profile',\n handlers.ProfileHandler,\n name='Profile')]\n\n def stop(self):\n tornado.ioloop.IOLoop.current().stop()\n\n\ndef main():\n options.parse_command_line()\n app = Application()\n app.start()\n","sub_path":"zeroecks/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"93648314","text":"\"\"\"Support for Life360 device tracking.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom life360 import Life360Error\nimport voluptuous as vol\n\nfrom homeassistant.components.device_tracker import (\n CONF_SCAN_INTERVAL,\n DOMAIN as DEVICE_TRACKER_DOMAIN,\n)\nfrom homeassistant.components.zone import async_active_zone\nfrom homeassistant.const import (\n ATTR_BATTERY_CHARGING,\n ATTR_ENTITY_ID,\n CONF_PREFIX,\n LENGTH_FEET,\n LENGTH_KILOMETERS,\n LENGTH_METERS,\n LENGTH_MILES,\n STATE_UNKNOWN,\n)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.event import track_time_interval\nfrom homeassistant.util.async_ import run_callback_threadsafe\nfrom homeassistant.util.distance import convert\nimport homeassistant.util.dt as dt_util\n\nfrom .const import (\n CONF_CIRCLES,\n CONF_DRIVING_SPEED,\n CONF_ERROR_THRESHOLD,\n CONF_MAX_GPS_ACCURACY,\n CONF_MAX_UPDATE_WAIT,\n CONF_MEMBERS,\n CONF_SHOW_AS_STATE,\n CONF_WARNING_THRESHOLD,\n DOMAIN,\n SHOW_DRIVING,\n SHOW_MOVING,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nSPEED_FACTOR_MPH = 2.25\nEVENT_DELAY = timedelta(seconds=30)\n\nATTR_ADDRESS = \"address\"\nATTR_AT_LOC_SINCE = \"at_loc_since\"\nATTR_DRIVING = \"driving\"\nATTR_LAST_SEEN = \"last_seen\"\nATTR_MOVING = \"moving\"\nATTR_PLACE = \"place\"\nATTR_RAW_SPEED = \"raw_speed\"\nATTR_SPEED = \"speed\"\nATTR_WAIT = \"wait\"\nATTR_WIFI_ON = \"wifi_on\"\n\nEVENT_UPDATE_OVERDUE = \"life360_update_overdue\"\nEVENT_UPDATE_RESTORED = \"life360_update_restored\"\n\n\ndef _include_name(filter_dict, name):\n if not 
name:\n return False\n if not filter_dict:\n return True\n name = name.lower()\n if filter_dict[\"include\"]:\n return name in filter_dict[\"list\"]\n return name not in filter_dict[\"list\"]\n\n\ndef _exc_msg(exc):\n return f\"{exc.__class__.__name__}: {exc}\"\n\n\ndef _dump_filter(filter_dict, desc, func=lambda x: x):\n if not filter_dict:\n return\n _LOGGER.debug(\n \"%scluding %s: %s\",\n \"In\" if filter_dict[\"include\"] else \"Ex\",\n desc,\n \", \".join([func(name) for name in filter_dict[\"list\"]]),\n )\n\n\ndef setup_scanner(hass, config, see, discovery_info=None):\n \"\"\"Set up device scanner.\"\"\"\n config = hass.data[DOMAIN][\"config\"]\n apis = hass.data[DOMAIN][\"apis\"]\n Life360Scanner(hass, config, see, apis)\n return True\n\n\ndef _utc_from_ts(val):\n try:\n return dt_util.utc_from_timestamp(float(val))\n except (TypeError, ValueError):\n return None\n\n\ndef _dt_attr_from_ts(timestamp):\n utc = _utc_from_ts(timestamp)\n if utc:\n return utc\n return STATE_UNKNOWN\n\n\ndef _bool_attr_from_int(val):\n try:\n return bool(int(val))\n except (TypeError, ValueError):\n return STATE_UNKNOWN\n\n\nclass Life360Scanner:\n \"\"\"Life360 device scanner.\"\"\"\n\n def __init__(self, hass, config, see, apis):\n \"\"\"Initialize Life360Scanner.\"\"\"\n self._hass = hass\n self._see = see\n self._max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)\n self._max_update_wait = config.get(CONF_MAX_UPDATE_WAIT)\n self._prefix = config[CONF_PREFIX]\n self._circles_filter = config.get(CONF_CIRCLES)\n self._members_filter = config.get(CONF_MEMBERS)\n self._driving_speed = config.get(CONF_DRIVING_SPEED)\n self._show_as_state = config[CONF_SHOW_AS_STATE]\n self._apis = apis\n self._errs = {}\n self._error_threshold = config[CONF_ERROR_THRESHOLD]\n self._warning_threshold = config[CONF_WARNING_THRESHOLD]\n self._max_errs = self._error_threshold + 1\n self._dev_data = {}\n self._circles_logged = set()\n self._members_logged = set()\n\n _dump_filter(self._circles_filter, \"Circles\")\n _dump_filter(self._members_filter, \"device IDs\", self._dev_id)\n\n self._started = dt_util.utcnow()\n self._update_life360()\n track_time_interval(\n self._hass, self._update_life360, config[CONF_SCAN_INTERVAL]\n )\n\n def _dev_id(self, name):\n return self._prefix + name\n\n def _ok(self, key):\n if self._errs.get(key, 0) >= self._max_errs:\n _LOGGER.error(\"%s: OK again\", key)\n self._errs[key] = 0\n\n def _err(self, key, err_msg):\n _errs = self._errs.get(key, 0)\n if _errs < self._max_errs:\n self._errs[key] = _errs = _errs + 1\n msg = f\"{key}: {err_msg}\"\n if _errs >= self._error_threshold:\n if _errs == self._max_errs:\n msg = f\"Suppressing further errors until OK: {msg}\"\n _LOGGER.error(msg)\n elif _errs >= self._warning_threshold:\n _LOGGER.warning(msg)\n\n def _exc(self, key, exc):\n self._err(key, _exc_msg(exc))\n\n def _prev_seen(self, dev_id, last_seen):\n prev_seen, reported = self._dev_data.get(dev_id, (None, False))\n\n if self._max_update_wait:\n now = dt_util.utcnow()\n most_recent_update = last_seen or prev_seen or self._started\n overdue = now - most_recent_update > self._max_update_wait\n if overdue and not reported and now - self._started > EVENT_DELAY:\n self._hass.bus.fire(\n EVENT_UPDATE_OVERDUE,\n {ATTR_ENTITY_ID: f\"{DEVICE_TRACKER_DOMAIN}.{dev_id}\"},\n )\n reported = True\n elif not overdue and reported:\n self._hass.bus.fire(\n EVENT_UPDATE_RESTORED,\n {\n ATTR_ENTITY_ID: f\"{DEVICE_TRACKER_DOMAIN}.{dev_id}\",\n ATTR_WAIT: str(last_seen - (prev_seen or self._started)).split(\n 
\".\", maxsplit=1\n )[0],\n },\n )\n reported = False\n\n # Don't remember last_seen unless it's really an update.\n if not last_seen or prev_seen and last_seen <= prev_seen:\n last_seen = prev_seen\n self._dev_data[dev_id] = last_seen, reported\n\n return prev_seen\n\n def _update_member(self, member, dev_id):\n loc = member.get(\"location\")\n try:\n last_seen = _utc_from_ts(loc.get(\"timestamp\"))\n except AttributeError:\n last_seen = None\n prev_seen = self._prev_seen(dev_id, last_seen)\n\n if not loc:\n err_msg = member[\"issues\"][\"title\"]\n if err_msg:\n if member[\"issues\"][\"dialog\"]:\n err_msg += f\": {member['issues']['dialog']}\"\n else:\n err_msg = \"Location information missing\"\n self._err(dev_id, err_msg)\n return\n\n # Only update when we truly have an update.\n if not last_seen:\n _LOGGER.warning(\"%s: Ignoring update because timestamp is missing\", dev_id)\n return\n if prev_seen and last_seen < prev_seen:\n _LOGGER.warning(\n \"%s: Ignoring update because timestamp is older than last timestamp\",\n dev_id,\n )\n _LOGGER.debug(\"%s < %s\", last_seen, prev_seen)\n return\n if last_seen == prev_seen:\n return\n\n lat = loc.get(\"latitude\")\n lon = loc.get(\"longitude\")\n gps_accuracy = loc.get(\"accuracy\")\n try:\n lat = float(lat)\n lon = float(lon)\n # Life360 reports accuracy in feet, but Device Tracker expects\n # gps_accuracy in meters.\n gps_accuracy = round(\n convert(float(gps_accuracy), LENGTH_FEET, LENGTH_METERS)\n )\n except (TypeError, ValueError):\n self._err(dev_id, f\"GPS data invalid: {lat}, {lon}, {gps_accuracy}\")\n return\n\n self._ok(dev_id)\n\n msg = f\"Updating {dev_id}\"\n if prev_seen:\n msg += f\"; Time since last update: {last_seen - prev_seen}\"\n _LOGGER.debug(msg)\n\n if self._max_gps_accuracy is not None and gps_accuracy > self._max_gps_accuracy:\n _LOGGER.warning(\n \"%s: Ignoring update because expected GPS \"\n \"accuracy (%.0f) is not met: %.0f\",\n dev_id,\n self._max_gps_accuracy,\n gps_accuracy,\n )\n return\n\n # Get raw attribute data, converting empty strings to None.\n place = loc.get(\"name\") or None\n address1 = loc.get(\"address1\") or None\n address2 = loc.get(\"address2\") or None\n if address1 and address2:\n address = \", \".join([address1, address2])\n else:\n address = address1 or address2\n raw_speed = loc.get(\"speed\") or None\n driving = _bool_attr_from_int(loc.get(\"isDriving\"))\n moving = _bool_attr_from_int(loc.get(\"inTransit\"))\n try:\n battery = int(float(loc.get(\"battery\")))\n except (TypeError, ValueError):\n battery = None\n\n # Try to convert raw speed into real speed.\n try:\n speed = float(raw_speed) * SPEED_FACTOR_MPH\n if self._hass.config.units.is_metric:\n speed = convert(speed, LENGTH_MILES, LENGTH_KILOMETERS)\n speed = max(0, round(speed))\n except (TypeError, ValueError):\n speed = STATE_UNKNOWN\n\n # Make driving attribute True if it isn't and we can derive that it\n # should be True from other data.\n if (\n driving in (STATE_UNKNOWN, False)\n and self._driving_speed is not None\n and speed != STATE_UNKNOWN\n ):\n driving = speed >= self._driving_speed\n\n attrs = {\n ATTR_ADDRESS: address,\n ATTR_AT_LOC_SINCE: _dt_attr_from_ts(loc.get(\"since\")),\n ATTR_BATTERY_CHARGING: _bool_attr_from_int(loc.get(\"charge\")),\n ATTR_DRIVING: driving,\n ATTR_LAST_SEEN: last_seen,\n ATTR_MOVING: moving,\n ATTR_PLACE: place,\n ATTR_RAW_SPEED: raw_speed,\n ATTR_SPEED: speed,\n ATTR_WIFI_ON: _bool_attr_from_int(loc.get(\"wifiState\")),\n }\n\n # If user wants driving or moving to be shown as state, and 
current\n # location is not in a HA zone, then set location name accordingly.\n loc_name = None\n active_zone = run_callback_threadsafe(\n self._hass.loop, async_active_zone, self._hass, lat, lon, gps_accuracy\n ).result()\n if not active_zone:\n if SHOW_DRIVING in self._show_as_state and driving is True:\n loc_name = SHOW_DRIVING\n elif SHOW_MOVING in self._show_as_state and moving is True:\n loc_name = SHOW_MOVING\n\n self._see(\n dev_id=dev_id,\n location_name=loc_name,\n gps=(lat, lon),\n gps_accuracy=gps_accuracy,\n battery=battery,\n attributes=attrs,\n picture=member.get(\"avatar\"),\n )\n\n def _update_members(self, members, members_updated):\n for member in members:\n member_id = member[\"id\"]\n if member_id in members_updated:\n continue\n err_key = \"Member data\"\n try:\n first = member.get(\"firstName\")\n last = member.get(\"lastName\")\n if first and last:\n full_name = \" \".join([first, last])\n else:\n full_name = first or last\n slug_name = cv.slugify(full_name)\n include_member = _include_name(self._members_filter, slug_name)\n dev_id = self._dev_id(slug_name)\n if member_id not in self._members_logged:\n self._members_logged.add(member_id)\n _LOGGER.debug(\n \"%s -> %s: will%s be tracked, id=%s\",\n full_name,\n dev_id,\n \"\" if include_member else \" NOT\",\n member_id,\n )\n sharing = bool(int(member[\"features\"][\"shareLocation\"]))\n except (KeyError, TypeError, ValueError, vol.Invalid):\n self._err(err_key, member)\n continue\n self._ok(err_key)\n\n if include_member and sharing:\n members_updated.append(member_id)\n self._update_member(member, dev_id)\n\n def _update_life360(self, now=None):\n circles_updated = []\n members_updated = []\n\n for api in self._apis.values():\n err_key = \"get_circles\"\n try:\n circles = api.get_circles()\n except Life360Error as exc:\n self._exc(err_key, exc)\n continue\n self._ok(err_key)\n\n for circle in circles:\n circle_id = circle[\"id\"]\n if circle_id in circles_updated:\n continue\n circles_updated.append(circle_id)\n circle_name = circle[\"name\"]\n incl_circle = _include_name(self._circles_filter, circle_name)\n if circle_id not in self._circles_logged:\n self._circles_logged.add(circle_id)\n _LOGGER.debug(\n \"%s Circle: will%s be included, id=%s\",\n circle_name,\n \"\" if incl_circle else \" NOT\",\n circle_id,\n )\n try:\n places = api.get_circle_places(circle_id)\n place_data = \"Circle's Places:\"\n for place in places:\n place_data += f\"\\n- name: {place['name']}\"\n place_data += f\"\\n latitude: {place['latitude']}\"\n place_data += f\"\\n longitude: {place['longitude']}\"\n place_data += f\"\\n radius: {place['radius']}\"\n if not places:\n place_data += \" None\"\n _LOGGER.debug(place_data)\n except (Life360Error, KeyError):\n pass\n if incl_circle:\n err_key = f'get_circle_members \"{circle_name}\"'\n try:\n members = api.get_circle_members(circle_id)\n except Life360Error as exc:\n self._exc(err_key, exc)\n continue\n self._ok(err_key)\n\n self._update_members(members, members_updated)\n","sub_path":"homeassistant/components/life360/device_tracker.py","file_name":"device_tracker.py","file_ext":"py","file_size_in_byte":14622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"639625684","text":"# Python Cropper Tool\nimport os\nimport csv\nimport cv2\nimport math\nimport argparse\nimport numpy as np\nimport multiprocessing as mp\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n 'src', help='carpeta donde se encuentran las imagenes de 
origen', type=str)\nparser.add_argument('--max-cpu', '-m', metavar='M', action='store', dest='maxcpu',\n help='numero maximo de procesos permitidos', type=int, default=mp.cpu_count())\nparser.add_argument('--quality', '-q', metavar='Q', action='store',\n dest='quality', help='calidad de los recortes', type=str, default='')\nparser.add_argument('--tag', '-t', metavar='T', action='store',\n dest='tag', help='etiquetas de los recortes', type=str, default='')\nargs = parser.parse_args()\n\nsource_dir = os.path.abspath(args.src)\nsource_name = os.path.basename(source_dir)\nparent_dir = os.path.abspath(os.path.join(source_dir, os.pardir))\n\noutput_dir = parent_dir + '/' + source_name + '_recortes'\ncsv_dir = parent_dir + '/' + source_name + '_csv'\n\n\ndef csvProcesser(file_uri):\n with open(file_uri, 'rt') as csvfile:\n csvdata = csv.reader(csvfile, delimiter=',')\n crop_count = 1\n img_uri = file_uri[:-4].replace('_csv', '')\n\n slashpos = file_uri.rfind('/')\n stol_dot = file_uri[:-4].rfind('.')\n img_name = file_uri[slashpos+1:stol_dot]\n img_extension = file_uri[stol_dot:-4]\n\n img = cv2.imread(img_uri)\n\n # rotate image if necessary\n rotations = 0\n for row in csvdata:\n if row[0][0] == '#':\n rotations = int(int(row[0][1:])/90)\n for _ in range(rotations):\n img = cv2.flip(cv2.transpose(img), 1)\n continue\n # skip crop if it doesn't match specified tags and/or quality (if any)\n if args.tag != '':\n if args.tag.upper() != row[4].upper():\n return\n if args.quality != '':\n if args.quality.upper() != row[5].upper():\n return\n x1 = int(row[0])\n y1 = int(row[1])\n x2 = int(row[2])\n y2 = int(row[3])\n # crop image, save, close csv\n w = x2 - x1\n h = y2 - y1\n\n if w > h:\n padding = (w - h)/2\n upper_padding = math.ceil(padding)\n lower_padding = math.floor(padding)\n target_crop = cv2.copyMakeBorder(\n img, upper_padding, lower_padding, 0, 0, cv2.BORDER_CONSTANT, None, [0, 0, 0])\n elif h > w:\n padding = (h - w)/2\n left_padding = math.ceil(padding)\n right_padding = math.floor(padding)\n target_crop = cv2.copyMakeBorder(\n img, 0, 0, left_padding, right_padding, cv2.BORDER_CONSTANT, None, [0, 0, 0])\n else:\n target_crop = img[y1:y2, x1:x2]\n\n dest_file = output_dir+'/'+img_name+'_' + \\\n str(crop_count).zfill(3)+img_extension\n crop_count += 1\n print('Guardando %s' % (dest_file))\n cv2.imwrite(dest_file, target_crop)\n del target_crop\n csvfile.close()\n\n\ndef main():\n if args.maxcpu <= 0:\n print('Numero de CPUs no valido.')\n quit()\n # loop over files in source dir, put CSV files in list\n csv_bag = []\n for file in os.listdir(csv_dir):\n filename = os.fsdecode(file)\n if(filename.endswith('.csv')):\n csv_bag.append(csv_dir + '/' + filename)\n\n # if list is empty, exit\n list_size = len(csv_bag)\n if(list_size > 0):\n print('Se encontr%s %i archivo%s CSV.' 
% ('aron' if list_size > 1 else 'o',\n list_size, 's' if list_size > 1 else ''))\n else:\n print('No se encontraron archivos para procesar.')\n quit()\n\n # create output folder if it doesn't exist\n os.makedirs(os.path.dirname(output_dir + '/'), exist_ok=True)\n\n cpu_count = list_size if list_size <= args.maxcpu else args.maxcpu\n if cpu_count > mp.cpu_count():\n cpu_count = mp.cpu_count()\n\n print('Procesos concurrentes: %i' % (cpu_count))\n # if list is not empty, spawn process pool and begin\n with mp.Pool(cpu_count) as pool:\n pool.map(csvProcesser, csv_bag)\n print('Listo.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cropper.py","file_name":"cropper.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"557041659","text":"# Download dna damage png and pass through a high pass filter.\n# DNA pngs are 16 bit. We loose pixel bit depth. SOme cells are too bright.\n\nimport sys\nimport os\nimport pdb\nimport girder_client\nimport json\nimport urllib2\nimport numpy as np\nimport scipy\nimport cv2\nimport math\n\n\ndef ensure_path(dir_path):\n parent, name = os.path.split(dir_path)\n if name == '':\n return\n ensure_path(parent)\n ensure_dir(dir_path)\n\ndef ensure_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\n \n# out_path is the directory on disk to save the images.\n# It must be created before this method is called.\n# item_obj is an object returned by the girder api get/item call.\ndef dump_item(gc, item_obj, out_path):\n item_id = item_obj['_id']\n # change the item name into a file name.\n item_name = item_obj['name']\n item_name = item_name.split('_')[-1]\n\n # lets try to save a sa png image.\n item_root_name = item_name.split('.')[0]\n item_name = '%s.png'%item_root_name\n \n file_path_root = os.path.join(out_path, item_name)\n\n # Get the file.\n file_resp = gc.get(\"item/%s/files\"%item_id)\n file_id = file_resp[0]['_id']\n\n tile_url = gc.urlBase+\"file/%s/download\"%file_id\n req = urllib2.Request(tile_url)\n req.add_header('Girder-Token', gc.token)\n resp = urllib2.urlopen(req)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n #image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n pdb.set_trace()\n image = cv2.imdecode(image, 2)\n print(item_name)\n print(image.shape)\n\n # now save the image as a png.\n #scipy.misc.imsave(path+'.png', region)\n cv2.imwrite(file_path_root, image)\n\n\n\n\n\n\n \n# parent_path directory must exist\ndef dump_folder(gc, folder_obj, out_path):\n folder_id = folder_obj['_id']\n\n # dump all items\n items_resp = gc.get(\"item?folderId=%s&limit=5000\"%folder_id)\n for item_obj in items_resp:\n dump_item(gc, item_obj, out_path)\n\n\n\ndef print_usage():\n print(\"usage:\")\n print(\"python %s serverName girder_id\"%sys.argv[0])\n\n\nif __name__ == '__main__':\n\n keys = {'lemon':'', \\\n 'wsi2': ''}\n urls = {'lemon':'http://lemon/api/v1', \\\n 'wsi2': 'http://wsi2.slide-atlas.org:8080/api/v1'}\n\n if len(sys.argv) != 3:\n print_usage()\n exit()\n\n server_name = sys.argv[1]\n if not server_name in keys:\n print(\"Unknown server %s\"%server_name)\n exit()\n\n gc = girder_client.GirderClient(apiUrl=urls[server_name])\n gc.authenticate('law12019', apiKey=keys[server_name])\n\n # can be a folder id or an item id.\n girder_id = sys.argv[2]\n #out_path = sys.argv[3]\n out_path = os.path.realpath('./')\n\n ensure_dir(out_path)\n try:\n # Get the folder object\n folder_obj = gc.get(\"folder/%s\"%girder_id)\n # 
now save to disk\n dump_folder(gc, folder_obj, out_path)\n except Exception as inst:\n print(\"not a folder\")\n\n #try:\n # Get the item object\n #item_obj = gc.get(\"item/%s\"%girder_id)\n # now save to disk\n #dump_item(gc, item_obj, out_path)\n #except Exception as inst:\n #print(\"not an item\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/download_salsa_images.py","file_name":"download_salsa_images.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577811825","text":"import inspect\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal, DecimalException\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any\nfrom uuid import UUID\n\nfrom . import errors\nfrom .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time\nfrom .utils import change_exception, display_as_type\n\nNoneType = type(None)\n\n\ndef not_none_validator(v):\n if v is None:\n raise errors.NoneIsNotAllowedError()\n return v\n\n\ndef str_validator(v) -> str:\n if isinstance(v, (str, NoneType)):\n return v\n elif isinstance(v, (bytes, bytearray)):\n return v.decode()\n elif isinstance(v, (float, int, Decimal)):\n # is there anything else we want to add here? If you think so, create an issue.\n return str(v)\n else:\n raise errors.StrError()\n\n\ndef bytes_validator(v) -> bytes:\n if isinstance(v, bytes):\n return v\n elif isinstance(v, bytearray):\n return bytes(v)\n elif isinstance(v, str):\n return v.encode()\n elif isinstance(v, (float, int, Decimal)):\n return str(v).encode()\n else:\n raise errors.BytesError()\n\n\nBOOL_STRINGS = {\n '1',\n 'TRUE',\n 'ON',\n 'YES',\n}\n\n\ndef bool_validator(v) -> bool:\n if isinstance(v, bool):\n return v\n if isinstance(v, bytes):\n v = v.decode()\n if isinstance(v, str):\n return v.upper() in BOOL_STRINGS\n return bool(v)\n\n\ndef int_validator(v) -> int:\n if isinstance(v, int):\n return v\n\n with change_exception(errors.IntegerError, TypeError, ValueError):\n return int(v)\n\n\ndef float_validator(v) -> float:\n if isinstance(v, float):\n return v\n\n with change_exception(errors.FloatError, TypeError, ValueError):\n return float(v)\n\n\ndef number_size_validator(v, field, config, **kwargs):\n if field.type_.gt is not None and not v > field.type_.gt:\n raise errors.NumberNotGtError(limit_value=field.type_.gt)\n elif field.type_.ge is not None and not v >= field.type_.ge:\n raise errors.NumberNotGeError(limit_value=field.type_.ge)\n\n if field.type_.lt is not None and not v < field.type_.lt:\n raise errors.NumberNotLtError(limit_value=field.type_.lt)\n if field.type_.le is not None and not v <= field.type_.le:\n raise errors.NumberNotLeError(limit_value=field.type_.le)\n\n return v\n\n\ndef anystr_length_validator(v, field, config, **kwargs):\n v_len = len(v)\n\n min_length = getattr(field.type_, 'min_length', config.min_anystr_length)\n if min_length is not None and v_len < min_length:\n raise errors.AnyStrMinLengthError(limit_value=min_length)\n\n max_length = getattr(field.type_, 'max_length', config.max_anystr_length)\n if max_length is not None and v_len > max_length:\n raise errors.AnyStrMaxLengthError(limit_value=max_length)\n\n return v\n\n\ndef anystr_strip_whitespace(v, field, config, **kwargs):\n strip_whitespace = getattr(field.type_, 'strip_whitespace', config.anystr_strip_whitespace)\n if strip_whitespace:\n v = v.strip()\n\n return v\n\n\ndef 
ordered_dict_validator(v) -> OrderedDict:\n if isinstance(v, OrderedDict):\n return v\n\n with change_exception(errors.DictError, TypeError, ValueError):\n return OrderedDict(v)\n\n\ndef dict_validator(v) -> dict:\n if isinstance(v, dict):\n return v\n\n with change_exception(errors.DictError, TypeError, ValueError):\n return dict(v)\n\n\ndef list_validator(v) -> list:\n if isinstance(v, list):\n return v\n elif isinstance(v, (tuple, set)) or inspect.isgenerator(v):\n return list(v)\n else:\n raise errors.ListError()\n\n\ndef tuple_validator(v) -> tuple:\n if isinstance(v, tuple):\n return v\n elif isinstance(v, (list, set)) or inspect.isgenerator(v):\n return tuple(v)\n else:\n raise errors.TupleError()\n\n\ndef set_validator(v) -> set:\n if isinstance(v, set):\n return v\n elif isinstance(v, (list, tuple)) or inspect.isgenerator(v):\n return set(v)\n else:\n raise errors.SetError()\n\n\ndef enum_validator(v, field, config, **kwargs) -> Enum:\n with change_exception(errors.EnumError, ValueError):\n enum_v = field.type_(v)\n\n return enum_v.value if config.use_enum_values else enum_v\n\n\ndef uuid_validator(v, field, config, **kwargs) -> UUID:\n with change_exception(errors.UUIDError, ValueError):\n if isinstance(v, str):\n v = UUID(v)\n elif isinstance(v, (bytes, bytearray)):\n v = UUID(v.decode())\n\n if not isinstance(v, UUID):\n raise errors.UUIDError()\n\n required_version = getattr(field.type_, '_required_version', None)\n if required_version and v.version != required_version:\n raise errors.UUIDVersionError(required_version=required_version)\n\n return v\n\n\ndef decimal_validator(v) -> Decimal:\n if isinstance(v, Decimal):\n return v\n elif isinstance(v, (bytes, bytearray)):\n v = v.decode()\n\n v = str(v).strip()\n\n with change_exception(errors.DecimalError, DecimalException):\n v = Decimal(v)\n\n if not v.is_finite():\n raise errors.DecimalIsNotFiniteError()\n\n return v\n\n\ndef path_validator(v) -> Path:\n if isinstance(v, Path):\n return v\n\n with change_exception(errors.PathError, TypeError):\n return Path(v)\n\n\ndef path_exists_validator(v) -> Path:\n if not v.exists():\n raise errors.PathNotExistsError(path=v)\n\n return v\n\n\n# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same\n_VALIDATORS = [\n (Enum, [enum_validator]),\n\n (str, [not_none_validator, str_validator, anystr_strip_whitespace, anystr_length_validator]),\n (bytes, [not_none_validator, bytes_validator, anystr_strip_whitespace, anystr_length_validator]),\n\n (bool, [bool_validator]),\n (int, [int_validator]),\n (float, [float_validator]),\n\n (Path, [path_validator]),\n\n (datetime, [parse_datetime]),\n (date, [parse_date]),\n (time, [parse_time]),\n (timedelta, [parse_duration]),\n\n (OrderedDict, [ordered_dict_validator]),\n (dict, [dict_validator]),\n (list, [list_validator]),\n (tuple, [tuple_validator]),\n (set, [set_validator]),\n (UUID, [not_none_validator, uuid_validator]),\n (Decimal, [not_none_validator, decimal_validator]),\n]\n\n\ndef find_validators(type_):\n if type_ is Any:\n return []\n for val_type, validators in _VALIDATORS:\n try:\n if issubclass(type_, val_type):\n return validators\n except TypeError as e:\n raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e\n raise errors.ConfigError(f'no validator found for 
{type_}')\n","sub_path":"pydantic/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":6792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"482065026","text":"import torch\nfrom torch.utils.data import Dataset\nfrom Config import Config\nimport re\nimport gensim\nimport numpy as np\n\n\nclass My_Dataset(Dataset):\n def __init__(self, data, label):\n self.data = data\n # 考虑到测试机没有label,但是还要用这个数据结构\n if label is not None:\n self.label = label\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n data = torch.from_numpy(self.data[index])\n if self.label is not None:\n label = torch.from_numpy(self.label[index])\n return data, label\n else:\n return data\n\n\ndef get_stopwords_list():\n # 创建停用词表\n stopwords_list = [line.strip() for line in open(Config.stopwords_path, encoding='utf-8').readlines()]\n return stopwords_list\n\n\ndef build_word2id(save_path):\n \"\"\"\n :param save_path: word2id的保存地址\n :return: word2id矩阵\n\n strip(): 只能删除开头或是结尾的字符或是字符串。\n split(): 分割字符串\n \"\"\"\n stopwords = get_stopwords_list()\n word2id = {'_PAD_': 0}\n paths = [Config.train_path, Config.val_path]\n\n for path in paths:\n # 打开文件\n with open(path, encoding='utf-8') as f:\n # 读取每一行\n for line in f.readlines():\n # 去除开头结尾的空字符串,并且根据空格分割成一个个词\n words = line.strip().split()\n available_words = []\n # 遍历所有词\n for word in words:\n # 如果不是停用词,英文单词和制表符,就是可用词\n if word not in stopwords and len(re.findall('[a-zA-Z]+', word)) == 0 and word != '\\t':\n available_words.append(word)\n # 遍历所有可用词,加入word2id字典\n for word in available_words:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n # 把word2id存到文件中\n with open(save_path, 'w', encoding='utf-8') as f:\n for word in word2id:\n f.write(word + '\\t' + str(word2id[word]) + '\\n')\n return word2id\n\n\ndef build_word2vec(pretrained_word2vec_path, word2id, save_word2vec_path=None):\n \"\"\"\n 构建word2vec\n :param save_word2vec_path: 保存word2vec\n :param pretrained_word2vec_path: 预训练的word2vec文件\n :param word2id: word2id\n :return: word2vec size:[单词数量,每个词的维度]\n \"\"\"\n # 所有单词数量\n n_word = max(word2id.values()) + 1\n # 加载预训练的word2vec\n pretrained_word2vec = gensim.models.KeyedVectors.load_word2vec_format(fname=pretrained_word2vec_path, binary=True)\n # 初始化word2vec,范围[-1, 1], size[n_word, pretrained_word2vec.vector_size],即[单词数量,每个词的维度]\n word2vec = np.array(np.random.uniform(low=-1., high=1., size=[n_word, pretrained_word2vec.vector_size]))\n # 遍历所有单词\n for word in word2id.keys():\n # 如果预训练文件中有对应的单词,就将对应单词的向量赋值给word2vec\n try:\n word2vec[word2id[word]] = pretrained_word2vec[word]\n # 如果没有,就用初始化的向量\n except KeyError:\n pass\n # 如果目录不为空,保存word2vec\n if save_word2vec_path:\n with open(save_word2vec_path, 'w', encoding='utf-8') as f:\n for vec in word2vec:\n # 把List[int] -> List[str], 因为join函数的参数类型是Iterable[str], List[str]符合要求\n vec = [str(v) for v in vec]\n f.write(' '.join(vec) + '\\n')\n return word2vec\n\n\ndef text2array(path, word2id, seq_len, no_label):\n \"\"\"\n :param no_label: 有无label\n :param path: 文件地址\n :param word2id: word2id\n :param seq_len: 固定句子长度\n :return: array size:[句子个数, 句子固定长度], label size:[句子个数, 1]\n \"\"\"\n i = 0\n label_array = []\n # 获取句子个数\n n_sentence = len(open(path, encoding='utf-8').readlines())\n # 初始化句子矩阵,[句子个数, 句子固定长度]\n sentence_array = np.zeros(shape=(n_sentence, seq_len))\n with open(path, encoding='utf-8') as f:\n for line in f.readlines():\n l_s = line.strip().split()\n sentence = l_s[1:]\n # 将句子中的单词变为对应的id,若单词找不到则为0\n 
new_sentence = [word2id.get(word, 0) for word in sentence]\n new_sentence_np = np.array(new_sentence).reshape(1, -1)\n # 如果句子长度小于固定长度,将句子右对齐,左边用0补上; 否则截断\n if np.size(new_sentence_np, 1) < seq_len:\n sentence_array[i, seq_len - np.size(new_sentence_np, 1):] = new_sentence_np[0, :]\n else:\n sentence_array[i, 0:seq_len] = new_sentence_np[0, 0:seq_len]\n i = i + 1\n if not no_label:\n label_array.append(int(l_s[0]))\n if no_label:\n return np.array(sentence_array)\n return np.array(sentence_array), np.array([label_array]).T\n\n\n# def prepare_data(word2id, seq_len, train_path, test_path, val_path):\n# \"\"\"\n# :param word2id:\n# :param seq_len:\n# :param train_path:\n# :param test_path:\n# :param val_path:\n# :return: array size:[句子个数, 句子固定长度], label size:[句子个数, 1]\n# \"\"\"\n# # text -> array & label\n# train_array, train_label = text2array(train_path, word2id, seq_len, False)\n# val_array, val_label = text2array(val_path, word2id, seq_len, False)\n# test_array, test_label = text2array(test_path, word2id, seq_len, False)\n# return train_array, train_label, val_array, val_label, test_array, test_label\n\n\nif __name__ == '__main__':\n print('Data_Processing main')\n\n","sub_path":"Data_Procesing.py","file_name":"Data_Procesing.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"68623580","text":"\"\"\"\nhttps://www.pramp.com/question/XdMZJgZoAnFXqwjJwnBZ\n\"\"\"\n\nclass MinHeap:\n def __init__(self):\n self.arr = []\n\n def __str__(self):\n return self.arr.__str__()\n\n def insert(self, item):\n current = len(self.arr)\n self.arr.append(item)\n\n while current > 0:\n parent = (current - 1) // 2\n if self.arr[parent] > self.arr[current]:\n self.arr[parent], self.arr[current] = self.arr[current], self.arr[parent]\n current = parent\n else:\n break\n\n def extract(self):\n if not self.arr:\n return None\n\n item = self.arr[0]\n if len(self.arr) == 1:\n self.arr.pop()\n return item\n\n self.arr[0] = self.arr.pop()\n\n current = 0\n\n while True:\n left_child = current * 2 + 1\n right_child = left_child + 1\n\n if left_child >= len(self.arr):\n break\n\n if left_child == len(self.arr) - 1:\n min_child = left_child\n else:\n min_child = left_child if self.arr[left_child] < self.arr[right_child] else right_child\n\n if self.arr[current] > self.arr[min_child]:\n self.arr[current], self.arr[min_child] = self.arr[min_child], self.arr[current]\n current = min_child\n else:\n break\n\n return item\n\n# test_arr = [6, 5, 3, 1, 8, 7, 2, 4]\n# min_heap = MinHeap()\n# for it in test_arr:\n# min_heap.insert(it)\n#\n# print(min_heap)\n#\n# for _ in range(len(test_arr) - 1):\n# it = min_heap.extract()\n# print(\">>> \" + str(it))\n\ndef sort(arr, margin):\n \"\"\"\n Using a heap sort\n O(n * log(k))\n \"\"\"\n if not (arr and margin):\n return arr\n\n heap = MinHeap()\n length = len(arr)\n for i in range(margin + 1):\n heap.insert(arr[i])\n for i in range(margin + 1, length):\n arr[i - margin - 1] = heap.extract()\n heap.insert(arr[i])\n for i in range(margin + 1):\n arr[length - margin - 1 + i] = heap.extract()\n return arr\n\n\ntest_arr = [3, 1, 2, 5, 4, 6, 9, 8, 7]\nsort(test_arr, 2)\nprint(test_arr)\n","sub_path":"pramp/k-messed-array-sort/k-messed-array-sort2.py","file_name":"k-messed-array-sort2.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"64770906","text":"class Student():\n licznik=100000\n 
kierunek=\"Informatyka stosowana\"\n uniwersytet=\"UEK Kraków\"\n \n def __init__(self,imię):\n self.imię=imię\n self.album=Student.licznik\n Student.licznik+=1\n \n def __str__(self):\n return f'{self.imię}, {Student.licznik}, {Student.kierunek}, {Student.uniwersytet}'\n\nstudent1=Student(\"Jan Kowalski\")\nprint(student1)\nstudent2=Student(\"Adam Nowak\")\nprint(student2)\nstudent3=Student(\"Ala Kot\")\nprint(student3)","sub_path":"07-ObjectOrientedProgramming/zadanie 7.py","file_name":"zadanie 7.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"651872204","text":"from pom_pages.hotels_page import HotelPage\nfrom pom_pages.hotels_search_page import HotelSearchPage\n\n\ndef test_search_by_hotel_name(browser):\n # Step 1\n hotel_page = HotelPage(browser)\n hotel_page.load(\"https://www.travelocity.com/Hotels\")\n\n # Step 2\n hotel_page.search_hotel()\n\n # Step 3\n hotel_search_page = HotelSearchPage(browser)\n hotel_search_page.first_result_from_listing()\n hotel_search_page.member_discount()\n","sub_path":"pom_tests/test_pom_ejercicio3.py","file_name":"test_pom_ejercicio3.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"374360003","text":"# -*- encoding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n# Copyright (C) OpenERP Venezuela ().\n# All Rights Reserved\n# Credits######################################################\n# Coded by: nhomar@openerp.com.ve,\n# Planified by: Nhomar Hernandez\n# Finance by: Helados Gilda, C.A. http://heladosgilda.com.ve\n# Audited by: Humberto Arocha humberto@openerp.com.ve\n#############################################################################\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\n\nclass mrp_routing_workcenter(osv.Model):\n _inherit = \"mrp.routing.workcenter\"\n\n def _calcular(self, cr, uid, ids, field_name, arg, context):\n res = {}\n for i in self.browse(cr, uid, ids):\n cost = 0.00\n cost = i.hour_nbr * i.costo\n res[i.id] = cost\n return res\n\n _columns = {\n 'costo': fields.float('Costo Unitario', required=True),\n 'costo_total': fields.function(_calcular, method=True, type='float',\n string='Costo Total', store=False),\n }\n _defaults = {\n 'costo': lambda *a: 0.0,\n }\n","sub_path":"mrp_advance/mrp_routing_cost/mrp_routing.py","file_name":"mrp_routing.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"329033675","text":"from tkinter import Tk,Frame,Canvas,Entry,SUNKEN,Button,END\n\nfrom hangWords001c_120 import long_list as word_content\nfrom tkinter.simpledialog import askstring\nimport shelve, os, logging,re\n\nfrom tkinter.messagebox import showinfo, askokcancel\n\nclass Tk_Hangman(Frame):\n \"\"\"\n This is a spelling game .\n Each puzzle starts with 1500 points, wrong answers -250.\n Enter one character in the entry\n widget you can use either submit button or return (keyboard Enter).\n When the starts you have a row of empty boxes and the\n alphabet under it to keep track of your guesses.\n Saving the stats with a flat text file\n and the shelve module. \n\n \"\"\"\n def __init__(self, parent=None):\n self.parent= parent\n self.parent.title('HangMan')\n Frame.__init__(self, self.parent)\n self.pack(expand='yes',fill='both')\n self.canvas= Canvas(self)\n self.canvas.config(width= 1500, height= 900, bg='gray90')\n self.canvas.pack(expand='yes', fill='both')\n self.point_total= int\n self.xxx= 0\n self.guess= []\n fieldname= ('wins','losses','points')\n self.hangMan= [self.make_head, self.make_neck, self.make_body, self.make_l_arm, \n self.make_r_arm, self.make_r_leg, self.make_l_leg,]\n \n self.picked_letters= []\n self.game_Count= int\n self.gamecount= int\n self.points= int\n self.wins= int\n self.losses= int\n self.pointz= int\n self.point_total= int\n self.shelve_name= 'hangman_001c'\n self.text_flat= 'hangman_001c.txt'\n self.letters= ['a','b','c','d','e','f','g','h',\n 'i','j','k','l','m','n',\n 'o','p','q','r','s','t','u','v',\n 'w','x','y','z',]\n self.build_puzzle()\n \n def setup_file(self):\n if os.path.exists(self.text_flat):\n self.read_game_count()\n self.read_stats()\n else:\n self.game_Count= 0\n self.gamecount= 0\n self.wins= 0\n self.losses= 0\n self.pointz= 0\n self.shelve_setup()\n def read_stats(self):\n db= shelve.open(self.shelve_name, 'r')\n self.gamecount= db['stats']['gamecount']\n self.wins= db['stats']['wins']\n self.losses= db['stats']['losses']\n self.pointz= db['stats']['points']\n db.close()\n return self.gamecount, self.wins, self.losses\n\n def shelve_setup(self):\n stats= {'gamecount':0,'wins':0,'losses':0,'points':0}\n db= shelve.open(self.shelve_name, 'n')\n db['stats']= stats\n db.close()\n def points_total(self):\n db= shelve.open(self.shelve_name, writeback=True)\n copy= db['stats']['points']\n db['stats']['points']= self.points + copy\n db.close()\n return self.points\n def win_update(self):\n logging.debug('start in wins %d gamecount %d points %d'% (self.wins,\n self.gamecount,\n self.points))\n db= shelve.open(self.shelve_name, writeback=True)\n db['stats']['wins']= self.wins + 
1\n db['stats']['gamecount']= self.gamecount + 1\n db.close()\n self.points_total()\n return self.gamecount, self.wins\n def loss_update(self):\n self.points_total()\n db= shelve.open(self.shelve_name, writeback=True)\n db['stats']['losses']= self.losses + 1\n db['stats']['gamecount']= self.gamecount + 1\n db.close\n return self.losses, self.gamecount\n def read_game_count(self):\n f= open(self.text_flat) \n self.game_Count = int(f.read())\n return self.game_Count\n def write_game_count(self):\n f= open(self.text_flat, 'w+')\n f.write(str(self.game_Count))\n f.close() \n \n def make_head(self):\n head= [(217.5, 181.5), (197.5, 191.5), (185.5, 226.5),\n (183.5, 257.5), (195.5, 298.5), (216.5, 317.5),\n (235.5, 344.5), (256.5, 369.5), (300.5, 366.5),\n (319.5, 338.5), (321.5, 309.5), (317.5, 270.5),\n (309.5, 240.5), (294.5, 218.5), (271.5, 197.5),\n (246.5, 181.5)]\n \n self.canvas.create_polygon(head,fill= 'red', tag='head')\n \n self.points -= 250 \n self.show_points()\n def make_neck(self):\n neck= [(196.5, 312.5), (204.5, 351.5), (205.5, 383.5),\n (195.5, 410.5), (198.5, 422.5), (234.5, 452.5),\n (271.5, 470.5), (308.5, 458.5), (326.5, 433.5),\n (326.5, 412.5), (297.5, 411.5), (290.5, 427.5),\n (276.5, 380.5), (254.5, 382.5), (231.5, 351.5),\n (212.5, 324.5)]\n \n self.canvas.create_polygon(neck, fill='black',tag='neck')\n \n self.points -= 250 \n self.show_points()\n def make_l_arm(self):\n left= [(79.5, 491.5), (147.5, 509.5), (144.5, 545.5),\n (149.5, 591.5), (138.5, 616.5), (140.5, 640.5),\n (157.5, 666.5), (143.5, 679.5), (101.5, 685.5),\n (69.5, 677.5), (65.5, 661.5), (81.5, 648.5),\n (90.5, 627.5), (85.5, 593.5), (73.5, 533.5)]\n \n \n self.canvas.create_polygon(left, fill='black', tag='l_arm')\n \n self.points -= 250 \n self.show_points()\n def make_r_arm(self):\n r_arm= [(356.5, 511.5), (361.5, 532.5), (374.5, 564.5),\n (379.5, 598.5), (382.5, 635.5), (391.5, 652.5),\n (386.5, 671.5), (403.5, 675.5), (435.5, 673.5),\n (460.5, 653.5), (445.5, 629.5), (435.5, 622.5),\n (422.5, 612.5), (422.5, 574.5), (416.5, 539.5),\n (414.5, 508.5), (408.5, 483.5)]\n \n self.canvas.create_polygon(r_arm, fill='black',tag='r_arm')\n \n self.points -= 250 \n self.show_points()\n def make_body(self):\n body= [(335.5, 412.5), (368.5, 414.5), (399.5, 441.5),\n (407.5, 475.5), (363.5, 503.5), (363.5, 503.5),\n (348.5, 510.5), (341.5, 491.5), (339.5, 566.5),\n (344.5, 626.5), (277.5, 626.5), (215.5, 623.5),\n (184.5, 619.5), (163.5, 480.5), (149.5, 502.5),\n (79.5, 485.5), (76.5, 441.5), (92.5, 412.5),\n (152.5, 399.5), (190.5, 397.5), (189.5, 416.5),\n (194.5, 429.5), (231.5, 460.5), (272.5, 480.5),\n (311.5, 466.5), (332.5, 436.5)]\n \n self.canvas.create_polygon(body, fill='black',tag='body')\n \n self.points -= 250 \n self.show_points()\n def make_l_leg(self):\n left_leg= [(184.5, 630.5), (217.5, 636.5), (261.5, 633.5),\n (261.5, 633.5), (273.5, 706.5), (255.5, 761.5),\n (241.5, 804.5), (234.5, 841.5), (225.5, 879.5),\n (213.5, 887.5), (167.5, 888.5), (118.5, 889.5),\n (83.5, 885.5), (69.5, 856.5), (84.5, 838.5),\n (116.5, 838.5), (132.5, 844.5), (162.5, 814.5),\n (169.5, 766.5), (188.5, 675.5)]\n \n self.canvas.create_polygon(left_leg, fill='black', tag='l_leg')\n self.points -= 250 \n self.show_points()\n def make_r_leg(self):\n r_leg= [(267.5, 637.5), (278.5, 712.5), (325.5, 768.5),\n (342.5, 791.5), (359.5, 835.5), (364.5, 874.5),\n (368.5, 897.5), (428.5, 893.5), (463.5, 886.5),\n (485.5, 876.5), (480.5, 824.5), (442.5, 837.5),\n (412.5, 843.5), (387.5, 773.5), (361.5, 722.5),\n (344.5, 691.5), 
(344.5, 636.5)]\n \n self.canvas.create_polygon(r_leg, fill='black', tag='r_leg')\n \n self.points -= 250 \n self.show_points()\n \n def show_values(self):\n x=20\n y=130\n for l in self.letters:\n self.canvas.create_text(x,y, text=l, fill='black',\n font=('times',16,'bold'),\n tag='value%s'%l)\n x += 25\n return self.puzzle_number(x,y)\n\n def start_points(self):\n self.points= 1500\n return self.show_points()\n \n\n def show_points(self):\n self.delete_points()\n x=1100\n y=500\n txt= 'Total game points: %d'% self.points\n tally= self.canvas.create_text(x,y, text= txt, fill='black',\n font=('ms',16,'bold'), tag='tgps')\n return self.points\n def delete_points(self):\n return self.canvas.delete('tgps')\n \n \n\n def puzzle_number(self,x,y):\n self.widget= Entry(root, relief= SUNKEN,)\n \n self.widget.place(x= 960, y= 225)\n self.widget.focus()\n self.widget.bind('', (lambda event:self.ask_for_letter()))\n self.btn= Button(root, text='Submit', command= self.ask_for_letter)\n x += 370\n self.btn.place(x= 1200, y= 225)\n the_num= 'The puzzle number: %d'% self.game_Count\n self.canvas.create_text(x+100,y, text=the_num, fill='black',\n font=('ms', 18, 'bold'), tag='game_num')\n the_wins= 'The total wins: %d' % self.wins\n self.canvas.create_text(x+100,y+60, text= the_wins, fill='black',\n font=('ms', 18, 'bold'), tag='t_wins')\n the_loss= 'Total number of losses: %d'% self.losses\n self.canvas.create_text(x+100, y+160, text= the_loss, fill='black',\n font=('ms',18,'bold'), tag='t_loss')\n the_gme= 'Total shelve game count: %d' % self.gamecount\n self.canvas.create_text(x+100, y+230, text= the_gme, fill='black',\n font=('ms',18,'bold'),tag='shelve_c')\n the_pointz= 'Total points: %d'% self.pointz\n self.canvas.create_text(x+100, y+300, text=the_pointz, fill='purple',\n font=('ms',18,'bold'), tag='points')\n def get_word(self,p): \n self.res= []\n num= self.game_Count\n self.picked_word= p[num]\n for i in self.picked_word:\n self.res.append(i) \n return self.picked_word, self.res\n \n def build_puzzle(self):\n logging.debug('Building the puzzle')\n self.start_points()\n self.setup_file()\n self.show_values()\n self.get_word(word_content)\n x= 10 \n x1= 10\n y= 60\n y1= 60\n for j in self.picked_word:\n self.canvas.create_rectangle(x,x1,y,y1, fill='white', width=2,\n tag='puzzle%s'%j)\n x+= 50\n y+= 50\n logging.debug('tiles made, x: %d y: %d'%(x,y))\n \n\n def next_puzzle(self):\n self.canvas.delete('all')\n self.build_puzzle()\n\n def new_game(self): \n self.xxx= 0\n self.guess= []\n self.picked_letters= []\n new_g= askokcancel('New Game','Play again?')\n self.game_Count += 1\n if new_g:\n if self.game_Count > len(word_content)-1:\n self.game_Count= 0\n self.write_game_count()\n self.shelve_setup()\n self.next_puzzle()\n else:\n self.write_game_count()\n self.next_puzzle() \n else:\n if self.game_Count > len(word_content)-1:\n self.game_Count= 0\n self.write_game_count()\n self.shelve_setup()\n root.destroy()\n else:\n self.write_game_count()\n root.destroy() \n\n def ask_for_letter(self): \n findletter= self.widget.get()\n \n if findletter:\n self.widget.delete(0,END)\n if len(findletter) >1 or findletter in self.picked_letters:\n showinfo('Error', 'Pick again one letter or already picked')\n self.ask_for_letter()\n else:\n if re.findall(findletter, self.picked_word):\n tagGuess= 'puzzle'+ findletter\n t_g= self.canvas.find_withtag(tagGuess)\n for i in list(t_g):\n pos= self.canvas.coords(i)\n x1= (pos[0] + pos[2])/2\n y1= (pos[1] + pos[3])/2\n self.canvas.create_text(x1,y1, text= findletter, 
fill= 'red',\n font=('arial', 25, 'bold'), tag='guess')\n self.guess.append(findletter)\n val_d= 'value'+ (findletter)\n self.canvas.delete(val_d)\n self.picked_letters.append(findletter)\n if len(self.res) == len(self.guess):\n self.win_update()\n self.canvas.create_text(750,650, text='Win', fill='yellow',\n font=('times', 50, 'bold'), tag='win')\n self.canvas.create_text(800,800,text=self.picked_word, fill='green',\n font=('times',50,'bold'), tag='word')\n \n self.new_game() \n \n else:\n if self.xxx == 6:\n self.hangMan[self.xxx]()\n self.canvas.create_text(750,650, text='Loss', fill='orange',\n font=('times', 50, 'bold'), tag='loss')\n self.loss_update()\n logging.debug('The puzzle word: %s'% self.picked_word)\n self.canvas.create_text(800,800,text=self.picked_word, fill='green',\n font=('times',50,'bold'), tag='word')\n self.new_game()\n else:\n self.hangMan[self.xxx]()\n d_val= 'value'+ (findletter)\n self.canvas.delete(d_val)\n self.picked_letters.append(findletter)\n self.xxx+=1\n \n\nif __name__ == '__main__':\n root= Tk() \n root.title('Hangman_11b')\n Tk_Hangman(root)\n root.mainloop()\n\n","sub_path":"hangman001c.py","file_name":"hangman001c.py","file_ext":"py","file_size_in_byte":14567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"581730749","text":"import io\nimport cv2, threading, queue\nfrom ..models.cv.recognition import FaceRecognition\nfrom ..models.cv.detection import FaceDetection\nimport numpy as np\n\nclass VideoCapture:\n def __init__(self, name, width=640, height=480):\n self.cap = cv2.VideoCapture(name)\n self.cap.set(3, int(width))\n self.cap.set(4, int(height))\n print('camera resolution: ', self.cap.get(3), self.cap.get(4))\n # self.detector = FaceDetection()\n # self.predictor = FaceRecognition(['demo_face_model.hdf5'], 'demo_label_dict.hdf5')\n # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 256)\n # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 256)\n self.q = queue.Queue()\n t = threading.Thread(target=self._reader)\n t.daemon = True\n t.start()\n\n # read frames as soon as they are available, keeping only most recent one\n def _reader(self):\n while self.cap.isOpened:\n ret, frame = self.cap.read()\n # frame, cropped_frame = self.detector.detect(frame)\n\n # print(np.asarray(frame).shape)\n # print(np.asarray(cropped_frame).shape)\n\n # _, str_label, prob = self.predictor.predict(cropped_frame, 0)\n\n if not ret:\n break\n if not self.q.empty():\n try:\n self.q.get_nowait() # discard previous (unprocessed) frame\n except queue.Empty:\n pass\n # bytes_data = io.BytesIO()\n # frame.save(bytes_data, format='JPEG')\n self.q.put(frame)\n\n def read(self):\n return self.q.get()\n \n\ndef stream(video_capture):\n while(True):\n frame = video_capture.read()\n yield frame\n \n\ndef stop_stream(video_capture):\n video_capture.release()\n cv2.destroyAllWindows()\n\n#################################333 \n\nclass VideoCamera(object):\n def __init__(self):\n # Using OpenCV to capture from device 0. 
If you have trouble capturing\n # from a webcam, comment the line below out and use a video file\n # instead.\n self.video = cv2.VideoCapture(0)\n # If you decide to use video.mp4, you must have this file in the folder\n # as the main.py.\n # self.video = cv2.VideoCapture('video.mp4')\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n success, image = self.video.read()\n # We are using Motion JPEG, but OpenCV defaults to capture raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n\ndef gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n# stream()\n","sub_path":"server/ai/utils/stream_utils.py","file_name":"stream_utils.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"403969160","text":"#! /usr/bin/env python3\n\nimport copy\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom phue import Bridge\nfrom soco import SoCo\nimport pygame\nimport time\n\n# General Settings\nprediction = ''\naction = ''\nscore = 0\nimg_counter = 500\n\n\n# pygame.event.wait()\n\nclass Volume(object):\n def __init__(self):\n self.level = .5\n\n def increase(self, amount):\n self.level += amount\n print(f'New level is: {self.level}')\n\n def decrease(self, amount):\n self.level -= amount\n print(f'New level is: {self.level}')\n\n\nvol = Volume()\n\n# Turn on/off the ability to save images, or control Philips Hue/Sonos\nsave_images, selected_gesture = False, 'peace'\nsmart_home = False\n\n# Philips Hue Settings\n\ngesture_names = {0: 'Fist',\n 1: 'L',\n 2: 'Okay',\n 3: 'Palm',\n 4: 'Peace'}\n\nmodel = load_model('models/VGG_cross_validated.h5')\n\n\ndef predict_rgb_image(img):\n result = gesture_names[model.predict_classes(img)[0]]\n print(result)\n return (result)\n\n\ndef predict_rgb_image_vgg(image):\n image = np.array(image, dtype='float32')\n image /= 255\n pred_array = model.predict(image)\n result = gesture_names[np.argmax(pred_array)]\n score = float(\"%0.2f\" % (max(pred_array[0]) * 100))\n return result, score\n\n\n# parameters\ncap_region_x_begin = 0.5 # start point/total width\ncap_region_y_end = 0.8 # start point/total width\nthreshold = 60 # binary threshold\nblurValue = 41 # GaussianBlur parameter\nbgSubThreshold = 50\nlearningRate = 0\n\n# variableslt\nbgModel = None\ntriggerSwitch = False\nisBgCaptured = 0\n\n\n\n# Camera\n\ndef getGest(fr):\n frame = fr\n frame = cv2.bilateralFilter(frame, 5, 50, 100) # smoothing filter\n frame = cv2.flip(frame, 1) # flip the frame horizontally\n #cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),\n # (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)\n\n #cv2.imshow('original', frame)\n\n # Run once background is captured\n #if isBgCaptured == 1:\n bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)\n fgmask = bgModel.apply(frame, learningRate=learningRate)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n imag = res\n img = imag[0:int(cap_region_y_end * frame.shape[0]),\n int(cap_region_x_begin * frame.shape[1]):frame.shape[1]] # clip the ROI\n # cv2.imshow('mask', img)\n\n # convert the image into binary image\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blur = 
cv2.GaussianBlur(gray, (blurValue, blurValue), 0)\n # cv2.imshow('blur', blur)\n ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Add prediction and action text to thresholded image\n # cv2.put ret, frame = camera.read()\n # cv2.putText(thresh, f\"Action: {action}\", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255)) # Draw the text\n # Draw the text\n \"\"\"cv2.putText(thresh, f\"Prediction: {prediction} ({score}%)\", (50, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255))\n cv2.putText(thresh, f\"Action: {action}\", (50, 80), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255)) # Draw the text\n cv2.imshow('ori', thresh)\"\"\"\n\n # get the contours\n thresh1 = copy.deepcopy(thresh)\n contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n length = len(contours)\n maxArea = -1\n if length > 0:\n for i in range(length): # find the biggest contour (according to area)\n temp = contours[i]\n area = cv2.contourArea(temp)\n if area > maxArea:\n maxArea = area\n ci = i\n\n res = contours[ci]\n hull = cv2.convexHull(res)\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n #cv2.imshow('output', drawing)\n\n # Keyboard OP\n k = cv2.waitKey(10)\n\n bgModel = None\n triggerSwitch = False\n isBgCaptured = 0\n # If space bar pressed\n #cv2.imshow('original', frame)\n # copies 1 channel BW image to all 3 RGB channels\n target = np.stack((thresh,) * 3, axis=-1)\n target = cv2.resize(target, (224, 224))\n target = target.reshape(1, 224, 224, 3)\n prediction, score = predict_rgb_image_vgg(target)\n return prediction, score\n","sub_path":"gestDet.py","file_name":"gestDet.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122192407","text":"number_of_cars = int(input())\ncars_dict = {}\nfor i in range(number_of_cars):\n cars = input()\n model,miles,fuel = cars.split(\"|\")\n miles = int(miles)\n fuel = int(fuel)\n cars_dict[model] = [miles,fuel]\n\ncommand = input()\n\nwhile not command == \"Stop\":\n if \"Drive\" in command:\n cmd,model,dist,fuel = command.split(\" : \")\n dist = int(dist)\n fuel = int(fuel)\n if cars_dict[model][1] < fuel:\n print(f\"Not enough fuel to make that ride\")\n else:\n cars_dict[model][1] -= fuel\n cars_dict[model][0] += dist\n print(f\"{model} driven for {dist} kilometers. 
{fuel} liters of fuel consumed.\")\n if cars_dict[model][0] >= 100000:\n cars_dict.pop(model)\n print(f\"Time to sell the {model}!\")\n elif \"Refuel\" in command:\n cmd,model,fuel = command.split(\" : \")\n fuel = int(fuel)\n if cars_dict[model][1] + fuel > 75:\n filled = 75 - cars_dict[model][1]\n cars_dict[model][1] = 75\n print(f\"{model} refueled with {filled} liters\")\n else:\n cars_dict[model][1] += fuel\n print(f\"{model} refueled with {fuel} liters\")\n elif \"Revert\" in command:\n cmd, model, dist = command.split(\" : \")\n dist = int(dist)\n if cars_dict[model][0] - dist < 10000:\n cars_dict[model][0] = 10000\n else:\n cars_dict[model][0] -= dist\n print(f\"{model} mileage decreased by {dist} kilometers\")\n command = input()\n\ncars_dict = dict(sorted(cars_dict.items(), key = lambda x:x[0]))\ncars_dict = dict(sorted(cars_dict.items(),key = lambda x:x[1][0],reverse=True))\n\nfor car, miles_fuel in cars_dict.items():\n print(f\"{car} -> Mileage: {miles_fuel[0]} kms, Fuel in the tank: {miles_fuel[1]} lt.\")\n\n","sub_path":"final exams/need for speed 3.py","file_name":"need for speed 3.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"644852379","text":"import os\nimport logging\nfrom .base import BaseReport\nimport numpy as np\nimport h5py\nfrom scipy.stats import threshold\nfrom scipy.misc import imsave\n\n\nclass FileReport(BaseReport):\n logger = logging.getLogger(\"ddvt.rep.file\")\n\n def __init__(self, file, temp_dir):\n self.file = file\n # all files are valid until proven broken\n self.valid = True\n # starts with no reports\n self.reports = []\n self.images = []\n self.temp_dir = temp_dir\n\n @property\n def filename(self):\n return self.file.split('/')[-1]\n\n @property\n def path(self):\n return os.path.dirname(self.file)\n\n @property\n def slug(self):\n return self.file.split('_')[-1].split('.h5')[0]\n\n def render(self, directory):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n if self.valid:\n self.render_image(directory)\n with open(os.path.join(directory, 'index.html'), 'w') as index_out:\n self.logger.info(\"Writing file Page for {}\".format(self.file))\n index_out.write(self.file_template.render(title=self.file,\n file_report=self))\n\n def render_image(self, dir):\n input_file = h5py.File(self.file, 'r')\n wires = input_file['image/wires']\n n = 1\n scale = 100\n thresh = 25\n self.logger.info(\"\"\"Producing {} images with\n scale {} and threshold {}\"\"\".format(n, scale, thresh))\n try:\n image = wires[0]\n self.logger.info(\"Image: min: {}, max: {}\".format(np.min(image),\n np.max(image)))\n buff = np.ndarray(shape=(image.shape[1], image.shape[2],\n image.shape[0]),\n dtype=np.float64)\n for i in range(3):\n buff[:, :, i] = image[i, :, :]\n buff = buff * scale\n buff = threshold(buff, threshmin=thresh) + threshold(buff,\n threshmax=-thresh)\n self.logger.info(\"Buffer: min: {}, max: {}\".format(np.min(buff),\n np.max(buff)))\n output_file = os.path.join(dir,\n 'wires.png')\n imsave(output_file, buff)\n except Exception as e:\n self.logger.warning(\"problem creating image\")\n self.logger.warning(e)\n","sub_path":"dl_data_validation_toolset/framework/report/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259786960","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCalculate the SFR from corrected FUV and Halpha\n\n@author: Tom 
Williams\n\"\"\"\n\nfrom __future__ import absolute_import, print_function, division\nimport numpy as np\nfrom scipy.constants import h,k,c\n\n#Start with the easy stuff. Convert the FUV flux into a luminosity in ergs/s\n\nfuv_jy = 2.6636\n\nfuv_erg_s = 1e-23*fuv_jy*1.99e15*4*np.pi*(3.1e18*740*1000)**2\n\n#Pull the MBB from my fit to estimate TIR luminosity\n\nkappa=0.051\nm33_distance_kpc = 785\nm33_distance_m = m33_distance_kpc*3.086e19\n\nfile = open('/home/daedalusdata/c1625914/M33/regrids/mbb_fits/m33_one_temp.txt')\n\nlines = file.readlines()\n\nline = lines[3].strip()\ncolumns = line.split()\n\nmbb_beta = 1.65\nmbb_m = float(columns[2])\nmbb_t = float(columns[5])\n\nwavelengths = np.linspace(3e-6,1100e-6,1000)\nfrequencies = c/wavelengths\n\nkappa_nu = kappa*(frequencies / (c/500e-6) )**mbb_beta\n\nplanck_function = (2*h/c**2) * frequencies**(3) / (np.exp( (h*frequencies) / (k*mbb_t) ) -1 )\n\nmbb_values = kappa_nu*planck_function*mbb_m/ (m33_distance_m**2*1e-26)\n\nl_tir_jy = np.abs(np.trapz(mbb_values,frequencies))\nl_tir_erg = l_tir_jy*1e-23*4*np.pi*(m33_distance_m*100)**2\n\nfuv_corr = fuv_erg_s + 0.46*l_tir_erg\n\n#Convert this corrected FUV luminosity to an SFR\n\nsfr_fuv_corr = np.log10(fuv_corr)-43.35\n\nprint('FUV/TIR SFR: '+str(10**sfr_fuv_corr))\n\n#Also look at the Halpha+24micron SFR. \n\nhalpha_jy = 0.09\nhalpha_erg_s = 1e-23*halpha_jy*4.57e14*4*np.pi*(3.1e18*740*1000)**2 \n\nmips_24_jy = 47.33\nmips_24_erg_s = 1e-23*mips_24_jy*1.25e13*4*np.pi*(3.1e18*740*1000)**2\n\n#Correct Halpha with 24micron\n\nhalpha_corr = halpha_erg_s+0.02*mips_24_erg_s\n\nsfr_halpha_corr = np.log10(halpha_corr)-41.27\nprint('Halpha/24 micron SFR: '+str(10**sfr_halpha_corr))\n\nprint('Complete!')","sub_path":"skirt/fuv_halpha_sfr.py","file_name":"fuv_halpha_sfr.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"228303017","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'team'\nSITENAME = \"theEnterpriseStack\"\nALTNAME = \"#! 
\" + SITENAME\nSITEURL = 'http://theenterprisestack.com'\n\nPATH = 'content'\n\nTIMEZONE = 'Australia/Sydney'\n\nDEFAULT_LANG = u'en'\n\nTHEME = 'pelican-themes/pelican-bootstrap3'\nBOOTSTRAP_THEME = 'sandstone'\nSTATIC_PATHS = ['images', 'blog']\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = 'feeds/all.atom.xml'\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nBOOTSTRAP_NAVBAR_INVERSE = False\n\nSOCIAL = (('twitter', 'http://twitter.com'),\n ('linkedin', 'http://www.linkedin.com'),\n ('github', 'http://github.com/'))\n\nMENUITEMS = [\n ('Contact', '/contact.html'),\n ('Cloud Services', '/cloud-services.html'),\n ('Performance Monitoring', '/performance-monitoring.html'),\n ('Consulting and Design', '/consulting-design.html'),\n ('Blog', '/category/blog.html')\n]\n\nBANNER = 'images/banner_2.jpg'\nFAVICON = 'images/favicon.png'\n\nHIDE_SIDEBAR = True\nDISPLAY_CATEGORIES_ON_SIDEBAR = False\nDISPLAY_RECENT_POSTS_ON_SIDEBAR = True\nDISPLAY_CATEGORY_IN_BREADCRUMBS = False\nDISPLAY_TAGS_ON_SIDEBAR = True\nDISPLAY_SERIES_ON_SIDEBAR = True\n\nTYPOGRAPHY = True\nDISPLAY_BREADCRUMBS = False\n\nDISPLAY_PAGES_ON_MENU = False\nDISPLAY_CATEGORIES_ON_MENU = False\nUSE_FOLDER_AS_CATEGORY = True\nDEFAULT_CATEGORY = 'Blog'\n\nBANNER_ALL_PAGES = False\n\nARTICLE_PATHS = ['articles']\nARTICLE_URL = '{category}/{slug}/'\nARTICLE_SAVE_AS = '{category}/{slug}/index.html'\nPAGE_URL = '{slug}.html'\nPAGE_SAVE_AS = '{slug}.html'\nTAG_URL = 'tags/{slug}.html'\nTAG_SAVE_AS = 'tags/{slug}.html'\nTAGS_URL = 'tags.html'\nDIRECT_TEMPLATES = ['index', 'categories', 'authors', 'archives', 'article_list', 'contact']\n\nPLUGIN_PATHS = ['pelican-plugins']\nPLUGINS = ['related_posts', 'series', 'tag_cloud']\n\nABOUT_ME = False\n\nGOOGLE_ANALYTICS = 'UA-69921042'\nGOOGLE_ANALYTICS_UNIVERSAL = 'UA-69921042'","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"406278422","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport time\nimport h5py\nimport torch\nimport random\nimport argparse\nfrom torch import nn\nfrom utils import get_logger\nfrom functools import partial\nfrom utils import AverageMeter\nfrom nli.models import PpoModel\nfrom utils import EarlyStopping\nfrom utils import get_lr_scheduler\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom nli.data_preprocessing import NliDataset\n\n\ndef make_path_preparations(args):\n seed = hash(str(args)) % 1000_000\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)\n\n # logger path\n args_hash = str(hash(str(args)))\n if not os.path.exists(args.logs_path):\n os.makedirs(args.logs_path)\n logger = get_logger(f\"{args.logs_path}/l{args_hash}.log\")\n print(f\"{args.logs_path}/l{args_hash}.log\")\n logger.info(f\"args: {str(args)}\")\n logger.info(f\"args hash: {args_hash}\")\n logger.info(f\"random seed: {seed}\")\n\n # model path\n args.model_dir = f\"{args.model_dir}/m{args_hash}\"\n if not os.path.exists(args.model_dir):\n os.makedirs(args.model_dir)\n logger.info(f\"checkpoint's dir is: {args.model_dir}\")\n\n # tensorboard path\n tensorboard_path = f\"{args.tensorboard_path}/t{args_hash}\"\n if not os.path.exists(tensorboard_path):\n os.makedirs(tensorboard_path)\n summary_writer = dict()\n summary_writer[\"train\"] = SummaryWriter(log_dir=os.path.join(tensorboard_path, 'log' + args_hash, 'train'))\n summary_writer[\"valid\"] = SummaryWriter(log_dir=os.path.join(tensorboard_path, 'log' + args_hash, 'valid'))\n\n return logger, summary_writer\n\n\ndef get_data(args):\n if args.nli == \"snli\":\n train_data = NliDataset.load_data(f\"data/nli/snli_1.0/train_lower={args.lower}.pckl\")\n valid_data = NliDataset.load_data(f\"data/nli/snli_1.0/valid_lower={args.lower}.pckl\")\n test_data = NliDataset.load_data(f\"data/nli/snli_1.0/test_lower={args.lower}.pckl\")\n elif args.nli == \"multi_nli\":\n train_data = NliDataset.load_data(f\"data/nli/multinli_1.0/train_lower={args.lower}.pckl\")\n train_data.extend(NliDataset.load_data(f\"data/nli/snli_1.0/train_lower={args.lower}.pckl\"))\n valid_data = NliDataset.load_data(f\"data/nli/multinli_1.0/valid_matched_lower={args.lower}.pckl\")\n test_data = None\n else:\n raise ValueError\n print(f\"train len: {len(train_data)}\")\n print(f\"valid len: {len(valid_data)}\")\n\n train_dataset = NliDataset(train_data, max_len=args.max_len)\n valid_dataset = NliDataset(valid_data)\n test_dataset = None if test_data is None else NliDataset(test_data)\n\n print(f\"train len: {len(train_dataset.data)}\")\n print(f\"valid len: {len(valid_dataset.data)}\")\n\n train_data = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True,\n collate_fn=NliDataset.collate_fn, pin_memory=True)\n valid_data = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, drop_last=False,\n collate_fn=NliDataset.collate_fn, pin_memory=True)\n test_data = None if test_dataset is None else \\\n DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, drop_last=False,\n collate_fn=NliDataset.collate_fn, pin_memory=True)\n\n with h5py.File(f\"data/nli/glove_lower={args.lower}.h5\", 'r') as f:\n glove = f[\"glove\"][...]\n\n args.vocab_size = glove.shape[0]\n args.label_size = NliDataset.label_size\n\n return train_data, valid_data, 
test_data, glove\n\n\ndef prepare_optimisers(args, logger, policy_parameters, environment_parameters):\n if args.env_optimizer == \"adam\":\n env_opt_class = torch.optim.Adam\n elif args.env_optimizer == \"amsgrad\":\n env_opt_class = partial(torch.optim.Adam, amsgrad=True)\n elif args.env_optimizer == \"adadelta\":\n env_opt_class = torch.optim.Adadelta\n else:\n env_opt_class = torch.optim.SGD\n\n if args.pol_optimizer == \"adam\":\n pol_opt_class = torch.optim.Adam\n elif args.pol_optimizer == \"amsgrad\":\n pol_opt_class = partial(torch.optim.Adam, amsgrad=True)\n elif args.pol_optimizer == \"adadelta\":\n pol_opt_class = torch.optim.Adadelta\n else:\n pol_opt_class = torch.optim.SGD\n\n optimizer = {\"policy\": pol_opt_class(params=policy_parameters, lr=args.pol_lr, weight_decay=args.l2_weight),\n \"env\": env_opt_class(params=environment_parameters, lr=args.env_lr, weight_decay=args.l2_weight)}\n lr_scheduler = {\"policy\": get_lr_scheduler(logger, optimizer[\"policy\"], patience=args.lr_scheduler_patience,\n threshold=args.lr_scheduler_threshold),\n \"env\": get_lr_scheduler(logger, optimizer[\"env\"], patience=args.lr_scheduler_patience,\n threshold=args.lr_scheduler_threshold)}\n es = EarlyStopping(mode=\"max\", patience=args.es_patience, threshold=args.es_threshold)\n return optimizer, lr_scheduler, es\n\n\ndef perform_env_optimizer_step(optimizer, model, args):\n if args.clip_grad_norm > 0:\n nn.utils.clip_grad_norm_(parameters=model.get_environment_parameters(),\n max_norm=args.clip_grad_norm,\n norm_type=float(\"inf\"))\n optimizer[\"env\"].step()\n optimizer[\"env\"].zero_grad()\n\n\ndef perform_policy_optimizer_step(optimizer, model, args):\n if args.clip_grad_norm > 0:\n nn.utils.clip_grad_norm_(parameters=model.get_policy_parameters(),\n max_norm=args.clip_grad_norm,\n norm_type=float(\"inf\"))\n optimizer[\"policy\"].step()\n optimizer[\"policy\"].zero_grad()\n\n\ndef test(test_data, model, device, logger):\n if test_data is None:\n return\n\n loading_time_meter = AverageMeter()\n batch_time_meter = AverageMeter()\n ce_loss_meter = AverageMeter()\n accuracy_meter = AverageMeter()\n entropy_meter = AverageMeter()\n n_entropy_meter = AverageMeter()\n\n model.eval()\n start = time.time()\n with torch.no_grad():\n for labels, premises, p_mask, hypotheses, h_mask in test_data:\n labels = labels.to(device=device, non_blocking=True)\n premises = premises.to(device=device, non_blocking=True)\n p_mask = p_mask.to(device=device, non_blocking=True)\n hypotheses = hypotheses.to(device=device, non_blocking=True)\n h_mask = h_mask.to(device=device, non_blocking=True)\n loading_time_meter.update(time.time() - start)\n\n pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \\\n model(premises, p_mask, hypotheses, h_mask, labels)\n entropy = entropy.mean()\n normalized_entropy = normalized_entropy.mean()\n\n accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()\n n = p_mask.shape[0]\n accuracy_meter.update(accuracy.item(), n)\n ce_loss_meter.update(ce_loss.item(), n)\n entropy_meter.update(entropy.item(), n)\n n_entropy_meter.update(normalized_entropy.item(), n)\n batch_time_meter.update(time.time() - start)\n start = time.time()\n\n logger.info(f\"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} \"\n f\"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} \"\n f\"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}\")\n logger.info(\"done\")\n\n return 
accuracy_meter.avg\n\n\ndef validate(valid_data, model, epoch, device, logger, summary_writer):\n loading_time_meter = AverageMeter()\n batch_time_meter = AverageMeter()\n ce_loss_meter = AverageMeter()\n accuracy_meter = AverageMeter()\n entropy_meter = AverageMeter()\n n_entropy_meter = AverageMeter()\n\n model.eval()\n start = time.time()\n with torch.no_grad():\n for labels, premises, p_mask, hypotheses, h_mask in valid_data:\n labels = labels.to(device=device, non_blocking=True)\n premises = premises.to(device=device, non_blocking=True)\n p_mask = p_mask.to(device=device, non_blocking=True)\n hypotheses = hypotheses.to(device=device, non_blocking=True)\n h_mask = h_mask.to(device=device, non_blocking=True)\n loading_time_meter.update(time.time() - start)\n\n pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \\\n model(premises, p_mask, hypotheses, h_mask, labels)\n entropy = entropy.mean()\n normalized_entropy = normalized_entropy.mean()\n\n accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()\n n = p_mask.shape[0]\n accuracy_meter.update(accuracy.item(), n)\n ce_loss_meter.update(ce_loss.item(), n)\n entropy_meter.update(entropy.item(), n)\n n_entropy_meter.update(normalized_entropy.item(), n)\n batch_time_meter.update(time.time() - start)\n start = time.time()\n\n logger.info(f\"Valid: epoch: {epoch} ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} \"\n f\"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} \"\n f\"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}\")\n\n summary_writer[\"valid\"].add_scalar(tag=\"ce\", scalar_value=ce_loss_meter.avg, global_step=global_step)\n summary_writer[\"valid\"].add_scalar(tag=\"accuracy\", scalar_value=accuracy_meter.avg, global_step=global_step)\n summary_writer[\"valid\"].add_scalar(tag=\"n_entropy\", scalar_value=n_entropy_meter.avg, global_step=global_step)\n\n model.train()\n return accuracy_meter.avg\n\n\ndef train(train_data, valid_data, model, optimizer, lr_scheduler, es, epoch, args, logger, summary_writer):\n loading_time_meter = AverageMeter()\n batch_time_meter = AverageMeter()\n ce_loss_meter = AverageMeter()\n accuracy_meter = AverageMeter()\n entropy_meter = AverageMeter()\n n_entropy_meter = AverageMeter()\n prob_ratio_meter = AverageMeter()\n\n device = args.gpu_id\n model.train()\n start = time.time()\n for batch_idx, (labels, premises, p_mask, hypotheses, h_mask) in enumerate(train_data):\n labels = labels.to(device=device, non_blocking=True)\n premises = premises.to(device=device, non_blocking=True)\n p_mask = p_mask.to(device=device, non_blocking=True)\n hypotheses = hypotheses.to(device=device, non_blocking=True)\n h_mask = h_mask.to(device=device, non_blocking=True)\n loading_time_meter.update(time.time() - start)\n\n pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \\\n model(premises, p_mask, hypotheses, h_mask, labels)\n\n ce_loss.backward()\n perform_env_optimizer_step(optimizer, model, args)\n for k in range(args.ppo_updates):\n if k == 0:\n new_normalized_entropy, new_actions_log_prob = normalized_entropy, actions_log_prob\n else:\n new_normalized_entropy, new_actions_log_prob = \\\n model.evaluate_actions(premises, p_mask, actions[\"p_actions\"],\n hypotheses, h_mask, actions[\"h_actions\"])\n prob_ratio = (new_actions_log_prob - actions_log_prob.detach()).exp()\n clamped_prob_ratio = prob_ratio.clamp(1.0 - args.epsilon, 1.0 + args.epsilon)\n ppo_loss = 
torch.max(prob_ratio * rewards, clamped_prob_ratio * rewards).mean()\n loss = ppo_loss - args.entropy_weight * new_normalized_entropy.mean()\n loss.backward()\n perform_policy_optimizer_step(optimizer, model, args)\n\n entropy = entropy.mean()\n normalized_entropy = normalized_entropy.mean()\n n = p_mask.shape[0]\n accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()\n accuracy_meter.update(accuracy.item(), n)\n ce_loss_meter.update(ce_loss.item(), n)\n entropy_meter.update(entropy.item(), n)\n n_entropy_meter.update(normalized_entropy.item(), n)\n prob_ratio_meter.update((1.0-prob_ratio.detach()).abs().mean().item(), n)\n batch_time_meter.update(time.time() - start)\n\n global global_step\n summary_writer[\"train\"].add_scalar(tag=\"ce\", scalar_value=ce_loss.item(), global_step=global_step)\n summary_writer[\"train\"].add_scalar(tag=\"accuracy\", scalar_value=accuracy.item(), global_step=global_step)\n summary_writer[\"train\"].add_scalar(tag=\"n_entropy\", scalar_value=normalized_entropy.item(),\n global_step=global_step)\n summary_writer[\"train\"].add_scalar(tag=\"prob_ratio\", scalar_value=prob_ratio_meter.value,\n global_step=global_step)\n global_step += 1\n\n if (batch_idx + 1) % (len(train_data) // 3) == 0:\n logger.info(f\"Train: epoch: {epoch} batch_idx: {batch_idx + 1} ce_loss: {ce_loss_meter.avg:.4f} \"\n f\"accuracy: {accuracy_meter.avg:.4f} entropy: {entropy_meter.avg:.4f} \"\n f\"n_entropy: {n_entropy_meter.avg:.4f} loading_time: {loading_time_meter.avg:.4f} \"\n f\"batch_time: {batch_time_meter.avg:.4f}\")\n val_accuracy = validate(valid_data, model, epoch, device, logger, summary_writer)\n lr_scheduler[\"env\"].step(val_accuracy)\n lr_scheduler[\"policy\"].step(val_accuracy)\n es.step(val_accuracy)\n global best_model_path\n if es.is_converged:\n return\n if es.is_improved():\n logger.info(\"saving model...\")\n best_model_path = f\"{args.model_dir}/{epoch}-{batch_idx}.mdl\"\n torch.save({\"epoch\": epoch, \"batch_idx\": batch_idx, \"state_dict\": model.state_dict()}, best_model_path)\n model.train()\n start = time.time()\n\n\ndef main(args):\n logger, summary_writer = make_path_preparations(args)\n train_data, valid_data, test_data, vectors = get_data(args)\n\n model = PpoModel(vocab_size=args.vocab_size,\n word_dim=args.word_dim,\n hidden_dim=args.hidden_dim,\n mlp_hidden_dim=args.mlp_hidden_dim,\n label_dim=args.label_size,\n dropout_prob=args.dropout_prob,\n parser_leaf_transformation=args.parser_leaf_transformation,\n parser_trans_hidden_dim=args.parser_trans_hidden_dim,\n tree_leaf_transformation=args.tree_leaf_transformation,\n tree_trans_hidden_dim=args.tree_trans_hidden_dim,\n baseline_type=args.baseline_type,\n var_normalization=args.var_normalization,\n use_batchnorm=args.use_batchnorm).cuda(args.gpu_id)\n dtype = model.embd_parser.weight.data.dtype\n device = model.embd_parser.weight.data.device\n model.embd_parser.weight.data = torch.tensor(vectors, dtype=dtype, device=device)\n model.embd_tree.weight.data = torch.tensor(vectors, dtype=dtype, device=device)\n if args.freeze_embeddings:\n model.embd_parser.weight.requires_grad = False\n model.embd_tree.weight.requires_grad = False\n logger.info(\"Embeddings is frozen!\")\n\n optimizer, lr_scheduler, es = prepare_optimisers(args, logger,\n policy_parameters=model.get_policy_parameters(),\n environment_parameters=model.get_environment_parameters())\n\n validate(valid_data, model, 0, args.gpu_id, logger, summary_writer)\n for epoch in range(args.max_epoch):\n train(train_data, valid_data, model, 
optimizer, lr_scheduler, es, epoch, args, logger, summary_writer)\n if es.is_converged:\n break\n print(best_model_path)\n checkpoint = torch.load(best_model_path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n test(test_data, model, args.gpu_id, logger)\n\n\nif __name__ == \"__main__\":\n # SNLI\n args = {\"nli\": \"snli\",\n \"freeze-embeddings\": \"True\",\n \"use-batchnorm\": \"True\",\n \"dropout-prob\": 0.1,\n \"lower\": \"True\",\n \"mlp-hidden-dim\": 1024,\n \"word-dim\": 300,\n \"hidden-dim\": 300,\n \"parser-leaf-transformation\": \"lstm_transformation\",\n \"parser-trans-hidden_dim\": 300,\n \"tree-leaf-transformation\": \"lstm_transformation\",\n \"tree-trans-hidden_dim\": 300,\n \"baseline-type\": \"self_critical\",\n \"var-normalization\": \"True\",\n \"entropy-weight\": 0.0,\n \"clip-grad-norm\": 0.0,\n \"env-optimizer\": \"adadelta\",\n \"pol-optimizer\": \"adadelta\",\n \"env-lr\": 1.0,\n \"pol-lr\": 1.0,\n \"ppo-updates\": 1,\n \"epsilon\": 0.2,\n \"lr-scheduler-patience\": 8,\n \"lr-scheduler-threshold\": 0.005,\n \"l2-weight\": 0.0,\n \"batch-size\": 64,\n \"max-len\": 120,\n \"max-epoch\": 150,\n \"es-patience\": 20,\n \"es-threshold\": 0.005,\n \"gpu-id\": 0,\n \"model-dir\": \"data/snli/ppo/models/exp0\",\n \"logs-path\": \"data/snli/ppo/logs/exp0\",\n \"tensorboard-path\": \"data/snli/ppo/tensorboard/exp0\"\n }\n\n # MultiNLI\n # args = {\"nli\": \"multi_nli\",\n # \"freeze-embeddings\": \"True\",\n # \"use-batchnorm\": \"True\",\n # \"dropout-prob\": 0.1,\n # \"lower\": \"True\",\n # \"mlp-hidden-dim\": 1024,\n # \"word-dim\": 300,\n # \"hidden-dim\": 300,\n # \"parser-leaf-transformation\": \"lstm_transformation\",\n # \"parser-trans-hidden_dim\": 300,\n # \"tree-leaf-transformation\": \"lstm_transformation\",\n # \"tree-trans-hidden_dim\": 300,\n # \"baseline-type\": \"self_critical\",\n # \"var-normalization\": \"True\",\n # \"entropy-weight\": 0.0,\n # \"clip-grad-norm\": 0.0,\n # \"env-optimizer\": \"adadelta\",\n # \"pol-optimizer\": \"adadelta\",\n # \"env-lr\": 1.0,\n # \"pol-lr\": 1.0,\n # \"ppo-updates\": 1,\n # \"epsilon\": 0.2,\n # \"lr-scheduler-patience\": 8,\n # \"lr-scheduler-threshold\": 0.005,\n # \"l2-weight\": 0.0,\n # \"batch-size\": 64,\n # \"max-len\": 120,\n # \"max-epoch\": 150,\n # \"es-patience\": 20,\n # \"es-threshold\": 0.005,\n # \"gpu-id\": 0,\n # \"model-dir\": \"data/multi_nli/ppo/models/exp0\",\n # \"logs-path\": \"data/multi_nli/ppo/logs/exp0\",\n # \"tensorboard-path\": \"data/multi_nli/ppo/tensorboard/exp0\"\n # }\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--nli\", default=args[\"nli\"], choices=[\"multi_nli\", \"snli\"])\n parser.add_argument(\"--freeze-embeddings\", default=args[\"freeze-embeddings\"],\n type=lambda val: True if val == \"True\" else False)\n parser.add_argument(\"--use-batchnorm\", default=args[\"use-batchnorm\"],\n type=lambda val: True if val == \"True\" else False)\n parser.add_argument(\"--dropout-prob\", default=args[\"dropout-prob\"], type=float)\n parser.add_argument(\"--lower\", default=args[\"lower\"],\n type=lambda val: True if val == \"True\" else False)\n parser.add_argument(\"--mlp-hidden-dim\", default=args[\"mlp-hidden-dim\"], type=int)\n parser.add_argument(\"--word-dim\", required=False, default=args[\"word-dim\"], type=int)\n parser.add_argument(\"--hidden-dim\", required=False, default=args[\"hidden-dim\"], type=int)\n parser.add_argument(\"--parser-leaf-transformation\", required=False, default=args[\"parser-leaf-transformation\"],\n 
choices=[\"no_transformation\", \"lstm_transformation\",\n \"bi_lstm_transformation\", \"conv_transformation\"])\n parser.add_argument(\"--parser-trans-hidden_dim\", required=False, default=args[\"parser-trans-hidden_dim\"], type=int)\n parser.add_argument(\"--tree-leaf-transformation\", required=False, default=args[\"tree-leaf-transformation\"],\n choices=[\"no_transformation\", \"lstm_transformation\",\n \"bi_lstm_transformation\", \"conv_transformation\"])\n parser.add_argument(\"--tree-trans-hidden_dim\", required=False, default=args[\"tree-trans-hidden_dim\"], type=int)\n\n parser.add_argument(\"--baseline-type\", default=args[\"baseline-type\"],\n choices=[\"no_baseline\", \"ema\", \"self_critical\"])\n parser.add_argument(\"--var-normalization\", default=args[\"var-normalization\"],\n type=lambda string: True if string == \"True\" else False)\n parser.add_argument(\"--entropy-weight\", default=args[\"entropy-weight\"], type=float)\n parser.add_argument(\"--clip-grad-norm\", default=args[\"clip-grad-norm\"], type=float,\n help=\"If the value is less or equal to zero clipping is not performed.\")\n\n parser.add_argument(\"--env-optimizer\", required=False, default=args[\"env-optimizer\"], choices=[\"adam\", \"amsgrad\", \"sgd\", \"adadelta\"])\n parser.add_argument(\"--pol-optimizer\", required=False, default=args[\"pol-optimizer\"], choices=[\"adam\", \"amsgrad\", \"sgd\", \"adadelta\"])\n parser.add_argument(\"--env-lr\", required=False, default=args[\"env-lr\"], type=float)\n parser.add_argument(\"--pol-lr\", required=False, default=args[\"pol-lr\"], type=float)\n parser.add_argument(\"--ppo-updates\", required=False, default=args[\"ppo-updates\"], type=int)\n parser.add_argument(\"--epsilon\", required=False, default=args[\"epsilon\"], type=float)\n parser.add_argument(\"--lr-scheduler-patience\", required=False, default=args[\"lr-scheduler-patience\"], type=int)\n parser.add_argument(\"--lr-scheduler-threshold\", required=False, default=args[\"lr-scheduler-threshold\"], type=float)\n parser.add_argument(\"--l2-weight\", required=False, default=args[\"l2-weight\"], type=float)\n parser.add_argument(\"--batch-size\", required=False, default=args[\"batch-size\"], type=int)\n\n parser.add_argument(\"--max-len\", default=args[\"max-len\"], type=int)\n parser.add_argument(\"--max-epoch\", required=False, default=args[\"max-epoch\"], type=int)\n parser.add_argument(\"--es-patience\", required=False, default=args[\"es-patience\"], type=int)\n parser.add_argument(\"--es-threshold\", required=False, default=args[\"es-threshold\"], type=float)\n parser.add_argument(\"--gpu-id\", required=False, default=args[\"gpu-id\"], type=int)\n parser.add_argument(\"--model-dir\", required=False, default=args[\"model-dir\"], type=str)\n parser.add_argument(\"--logs-path\", required=False, default=args[\"logs-path\"], type=str)\n parser.add_argument(\"--tensorboard-path\", required=False, default=args[\"tensorboard-path\"], type=str)\n\n global_step = 0\n best_model_path = None\n args = parser.parse_args()\n with torch.cuda.device(args.gpu_id):\n main(args)\n","sub_path":"nli/ppo/train_ppo_model.py","file_name":"train_ppo_model.py","file_ext":"py","file_size_in_byte":24239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"197995101","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import mode\nimport time\nfrom sklearn import model_selection, preprocessing\nimport xgboost as 
xgb\nfrom sklearn.cross_validation import KFold\nfrom xgboost.sklearn import XGBRegressor\nimport datetime\n#now = datetime.datetime.now()\nfrom sklearn.model_selection import train_test_split, GridSearchCV,cross_val_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import accuracy_score,roc_auc_score,mean_squared_error\n\npd.options.mode.chained_assignment = None # default='warn'\npd.set_option('display.max_columns', 500)\n\ntrain = pd.read_csv('kaggle/russia/train.csv')\ntest = pd.read_csv('kaggle/russia/test.csv')\nmacro = pd.read_csv('kaggle/russia/macro.csv')\nid_test = test.id\nfull = pd.read_csv('kaggle/russia/full.csv')\n\n\n#缺失值处理完毕,接下来挖掘新的特征\n#房子本身的信息\nfull['floor_inverse']=full['max_floor']-full['floor']\nfull['floor_ratio']=full['floor']/full['max_floor']\n#生活面积占总面积的比值,厨房面积占的比例,每个房间的面积大小\nfull['life_ratio']=full['life_sq']/full['full_sq']\nfull['kitch_ratio']=full['kitch_sq']/full['full_sq']\nfull['sq_per_room']=(full['life_sq']-full['kitch_sq'])/full['num_room']#0.31325\n\n\n\n\nfull['sq_per_room1']=full['full_sq']/full['num_room']#0.31543,结果下降\n\n\nfull['extra_area']=full['full_sq']-full['life_sq']#0.31343,结果微降\n\n#人口信息\nfull['pop_density_raion']=full['raion_popul']/full['area_m']#0.31639,结果微降\n\nfull['young_proportion ']=full['young_all']/full['area_m']#0.31548退步明显\nfull['work_proportion ']=full['work_all']/full['area_m']#0.31443\nfull['retire_proportion ']=full['ekder_all']/full['area_m']\n#已上三个都加上,结果是.031481\n#售卖时距离建造的时间\n\n#教育信息\nfull['ratio_preschool']=full['children_preschool']/full['preschool_quota']#0.31518\nfull['ratio_school']=full['children_school']/full['school_quota']\n#加上以上两个特征结果0.31518\n\n\n##=------------------------------------------------------------------\n#特征处理完毕,接下来选择模型预测,融合\n##=------------------------------------------------------------------\n\n\ny_train = train[\"price_doc\"]\nx_train=full[:30471]\nx_test=full[30471:]\n\n#做本地的交叉验证\nkf = KFold(x_train.shape[0], n_folds=5, random_state=1)\n#cv借鉴了这个脚本https://www.kaggle.com/shaweji/script-v6/code\ndef rmse(y_true,y_pred):\n return np.sqrt(mean_squared_error(y_true,y_pred))\ndef rmse_cv(model, X_train, y):\n # RMSE with Cross Validation\n rmse= np.sqrt(-cross_val_score(model, X_train, y,\n scoring=\"neg_mean_squared_error\", cv = kf))\n return(rmse)\n\nmodel_xgb=xgb.XGBRegressor(\n learning_rate=0.05,\n max_depth=5,\n subsample=0.7,\n colsample_bytree=0.7,\n objective='reg:linear',\n silent=True,\n seed=2017\n)\n\ntime_start=time.time()\n\n# model_xgb.fit(x_train,y_train)\n\n\nprint(\"mean RMSE of XGBoost with CV: \", np.mean(rmse_cv(model_xgb, x_train, y_train)))\ntime_stop=time.time()\nprint(time_stop-time_start,\"seconds\")\n\nmodel_xgb.fit(x_train,y_train)\ny_predict=model_xgb.predict(x_test)\noutput = pd.DataFrame({'id': id_test, 'price_doc': y_predict})\noutput.to_csv('kaggle/russia/result.csv', index=False)\n\n\n\n\n\n\n\nscoring=\"roc_auc\"\nX1, X2, y1, y2 = train_test_split(x_train, y_train, random_state=2017)\n\nxgb_params = {\n 'eta': 0.05,#0.05\n 'max_depth': 5,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'objective': 'reg:linear',\n 'eval_metric': 'rmse',\n 'silent': 1\n}\n\ndtrain = xgb.DMatrix(X1, y1)\ndtest = xgb.DMatrix(X2,y2)\nwatchlist = [(dtrain, 'train'), (dtest,'valid')]\n\nmodel = xgb.train(xgb_params, dtrain, num_boost_round=1000, evals=watchlist, early_stopping_rounds=20,verbose_eval=50)\n\npredictions=model.predict(xgb.DMatrix(X2))\nfig, ax = plt.subplots(1, 1, figsize=(8, 
13))\nxgb.plot_importance(model,height=0.5, ax=ax)\nplt.show(block=False)\n\n\n\n\n\n#上面是本地的交叉验证,下面是预测得到结果,其实是一样的,cv就是自带的交叉验证\nxgb_params = {\n 'eta': 0.05,#0.05\n 'max_depth': 5,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'objective': 'reg:linear',\n 'eval_metric': 'rmse',\n 'silent': 1\n}\n\ndtrain = xgb.DMatrix(x_train, y_train)\ndtest = xgb.DMatrix(x_test)\n\ncv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,verbose_eval=50, show_stdv=False)\ncv_output[['train-rmse-mean', 'test-rmse-mean']].plot()\n\nnum_boost_rounds = len(cv_output)\nmodel = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round= num_boost_rounds)\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 13))\nxgb.plot_importance(model, height=0.5, ax=ax)\nplt.show(block=False)\n\ny_predict = model.predict(dtest)\noutput = pd.DataFrame({'id': id_test, 'price_doc': y_predict})\noutput.head()\n\n\noutput.to_csv('kaggle/russia/result.csv', index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n#筛选出一些重要性较高特征\nfeatures=[]\na=model.get_fscore()\nimportance_map=sorted(a.items(), key=lambda d:d[1],reverse=True)\nfor x in importance_map[:220]:\n # print(x)\n features.append(x[0])\n\n\n\ny_train = train[\"price_doc\"]\nx_train=full[:30471][features]\nx_test=full[30471:][features]\nxgb_params = {\n 'eta': 0.05,#学习率\n 'max_depth': 5,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'objective': 'reg:linear',\n 'eval_metric': 'rmse',\n 'silent': 1\n}\n\ndtrain = xgb.DMatrix(x_train, y_train)\ndtest = xgb.DMatrix(x_test)\n\ncv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,verbose_eval=50, show_stdv=False)\ncv_output[['train-rmse-mean', 'test-rmse-mean']].plot()\n\nnum_boost_rounds = len(cv_output)\nmodel = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round= num_boost_rounds)\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 13))\nxgb.plot_importance(model, height=0.5, ax=ax)\nplt.show(block=False)\n\n\n\n\ny_predict = model.predict(dtest)\noutput = pd.DataFrame({'id': id_test, 'price_doc': y_predict})\noutput.head()\n\n\noutput.to_csv('kaggle/russia/result.csv', index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"zyf/feature_tune.py","file_name":"feature_tune.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"186779697","text":"from twisted.internet.protocol import DatagramProtocol\nfrom twisted.internet import reactor\n\nclass Echo(DatagramProtocol):\n def datagramReceived(self, data, address):\n self.transport.write(data, address)\n print(\" \".join(hex(ord(n)) for n in data))\n\n\nreactor.listenUDP(9999, Echo())\nreactor.run()","sub_path":"Receiver.py","file_name":"Receiver.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"15269618","text":"from bs4 import BeautifulSoup\nimport requests\n\ndef get_url(site_url):\n ls = []\n response = requests.get(site_url)\n soup = BeautifulSoup(response.text, 'html.parser')\n for link in soup.find_all('a'):\n url = link.get('href')\n if url:\n if url.startswith('/Images/'):\n yield(site_url + url)\n\n\n\ndef main():\n site_url = 'https://exam.ioe.edu.np'\n res = 
get_url(site_url)\n for item in res:\n print(item)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ioe.py","file_name":"ioe.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"254855520","text":"import boto3\nimport json\nimport os\n\ndynamodb = boto3.client('dynamodb')\n\n\ndef lambda_handler(event, context):\n response = dynamodb.scan(TableName=os.environ['tableName'])\n tasks = list(\n {\n 'PK': item['PK']['S'],\n 'description': item['description']['S'],\n 'isCompleted': item['isCompleted']['BOOL'],\n }\n for item in response['Items']\n )\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"body\": json.dumps(tasks)\n }\n","sub_path":"007-terraform-static-website/resources/get-all-tasks.py","file_name":"get-all-tasks.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"615379215","text":"#!/usr/bin/env python\nfrom sot_ros_api import *\n\nfrom rospy_tutorials.msg import Floats\nfrom rospy.numpy_msg import numpy_msg\nfrom geometry_msgs.msg import PoseStamped, PointStamped\nfrom dynamic_graph.sot.core.meta_task_joint_weights import MetaTaskJointWeights\n\ndef ComBordersPublisher(refInf,refSup):\n pub = rospy.Publisher('sot_controller/com_borders', numpy_msg(Floats))\n a = numpy.concatenate((numpy.array(refInf,dtype=numpy.float32),numpy.array(refSup,dtype=numpy.float32)))\n #r = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n pub.publish(a)\n #r.sleep()\n\ndef callback_head(data):\n xyz = numpy.array([data.point.x , data.point.y , data.point.z])\n taskGAZE.goto3D((xyz[0],xyz[1],xyz[2]))\n\ndef callback_left_hand(data):\n quat = numpy.array([data.pose.orientation.x , data.pose.orientation.y , data.pose.orientation.z , data.pose.orientation.w])\n xyz = numpy.array([data.pose.position.x , data.pose.position.y , data.pose.position.z])\n move_target_left(xyz,quat)\n\ndef callback_right_hand(data):\n quat = numpy.array([data.pose.orientation.x , data.pose.orientation.y , data.pose.orientation.z , data.pose.orientation.w])\n xyz = numpy.array([data.pose.position.x , data.pose.position.y , data.pose.position.z])\n move_target_right(xyz,quat)\n\ndef listener():\n rospy.init_node('listener')\n rospy.Subscriber(\"left_hand_ref_pose_filt\",PoseStamped, callback_left_hand)\n rospy.Subscriber(\"right_hand_ref_pose_filt\",PoseStamped, callback_right_hand)\n rospy.Subscriber(\"head_ref_point_filt\",PointStamped, callback_head)\n\ndef move_target_left(xyz,quat):\n goal = goalDef(xyz,quat)\n gotoNd(taskLW,goal,'111111',10)\n\ndef move_target_right(xyz,quat):\n goal = goalDef(xyz,quat)\n gotoNd(taskRW,goal,'111111',10)\n\nif __name__ == '__main__':\n \n taskLW = MetaTaskKine6d('lw',robot.dynamic,'arm_left_tool_joint','arm_left_tool_joint')\n taskLW.feature.frame('current')\n \n taskRW = MetaTaskKine6d('rw',robot.dynamic,'arm_right_tool_joint','arm_right_tool_joint')\n taskRW.feature.frame('current')\n \n taskGAZE = createGazeTask(\"camera_joint\")\n \n robot.dynamic.upperJl.recompute(0)\n robot.dynamic.lowerJl.recompute(0)\n taskJL = TaskJointLimits('taskJL')\n plug(robot.dynamic.position,taskJL.position)\n taskJL.controlGain.value = 1\n taskJL.referenceInf.value = robot.dynamic.lowerJl.value\n taskJL.referenceSup.value = robot.dynamic.upperJl.value\n taskJL.dt.value = 1\n taskJL.selec.value = toFlags(range(6,robot.dimension))\n \n taskRL = 
MetaTaskKine6d('contact_rl',robot.dynamic,'right_sole_joint','right_sole_joint')\n taskRL.feature.frame('current')\n taskRL.gain.setConstant(1)\n \n taskLL = MetaTaskKine6d('contact_ll',robot.dynamic,'left_sole_joint','left_sole_joint')\n taskLL.feature.frame('current')\n taskLL.gain.setConstant(1)\n \n weights_diag_flag = 1\n if (weights_diag_flag):\n diag = numpy.array([ 8.00916 , 8.00916 , 8.00916 , 2.96527 , 2.85807 ,\n 1.54464 , 0.469273 , 1.20164 , 1.12471 , 0.517821 ,\n 0.109894 , 0.0615758, 0.469273 , 1.19488 , 1.11749 ,\n 0.509942 , 0.109894 , 0.0615758, 1.31341 , 1.50104 ,\n 0.683272 , 0.639302 , 0.178914 , 0.236697 , 0.120986 ,\n 0.0156722, 0.0213592, 0.692531 , 0.634706 , 0.180416 ,\n 0.263362 , 0.120986 , 0.0156722, 0.0213592, 0.511262 ,\n 0.520098 ])\n diag = tuple(diag)\n else:\n diag = None\n \n taskWEIGHTS = createWeightsTask(diag, dt = 1, gain = 1)\n \n offset = 0.075\n taskCOM = createComIneqTask(dt = 1, referenceInf = (-0.3,-0.3+offset, 0), referenceSup = (0.3,0.3+offset,0))\n \n push(taskJL)\n push(taskCOM)\n solver.addContact(taskLL)\n push(taskLW)\n push(taskGAZE)\n \n listener()\n \n createRosImport('vector3', robot.dynamic.com, 'sot_controller/com_position')\n ComBordersPublisher(taskCOM.referenceInf.value,taskCOM.referenceSup.value)\n \n rospy.spin()","sub_path":"src/sot_ros_api/trash/reemc_equilibrium.py","file_name":"reemc_equilibrium.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"81930637","text":"# %load q08_get_total_extras/build.py\n# Default Imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\nimport numpy as np\n\npath = 'data/ipl_matches_small.csv'\n\ndef get_total_extras():\n data = read_ipl_data_csv(path, dtype = int)\n #print(data)\n extra = 0\n for row in data:\n extra+=row[17]\n #print(extra==88)\n return int(extra)\n \n \n#get_total_extras()\n\n\n","sub_path":"q08_get_total_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"13638595","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 6 16:51:40 2020\n\n@author: bill-\n\"\"\"\n\n'''\nThis script contains all SQL components\n-make a connection to the Yodlee DB\n-insert records to it\n-delete records\n\nwhen it throws error about transaction blocked enter: rollback\nthis reverts old incorrect queries\n\nTHE SCRIPT ONLY CONDUCTS QUERIES; THE OUTPUT IS A TUPLE!\nTHE MODULE IT IS CONNECTED TO WILL CONVERT THE PULLED CONTENT TO A DF\n'''\n\n#establish a connection to the Yodlee DB\nimport psycopg2\nfrom psycopg2 import OperationalError\nfrom psycopg2 import pool\n#import PostgreSQL_access\n#%%\nname = \"postgres\"\nuser = \"envel_yodlee\"\npw = \"XX\"\nhost = \"XX\"\nport = \"5432\"\n#create_connection(name, user, pw, host, port)\n#%%\n#assign connection object as variable + use in further functions\ndef create_connection(db_name, db_user, db_password, db_host, db_port):\n connection = None\n try:\n connection = psycopg2.connect(\n database=db_name,\n user=db_user,\n password=db_password,\n host=db_host,\n port=db_port,\n )\n print(f\"Connection to PostgreSQL {db_name} successful\")\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n return connection\n#%%\ndef execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = 
cursor.fetchall()\n return result\n except OperationalError as e:\n print(f\"The error '{e}' occurred\")\n#%%\n#example query for transaction in MA\n#select_users = \"SELECT * FROM bank_record WHERE state = 'MA'\"\n#generates a tuple output\n#transaction_query = execute_read_query(connection, select_users)\n#%%\n #insert a value into the DB\ndef insert_val(query_string):\n\n '''\n sql_query: PostgreSQL query command. Engulf query in triple letter strings\n \"\"\"example query here\"\"\"\n returns\n ------\n edit_msg\n '''\n# SQL query example\n # create_users = \"\"\"\n # INSERT INTO\n # users (name, age, gender, nationality)\n # VALUES\n # ('James', 25, 'male', 'USA'),\n # ('Leila', 32, 'female', 'France'),\n # ('Brigitte', 35, 'female', 'England'),\n # ('Mike', 40, 'male', 'Denmark'),\n # ('Elizabeth', 21, 'female', 'Canada');\n # \"\"\"\n\n sql_query = query_string\n execute_query(connection, sql_query)\n return 'edit_msg'\n\n\n\n# alternative version\n\ndef insert_val_alt(table, columns, insertion_val):\n\n '''\n table: string. Table in the databank to be amended.\n columns: list. no letterstrings to pass the columns that are to be amended\n insertion_val: tuple. Pass information inside a tuple with value in letter strings\n (only for PostgreSQL); separated by commas\n returns\n ------\n edit_msg\n '''\n# Tuple example\n # tuples = [\n # (\"James\", 25, \"male\", \"USA\"),\n # (\"Leila\", 32, \"female\", \"France\"),\n # (\"Brigitte\", 35, \"female\", \"England\"),\n # (\"Mike\", 40, \"male\", \"Denmark\"),\n # (\"Elizabeth\", 21, \"female\", \"Canada\"),\n # ]\n\n tuple_values = insertion_val\n\n # create the placeholders for the columns that will be fitted with values\n tuple_records = \", \".join([\"%s\"] * len(tuple_values))\n insert_query = (\n f\"INSERT INTO {table} ({columns}) VALUES {tuple_records};\"\n )\n\n connection.autocommit = True\n cursor = connection.cursor()\n cursor.execute(insert_query, tuple_values)\n return 'edit_msg'\n#%%\ndef delete_val():\n #delete comments\n delete_comment = \"DELETE FROM comments WHERE id = 2\"\n execute_query(connection, delete_comment)\n return 'values deleted'\n#%%\n'''\nIMPORTANT: This closes all connections even those that are in use by applications!\n Use with caution!\n'''\n#close a single connection pool\ndef close_connection():\n pool.SimpleConnectionPool.closeall\n\n\n if __name__ == \"__main__\":\n import sys\n close_connection(int(sys.argv[1]))\n","sub_path":"ml_code/model_data/SQL_connection.py","file_name":"SQL_connection.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"222991105","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 24 18:45:07 2017\r\n\r\n@author: parth\r\n\"\"\"\r\n\r\nimport csv\r\nimport sys\r\nimport ast\r\nfrom collections import Counter\r\nimport copy\r\nimport random\r\nfrom math import log\r\n\r\n#function for splitting dataset based on an attribute\r\ndef divideset(rows, column, value):\r\n split_function = None\r\n if isinstance(value, int):\r\n split_function = lambda row: row[column] >= value\r\n else:\r\n split_function = lambda row: row[column] == value\r\n\r\n set1 = [row for row in rows if split_function(row)]\r\n set2 = [row for row in rows if not split_function(row)]\r\n return (set1, set2)\r\n\r\n#count number of values based on class attribute and return a dictionary\r\ndef uniquecounts(rows):\r\n results = {}\r\n for row in rows:\r\n # The target variable is the last column\r\n r 
= row[len(row) - 1]\r\n if r not in results: results[r] = 0\r\n results[r] += 1\r\n return results\r\n\r\n\r\n#function to calculate the variance impurity of data set\r\ndef varianceImpurity(rows):\r\n if len(rows) == 0: return 0\r\n results = uniquecounts(rows)\r\n total_samples = len(rows)\r\n variance_impurity = (results['0'] * results['1']) / (total_samples ** 2)\r\n return variance_impurity\r\n\r\n\r\n#function to calculate entropy of data set\r\ndef entropy(rows):\r\n log_base_2 = lambda x: log(x) / log(2)\r\n results = uniquecounts(rows)\r\n entr = 0.0\r\n for r in results.keys():\r\n p = float(results[r]) / len(rows)\r\n entr = entr - p * log_base_2(p)\r\n return entr\r\n\r\n\r\n#create class for a node of tree.\r\n#A leaf node will have results as unique counts for each class variable\r\n#A non leaf node will have subsequent branches as true branch and false branch\r\nclass decisionnode:\r\n def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):\r\n self.col = col\r\n self.value = value\r\n self.results = results\r\n self.tb = tb\r\n self.fb = fb\r\n\r\n\r\n#function used to split data set based on entropy(default) or variance impurity after calculating gain\r\ndef buildtree(rows, scoref=entropy):\r\n if len(rows) == 0: return decisionnode()\r\n current_score = scoref(rows)\r\n\r\n best_gain = 0.0\r\n best_criteria = None\r\n best_sets = None\r\n\r\n #the last column is the target attribute\r\n column_count = len(rows[0]) - 1\r\n for col in range(0, column_count):\r\n #divide data sets based on each attribute and calculate gain based on column. Select the attribute which results in best gain\r\n global column_values\r\n column_values = {}\r\n for row in rows:\r\n column_values[row[col]] = 1\r\n for value in column_values.keys():\r\n (set1, set2) = divideset(rows, col, value)\r\n\r\n # Calculate gain based on entropy(Information gain) or variance impurity based on requirement\r\n p = float(len(set1)) / len(rows)\r\n gain = current_score - p * scoref(set1) - (1 - p) * scoref(set2)\r\n if gain > best_gain and len(set1) > 0 and len(set2) > 0: # set must not be empty\r\n best_gain = gain\r\n best_criteria = (col, value)\r\n best_sets = (set1, set2)\r\n\r\n # Create the sub branches\r\n if best_gain > 0:\r\n trueBranch = buildtree(best_sets[0])\r\n falseBranch = buildtree(best_sets[1])\r\n return decisionnode(col=best_criteria[0], value=best_criteria[1],\r\n tb=trueBranch, fb=falseBranch)\r\n else:\r\n return decisionnode(results=uniquecounts(rows))\r\n\r\n\r\n#print tree in required format\r\ndef printtree(tree, header_data, indent):\r\n if tree.results != None:\r\n for key in tree.results:\r\n print(str(key))\r\n else:\r\n print(\"\")\r\n print(indent + str(header_data[tree.col]) + ' = ' + str(tree.value) + ' : ', end=\"\")\r\n printtree(tree.tb, header_data, indent + ' |')\r\n\r\n print(indent + str(header_data[tree.col]) + ' = ' + str(int(tree.value) ^ 1) + ' : ', end=\"\")\r\n printtree(tree.fb, header_data, indent + ' |')\r\n\r\n\r\n#function to calculate the accuracy\r\ndef tree_accuracy(rows, tree):\r\n correct_predictions = 0\r\n for row in rows:\r\n pred_val = classify(row, tree)\r\n if row[-1] == pred_val:\r\n correct_predictions += 1\r\n accuracy = 100 * correct_predictions / len(rows)\r\n return accuracy\r\n\r\n\r\n#function to classify data set based on a learned tree\r\ndef classify(observation, tree):\r\n if tree.results != None:\r\n for key in tree.results:\r\n predicted_value = key\r\n return predicted_value\r\n else:\r\n v = observation[tree.col]\r\n 
if isinstance(v, int) or isinstance(v, float):\r\n if v >= tree.value:\r\n branch = tree.tb\r\n else:\r\n branch = tree.fb\r\n else:\r\n if v == tree.value:\r\n branch = tree.tb\r\n else:\r\n branch = tree.fb\r\n predicted_value = classify(observation, branch)\r\n return predicted_value\r\n\r\n\r\n#function to count total number of non leaf nodes and label them according to number\r\ndef list_nodes(nodes, tree, count):\r\n if tree.results != None:\r\n return nodes, count\r\n count += 1\r\n nodes[count] = tree\r\n (nodes, count) = list_nodes(nodes, tree.tb, count)\r\n (nodes, count) = list_nodes(nodes, tree.fb, count)\r\n return nodes, count\r\n\r\n\r\ndef count_class_occurence(tree, class_occurence):\r\n if tree.results != None:\r\n for key in tree.results:\r\n class_occurence[key] += tree.results[key]\r\n return class_occurence\r\n\r\n left_branch_occurence = count_class_occurence(tree.fb, class_occurence)\r\n right_branch_occurence = count_class_occurence(tree.tb, left_branch_occurence)\r\n\r\n return right_branch_occurence\r\n\r\n\r\n#replace subtree according to the pruning algorithm\r\ndef findAndReplaceSubtree(tree_copy, subtree_to_replace, subtree_to_replace_with):\r\n if (tree_copy.results != None):\r\n return tree_copy\r\n\r\n if (tree_copy == subtree_to_replace):\r\n tree_copy = subtree_to_replace_with\r\n return tree_copy\r\n\r\n tree_copy.fb = findAndReplaceSubtree(tree_copy.fb, subtree_to_replace, subtree_to_replace_with)\r\n tree_copy.tb = findAndReplaceSubtree(tree_copy.tb, subtree_to_replace, subtree_to_replace_with)\r\n\r\n return tree_copy\r\n\r\n#function to prune tree\r\ndef prune_tree(tree, l, k, data):\r\n tree_best = tree\r\n best_accuracy = tree_accuracy(data, tree)\r\n tree_copy = None\r\n for i in range(1, l):\r\n m = random.randint(1, k)\r\n tree_copy = copy.deepcopy(tree)\r\n for j in range(1, m):\r\n (nodes, initial_count) = list_nodes({}, tree_copy, 0)\r\n if (initial_count > 0):\r\n p = random.randint(1, initial_count)\r\n # replcae subtree rooted in p\r\n subtree_p = nodes[p]\r\n\r\n # count examples with class variable as 0 and 1 in the subtree\r\n class_occurence = {'0': 0, '1': 0}\r\n count = count_class_occurence(subtree_p, class_occurence)\r\n # replace subtree with leaf node depending if zero or one count is greater\r\n if count['0'] > count['1']:\r\n count['0'] = count['0'] + count['1']\r\n count.pop('1')\r\n subtree_p = decisionnode(results=count)\r\n else:\r\n count['1'] = count['0'] + count['1']\r\n count.pop('0')\r\n subtree_p = decisionnode(results=count)\r\n\r\n tree_copy = findAndReplaceSubtree(tree_copy, nodes[p], subtree_p)\r\n\r\n # calculate accuracy based on pruned tree\r\n curr_accuracy = tree_accuracy(data, tree_copy)\r\n if (curr_accuracy > best_accuracy):\r\n best_accuracy = curr_accuracy\r\n tree_best = tree_copy\r\n return tree_best, best_accuracy\r\n\r\n\r\n#output tree to file Results.txt\r\ndef writeTreetoFile(tree, header_data, indent, text_file):\r\n if tree.results != None:\r\n for key in tree.results:\r\n text_file.write(\"%s\\n\" % str(key))\r\n else:\r\n text_file.write(\"%s\\n\" % \"\")\r\n val1 = indent + str(header_data[tree.col]) + ' = ' + str(tree.value) + ' : '\r\n text_file.write(\"%s\" % val1)\r\n writeTreetoFile(tree.tb, header_data, indent + ' |', text_file)\r\n\r\n val2 = indent + str(header_data[tree.col]) + ' = ' + str(int(tree.value) ^ 1) + ' : '\r\n text_file.write(\"%s\" % val2)\r\n writeTreetoFile(tree.fb, header_data, indent + ' |', text_file)\r\n\r\n\r\ndef main():\r\n args = str(sys.argv)\r\n args = 
ast.literal_eval(args)\r\n if (len(args) < 6):\r\n print (\"Input arguments should be 6. Please refer the Readme file regarding input format.\")\r\n elif (args[3][-4:] != \".csv\" or args[4][-4:] != \".csv\" or args[5][-4:] != \".csv\"):\r\n print(args[2])\r\n print (\"Your training, validation and test file must be a .csv!\")\r\n else:\r\n l = int(args[1])\r\n k = int(args[2])\r\n train_filename = str(args[3])\r\n validation_filename = str(args[4])\r\n test_filename = str(args[5])\r\n to_print = str(args[6])\r\n\r\n with open(train_filename, newline='', encoding='utf_8') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n header_data = next(spamreader)\r\n train_training_data = list(spamreader)\r\n\r\n with open(validation_filename, newline='', encoding='utf_8') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n validation_training_data = list(spamreader)\r\n\r\n with open(test_filename, newline='', encoding='utf_8') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n test_training_data = list(spamreader)\r\n\r\n using_IG_str = \"----- Using Information Gain heuristic -----\"\r\n using_VI_str = \"----- Using Variance Impurity heuristic -----\"\r\n\r\n l_arr = [10, 15, 20, 25, 30, 35, 40, 45, 50, 55]\r\n k_arr = [15, 20, 25, 30, 35, 40, 45, 50, 55, 60]\r\n\r\n # build tree using information gain heuristic\r\n learned_tree_IG = buildtree(train_training_data, scoref=entropy)\r\n print(using_IG_str)\r\n if(to_print.lower() == \"yes\"):\r\n print(\"Printing the learned tree : \")\r\n printtree(learned_tree_IG, header_data, '')\r\n\r\n train_accuracy = tree_accuracy(train_training_data, learned_tree_IG)\r\n print(\"Training data accuracy : \", train_accuracy)\r\n\r\n validation_accuracy = tree_accuracy(validation_training_data, learned_tree_IG)\r\n print(\"Validation data accuracy : \", validation_accuracy)\r\n\r\n test_accuracy = tree_accuracy(test_training_data, learned_tree_IG)\r\n print(\"Test data accuracy : \", test_accuracy)\r\n\r\n (pruned_best_tree_validation, pruned_best_accuracy_validation) = prune_tree(learned_tree_IG, l, k,\r\n validation_training_data)\r\n\r\n print(\"Validation data accuracy after pruning : \", pruned_best_accuracy_validation)\r\n\r\n (pruned_best_tree_test, pruned_best_accuracy_test) = prune_tree(learned_tree_IG, l, k, test_training_data)\r\n if (to_print.lower() == \"yes\"):\r\n print(\"Printing the pruned tree using on test data : \")\r\n printtree(pruned_best_tree_test, header_data, '')\r\n\r\n print(\"Test data accuracy after pruning : \", pruned_best_accuracy_test)\r\n\r\n #check accuracies of test data with 10 combinations of l and k\r\n print(\"Calculating accuracies of test data with 10 combinations of l and k :\")\r\n for l_val, k_val in zip(l_arr, k_arr):\r\n (pruned_best_tree_test, pruned_best_accuracy_test) = prune_tree(learned_tree_IG, l_val, k_val,\r\n test_training_data)\r\n print(\"Test data accuracy after pruning with l = \", l_val,\" and k = \" , k_val,\r\n \" : \", pruned_best_accuracy_test)\r\n\r\n\r\n # build tree using variance impurity heuristic\r\n learned_tree_VI = buildtree(train_training_data, scoref=varianceImpurity)\r\n print(using_VI_str)\r\n if (to_print.lower() == \"yes\"):\r\n print(\"Printing the learned tree : \")\r\n printtree(learned_tree_VI, header_data, '')\r\n\r\n train_accuracy_VI = tree_accuracy(train_training_data, learned_tree_VI)\r\n print(\"Training data accuracy : \", train_accuracy_VI)\r\n\r\n validation_accuracy_VI = 
tree_accuracy(validation_training_data, learned_tree_VI)\r\n print(\"Validation data accuracy : \", validation_accuracy_VI)\r\n\r\n test_accuracy_VI = tree_accuracy(test_training_data, learned_tree_VI)\r\n print(\"Test data accuracy : \", test_accuracy_VI)\r\n\r\n (pruned_best_tree_validation_VI, pruned_best_accuracy_validation_VI) = prune_tree(learned_tree_VI, l, k, validation_training_data)\r\n print(\"Validation data accuracy after pruning: \", pruned_best_accuracy_validation_VI)\r\n\r\n (pruned_best_tree_test_VI, pruned_best_accuracy_test_VI) = prune_tree(learned_tree_VI, l, k, test_training_data)\r\n if (to_print.lower() == \"yes\"):\r\n print(\"Printing the pruned tree using on test data : \")\r\n printtree(pruned_best_tree_test_VI, header_data, '')\r\n\r\n print(\"Test data accuracy after pruning : \", pruned_best_accuracy_test_VI)\r\n\r\n # check accuracies of test data with 10 combinations of l and k\r\n print(\"Calculating accuracies of test data with 10 combinations of l and k :\")\r\n for l_val, k_val in zip(l_arr, k_arr):\r\n (pruned_best_tree_test_VI, pruned_best_accuracy_test_VI) = prune_tree(learned_tree_VI, l_val, k_val,\r\n test_training_data)\r\n print(\"Test data accuracy after pruning with l = \", l_val, \" and k = \", k_val,\r\n \" : \", pruned_best_accuracy_test_VI)\r\n\r\n #write results to a file using Information gain heuristic\r\n with open(\"Results.txt\", \"w\") as text_file:\r\n text_file.write(\"%s\\n\\n\" % str(using_IG_str))\r\n\r\n if (to_print.lower() == \"yes\"):\r\n text_file.write(\"%s\\n\\n\" % \"Printing the learned tree : \")\r\n writeTreetoFile(learned_tree_IG, header_data, '', text_file)\r\n\r\n train_accuracy_str = \"Training data accuracy : \", train_accuracy\r\n text_file.write(\"%s\\n\" % str(train_accuracy_str))\r\n\r\n validation_accuracy_str = \"Validation data accuracy : \", validation_accuracy\r\n text_file.write(\"%s\\n\" % str(validation_accuracy_str))\r\n\r\n test_accuracy_str = \"Test data accuracy : \", test_accuracy\r\n text_file.write(\"%s\\n\" % str(test_accuracy_str))\r\n\r\n if (to_print.lower() == \"yes\"):\r\n text_file.write(\"%s\\n\" % \"Printing the pruned tree using on test data : \")\r\n writeTreetoFile(pruned_best_tree_test, header_data, '', text_file)\r\n\r\n pruned_best_accuracy_validation_str = \"Validation data accuracy after pruning : \", pruned_best_accuracy_validation\r\n text_file.write(\"%s\\n\" % str(pruned_best_accuracy_validation_str))\r\n\r\n pruned_best_accuracy_test_str = \"Test data accuracy after pruning : \", pruned_best_accuracy_test\r\n text_file.write(\"%s\\n\" % str(pruned_best_accuracy_test_str))\r\n\r\n # check accuracies of test data with 10 combinations of l and k\r\n text_file.write(\"%s\\n\\n\" % \"Calculating accuracies of test data with 10 combinations of l and k :\")\r\n for l_val, k_val in zip(l_arr, k_arr):\r\n (pruned_best_tree_test, pruned_best_accuracy_test) = prune_tree(learned_tree_IG, l_val, k_val,\r\n test_training_data)\r\n test_accuracy_str_i = \"Test data accuracy after pruning with l = \", l_val, \" and k = \", k_val, \\\r\n \" : \", pruned_best_accuracy_test\r\n text_file.write(\"%s\\n\" %str(test_accuracy_str_i))\r\n\r\n #write results to a file using Variance Impurity heuristic\r\n text_file.write(\"%s\\n\\n\\n\\n\")\r\n text_file.write(\"%s\\n\\n\" % str(using_VI_str))\r\n if (to_print.lower() == \"yes\"):\r\n text_file.write(\"%s\\n\\n\" % \"Printing the learned tree : \")\r\n writeTreetoFile(learned_tree_VI, header_data, '', text_file)\r\n\r\n train_accuracy_str_VI = 
\"Training data accuracy : \", train_accuracy_VI\r\n text_file.write(\"%s\\n\" % str(train_accuracy_str_VI))\r\n\r\n validation_accuracy_str_VI = \"Validation data accuracy : \", validation_accuracy_VI\r\n text_file.write(\"%s\\n\" % str(validation_accuracy_str_VI))\r\n\r\n test_accuracy_str_VI = \"Test data accuracy : \", test_accuracy_VI\r\n text_file.write(\"%s\\n\" % str(test_accuracy_str_VI))\r\n\r\n if (to_print.lower() == \"yes\"):\r\n text_file.write(\"%s\\n\" % \"Printing the pruned tree using on test data : \")\r\n writeTreetoFile(pruned_best_tree_test_VI, header_data, '', text_file)\r\n\r\n pruned_best_accuracy_validation_str_VI = \"Validation data accuracy after pruning : \", pruned_best_accuracy_validation_VI\r\n text_file.write(\"%s\\n\" % str(pruned_best_accuracy_validation_str_VI))\r\n\r\n pruned_best_accuracy_test_str_VI = \"Test data accuracy after pruning : \", pruned_best_accuracy_test_VI\r\n text_file.write(\"%s\\n\" % str(pruned_best_accuracy_test_str_VI))\r\n\r\n # check accuracies of test data with 10 combinations of l and k\r\n text_file.write(\"%s\\n\\n\" % \"Calculating accuracies of test data with 10 combinations of l and k :\")\r\n for l_val, k_val in zip(l_arr, k_arr):\r\n (pruned_best_tree_test_VI, pruned_best_accuracy_test_VI) = prune_tree(learned_tree_VI, l_val, k_val,\r\n test_training_data)\r\n test_accuracy_str_VI_i = \"Test data accuracy after pruning with l = \", l_val, \" and k = \", k_val, \\\r\n \" : \", pruned_best_accuracy_test_VI\r\n text_file.write(\"%s\\n\" %str(test_accuracy_str_VI_i))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Decision_Tree.py","file_name":"Decision_Tree.py","file_ext":"py","file_size_in_byte":18929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"616186482","text":"#Harry Pham 79422112\n\nimport decimal\n\nrates = []\n\nclass Conversion_Rate:\n def __init__(self, currency_code:str, decimal_places:decimal.Decimal, value:\n decimal.Decimal)->None:\n self._currency_code = currency_code\n self._decimal_places = decimal_places\n self._value = value\n def __repr__(self):\n return 'Conversion_Rate(Currency Code: {}, Decimal Places: {}, Value {})'.format(\n self._currency_code, self._decimal_places, self._value)\n def currency_code(self)->str:\n return self._currency_code\n def decimal_places(self)->decimal.Decimal:\n return self._decimal_places\n def value(self)->str:\n return self._value\n\nclass Money:\n def __init__(self, currency_code, amount):\n currency_exists = False\n for rate in rates:\n if rate.currency_code() == currency_code:\n currency_exists = True\n self._currency_rate = rate.value()\n self._decimal_places = rate.decimal_places()\n break\n if currency_exists == False:\n raise ValueError('This currency does not exist.')\n self._currency_code = currency_code\n self._amount = amount\n def __add__(self, money):\n if money.currency_code() == self._currency_code:\n return Money(self._currency_code,\n (self._amount + money.amount()).quantize(\n self._decimal_places))\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return Money(self._currency_code,\n (self._amount + other_amount_value).quantize(\n self._decimal_places))\n def __sub__(self, money):\n if money.currency_code() == self._currency_code:\n return Money(self._currency_code,\n (self._amount - money.amount()).quantize(\n self._decimal_places))\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return 
Money(self._currency_code,\n (self._amount - other_amount_value).quantize(\n self._decimal_places))\n def __lt__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount < money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount < other_amount_value\n def __gt__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount > money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount > other_amount_value\n def __le__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount <= money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount <= other_amount_value\n def __ge__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount >= money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount >= other_amount_value\n def __eq__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount == money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount == other_amount_value\n def __ne__(self, money):\n if money.currency_code() == self._currency_code:\n return self._amount != money.amount()\n else:\n other_amount_value = money.convert_currency_value(self._currency_code)\n return self._amount != other_amount_value\n def __repr__(self):\n return 'Money(Currency Code: {}, Amount: {})'.format(self._currency_code,\n self._amount)\n def convert_currency_value(self, currency_code):\n for rate in rates:\n if rate.currency_code() == currency_code:\n other_currency_rate = rate.value()\n other_currency_decimal_places = rate.decimal_places()\n break\n converted_amount_value = ((self._amount / self._currency_rate)\n * other_currency_rate).quantize(\n other_currency_decimal_places)\n return converted_amount_value\n def currency_code(self):\n return self._currency_code\n def amount(self):\n return self._amount\n def formatted_print(self):\n return '{} {}'.format(self._currency_code, self._amount)\n\nclass Credit_Card:\n def __init__(self, card_number, balance, limit):\n self._card_number = card_number\n self._balance = balance\n self._limit = limit\n def __repr__(self):\n return 'Credit_Card(Card Number: {}, Balance: {}, Limit: {})'.format(\n self._card_number,\n self._balance,\n self._limit)\n def card_number(self):\n return self._card_number\n def balance(self):\n return self._balance\n def limit(self):\n return self._limit\n def change_balance(self, new_balance):\n self._balance = new_balance\n\ndef card_exists(card_list:list, card_number):\n for card in card_list:\n if card.card_number() == card_number:\n return True, card\n return False, None\ndef ui():\n cards = []\n try:\n with open('currency.txt', 'r') as currency:\n for line in currency:\n rate = line.split()\n rates.append(Conversion_Rate(rate[0],\n decimal.Decimal('10')**-int(rate[1]),\n decimal.Decimal(rate[2])))\n except FileNotFoundError:\n print('No currency data Found.')\n input('Press any key to exit program.')\n return\n else:\n print('Loading currency data...')\n print('Currency data loaded.')\n try:\n with open('data.txt', 'r') as data:\n for line in data:\n card = line.split()\n cards.append(Credit_Card(int(card[0]), Money(card[1],\n decimal.Decimal(card[2])),\n Money(card[1], decimal.Decimal(card[3]))))\n except 
FileNotFoundError:\n print('No previous card data found.')\n else:\n print('Loading previous card data...')\n print('Card data loaded.')\n while True:\n response = input().split()\n if len(response)>0:\n command = response[0]\n else:\n command = response\n if command == \"ISSUE\":\n currency_code = response[1]\n try:\n limit = Money(currency_code, decimal.Decimal(response[2]))\n balance = Money(currency_code, decimal.Decimal('0'))\n if limit.amount() > 0:\n taken_numbers=[]\n for i in range(len(cards)):\n taken_numbers.append(cards[i].card_number())\n card_number = 10000\n while card_number in taken_numbers:\n card_number += 1\n cards.append(Credit_Card(card_number,\n balance, limit))\n print('ISSUED {}'.format(card_number))\n else:\n print('NEGATIVE_LIMIT')\n except ValueError:\n print('NO_SUCH_CURRENCY')\n if command == \"CANCEL\":\n card_number = int(response[1])\n exists, canceled_card = card_exists(cards, card_number)\n if exists == True:\n if canceled_card.balance().amount() == 0:\n cards.remove(canceled_card)\n print('CANCELED {}'.format(card_number))\n else:\n print('NONZERO_BALANCE')\n else:\n print('NO_SUCH_CARD')\n if command == \"PURCHASE\":\n card_number = int(response[1])\n exists, card = card_exists(cards, card_number)\n if exists == True:\n try:\n amount = Money(response[2], decimal.Decimal(response[3]))\n except ValueError:\n print('NO_SUCH_CURRENCY')\n if amount.amount() > 0:\n new_balance = card.balance() + amount\n if new_balance <= card.limit():\n card.change_balance(new_balance)\n print('AUTHORIZED {}'.format(card.balance().formatted_print()))\n else:\n print('OVER_LIMIT')\n else:\n print('NONPOSITIVE_AMOUNT')\n else:\n print('NO_SUCH_CARD')\n if command == \"PAYMENT\":\n card_number = int(response[1])\n exists, card = card_exists(cards, card_number)\n if exists == True:\n try:\n amount = Money(response[2], decimal.Decimal(response[3]))\n except ValueError:\n print('NO_SUCH_CURRENCY')\n if amount.amount() > 0:\n new_balance = card.balance() - amount\n if new_balance.amount() < 0:\n new_balance = Money(card.balance().currency_code(),\n decimal.Decimal('0'))\n card.change_balance(new_balance)\n else:\n card.change_balance(new_balance)\n print('PAID {}'.format(card.balance().formatted_print()))\n else:\n print('NONPOSITIVE_AMOUNT')\n else:\n print('NO_SUCH_CARD')\n if command == \"EXIT\":\n with open('data.txt', 'w') as data:\n for card in cards:\n data.write('{} {} {} {}\\n'.format(card.card_number(),\n card.balance().currency_code(),\n card.balance().amount(),\n card.limit().amount()))\n print('GOODBYE')\n input('Press any key to exit program.')\n return\nif __name__ == '__main__':\n print('Welcome! Please enter a command.')\n ui()\n","sub_path":"project1/Project1_Easy_Money.py","file_name":"Project1_Easy_Money.py","file_ext":"py","file_size_in_byte":10702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"544030891","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass AzNsActionGroup(Model):\n \"\"\"Azure action group.\n\n :param action_group: Azure Action Group reference.\n :type action_group: list[str]\n :param email_subject: Custom subject override for all email ids in Azure\n action group\n :type email_subject: str\n :param custom_webhook_payload: Custom payload to be sent for all webhook\n URI in Azure action group\n :type custom_webhook_payload: str\n \"\"\"\n\n _attribute_map = {\n 'action_group': {'key': 'actionGroup', 'type': '[str]'},\n 'email_subject': {'key': 'emailSubject', 'type': 'str'},\n 'custom_webhook_payload': {'key': 'customWebhookPayload', 'type': 'str'},\n }\n\n def __init__(self, *, action_group=None, email_subject: str=None, custom_webhook_payload: str=None, **kwargs) -> None:\n super(AzNsActionGroup, self).__init__(**kwargs)\n self.action_group = action_group\n self.email_subject = email_subject\n self.custom_webhook_payload = custom_webhook_payload\n","sub_path":"azure-mgmt-monitor/azure/mgmt/monitor/models/az_ns_action_group_py3.py","file_name":"az_ns_action_group_py3.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"434289228","text":"# Errores al crear una variable\n# libreria math\n'''\nvar 1 = 0\n1var = 0\nvar-1 = 0\n@var = 0\n'''\n\n'''\nvar_x1 = var_x2 = var_x3 = 150\nprint(var_x2)\n\nvar_y1, var_y2, var_y3 = 12,23,40.5\nprint(var_y3)\n'''\n'''\nOperaciones Aritmeticas\n\n 4 + suma\n 4 - resta\n 3 * multiplicación\n 3 / division\n // division entera\n 2 ** potencia\n 1 () parentesis\n'''\n\n'''\nvar_1 = 8\nvar_2 = 6\n\ncuadrado = var_1 ** 2\ncubico = var_2 ** 3\n\nprint(cuadrado)\nprint(cubico)\n\nr = var_1 ** (1/2)\nprint(r)\n'''\n\n\n\nnota1 = 3.4\nnota2 = 2\nnota3 = 2.9\nnota4 = 3\nnota5 = 4.2\n\npromedio = ((nota1 + nota2 + nota3 + nota4 + nota5) / 5)\nprint(\"el promedio de las notas es : \" + str(promedio))\n\npromedio_fsum = (sum((nota1,nota2,nota3,nota4,nota5)) / 5)\nprint(type(promedio_fsum))\nprint(type((nota1,nota2,nota3,nota4,nota5)))\n\nnotaMax = max(nota1,nota2,nota3,nota4,nota5)\nprint(\"la nota maxima es : \" + str(notaMax))\n\nnotaMin = min(nota1,nota2,nota3,nota4,nota5)\nprint(\"la nota minima es : \" + str(notaMin)) \n\n#pi = 3.715\n#pi = round(pi,1)\n#print(pi)\n\n'''\ntexto = \"ciclo 1 fundamentos de programacion\"\nprint(len(texto))\n\nx = range(100)\nprint(len(x))\n\ny = range(20,100)\nprint(len(y))\n'''\n\n","sub_path":"Mision-TIC-GRUPO-09-master(16-06-21)/Semana 1/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528355536","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Tensorflow\nimport tensorflow as tf\nfrom tensorflow.python.keras.callbacks import TensorBoard\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# session = tf.Session(config=config)\n\n# Keras\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n# Other\nfrom utils import read_data\nfrom time import time\nfrom models import conv_lstm\nimport numpy as np\nfrom 
sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\ndf = read_data('data_cleaned.csv', first=False)\n\nprint(df.head(10))\n\ntk = Tokenizer()\ntk.fit_on_texts(df['text'])\n\nsequences = tk.texts_to_sequences(df['text'])\ndata = pad_sequences(sequences, maxlen=100)\nprint(data.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(data, df['stars'], test_size=0.25, random_state=1)\n\n# Build neural network with LSTM and CNN\nvocabulary_size = len(tk.word_counts.keys())+1\nmax_words = 100\nembedding_size = 128\nmodel = lstm(vocabulary_size, embedding_size, max_words)\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\n# Fit and evaluate\nhist = model.fit(X_train, y_train, validation_split=0.3, batch_size=1024, epochs=3, callbacks=[tensorboard])\naccuracy = model.evaluate(X_test, y_test, verbose=0)\nprint(\"accuracy : \", accuracy[1])\n\n# Save model and weight\nmodel.save_weights('trained_model_weights.h5')\nwith open('architecture.json', 'w') as f:\n f.write(model.to_json())\n\npred = model.predict(X_test)\ny_pred = []\nfor x in pred:\n if x[0] >= 0.6:\n y_pred.append(1)\n else:\n y_pred.append(0)\n\nmatrix = confusion_matrix(y_test, y_pred)\nprint(matrix)\n\n\n\n\n\n","sub_path":"train_lstm.py","file_name":"train_lstm.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"92497946","text":"from bs4 import BeautifulSoup\nimport urllib.request as request\nimport os.path\n\nurl = \"http://www.city.yokohama.lg.jp/somu/org/kikikanri/data/shelter.xml\"\nsavename = \"shelter.xml\"\n\nif not os.path.exists(savename):\n request.urlretrieve(url, savename)\n\nxml = open(savename, \"r\", encoding=\"utf-8\")\nsoup = BeautifulSoup(xml, \"html.parser\")\n\ninfo = {}\n\n\nfor i in soup.find_all(\"shelter\"):\n name = i.find('name').string\n ward = i.find('ward').string\n addr = i.find('address').string\n note = i.find('notes').string\n\n if not (ward in info):\n info[ward] = []\n info[ward].append(name)\n\n\nfor ward in info.keys():\n print(\"+\", ward)\n\n for name in info[ward]:\n print(\"l - \", name)","sub_path":"chapter3/xml-bou.py","file_name":"xml-bou.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"275245865","text":"from RNN.model_RNN import Model\nfrom no_RNN.data_CNN import DATA\nfrom RNN.RNN_features import features\nfrom parser_2 import arg_parse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\n\nif __name__ == '__main__':\n args = arg_parse()\n\n\n def single_batch_padding(train_X_batch, test=False):\n if test == True:\n padded_sequence = nn.utils.rnn.pad_sequence(train_X_batch)\n length = [len(train_X_batch)]\n else:\n length = [len(x) for x in train_X_batch]\n perm_index = np.argsort(length)[::-1]\n\n # sort by sequence length\n train_X_batch = [train_X_batch[i] for i in perm_index]\n length = [len(x) for x in train_X_batch]\n padded_sequence = nn.utils.rnn.pad_sequence(train_X_batch)\n return padded_sequence, length\n\n model_state = 'RNN_model.pth.tar'\n my_model = Model(2048).eval().cuda()\n my_model.load_state_dict(torch.load(model_state))\n\n valid_videos = torch.utils.data.DataLoader(DATA(args.dir_vid, args.dir_lab),\n batch_size=1,\n num_workers=1,\n shuffle=False)\n\n print('Obtaining Features')\n feat = features(valid_videos)\n\n print('Obtaining Output')\n eval_len = len(feat)\n out_labs = []\n 
with torch.no_grad():\n for i in range(0, eval_len):\n vid_e = feat[i]\n vid_patch, length = single_batch_padding(vid_e, test=True)\n vid_patch = vid_patch.squeeze().cuda()\n vid_patch = vid_patch.permute(1, 0).unsqueeze(1)\n output, _ = my_model(vid_patch, length)\n output = output.cpu()\n out_label = torch.argmax(output, len(output)).cpu().data\n out_labs = out_labs+(out_label.tolist())\n\n\n print('saving data in file')\n file = open(os.path.join(args.save_dir, 'p2_result.txt'), \"w\")\n for i in range(len(out_labs)):\n if i == len(out_labs)-1:\n file.write(str(out_labs[i]))\n else:\n file.write(str(out_labs[i]) + '\\n')\n file.close()\n\n print('done')\n","sub_path":"RNN_main.py","file_name":"RNN_main.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56568055","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 15 15:09:05 2020\r\n\r\n@author: tanaj\r\nmodule_1 is the decay within issuer\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom math import exp, log\r\n\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nclass SharedDecayCorrData:\r\n \r\n col = ['asof', 'cluster_id', 'box_id', 'symbol', 'issuer',\r\n 'sector_abbr', 'rating', 'ttm', 'is_traded_today', 'is_pivot',\r\n 'trade_date', 'prev_bd', 'prev_trade_static_spread',\r\n 'diff_static_spread', 'adj_spread_pre', 'corr_factor',\r\n 'adj_spread', 'status']\r\n \r\n def __init__(self, date, input_data, clustering_data, lamb):\r\n \r\n # general data\r\n self.date = date\r\n self.input = input_data\r\n self.lamb = lamb\r\n self.cluster_id = np.sort(clustering_data['cluster_id'].unique())\r\n self.box_id = np.sort(clustering_data['box_id'].unique())\r\n self.issuer = np.sort(clustering_data['issuer'].unique())\r\n self.col = ['asof', 'cluster_id', 'box_id', 'symbol', 'issuer',\r\n 'sector_abbr', 'rating', 'ttm', 'is_traded_today', 'is_traded_within_5bd',\r\n 'is_pivot', 'trade_date', 'prev_bd', 'prev_trade_static_spread',\r\n 'diff_static_spread', 'adj_spread_pre', 'corr_factor',\r\n 'adj_spread', 'status']\r\n \r\n # module 1\r\n self.trading_box_id_issuer_dict = None\r\n self.filtered_input = pd.DataFrame()\r\n self.decay_result = pd.DataFrame()\r\n \r\n # module 2\r\n self.pivot_for_trading_box = None\r\n self.box_id_trading_issuer_pair = None\r\n self.box_id_non_trading_issuer = None\r\n \r\n # module 3\r\n self.nontrading_box_id = None\r\n \r\n \r\nclass Filter(SharedDecayCorrData):\r\n \r\n def __init__(self, date, input_data, clustering_data, lamb):\r\n \r\n super().__init__(date, input_data, clustering_data, lamb)\r\n self.filtered_input = pd.DataFrame()\r\n self.trading_box_id_issuer_dict = {}\r\n \r\n def get_trading_box_id_issuer_dict(self):\r\n \r\n trading_box_id_issuer_dict = {}\r\n df = self.input\r\n box_id = self.box_id\r\n for b_id in box_id:\r\n a_filter = (df['box_id']==b_id) \\\r\n & (df['is_traded_today']==True)\r\n temp = list(set(df[a_filter]['box_id'].values))\r\n temp_2 = df[df['box_id'].isin(temp)]\r\n temp_3 = list(set(temp_2['issuer'].values))\r\n# print(temp)\r\n if len(temp) > 0: trading_box_id_issuer_dict[b_id] = temp_3\r\n self.trading_box_id_issuer_dict = trading_box_id_issuer_dict\r\n\r\n def filter_decay_data(self):\r\n\r\n df = self.input\r\n temp = self.filtered_input\r\n a_dict = self.trading_box_id_issuer_dict\r\n for key in a_dict.keys():\r\n for value in a_dict[key]:\r\n a_filter = (df['box_id']==key) & (df['issuer']==value)\r\n df_2 = df[a_filter]\r\n temp = pd.concat([temp, 
df_2])\r\n temp = temp.sort_values(['box_id','ttm']) # This is a correct sorting. #A very poor design\r\n self.filtered_input = temp\r\n \r\n def fill_in_missing_fields(self):\r\n \r\n temp = self.filtered_input\r\n temp_1 = list(temp.columns)\r\n temp_2 = self.col\r\n add_col = np.setdiff1d(temp_2,temp_1)\r\n for item in add_col: temp[item] = np.nan\r\n # add columns from the standard due to weighted average pivot\r\n col = self.col\r\n col.append('total_volume')\r\n col.append('weighted_average_yield')\r\n col.append('static_spread_tradesum')\r\n self.filtered_input = temp[col]\r\n \r\n def drop_3_added_col(self):\r\n \r\n temp = self.filtered_input\r\n temp_2 = temp.drop(['total_volume','weighted_average_yield','static_spread_tradesum'], axis=1)\r\n self.filtered_input_drop = temp_2\r\n\r\n\r\nclass DecayFunction(SharedDecayCorrData):\r\n \r\n def __init__(self, date, input_data, clustering_data, lamb, filtered_input, trading_cluster_id_box_id_dict, pivot_master):\r\n \r\n super().__init__(date, input_data, clustering_data, lamb)\r\n self.input_data = input_data\r\n self.trading_cluster_id_box_id_dict = trading_cluster_id_box_id_dict\r\n self.pivot_master = pivot_master\r\n self.decay_result = pd.DataFrame()\r\n \r\n def decay(self):\r\n \r\n def limit_within_range(aNum, minN, maxN):\r\n return max(min(aNum, maxN), minN)\r\n \r\n def cal_decay_value(aDF, lamb, **kwargs):\r\n \r\n corr_factor = kwargs.get('c', None)\r\n \r\n pivot_ttm = float(aDF[aDF['is_traded_today']==True]['ttm'].values[0])\r\n pivot_diff_static_spread = float(aDF[aDF['is_traded_today']==True]['diff_static_spread'].values[0])\r\n \r\n try:\r\n func = lambda ttm, lamb, corr_factor, pivot_diff_static_spread, pivot_ttm: (lamb * corr_factor * pivot_diff_static_spread)/(1+np.abs(float(ttm) - pivot_ttm))\r\n aDF['adj_spread'] = aDF['ttm'].apply(func, args=(lamb, corr_factor, pivot_diff_static_spread, pivot_ttm))\r\n aDF['adj_spread'] = aDF['adj_spread'].apply(limit_within_range, args=(-2,2))\r\n \r\n except: \r\n # The line below is subject to error if the ttm of the two issues are duplicate.\r\n func = lambda ttm, lamb, pivot_diff_static_spread, pivot_ttm: (lamb * pivot_diff_static_spread)/(1+np.abs(float(ttm) - pivot_ttm))\r\n aDF['adj_spread'] = aDF['ttm'].apply(func, args=(lamb, pivot_diff_static_spread, pivot_ttm))\r\n aDF['adj_spread'] = aDF['adj_spread'].apply(limit_within_range, args=(-5,5))\r\n \r\n return aDF\r\n \r\n def linear_interpolation(aDF):\r\n \r\n aDF = aDF.set_index('ttm', drop=False)\r\n aDF['adj_spread'] = list(aDF['diff_static_spread'].interpolate())\r\n aDF = aDF.reset_index(drop=True)\r\n \r\n return aDF\r\n \r\n def drop_merge_duplicates(aDF):\r\n \r\n col = [item for item in list(aDF.columns) \\\r\n if (~('adj_spread' in item) or (item=='adj_spread'))]\r\n col.append('symbol')\r\n return aDF[col]\r\n \r\n def get_nontrading_index(aDF):\r\n \r\n aDF = aDF.reset_index(drop=True)\r\n# row = aDF.index[-aDF['is_traded_today']].to_list()\r\n row = aDF[aDF['is_traded_today']==False].index.tolist()\r\n return row\r\n \r\n def decay_wing_function(temp, temp_2, left_pivot_index, right_pivot_index, b_id, issuer, \\\r\n lamb, is_decay, trading_cluster_id_box_id_dict, **kwargs):\r\n \r\n corr_factor = kwargs.get('c', None)\r\n \r\n # get the decay wings\r\n left_decay_wing = pd.DataFrame()\r\n right_decay_wing = pd.DataFrame()\r\n \r\n #get the list of trading box_id\r\n temp_3 = trading_cluster_id_box_id_dict\r\n temp_3 = list(temp_3.values())\r\n temp_3 = np.concatenate(temp_3)\r\n \r\n # get left 
decay\r\n# print(temp)\r\n left_decay_wing = cal_decay_value(temp[:left_pivot_index+1], lamb, c=corr_factor)\r\n if (left_decay_wing.shape[0] > 1):\r\n row = get_nontrading_index(left_decay_wing)\r\n temp_col = left_decay_wing.columns.get_loc('status')\r\n if is_decay: left_decay_wing.iloc[row, temp_col] = 'decay_left'\r\n else:\r\n if b_id in temp_3:\r\n left_decay_wing.iloc[row, temp_col] = 'inbox_corr_left'\r\n else:\r\n left_decay_wing.iloc[row, temp_col] = 'outbox_corr_left'\r\n# # separating inbox/outbox correlation\r\n \r\n # get right decay\r\n right_decay_wing = cal_decay_value(temp[right_pivot_index:], lamb, c=corr_factor)\r\n if (right_decay_wing.shape[0] > 1):\r\n row = get_nontrading_index(right_decay_wing)\r\n temp_col = right_decay_wing.columns.get_loc('status')\r\n if is_decay: right_decay_wing.iloc[row, temp_col] = 'decay_right'\r\n else: \r\n# # separating inbox/outbox correlation\r\n if b_id in temp_3:\r\n right_decay_wing.iloc[row, temp_col] = 'inbox_corr_right'\r\n else:\r\n right_decay_wing.iloc[row, temp_col] = 'outbox_corr_right'\r\n \r\n # get mid decay\r\n mid_decay_wing = linear_interpolation(temp[left_pivot_index:right_pivot_index+1])\r\n mid_decay_wing = mid_decay_wing.iloc[1:mid_decay_wing.shape[0]-1,:]\r\n if (mid_decay_wing.shape[0] > 0):\r\n row = get_nontrading_index(mid_decay_wing)\r\n temp_col = mid_decay_wing.columns.get_loc('status')\r\n mid_decay_wing.iloc[row, temp_col] = 'decay_mid'\r\n \r\n temp_2 = pd.concat([left_decay_wing, mid_decay_wing, right_decay_wing]).drop_duplicates()\r\n \r\n return temp_2\r\n \r\n #The name pivot for trading box is misleading and should be changed to pivot for non trading box.\r\n pivot_master = self.pivot_master\r\n input_data = self.input_data\r\n input_data['adj_spread'] = np.nan\r\n input_data['status'] = np.nan\r\n col = self.col\r\n temp_2 = pd.DataFrame(columns=col)\r\n b_id_list = input_data['box_id'].unique()\r\n lamb = float(self.lamb)\r\n trading_cluster_id_box_id_dict = self.trading_cluster_id_box_id_dict\r\n \r\n for b_id in b_id_list:\r\n# print(b_id)\r\n issuer_list = input_data['issuer'][input_data['box_id']==b_id].unique()\r\n \r\n for issuer in issuer_list:\r\n \r\n# print(issuer)\r\n wing_filter = (input_data['box_id']==b_id) & (input_data['issuer']==issuer)\r\n temp = input_data[wing_filter].sort_values(['ttm'])\r\n \r\n #This paragraph is to not adjust spread of newly traded bonds.\r\n non_adjust_filter = (temp['is_traded_within_5bd']==True)\r\n temp_non_adjust = temp[non_adjust_filter]\r\n temp_non_adjust['status'] = 'non-adjusted'\r\n temp_non_adjust['adj_spread'] = 0\r\n \r\n adj_filter = (temp['is_traded_within_5bd']==False) | (temp['is_traded_today']==True)\r\n temp = temp[adj_filter]\r\n temp = temp.reset_index(drop=True)\r\n temp = temp[col]\r\n \r\n try: \r\n# b_id, issuer, lamb = 8, 'QH', 28\r\n pivot_index = temp[temp['is_traded_today']==True].index.tolist()\r\n # get the pivot index\r\n left_pivot_index = pivot_index[0]\r\n right_pivot_index = pivot_index[-1]\r\n \r\n is_decay = True\r\n \r\n temp_3 = decay_wing_function(temp, temp_2, left_pivot_index, right_pivot_index, \\\r\n b_id, issuer, lamb, is_decay, trading_cluster_id_box_id_dict)\r\n \r\n except:\r\n# b_id, issuer, lamb = 0, 'BJC', 0.7\r\n try:\r\n filtered_pivot = (pivot_master['box_id']==b_id)\r\n the_pivot = pivot_master[filtered_pivot]\r\n temp = pd.concat([temp,the_pivot])\r\n temp = temp.sort_values(['ttm'])\r\n temp = temp.reset_index(drop=True)\r\n pivot_index = temp[temp['is_traded_today']==True].index.tolist()\r\n \r\n 
left_pivot_index = pivot_index[0]\r\n right_pivot_index = pivot_index[-1]\r\n \r\n is_decay = False\r\n \r\n box_id_corr_factor_dict = dict(zip(pivot_master['box_id'], \\\r\n pivot_master['corr_factor']))\r\n \r\n corr_factor = box_id_corr_factor_dict[b_id]\r\n temp_3 = decay_wing_function(temp, temp_2, left_pivot_index, right_pivot_index, \\\r\n b_id, issuer, lamb, is_decay, trading_cluster_id_box_id_dict, \\\r\n c=corr_factor)\r\n \r\n except IndexError:\r\n pass\r\n\r\n try:\r\n temp_2 = pd.concat([temp_2, temp_3, temp_non_adjust])\r\n temp_2 = temp_2.sort_values('ttm')\r\n except UnboundLocalError:\r\n pass\r\n \r\n temp_3 = pd.DataFrame()\r\n\r\n self.decay_result = temp_2[self.col]\r\n \r\n def adj_asof(self):\r\n decay_result = self.decay_result\r\n decay_result['asof'] = self.date\r\n self.decay_result = decay_result\r\n \r\n \r\n# Pivots are for issuers with 'no trade'. 1)No trade for the issuer but has trade with the box\r\n# 2) No trade for the issuer and no trade within the box","sub_path":"ca3_module_1.py","file_name":"ca3_module_1.py","file_ext":"py","file_size_in_byte":13624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"93342315","text":"__author__ = 'Will'\n\nimport pygame\nfrom Color import color\nfrom Rectangle import rectangle\n\n\nclass graphics:\n screen = None\n font = None\n\n def initialize(self):\n self.screen = pygame.display.set_mode((1024, 768), pygame.HWSURFACE | pygame.DOUBLEBUF, 32)\n self.font = pygame.font.SysFont(\"couriernew\", 16)\n\n def clear(self):\n self.screen.fill((0, 0, 0))\n return\n\n\n def draw_texture(self, texture, source: rectangle, destination: rectangle):\n self.screen.blit(texture.surface.subsurface(source.X, source.Y, source.Width, source.Height),\n pygame.Rect(destination.X, destination.Y, destination.Width, destination.Height))\n\n return\n\n\n def draw_string(self, text, position):\n string = self.font.render(text, True, (0, 128, 0))\n self.screen.blit(string, position)","sub_path":"src/vft/Graphics.py","file_name":"Graphics.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"645740652","text":"# Given two integers representing the numerator and denominator of a fraction,\n# return the fraction in string format.\n# If the fractional part is repeating, enclose the repeating part in\n# parentheses.\n# For example,\n# Given numerator = 1, denominator = 2, return \"0.5\".\n# Given numerator = 2, denominator = 1, return \"2\".\n# Given numerator = 2, denominator = 3, return \"0.(6)\".\n\nclass Solution:\n\tdef fractionToDecimal(self, numerator, denominator):\n\t\tif not numerator:\n\t\t\treturn '0'\n\t\tans = ''\n\t\tif (numerator < 0) ^ (denominator < 0):\n\t\t\tans += '-'\n\t\ta, b = abs(numerator), abs(denominator)\n\t\tans += str(a / b)\n\t\trem = a % b\n\t\tif not rem:\n\t\t\treturn ans\n\t\tans += '.'\n\t\tdic = {}\n\t\trem *= 10\n\t\twhile rem:\n\t\t\tif rem in dic:\n\t\t\t\tindex = dic[rem]\n\t\t\t\tans = ans[:index] + '(' + ans[index:] + ')'\n\t\t\t\treturn ans\n\n\t\t\tdic[rem] = len(ans)\n\t\t\tans += str(rem / b)\n\t\t\trem = (rem % b) * 10\n\t\treturn ans\n\ns = Solution()\nprint(s.fractionToDecimal(13132, 1000))\nprint(s.fractionToDecimal(1, 
7))\n","sub_path":"src/leetcode/LC_166_fractionto_recurring_decimal.py","file_name":"LC_166_fractionto_recurring_decimal.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"45264627","text":"from django.forms import ModelForm\nfrom django.utils.safestring import SafeText\nfrom cms.models.pagemodel import Page\nfrom .models import (\n MenuPluginSettings,\n)\n\n\nclass EmbedPagesAdminForm(ModelForm):\n\n class Meta:\n model = MenuPluginSettings\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(EmbedPagesAdminForm, self).__init__(*args, **kwargs)\n choices = [self.fields['root'].choices.__iter__().next()]\n for page in Page.objects.drafts().order_by('node__path'):\n choices.append((\n page.id,\n SafeText(''.join([\n u\" \"*len(page.node.path),\n page.__unicode__()\n ]))\n ))\n\n self.fields['root'].choices = choices\n","sub_path":"cmsplugin_embeddedmenu/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"274077462","text":"import boto3\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('staff')\n\nwith table.batch_writer() as batch:\n batch.put_item(\n Item={\n 'account_type': 'standard_user',\n 'username': 'stefanb',\n 'first_name': 'stefan',\n 'last_name': 'bester',\n 'age': 30,\n 'address': {\n 'road': '1 jamesville street',\n 'city': 'kroonstad',\n 'province': 'free state',\n 'country': 'south africa'\n }\n }\n )\n batch.put_item(\n Item={\n 'account_type': 'administrator',\n 'username': 'ruanb',\n 'first_name': 'ruan',\n 'last_name': 'bekker',\n 'age': 30,\n 'address': {\n 'road': '10 peterville street',\n 'city': 'cape town',\n 'province': 'western cape',\n 'country': 'south africa'\n }\n }\n )\n batch.put_item(\n Item={\n 'account_type': 'standard_user',\n 'username': 'samanthas',\n 'first_name': 'samantha',\n 'last_name': 'smith',\n 'age': 28,\n 'address': {\n 'road': '12 newton street',\n 'city': 'port elizabeth',\n 'province': 'eastern cape',\n 'country': 'south africa'\n }\n }\n )","sub_path":"BatchWriteItem.py","file_name":"BatchWriteItem.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"195052852","text":"import random as r\nclass InvestmentReturns:\n def __init__(self, IPO=10000, initial=10000, start=2000, current=2020):\n self._IPO = IPO\n self._initial = initial\n self._start = start\n self._current = current\n self.balance = -initial\n self.dataset = dict()\n \n def populate(self):\n for year in range(self._start, self._current + 1):\n low = int( self._IPO * r.uniform(-1.5, -1) + (year - self._start + r.randint(50, 5000)) * r.uniform(1, 2.5) )\n high = int( self._IPO * r.uniform(1, 3) + (year - self._start + r.randint(1, 50000))* r.uniform(1, 3))\n mReturn = r.randint(low, high)\n self.balance += mReturn\n self.dataset[year] = [mReturn, self.balance]\n def getData(self):\n for year in self.dataset.keys():\n print(f\"{year}: £{self.dataset[year][0]} \\t\\t Balance: {self.dataset[year][1]}\")\n\n def prefix(self):\n n = len(self.dataset)\n A = [0] * n\n\n for i in range(n):\n total = 0\n for j in range(self._start, self._start + i + 1):\n total = total + self.dataset[j][0]\n A[i] = total/(i + 1)\n\n for i in range(len(A)):\n print(f\"{self._start + i}: {A[i]}\")\n\nIPO = r.randint(50000, 1000000)\ninvestment = IPO * 
r.uniform(1, 2)\ninitial = r.randint(1980, 2010)\nprint(\"IPO:\", IPO)\nprint(\"Initial investement:\", investment)\n\no = InvestmentReturns(IPO, investment, initial)\no.populate()\no.getData()\nprint(o.prefix())\n\n \n","sub_path":"investment returns.py","file_name":"investment returns.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"332107509","text":"'''\n切除每一帧中标好的框的图片\n'''\n\nimport cv2\nimport sys\nimport os\n\n# Mouse response function\ndef getPosition(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n global x1, y1, x2, y2, clickCount\n if (clickCount == 0):\n x1,y1 = x,y\n clickCount = clickCount + 1\n cv2.circle(frame, (x, y), 10, (0,255,0),-1)\n return\n if (clickCount == 1):\n x2, y2 = x, y\n clickCount = clickCount + 1\n return\n\nif __name__ == '__main__':\n\n # Set up tracker.\n # Instead of MIL, you can also use\n tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']\n\n # tracker_type = tracker_types[2]\n tracker_type = tracker_types[0]\n tracker = cv2.TrackerBoosting_create()\n\n # Read video\n video = cv2.VideoCapture(\"./test.mp4\")\n\n # Exit if video not opened.\n if not video.isOpened():\n print(\"Could not open video\")\n sys.exit()\n\n # Read first frame.\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file')\n sys.exit()\n\n # init global variable\n clickCount = 0\n x1, y1, x2, y2 = -1,-1,-1,-1\n\n # show the first frame of the video\n cv2.namedWindow(\"first frame\")\n\n # add mouse response function to window\n cv2.setMouseCallback('first frame', getPosition)\n \n i = 0\n \n while (clickCount < 2):\n cv2.imshow(\"first frame\", frame)\n cv2.setMouseCallback('first frame', getPosition)\n k = cv2.waitKey(1) & 0xff\n if k == 27: break\n\n cv2.destroyWindow('first frame')\n\n # Define an initial bounding box\n bbox = (x1, y1, abs(x2 - x1), abs(y2 - y1))\n\n # Uncomment the line below to select a different bounding box\n # bbox = cv2.selectROI(frame, False)\n\n # Initialize tracker with first frame and bounding box\n ok = tracker.init(frame, bbox)\n\n\n # make sure the folder exists\n file = \"./output1\"\n if not (os.path.exists(file)):\n os.mkdir(file)\n\n\n while True:\n # Read a new frame\n ok, frame = video.read()\n if not ok:\n break\n\n # Start timer\n timer = cv2.getTickCount()\n\n # Update tracker\n ok, bbox = tracker.update(frame)\n\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n\n # Draw bounding box\n if ok:\n # Tracking success\n p1 = (int(bbox[0]), int(bbox[1])) # left-top\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1])) # right-top\n p3 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])) # right-bottom\n p4 = (int(bbox[0]), int(bbox[1] + bbox[3])) # left-bottom\n\n # draw the box\n cv2.rectangle(frame, p1, p3, (255, 0, 0), 2, 1)\n\n else:\n # Tracking failure\n cv2.putText(frame, \"Tracking failure detected\", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n\n # Display result\n cv2.imshow(\"Tracking\", frame)\n cv2.imwrite('output1/{}.jpg'.format(i),frame)\n i+=1\n\n\n # Exit if ESC pressed\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n","sub_path":"tracker/output1.py","file_name":"output1.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"306779590","text":"import os\nimport sys\nimport json\nimport codecs\n\ndef get_product(path, 
file):\n print('deal product {}'.format(path))\n with open(os.path.join(path, 'intro.json'), 'r') as product_json:\n product = json.loads(product_json.read())\n file.write('{ \"index\" : { \"_index\" : \"weshop\", \"_type\" : \"product\", \"_id\" : \"%s\" } }\\n' % path.split(os.path.sep)[-1]) \n file.write(json.dumps(product).replace('.5\"', '/2\"'))\n file.write('\\n')\n\n\ndef generate_products(products_folder, file):\n for path in os.listdir(products_folder):\n if os.path.isdir(path):\n get_product(path, file)\n\ndef generate_json(input_folder, json_file):\n with open(json_file, 'w') as file:\n products = generate_products(input_folder,file)\n\n\nif __name__ == '__main__':\n json_file = './esdata.json'\n input_folder = '.'\n generate_json(input_folder, json_file)","sub_path":"static/pictures/products/esdata_gen.py","file_name":"esdata_gen.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"522626694","text":"import cPickle as pickle\nfrom sklearn.model_selection import StratifiedKFold\nimport sys\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Convolution1D\nfrom keras.datasets import mnist\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution1D, MaxPooling1D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nimport numpy as np\n\n#parameters: sys.argv[1] = input dataset as matrix of k-mers\nnome_train=sys.argv[1].split(\".\")[0]\t\t\t\t\n\ndef load_data(file):\n\tlista=[]\n\trecords= list(open(file, \"r\"))\n\trecords=records[1:]\n\tfor seq in records:\n\t\telements=seq.split(\",\")\n\t\tlevel=elements[-1].split(\"\\n\")\n\t\tclasse=level[0]\n\t\tlista.append(classe)\n\n\tlista=set(lista)\n\tclasses=sorted(lista)\n\tX=[]\n\tY=[]\n\tfor seq in records:\n\t\telements=seq.split(\",\")\n\t\tX.append(elements[1:-1])\n\t\tlevel=elements[-1].split(\"\\n\")\n\t\tclasse=level[0]\n\t\tY.append(classes.index(classe))\n\tX=np.array(X,dtype=float)\n\tY=np.array(Y,dtype=int)\n\tdata_max= np.amax(X)\n\tX = X/data_max\n\treturn X,Y,len(classes),len(X[0])\n\ndef create_model(nb_classes,input_length):\n model = Sequential()\n model.add(Convolution1D(5,5, border_mode='valid', input_dim=1,input_length=input_length)) #input_dim\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_length=2,border_mode='valid'))\n model.add(Convolution1D(10, 5,border_mode='valid'))\n model.add(Activation('relu'))\n model.add(MaxPooling1D(pool_length=2,border_mode='valid'))\n model.add(Flatten())\n ##\n ##MLP\n model.add(Dense(500))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\n\ndef train_and_evaluate_model (model, datatr, labelstr, nb_classes):\n\n\n datatr = datatr.reshape(datatr.shape + (1,))\n labelstr = np_utils.to_categorical(labelstr, nb_classes)\n\n history = model.fit(datatr, labelstr, epochs=100, batch_size=20, validation_split=0.1)\n model.save(\"{}.dat\".format(nome_train))\n with open(\"{}.hist\".format(nome_train), \"wb\") as fp:\n pickle.dump(history.history, fp)\n \n\n# def train_and_evaluate_model (i, model, datatr, labelstr, datate, labelste,nb_classes):\n\n\n# datatr = datatr.reshape(datatr.shape + (1,))\n# labelstr = np_utils.to_categorical(labelstr, nb_classes)\n# labelste_bin = 
np_utils.to_categorical(labelste, nb_classes)\n\n# history = model.fit(datatr, labelstr, nb_epoch=100, batch_size=20, verbose = 0)\n# datate = datate.reshape(datate.shape + (1,))\n# model.save(\"{}-{}.dat\".format(nome_train, i))\n# with open(\"{}-{}.hist\".format(nome_train, i), \"wb\") as fp:\n# pickle.dump(history.history, fp)\n \n# tr_scores = model.evaluate(datatr,labelstr,verbose=0)\n# preds = model.predict_classes(datate,verbose = 0)\n \n# scores = model.evaluate(datate, labelste_bin,verbose=0)\n# return preds, labelste\n\n\n\nif __name__ == \"__main__\":\n\tn_folds = 10\n\tX,Y,nb_classes,input_length = load_data(sys.argv[1])\n\t\n\tmodel = create_model(nb_classes,input_length)\n\ttrain_and_evaluate_model(model, X, Y, nb_classes)\n \n","sub_path":"models/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"84169400","text":"\"\"\"\r\n15. 주민등록번호를 입력하면 남자인지 여자인지 알려주는 프로그램을 작성하시오.\r\n(리스트 split 과 슬라이싱 활용) \r\n\r\n예시\r\n<입력>\r\n주민등록번호 : 941130-3002222\r\n\r\n<출력>\r\n남자\r\n\"\"\"\r\n\r\n\r\nid_card = input(\"주민등록번호 : \") # 주민등록번호 입력란\r\n\r\nid_card = id_card.split(\"-\")[1] # split 분할 메소드 구분자로 하이픈 사용\r\n\r\nif id_card[0] == \"1\" or id_card[0] == \"3\": # 하이픈 다음 0번째 수가 1 이나 3 이라면\r\n print(\"남자\") # 남자 출력\r\nelif id_card[0] == \"2\" or id_card[0] == \"4\": # 하이픈 다음 0번째 수가 2 나 4 라면\r\n print(\"여자\") # 여자 출력\r\n","sub_path":"quiz/pre_python_15.py","file_name":"pre_python_15.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"103062902","text":"import json\r\nimport numpy as np\r\nfrom io import BytesIO\r\nfrom astropy.io import fits\r\nfrom astroscrappy import detect_cosmics\r\nfrom scipy.ndimage import label\r\nfrom skimage.transform import downscale_local_mean\r\nfrom bokeh.plotting import figure, output_file, show\r\nfrom bokeh.palettes import Viridis256\r\nfrom bokeh.models import ColorBar, LinearColorMapper, LogColorMapper, LogTicker\r\nfrom bokeh.models import BoxZoomTool,WheelZoomTool,ResetTool,HoverTool,PanTool,FreehandDrawTool\r\nfrom bokeh.io import output_notebook\r\nfrom pprint import pprint\r\n\r\ndef plot_image(filename, save=False, bg_min=60, bg_max=99):\r\n\r\n hdu = fits.open(filename)\r\n\r\n extension = 0\r\n image_header = hdu[extension].header\r\n while image_header[\"NAXIS\"] == 0:\r\n extension += 1\r\n image_header = hdu[extension].header\r\n\r\n dheader = dict(hdu[extension].header)\r\n djson = {'filename':filename}\r\n for k in dheader:\r\n if len(k) >= 2:\r\n print(f\"{k}: {dheader[k]}\")\r\n djson[k] = str(dheader[k])\r\n\r\n data = hdu[extension].data\r\n\r\n with open('header.json', 'w') as json_file:\r\n json.dump(djson, json_file, indent=4)\r\n print(\"Image header written to header.json\")\r\n\r\n if data.shape[0] > 6000:\r\n image_downscaled = downscale_local_mean(data, (4, 4)).astype(int)\r\n elif data.shape[0] > 2000:\r\n image_downscaled = downscale_local_mean(data, (2, 2)).astype(int)\r\n else:\r\n image_downscaled = downscale_local_mean(data, (1, 1)).astype(int)\r\n\r\n # quick hot pixel/ cosmic ray mask\r\n mask, cdata = detect_cosmics(\r\n data, psfmodel='gauss',\r\n psffwhm=4, psfsize=2*round(4)+1, # just a guess\r\n sepmed=False, sigclip = 4.25,\r\n niter=3, objlim=10, cleantype='idw', verbose=False\r\n )\r\n\r\n # show how many pixels are saturated\r\n SATURATION = 2**(hdu[extension].header['bitpix'])\r\n mmask = cdata >= 
SATURATION*0.9\r\n labels, ngroups = label(mmask)\r\n print('Saturated Areas:',ngroups)\r\n labeli, counts = np.unique(labels, return_counts=True)\r\n bad_pix = {'x':[], 'y':[], 'value':[]}\r\n # loop through each group to find position\r\n for i in range(1,labeli[-1]+1):\r\n imask = labels == i\r\n yc,xc = np.argwhere(imask).mean(0)\r\n bad_pix['x'].append(xc)\r\n bad_pix['y'].append(yc)\r\n bad_pix['value'].append(cdata[imask].mean())\r\n\r\n pprint(bad_pix)\r\n\r\n # create a figure with text on mouse hover\\\r\n print(\"Saturated pixels are marked with red. These are pixels which have exceeded the maximum value for brightness, and are thus not suitable for use as comparison stars.\")\r\n fig = figure(tooltips=[(\"x\", \"$x\"), (\"y\", \"$y\"), (\"value\", \"@image\")], plot_width=800, plot_height=800,\r\n tools=[PanTool(),BoxZoomTool(),WheelZoomTool(),ResetTool(),HoverTool()])\r\n fig.x_range.range_padding = fig.y_range.range_padding = 0\r\n\r\n r = fig.multi_line('x', 'y', source={'x':[],'y':[]},color='white',line_width=3)\r\n fig.add_tools(FreehandDrawTool(renderers=[r]))\r\n\r\n # set up a colobar + data range\r\n color_mapper = LogColorMapper(palette=\"Cividis256\", low=np.percentile(data, bg_min), high=np.percentile(data, bg_max))\r\n\r\n # must give a vector of image data for image parameter\r\n fig.image(\r\n image=[image_downscaled],\r\n x=0, y=0, dw=hdu[extension].data.shape[1], dh=hdu[extension].data.shape[0],\r\n level=\"image\", color_mapper=color_mapper\r\n )\r\n\r\n # plot saturated stars\r\n fig.x(bad_pix['x'], bad_pix['y'], size=25, color='red', line_width=3)\r\n fig.x(bad_pix['x'], bad_pix['y'], size=25, color='white', line_width=1)\r\n # TODO figure out hover value\r\n\r\n fig.grid.grid_line_width = 0.5\r\n\r\n color_bar = ColorBar(color_mapper=color_mapper, ticker=LogTicker(),\r\n label_standoff=12, border_line_color=None, location=(0,0))\r\n\r\n fig.add_layout(color_bar, 'right')\r\n\r\n if save:\r\n output_file(\"interactivefits.html\")\r\n else:\r\n show(fig)\r\n","sub_path":"exotic/api/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"622852223","text":"\nimport moderngl\nimport numpy as np\nfrom pyrr import Matrix44\n\n\ndef get_vertex_data(world_coords, colors, triangles):\n vertices = np.hstack([world_coords, colors])\n vert_list = []\n for v1, v2, v3 in triangles:\n vert_list.append(vertices[v1])\n vert_list.append(vertices[v2])\n vert_list.append(vertices[v3])\n vert_list = np.array(vert_list)\n return vert_list.astype('f4').tobytes()\n\n\nclass Renderer(object):\n\n def __init__(self):\n self.ctx = moderngl.create_standalone_context()\n self.ctx.enable(moderngl.DEPTH_TEST | moderngl.CULL_FACE)\n self.ctx.depth_func = '<'\n\n # Shaders\n vertex_shader_source = open('shaders/myshader.vert').read()\n fragment_shader_source = open('shaders/myshader.frag').read()\n self.prog = self.ctx.program(vertex_shader=vertex_shader_source, fragment_shader=fragment_shader_source)\n\n def render(self, world_coords, colors, triangles, projection, rotation, translation, out_shape=(224, 224)):\n\n # Matrices and Uniforms\n # fov_rad = 2 * np.arctan(120 / focal_length) # (film_size / 2) / focal_length\n # fov_deg = fov_rad * 180 / np.pi\n\n # projection_mat = Matrix44.perspective_projection(fov_deg, 1.0, 1000, 1200)\n\n translation_mat = Matrix44.from_translation(translation)\n projection_mat = Matrix44.from_matrix33(projection.T)\n rotation_mat 
= Matrix44.from_matrix33(rotation)\n\n translation = self.prog['translation']\n projection = self.prog['projection']\n rotation = self.prog['rotation']\n\n translation.write(translation_mat.astype('f4').tobytes())\n projection.write(projection_mat.astype('f4').tobytes())\n rotation.write(rotation_mat.astype('f4').tobytes())\n\n # vertex array and buffer (binding the mesh)\n vbo = self.ctx.buffer(get_vertex_data(world_coords, colors, triangles))\n vao = self.ctx.simple_vertex_array(self.prog, vbo, 'in_vert', 'in_color')\n\n # frame buffer setup\n rbo = self.ctx.renderbuffer(out_shape, components=4, dtype='f4')\n dbo = self.ctx.depth_renderbuffer(out_shape)\n fbo = self.ctx.framebuffer([rbo], dbo)\n fbo.use()\n fbo.clear(0.0, 0.0, 0.0, 0.0, depth=1.0)\n\n # render\n vao.render()\n\n data = np.fliplr(np.frombuffer(fbo.read(components=4), dtype=np.dtype('u1')).reshape((out_shape[1], out_shape[0], 4)))\n\n # release memory\n fbo.release()\n rbo.release()\n dbo.release()\n vbo.release()\n vao.release()\n\n return data\n","sub_path":"rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"44291676","text":"#!/usr/bin/env python3\nfrom broker.backend.mongoBackend import MongoBackend\nfrom broker.rabbitMq import RabbitMq\nfrom logger.logger import logger\n\nimport argparse\nfrom conf import conf\n\n\ndef checkParams():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-h\",\"--host\", help=\"broker host\", default=\"rabbitmq\")\n parser.add_argument(\"-p\",\"--port\", help=\"broker port\", default= 16562)\n parser.add_argument(\"-q\",\"--queue\", help=\"consuming queue\", default=\"test\")\n return parser.parse_args()\n\n\ndef main():\n logger.info(\"Run consumer\")\n broker = RabbitMq(conf[\"broker\"])\n broker.setBackend(MongoBackend(conf[\"backend\"]))\n broker.consume()\n logger.info(\"End consumer\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"git-api/src/broker/backend/src/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"94691272","text":"from core.models import ConfigEntry\nimport random\nimport string\n#defaults = [(\"TEST_SETTING\", \"BOB\")]\ndefaults = [\n (\"SLACK_ENABLED\", False),\n (\"SLACK_SUBDOMAIN\", \"SLACK\"),\n ]\n\ndef load_defaults():\n for setting in defaults:\n config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0]\n config.value = setting[1]\n config.save()\n","sub_path":"evewspace/Slack/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391318636","text":"import shapes\r\nimport math\r\nARCHAIC = 1 # Actually, Wikipedia mentions \"sometimes less than one\"\r\nPARTHENON = 1.25\r\nPYCNOSTLE = 1.5\r\nSYSTYLE = 2\r\nEUSTYLE = 2.25 # Vitruvius mentions varying to 3 between middle columns, front and rear\r\nDIASTYLE = 3\r\nDEFAULT_TEXTURE = \"column1_2\"\r\nNUM_STEPS = 3\r\ndef roofed_column_grid(x, y, origin, full_column_height, column_base_side):\r\n\r\n diam = shapes.get_oct_prism_width(column_base_side)\r\n entab_height = int((full_column_height / 7) * 2) # just some makey-up thing to start with... 
actually eyeballing photos of Archaic Doric suggests 4/15ths\r\n print(\"Diam\" + str(diam))\r\n # I threw in a rounding hack to the shapes.get_oct_prism_width function so this error shouldn't happen\r\n if diam % 2 != 0:\r\n print(\"Deriving actual column diameter from the base side \" + str(column_base_side) + \" unfortunately led to a non-even quantity\")\r\n return\r\n rad = diam / 2 # used to get position of *centre* of columns\r\n brushes = []\r\n for i in range(x):\r\n for j in range(y):\r\n # the plus one is for the column's width\r\n column = (shapes.create_doric_column([rad + (diam * (PARTHENON + 1) * i), rad + (diam * (PARTHENON + 1) * j), 0], full_column_height, column_base_side, DEFAULT_TEXTURE))\r\n for brush in column:\r\n brushes.append(brush)\r\n overall_width = (diam * (PARTHENON + 1) * (x - 1)) + diam # all spacings (including columns' width) + one column's width\r\n overall_breadth = (diam * (PARTHENON + 1) * (y - 1)) + diam # same idea\r\n fill_height = shapes.get_fillet_height(full_column_height)\r\n half_fill_height = math.floor(fill_height / 2)\r\n architrave = shapes.create_cuboid_brush((origin[0], origin[1], origin[2] + full_column_height), (origin[0] + overall_width, origin[1] + overall_breadth, origin[2] + full_column_height + (entab_height / 2 - half_fill_height)), \"column1_4\")\r\n fillet = shapes.create_cuboid_brush((origin[0] - fill_height, origin[1] - fill_height, origin[2] + full_column_height + (entab_height / 2 - math.floor(fill_height / 2))), (origin[0] + overall_width + fill_height, origin[1] + overall_breadth + fill_height, origin[2] + full_column_height + (entab_height / 2 + half_fill_height)), \"column1_4\")\r\n frieze = shapes.create_cuboid_brush((origin[0], origin[1], origin[2] + full_column_height + (entab_height / 2 + math.floor(fill_height / 2))), (origin[0] + overall_width, origin[1] + overall_breadth, origin[2] + full_column_height + entab_height), \"column1_4\")\r\n brushes.append(architrave)\r\n brushes.append(fillet)\r\n brushes.append(frieze)\r\n #roof/pediment\r\n corn_height = shapes.get_ab_height(column_base_side, full_column_height)\r\n corn_border = int(corn_height * 2)\r\n cornice = shapes.create_cuboid_brush((origin[0] - corn_border, origin[1] - corn_border, origin[2] + full_column_height + entab_height), (origin[0] + overall_width + corn_border, origin[1] + overall_breadth + corn_border, origin[2] + full_column_height + entab_height + corn_height), \"column1_4\")\r\n\r\n ped_height = shapes.get_pediment_height(overall_width + corn_border * 2, x)\r\n prism_height = ped_height - corn_height # subtract the height of the pitched cornice (same height as flat cornice, meaning lesser height if measured at angle!) 
\r\n\r\n brushes.append(cornice)\r\n prism = shapes.create_roof_prism_brush((origin[0] - corn_border, origin[1] - corn_border, origin[2] + full_column_height + entab_height + corn_height), (origin[0] + overall_width + corn_border, origin[1] + overall_breadth + corn_border, origin[2] + full_column_height + entab_height + corn_height + prism_height))\r\n brushes.append(prism)\r\n # the entab only goes as far as the column diameters but the step below should be about equal to the extent of the capitals, so we figure out an extra border size\r\n border = (shapes.get_oct_prism_width(shapes.get_archaic_capital_side(column_base_side)) - diam) / 2\r\n # this replicates the BAD ASSUMPTION MADE IN shapes THAT HEIGHTS ARE NECESSARILY DIVISIBLE BY FIVE\r\n step_height = (full_column_height / 35) * 2 # replicating the logic that takes us from full height 350 to ab height 20\r\n for i in range(NUM_STEPS):\r\n v_shift = step_height * i\r\n h_shift = border * i\r\n step = shapes.create_cuboid_brush((origin[0] - h_shift, origin[1] - h_shift, origin[2] - v_shift), (origin[0] + overall_width + h_shift, origin[1] + overall_breadth + h_shift, origin[2] - v_shift - step_height), \"column1_4\")\r\n brushes.append(step)\r\n\r\n # return brushes and also the bounds as corners of a cuboid, for use in making a surrounding room later\r\n return {\"brushes\": brushes, \"corner_a\": (origin[0], origin[1], origin[2] - (step_height * NUM_STEPS)), \"corner_b\": (origin[0] + overall_width, origin[1] + overall_breadth, origin[2] + full_column_height + entab_height + corn_height)}\r\n \r\n","sub_path":"temple-maker/edifices.py","file_name":"edifices.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"597950163","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n# diameter = sum(height of the left + height of the right at every node)\n\n# time: O(n) space: O(n) at worst case and O(logn) if balanced tree\n# Runtime: 36 ms, faster than 96.98% of Python3\n# Memory Usage: 16.4 MB, less than 25.00% of Python3\n\nclass Solution:\n def diameterOfBinaryTree(self, root: TreeNode) -> int:\n def compute_height(root: TreeNode) -> int:\n if not root:\n return -1\n left = compute_height(root.left) + 1\n right = compute_height(root.right) + 1\n\n if result[0] < left + right:\n result[0] = left + right\n\n return max(left, right)\n\n result = [0]\n return max(compute_height(root), result[0])\n","sub_path":"Minseo-Kim/leetcode/543_Diameter_of_binary_tree.py","file_name":"543_Diameter_of_binary_tree.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"588279985","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n 旋转函数\n\"\"\"\nfrom typing import List\n\n\n# 利用公式进行处理\nclass Solution:\n def maxRotateFunction(self, A: List[int]) -> int:\n sum_a = 0\n start = 0\n for i, a in enumerate(A):\n sum_a += a\n start += i * a\n max_a = start\n max_d = len(A)\n for i, x in enumerate(A[:-1]):\n v = start - sum_a + max_d * A[i]\n max_a = max(v, max_a)\n start = v\n\n return max_a","sub_path":"out/production/leetcode/src/leetcode/python3/leetcode396.py","file_name":"leetcode396.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"34422028","text":"#!/usr/bin/python\nfrom collections import defaultdict\n\ndgold,dfirst,dmatch_first = defaultdict(float),defaultdict(float),defaultdict(float)\nfor s in open('test.data'):\n\tif s.strip() == '':\n\t\tcontinue\n\ts = s.split()\n\tassert len(s) == 4\n\tfirst_ss,gold = s[2],s[3]\n\tif gold == 'O':\n\t\tcontinue\n\tdgold[gold] = dgold[gold] + 1\n\tdfirst[first_ss] = dfirst[first_ss] + 1\n\tif first_ss == gold:\n\t\tdmatch_first[gold] = dmatch_first[gold] + 1\n\nfout = open('result','w')\n\nprint >>fout, '{:<24}p\\tr\\tf'.format('')\nn_first,n_gold,n_match = 0.0,0.0,0.0\nfor k in sorted(set(dfirst.keys()+dgold.keys())):\n\tm,o,g = dmatch_first[k],dfirst[k]+1e-10,dgold[k]+1e-10\n\tp = m/o\n\tr = m/g\n\tf = 2*p*r/(p+r+1e-10)\n\tprint >>fout, '{:<24}{:.2%}\\t{:.2%}\\t{:.2%}\\t{:.0f}'.format(k,p,r,f,g)\n\tn_match += m\n\tn_first += o\n\tn_gold += g\np = n_match/n_first\nr = n_match/n_gold\nf = 2*p*r/(p+r)\nprint >>fout, '{:<24}{:.2%}\\t{:.2%}\\t{:.2%}'.format('total',p,r,f)\n","sub_path":"archieve/test/1-prepare-data/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"322437397","text":"# Copyright 2018 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for ProcessStdout event.\"\"\"\n\nfrom .process_io import ProcessIO\n\n\nclass ProcessStdout(ProcessIO):\n \"\"\"Event emitted when a process generates output on stdout.\"\"\"\n\n name = 'launch.events.process.ProcessStdout'\n\n def __init__(self, *, text: bytes, **kwargs) -> None:\n \"\"\"\n Create a ProcessStdout event.\n\n Unmatched keyword arguments are passed to ProcessEvent, see it for\n details on those arguments.\n\n :param: text is the unicode data associated with the event\n \"\"\"\n super().__init__(text=text, fd=1, **kwargs)\n","sub_path":"launch/launch/events/process/process_stdout.py","file_name":"process_stdout.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"124135652","text":"import numpy as np\nimport pygame\n\n\nclass KnapsackView2D:\n\n def __init__(self, bag_weight_capacity, max_item_value, bag_volume_capacity=None):\n\n self.game_over = False\n self.bag_volume_capacity = bag_volume_capacity\n self.bag_weight_capacity = bag_weight_capacity\n self.max_item_value = max_item_value\n\n self.screen_size = (900, 500)\n pygame.init()\n pygame.font.init()\n pygame.display.set_caption(\"Knapsack\")\n self.screen = pygame.display.set_mode(self.screen_size)\n\n # Create a background\n self.background = pygame.Surface(self.screen.get_size()).convert()\n self.background.fill((0, 0, 0))\n\n # Create a layer for the game\n self.game_surface = pygame.Surface(self.screen.get_size()).convert_alpha()\n self.game_surface.fill((0, 0, 0, 0,))\n\n def reset_game(self):\n self.game_over = False\n\n def update(self, selected_item_queue, reward, item, 
bag_weight, bag_value, bag_volume=None, mode=\"human\"):\n try:\n img = self.__draw_game(mode, selected_item_queue, reward, item, bag_weight, bag_volume, bag_value)\n self.__handle_pygame_events()\n except Exception as e:\n self.game_over = True\n pygame.display.quit()\n pygame.quit()\n raise e\n else:\n return img\n\n def __draw_game(self, mode, selected_item_queue, reward, item, bag_weight, bag_volume, bag_value):\n\n self.game_surface.fill((0, 0, 0, 0,))\n\n self.__draw_items_in_bag(selected_item_queue)\n self.__draw_next_item(item)\n\n # update the screen\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(self.game_surface, (0, 0))\n self.screen.blit(self._create_state_surface(bag_weight, bag_volume, bag_value), (200, 450))\n self.screen.blit(self._create_text_surface(f\"Reward: {reward}\", color=(0, 255, 0)), (25, 450))\n self.screen.blit(self._create_text_surface(f\"Item Weight: {item.weight}\"), (175, 225))\n self.screen.blit(self._create_text_surface(f\"Item Value: {item.value}\"), (175, 410))\n weight_capacity_remaining = self.bag_weight_capacity - bag_weight\n self.screen.blit(self._create_text_surface(f\"<--- Weight- Capacity: {self.bag_weight_capacity}\"\n f\" Remaining: {weight_capacity_remaining} --->\"),\n (500, 225))\n if self.bag_volume_capacity:\n volume_capacity_remaining = self.bag_volume_capacity - bag_volume\n self.screen.blit(self._create_text_surface(f\"<--- Volume- Capacity: {self.bag_volume_capacity}\"\n f\" Remaining: {volume_capacity_remaining} --->\"),\n (500, 25))\n self.screen.blit(self._create_text_surface(f\"Item Volume: {item.volume}\"), (175, 25))\n\n if mode == \"human\":\n pygame.display.flip()\n\n return np.flipud(np.rot90(pygame.surfarray.array3d(pygame.display.get_surface())))\n\n def __handle_pygame_events(self):\n if not self.game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_over = True\n pygame.display.quit()\n pygame.quit()\n\n def __draw_items_in_bag(self, selected_item_queue):\n\n # Draw weight dimension\n end_x = 875\n end_y = 250\n start_x = 475\n start_y = 250\n\n pygame.draw.line(self.game_surface, (250, 50, 50), (start_x, start_y), (end_x, end_y), 4)\n\n for item in list(selected_item_queue.queue):\n width = self.__rescale(item.weight, self.bag_weight_capacity, 400)\n height = self.__rescale(item.value, self.max_item_value, 150)\n pygame.draw.rect(self.game_surface, self.__get_color_for_item(item),\n (end_x - width, end_y, width, height))\n end_x = end_x - width\n\n # Draw volume dimension\n if self.bag_volume_capacity:\n end_x = 875\n end_y = 50\n start_x = 475\n start_y = 50\n\n pygame.draw.line(self.game_surface, (250, 50, 50), (start_x, start_y), (end_x, end_y), 4)\n\n for item in list(selected_item_queue.queue):\n width = self.__rescale(item.volume, self.bag_volume_capacity, 400)\n height = self.__rescale(item.value, self.max_item_value, 150)\n pygame.draw.rect(self.game_surface, self.__get_color_for_item(item),\n (end_x - width, end_y, width, height))\n end_x = end_x - width\n\n @staticmethod\n def __get_color_for_item(item):\n r = 255 - item.weight % 255\n g = 255 - item.value % 255\n b = item.volume == 0 if 125 else 255 - item.value % 255\n return r, g, b\n\n @staticmethod\n def _create_text_surface(text, color=(255, 0, 0)):\n font = pygame.font.SysFont(\"arial\", 22)\n return font.render(text, True, color)\n\n @staticmethod\n def _create_state_surface(bag_weight, bag_volume, bag_value):\n font = pygame.font.SysFont(\"arial\", 22)\n if bag_volume:\n return 
font.render(f\"Bag-Weight: {bag_weight} Bag-Volume: {bag_volume}\"\n f\" Bag-Value: {bag_value}\", True, (0, 0, 255))\n else:\n return font.render(f\"Bag-Weight: {bag_weight}\"\n f\" Bag-Value: {bag_value}\", True, (0, 0, 255))\n\n def __draw_next_item(self, item):\n # Draw weight dimension\n end_x = 425\n end_y = 250\n width = self.__rescale(item.weight, self.bag_weight_capacity, 400)\n height = self.__rescale(item.value, self.max_item_value, 150)\n pygame.draw.rect(self.game_surface, self.__get_color_for_item(item),\n (end_x - width, end_y, width, height))\n\n if self.bag_volume_capacity:\n # Draw volume dimension\n end_x = 425\n end_y = 50\n width = self.__rescale(item.volume, self.bag_volume_capacity, 400)\n height = self.__rescale(item.value, self.max_item_value, 150)\n pygame.draw.rect(self.game_surface, self.__get_color_for_item(item),\n (end_x - width, end_y, width, height))\n\n @staticmethod\n def __rescale(value, max_value, new_max):\n return (value * new_max) / max_value\n","sub_path":"reinforcement_learning/rl_knapsack_coach_custom/src/knapsack_view_2D.py","file_name":"knapsack_view_2D.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"15436962","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse, response\nfrom django.urls.conf import include\nfrom django.template.response import TemplateResponse\nfrom numpy.core.numeric import NaN\nfrom .models import Ateco, Sll, SllRel\nfrom .modulo1 import MyImport as Mi\nfrom neomodel import db\nfrom neomodel.contrib.spatial_properties import NeomodelPoint\nimport json\n\n# Create your views here.\ntemplate_home = 'one/home.html'\ntemplate_uno = 'one/uno.html'\ntemplate_due = 'one/due.html'\ndata_path = '/home/azureuser/PRIN/static/data/'\n\ndef sll(request):\n ateco2021 = Mi.importaAteco(data_path+'Ateco2007.csv') #import description of ATECO codes\n query = 'MATCH (n) DETACH DELETE n'\n db.cypher_query(query)\n\n df = Mi.importa_csv(data_path+'food.csv') #import the sll and Ateco nodes \n d= df.set_index('code')\n punto = Mi.importa_sll_coordinates(data_path+'qgis3.geojson') #iimport the centroid coordinates of Sll \n for j in d.columns: \n if (j != 'name'):\n i = \"{}.{}\".format(str(j)[0:2],str(j)[2:4])\n des = ateco2021.loc[i]['descrizione']\n Ateco(code=j, description=des).save() # Create the Ateco nodes\n for i in d.index: \n try:\n sll = Sll(code=i, name = d.loc[i][0], lat=punto[i][1], lng=punto[i][0]).save() # Create the Sll nodes\n except:\n pass\n n=0\n for j in d.loc[i]: ## per ogni Sll aggiunge le relazioni con gli Ateco\n if n > 0:\n a = Ateco.nodes.get(code=int(d.columns[n]))\n sll.sll_ateco.connect(a,{'imprese':j})\n n=n+1\n ## Mostra i risultati\n return get_sll(request)\n \ndef index(request):\n return TemplateResponse(request, template_home, {'titolo': \"Iniziamo\"})\n\ndef get_sll(request):\n sll_list=Sll.nodes.all()\n ateco_list=Ateco.nodes.all()\n return TemplateResponse(request, template_home, {'sll_list':sll_list, 'ateco_list':ateco_list, 'msg': \"Database Popolato\"})\n\ndef del_all(request):\n query = 'MATCH (n) DETACH DELETE n'\n db.cypher_query(query)\n return TemplateResponse(request, template_home, {'msg': \"Database Pulito\"})\n\ndef get_map(request, fil=0):\n query = 'MATCH (n)-[r:contiene]->(m) WHERE r.imprese>' + str(fil) + ' RETURN { id: n.code, label:head(labels(n)), caption:n.name, lat:n.lat, lng: n.lng } as source, { id: id(m), 
label:head(labels(m)), caption:m.code } as target, { weight:log(r.imprese)/2, type:type(r), imprese: r.imprese} as rel' \n results, meta = db.cypher_query(query)\n shapes={}\n with open(data_path+'SLL.json','r') as f:\n shapes = json.load(f)\n return TemplateResponse(request, template_due,{'nodi': results, 'filtro':fil, 'shapes':shapes}) \n\ndef get_grafo(request, fil=0):\n query = 'MATCH (n)-[r:contiene]->(m) WHERE r.imprese>' + str(fil) + ' RETURN { id: n.code, label:head(labels(n)), caption:n.name, lat:n.lat, lng: n.lng } as source, { id: m.code, label:head(labels(m)), caption:m.code, description:m.description} as target, { weight:log(r.imprese)/2, type:type(r), imprese: r.imprese} as rel' \n results, meta = db.cypher_query(query)\n shapes={}\n with open(data_path+'SLL.json','r') as f:\n shapes = json.load(f)\n return TemplateResponse(request, template_uno,{'nodi': results, 'filtro':fil, 'shapes':shapes}) \n","sub_path":"one/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"264893546","text":"# глобална променлива\nvalue = 100\n\ndef foo():\n print('Hello pYthon')\n\n# 1. дефиниция\ndef add_numbers(a, b):\n # c е локална променлива\n c = a + b\n return c\n\nif __name__ == '__main__':\n # 2. извикване\n x, y = 10, 20\n\n res = add_numbers(x,y)\n\n print(f'{x} + {y} = { res }')\n\n # z = foo()","sub_path":"ex29.py","file_name":"ex29.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"166454054","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 9 23:27:36 2018\n\n@author: Karan Sharma\n\"\"\"\n\n# Data Preprocessing Template\n\n# Importing the libraries\nimport pandas as pd\n# Importing the dataset\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)","sub_path":"data_prepocessing.py","file_name":"data_prepocessing.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"590338370","text":"import importlib\nimport pytest\nimport json\nimport sys\n\nimport numpy as np\nimport torch\n\nfrom delphi.GrFN.networks import GroundedFunctionNetwork\n\ndata_dir = \"tests/data/GrFN/\"\nsys.path.insert(0, \"tests/data/program_analysis\")\n\n@pytest.fixture\ndef crop_yield_grfn():\n return GroundedFunctionNetwork.from_fortran_file(\"tests/data/program_analysis/crop_yield.f\")\n\n@pytest.fixture\ndef petpt_grfn():\n return GroundedFunctionNetwork.from_fortran_file(\"tests/data/program_analysis/PETPT.for\")\n\n@pytest.fixture\ndef petasce_grfn():\n return GroundedFunctionNetwork.from_fortran_file(\"tests/data/program_analysis/PETASCE_simple.for\")\n\n\ndef test_petpt_creation_and_execution(petpt_grfn):\n assert isinstance(petpt_grfn, GroundedFunctionNetwork)\n assert len(petpt_grfn.inputs) == 5\n assert len(petpt_grfn.outputs) == 1\n\n values = {name: 1.0 for name in petpt_grfn.inputs}\n res = petpt_grfn.run(values)\n 
assert res == np.float32(0.029983712)\n\n\ndef test_petasce_creation(petasce_grfn):\n A = petasce_grfn.to_agraph()\n CAG = petasce_grfn.to_CAG_agraph()\n CG = petasce_grfn.to_call_agraph()\n\n values = {\n \"petasce::doy_-1\": 20.0,\n \"petasce::meevp_-1\": \"A\",\n \"petasce::msalb_-1\": 0.5,\n \"petasce::srad_-1\": 15.0,\n \"petasce::tmax_-1\": 10.0,\n \"petasce::tmin_-1\": -10.0,\n \"petasce::xhlai_-1\": 10.0,\n \"petasce::tdew_-1\": 20.0,\n \"petasce::windht_-1\": 5.0,\n \"petasce::windrun_-1\": 450.0,\n \"petasce::xlat_-1\": 45.0,\n \"petasce::xelev_-1\": 3000.0,\n \"petasce::canht_-1\": 2.0,\n }\n\n res = petasce_grfn.run(values)\n assert res == np.float32(0.00012496980836348878)\n\n\ndef test_crop_yield_creation(crop_yield_grfn):\n A = crop_yield_grfn.to_agraph()\n assert isinstance(crop_yield_grfn, GroundedFunctionNetwork)\n\n\n@pytest.mark.skip\ndef test_petasce_torch_execution():\n lambdas = importlib.__import__(\"PETASCE_simple_torch_lambdas\")\n pgm = json.load(open(data_dir + \"PETASCE_simple_torch.json\", \"r\"))\n G = GroundedFunctionNetwork.from_dict(pgm, lambdas)\n\n N = 100\n samples = {\n \"petasce::doy_0\": np.random.randint(1, 100, N),\n \"petasce::meevp_0\": np.where(np.random.rand(N) >= 0.5, 'A', 'W'),\n \"petasce::msalb_0\": np.random.uniform(0, 1, N),\n \"petasce::srad_0\": np.random.uniform(1, 30, N),\n \"petasce::tmax_0\": np.random.uniform(-30, 60, N),\n \"petasce::tmin_0\": np.random.uniform(-30, 60, N),\n \"petasce::xhlai_0\": np.random.uniform(0, 20, N),\n \"petasce::tdew_0\": np.random.uniform(-30, 60, N),\n \"petasce::windht_0\": np.random.uniform(0, 10, N),\n \"petasce::windrun_0\": np.random.uniform(0, 900, N),\n \"petasce::xlat_0\": np.random.uniform(0, 90, N),\n \"petasce::xelev_0\": np.random.uniform(0, 6000, N),\n \"petasce::canht_0\": np.random.uniform(0.001, 3, N),\n }\n\n values = {\n k: torch.tensor(v, dtype=torch.double) if v.dtype != \" 0)\n\n def test_resync(self):\n # error_ids = [1341, 1417, 1587, 1592, 1598, 1760, 1776, 2069, 2236, 2243, 2475, 2630, 2633, 2634, 2739, 2841,\n # 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2897, 2936, 2939,\n # 3321, 3346, 3413, 3519, 3530, 3579, 3711, 3712, 4155, 4190, 4438, 4540, 4545, 4551, 4557, 4560,\n # 4562, 4564, 4566, 4763, 4764, 4766, 4807, 4943, 4961, 4967, 4968, 4989, 5258, 5284, 5288, 5871,\n # 5876, 5880, 6288, 6416, 6431, 6443, 6449, 6451, 6464, 6477, 6531, 6533, 6541, 6558, 6573, 6579,\n # 6581, 6591, 6625, 6641, 6655, 6666, 6668, 6669, 6670, 6671, 6672, 6674, 8028, 8104, 8222, 8341,\n # 8367, 8442, 8454, 8462, 8464, 8466, 8473, 8478, 8480, 8481, 8482, 8488, 8497, 8499, 1240, 1264,\n # 1268, 1566, 1787, 2064, 2065, 2070, 2235, 2643, 2726, 2732, 2736, 2745, 2937, 3066, 3081, 3122,\n # 3147, 3178, 3207, 3306, 3322, 3374, 3426, 3431, 3492, 3562, 3581, 3615, 3672, 4116, 4147, 4167,\n # 4174, 4180, 4183, 4188, 4192, 4198, 4538, 4543, 4550, 4554, 4556, 4561, 4563, 4568, 4714, 4741,\n # 4744, 4754, 4760, 4767, 4806, 4911, 4939, 5220, 5223, 5245, 5291, 5299, 5317, 5543, 5820, 5864,\n # 5878, 6020, 6026, 6170, 6222, 6417, 6418, 6425, 6432, 6435, 6438, 6441, 6446, 6457, 6461, 6462,\n # 6465, 6469, 6472, 6482, 6486, 6492, 6494, 6496, 6497, 6499, 6510, 6512, 6523, 6530, 6532, 6542,\n # 6547, 6556, 6560, 6561, 6568, 6569, 6570, 6574, 6576, 6577, 6578, 6590, 6612, 6613, 6615, 6640,\n # 6654, 6662, 6667, 7402, 8027, 8279, 8342, 8415, 8431, 8437, 8440, 8444, 8455, 8472, 8476, 8477,\n # 8489, 8913, 8928]\n error_ids = [3556]\n # sync_statements(error_ids, 0, False)\n 
error_ids_after = sync_performance(error_ids)\n print(\"sync_performance error_ids = \", error_ids_after)\n print('count of errors before = ', len(error_ids), ' after = ', len(error_ids_after))\n\n def test_sync(self):\n _sync_statements_with_repository(1580)\n\n def test_tsec_crawler(self):\n crawler = Crawler()\n df = crawler.get_data((2021, 4, 29))\n with pd.ExcelWriter(gen_output_path('data', 'prices.xlsx')) as writer:\n df.to_excel(writer)\n writer.close()\n\n def test_get_prediction(self):\n with open(gen_output_path('data', 'prices.xlsx'), 'rb') as file:\n df = pd.read_excel(file)\n file.close()\n prices = df.loc[:, '收盤價']\n # errors = generate_predictions(prices, get_stock_codes(stock_type='上市') + get_stock_codes(stock_type='上櫃'))\n # print('test_get_prediction errors = ', errors)\n result = generate_prediction(2841, float(prices.loc[str(2841)]))\n print(result)\n\n def test_re_sync_dividend(self):\n resync_for_dividend_policy([1102])\n\n def test_sync_statement(self):\n from evaluation_utils2 import _sync_statements\n statement = _sync_statements(2330)\n # print(statement)\n\n def test_fetch_data_utils(self):\n '''\n stock_code_list = get_stock_codes(stock_type='上市')\n fetch_twse_price_measurement_raw_datas(stock_code_list[0: 1])\n\n tpex_stock_code_list = get_stock_codes(stock_type='上櫃')\n fetch_tpex_price_measurement_raw_datas(tpex_stock_code_list[0:1])\n result = MongoDBRepository(MongoDBMeta.TPEX_PRICE_MEASUREMENT).get_data(stock_code_list[0])\n self.assertIsNotNone(result)\n '''\n '''\n fetch_dividend_policy_raw_datas(2884)\n result = MongoDBRepository(MongoDBMeta.DIVIDEND_POLICY).get_data(2884)\n self.assertIsNotNone(result)\n '''\n # '''\n fetch_shareholder_equity_raw_data(2884, 2020, 3)\n result = MongoDBRepository(MongoDBMeta.SHARE_HOLDER).get_data(2809, {'year': 2020, 'season': 3})\n self.assertIsNotNone(result)\n # '''\n '''\n fetch_simple_balance_sheet_raw_data(2884, 2020, 3)\n result = MongoDBRepository(MongoDBMeta.SIMPLE_BALANCE_SHEET).get_data(2884, {'year': 2020, 'season': 3})\n self.assertIsNotNone(result)\n '''\n '''\n fetch_balance_sheet_raw_data(2884, 2020, 3)\n result = MongoDBRepository(MongoDBMeta.FULL_BALANCE_SHEET).get_data(2884, {'year': 2020, 'season': 3})\n self.assertIsNotNone(result)\n '''\n '''\n fetch_cash_flow_raw_data(2809, 2020, 3)\n result = MongoDBRepository(MongoDBMeta.CASH_FLOW).get_data(2809, {'year': 2020, 'season': 3})\n self.assertIsNotNone(result)\n '''\n def store_raw_data(self, data, output_dir, file_name):\n if data is not None:\n output_path = gen_output_path(output_dir, file_name)\n with open(output_path, 'wb') as output:\n output.write(data)\n output.close()\n\n def get_raw_data(self, input_dir, file_name):\n input_path = gen_output_path(input_dir, file_name)\n with open(input_path, 'rb') as in_put:\n raw_input = in_put.read()\n in_put.close()\n return raw_input\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":26195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"162772227","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ninput :\r\n7 3\r\na\r\nbc\r\nd\r\neba\r\nebc\r\nf\r\n你好\r\n\r\nebcc\r\n你好么\r\nebd\r\n\r\n6 3 \r\na\r\nbc\r\nd\r\neba\r\nebc\r\nf\r\n\r\nyuklx\r\nbcc\r\nff\r\n\r\n7 3\r\na\r\nbc\r\nd\r\neba\r\nebc\r\nf\r\n你好\r\n\r\nebcc\r\n你好么\r\nebd\r\n\r\n6 3 \r\na\r\nbc\r\nd\r\neba\r\nebc\r\nf\r\n\r\nyuklx\r\nbcc\r\nff\r\n\r\n\r\n output: \r\n1\r\n1\r\n-1\r\n-1 \r\n1\r\n1\r\n\r\n\"\"\"\r\nimport sys\r\n\r\ndef 
find(seek_str,pre):\r\n for item in pre.keys():\r\n if seek_str.startswith(item):\r\n return 1\r\n return -1\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n line1 = sys.stdin.readline().strip().split()\r\n line1 = [int(item) for item in line1]\r\n try:\r\n pre_num = line1[0]\r\n seek_num = line1[1]\r\n pre = {}\r\n seek = []\r\n for i in range(pre_num):\r\n line = str(sys.stdin.readline().strip())\r\n if not pre.has_key(line):\r\n pre[line] = 0\r\n\r\n null_line = sys.stdin.readline()\r\n for i in range(seek_num):\r\n line = str(sys.stdin.readline().strip())\r\n seek.append(line)\r\n\r\n for i in range(seek_num):\r\n print(find(seek[i], pre))\r\n null_line = sys.stdin.readline()\r\n print(' ')\r\n except:\r\n break\r\n\r\n","sub_path":"Toutiao_bishi/02_pre_str.py","file_name":"02_pre_str.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"626173016","text":"# TO-DO: Complete the selection_sort() function below\ndef selection_sort(arr):\n\n for ix in range(0, len(arr)-1):\n min_ix = ix\n for jx in range(ix+1, len(arr)):\n if arr[min_ix] > arr[jx]:\n min_ix = jx\n tmp = arr[ix]\n arr[ix] = arr[min_ix]\n arr[min_ix] = tmp\n\n return arr\n\n\ndef bubble_sort(arr):\n\n for _ in range(len(arr)):\n for ix, jx in zip(range(0, len(arr)-1), range(1, len(arr))):\n if arr[ix] > arr[jx]:\n tmp = arr[ix]\n arr[ix] = arr[jx]\n arr[jx] = tmp\n\n return arr\n\n'''\nSTRETCH: implement the Count Sort function below\n\nCounting sort is a sorting algorithm that works on a set of data where\nwe specifically know the maximum value that can exist in that set of\ndata. The idea behind this algorithm then is that we can create \"buckets\"\nfrom 0 up to the max value. This is most easily done by initializing an\narray of 0s whose length is the max value + 1 (why do we need this \"+ 1\"?).\n\nEach buckets[i] then is responsible for keeping track of how many times \nwe've seen `i` in the input set of data as we iterate through it.\nOnce we know exactly how many times each piece of data in the input set\nshowed up, we can construct a sorted set of the input data from the \nbuckets. 
\n\nWhat is the time and space complexity of the counting sort algorithm?\n'''\ndef count_sort(arr, maximum=10):\n\n buckets = [0 for _ in range(maximum+1)]\n for el in arr:\n if el < 0 or maximum < el:\n raise ValueError()\n buckets[el] += 1\n\n arr = [x for x, count in enumerate(buckets) for _ in range(count)]\n\n return arr\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"159995380","text":"\n\ndef data(value, token, db, data):\n email = data[\"email\"]\n phone = data[\"phone\"]\n name = data[\"name\"]\n role = data[\"role_name\"]\n\n if len(name) <= 3 or len(name) >= 15:\n return {'Error': 'range between 3-15'}\n elif len(email) <= 10 or len(email) >= 25:\n return {'Error': 'email range between 10-25'}\n elif len(str(phone)) != 10:\n return {'Error': 'phone number must contain 10 digits'}\n\n\n cursor = db.cursor()\n query_id = \"select * from vote where account_id = '\" + str(value) + \"'\"\n cursor.execute(query_id)\n bha = cursor.fetchall()\n login_list = []\n for i in bha:\n k = {\"account_id\": i[0], \"name\": i[1], \"phone\": i[2], \"email\": i[3], \"password\": i[4], \"role_name\": i[5],\n \"token\": i[6]}\n login_list.append(k)\n\n query = \"select * from vote where token = '\" + str(token) + \"'\"\n cursor.execute(query)\n red = cursor.fetchall()\n login_list11 = []\n for i in red:\n k = {\"account_id\": i[0], \"name\": i[1], \"phone\": i[2], \"email\": i[3], \"password\": i[4], \"role_name\": i[5],\n \"token\": i[6]}\n login_list11.append(k)\n\n if len(login_list) == 0:\n return {'Error': \"invalid id \"}\n elif len(login_list11) == 0:\n return {'Error': 'invalid token'}\n\n if login_list[0][\"name\"] == login_list11[0][\"name\"] and login_list[0][\"account_id\"] == login_list11[0][\"account_id\"]:\n try:\n query = \" UPDATE vote SET email = ('\" + str(email) + \"'), phone = ('\" + str(phone) + \"'), name =('\" + str(name) + \"'), role_name =('\" + str(role) + \"') where account_id = '\" + str(value) + \"'\"\n cursor.execute(query)\n db.commit()\n\n return {\"value\": \"email,name,phone, role registered sucessfully\"}\n\n except Exception as e:\n return {\"error\": str(e).split()[1].replace(\"\\\"\",\"\")+str(e).split()[-1].replace(\"vote.\",\"\").replace(\")\",\"\").replace(\"\\\"\",\"\")}\n\n else:\n return {\" vote \": \" enter the valid credentilas \"}\n","sub_path":"hvr/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528914460","text":"from tests import *\nimport matplotlib.pyplot as plt\n\ndef create_fig():\n fig, axes = plt.subplots()\n axes.set_xlabel('Word length') # Add an x-label to the axes.\n axes.set_ylabel('Elapsed time') # Add a y-label to the axes.\n axes.set_title(\"Spent time\") # Add a title to the axes.\n return fig, axes\n\ndef plotter(axes, data1, data2, param_dict):\n out = axes.plot(data1, data2, **param_dict)\n return out\n\ndef main():\n while 1:\n print(\"Меню\")\n print(\"1. Запустить с введёнными строками\")\n print(\"2. Сравнить\")\n print(\"3. 
Выйти\")\n c = int(input(\"Введите вариант: \"))\n if c == 1:\n s1 = input(\"Введите исходную строку: \")\n s2 = input(\"Введите реузльтирующую строку: \")\n inputed_test(s1, s2)\n \n elif c == 2:\n time_data = measure_time(20, 500, 50)\n fig, ax1 = plt.subplots(1)\n plotter(ax1, time_data[0], time_data[1][0],\n {\"label\": \"Матричный\"})\n plotter(ax1, time_data[0], time_data[1][1], {\n \"label\": \"Матрично-рекурсивный\"})\n plotter(ax1, time_data[0], time_data[1][2],\n {\"label\": \"Рекурсивный\"})\n\n ax1.set_xlabel('Длина слов') \n ax1.set_ylabel('Затраченное время') \n ax1.set_title(\"Зависимость затрат времени от длины слова\")\n plt.legend()\n plt.show()\n\n\n time_data = measure_time(20, 9, 1, True)\n\n fig, ax2 = plt.subplots(1)\n plotter(ax2, time_data[0], time_data[1][0],\n {\"label\": \"Матричный\"})\n plotter(ax2, time_data[0], time_data[1][1], {\n \"label\": \"Матрично-рекурсивный\"})\n plotter(ax2, time_data[0], time_data[1][2],\n {\"label\": \"Рекурсивный\"})\n plotter(ax2, time_data[0], time_data[1][3],\n {\"label\": \"Дамерау-ЛЕвенштейна\"})\n\n ax2.set_xlabel('Длина слов') # Add an x-label to the axes.\n ax2.set_ylabel('Затраченное время') # Add a y-label to the axes.\n ax2.set_title(\"Зависимость затрат времени от длины слова\") # Add a title to the axes.\n plt.legend()\n plt.show()\n\n\n elif c == 3:\n break\n\nmain()\n","sub_path":"lab_01/lab_01.py","file_name":"lab_01.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"338639530","text":"from ParseTeacher import *\nclass Subject:\n\tdef __init__(self,name=\"\",slots=1,duration=1):\n\t\tself.name=name\n\t\tself.slots=slots\n\t\tself.duration=duration\n\tdef __init__(self,code=\"\",name=\"\",designation=\"\",d_rank=0):\n\t\tself.code=code\n\t\tself.name=name\n\t\tself.designation=designation\n\t\tself.d_rank=d_rank\n\tdef display(self):\n\t\t# print(\"%3d\\t%5s\\t%15s\\t%10s\\n\"%(self.d_rank,self.code,self.name,self.designation))\n\t\tprint(self.d_rank,\"#\",self.code,\"#\",self.name,\"#\",self.designation,\"#\",\"\\n\")\n\tdef read():\n\t\tf=open(\"Teaching.csv\",\"r\")\n\t\tinp=f.read()\n\t\t#print(type(inp))\n\t\tl=inp.split(\"\\n\")\n\t\tprof=list()\n\t\terror=0\n\t\tfor i in range(len(l)):\n\t\t\traw=l[i].split(\",\")\n\t\t\tfor j in range(len(raw)):\n\t\t\t\traw[j]=raw[j].strip()\n\t\t\tif(raw[0].isdigit()):\n\t\t\t\tif(raw[2]==\"\"):\n\t\t\t\t\traw[2]=\"Teaching Assistant\"\n\t\t\t\tprof.append(Teacher(raw[3],raw[1],raw[2],int(raw[0])+error))\n\t\t\t\tif(int(raw[0])==84):\n\t\t\t\t\terror=84\n\t\t\t\n\t\treturn prof\n\nsub=Subject.read()\nfor i in range(len(prof)):\n\tsub[i].display()\n\n\n","sub_path":"timeTableScheduling/data/ParseSubject.py","file_name":"ParseSubject.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"168247507","text":"####################\n## Renzhong Lu ##\n## lurz ##\n####################\n\nimport sqlite3\nimport plotly.graph_objs as go\n\n# proj3_choc.py\n# You can change anything in this file you want as long as you pass the tests\n# and meet the project requirements! 
You will need to implement several new\n# functions.\n\n# Part 1: Read data from a database called choc.db\nDBNAME = 'choc.sqlite'\n\n\ndef location(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the location requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n string\n a string of the partial SQL query\n '''\n if 'none' in command:\n return ''\n elif 'country' in command:\n al2 = ''\n index = command.find('country') + 8\n while index < len(command) and command[index] != ' ':\n al2 += command[index]\n index += 1\n return f\"C.Alpha2='{al2}'\"\n elif 'region' in command:\n name = ''\n index = command.find('region') + 7\n while index < len(command) and command[index] != ' ':\n name += command[index]\n index += 1\n return f\"C.Region='{name}'\"\n else:\n return ''\n\n\ndef sell_source(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the sell or source location requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n string\n a string of the partial SQL query\n '''\n if 'sell' in command:\n return 'B.CompanyLocationId=C.id'\n elif 'source' in command:\n return 'B.BroadBeanOriginId=C.id'\n else:\n return 'B.CompanyLocationId=C.id'\n\n\ndef rating(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the ORDER BY requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n string\n a string of the partial SQL query\n '''\n if 'ratings' in command:\n return ('ORDER BY B.Rating', 'ORDER BY AVG(B.Rating)', 'AVG(B.Rating)')\n elif 'cocoa' in command:\n return ('ORDER BY B.CocoaPercent',\n 'ORDER BY AVG(B.CocoaPercent)', 'AVG(B.CocoaPercent)')\n elif 'number_of_bars' in command:\n return ('', 'ORDER BY COUNT(B.Id)', 'COUNT(B.Id)')\n else:\n return ('ORDER BY B.Rating', 'ORDER BY AVG(B.Rating)', 'AVG(B.Rating)')\n\n\ndef top_bottom(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the ascending or descending requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n string\n a string of the partial SQL query\n '''\n if 'top' in command:\n return 'DESC'\n elif 'bottom' in command:\n return 'ASC'\n else:\n return 'DESC'\n\n\ndef get_integer(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the limited entries requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n string\n a string of the partial SQL query\n '''\n splitted = command.split(' ')\n for word in splitted:\n if word.isnumeric():\n return 'LIMIT ' + word\n return 'LIMIT 10'\n\n\ndef get_sort(command):\n ''' Constructs the partial SQL query to retrieve\n data based on the ratings, order and limit requirements\n \n Parameters\n ----------\n command : string\n the command strings input by users\n \n Returns\n -------\n tuple : (rate, order, limit)\n a tuple of the partial SQL query strings\n '''\n rate = rating(command)\n order = top_bottom(command)\n limit = get_integer(command)\n return (rate, order, limit)\n\n\ndef get_query(query):\n '''Connect to DB and executes SQL query to retrieve\n the data we need. 
It's a helper function that is used\n in all other functions.\n \n Parameters\n ----------\n query : string\n a query string that we need to fetch from the database\n\n Returns\n -------\n list\n a list of tuples that represent the query result\n '''\n connection = sqlite3.connect(DBNAME)\n cursor = connection.cursor()\n result = cursor.execute(query).fetchall()\n connection.close()\n return result\n\n\n# Part 1: Implement logic to process user commands\ndef process_command(command):\n '''Based on the command input by the user, construct\n the query to the SQL accordingly.\n Connect to DB and executes SQL query to retrieve\n the data we need. Return the results.\n \n Parameters\n ----------\n command : string\n the command strings input by users\n\n Returns\n -------\n list\n a list of tuples that represent the query result\n '''\n option = command.split(' ')[0]\n \n # Command: bars\n if option == 'bars':\n locate = location(command)\n s_s = sell_source(command)\n clause1 = ''\n if len(locate) != 0:\n clause1 = 'INNER JOIN Countries C ON ' + s_s + ' AND ' + locate\n\n rate, order, limit = get_sort(command)\n query = (\"SELECT B.SpecificBeanBarName, B.Company, D.EnglishName, \" +\n \"B.Rating, B.CocoaPercent, E.EnglishName \" +\n f\"FROM Bars B \" +\n \"LEFT JOIN Countries D ON B.CompanyLocationId=D.Id \" +\n f\"LEFT JOIN Countries E ON B.BroadBeanOriginId=E.Id \" +\n f\"{clause1} {rate[0]} {order} {limit}\")\n result = get_query(query)\n return result\n # Command: companies\n elif option == 'companies':\n locate = location(command)\n clause1 = ''\n if len(locate) != 0:\n clause1 = sell_source('sell') + ' AND ' + locate + ' AND '\n\n rate, order, limit = get_sort(command)\n query = (f\"SELECT B.Company, D.EnglishName, {rate[2]} \" +\n \"FROM Bars B, Countries C, Countries D \" +\n f\"WHERE {clause1}B.CompanyLocationId=D.Id GROUP BY \" +\n \"B.Company HAVING COUNT(DISTINCT B.Id)>4 \" +\n f\"{rate[1]} {order} {limit}\")\n result = get_query(query)\n return result\n # Command: countries\n elif option == 'countries':\n locate = location(command)\n s_s = sell_source(command)\n clause1 = s_s\n if len(locate) != 0:\n clause1 = s_s + ' AND ' + locate\n\n rate, order, limit = get_sort(command)\n query = (f\"SELECT C.EnglishName, C.Region, {rate[2]}\" +\n \" FROM Bars B, Countries C \" +\n f\"WHERE {clause1} GROUP BY C.EnglishName \" +\n \"HAVING COUNT(DISTINCT B.Id)>4 \" +\n f\"{rate[1]} {order} {limit}\")\n result = get_query(query)\n return result\n # Command: regions\n elif option == 'regions':\n s_s = sell_source(command)\n rate, order, limit = get_sort(command)\n query = (f\"SELECT C.Region, {rate[2]} FROM Bars B, Countries C \" +\n f\"WHERE {s_s} GROUP BY C.Region HAVING COUNT(DISTINCT B.Id)>4 \" +\n f\"{rate[1]} {order} {limit}\")\n result = get_query(query)\n return result\n else:\n # should never be here\n pass\n\n return []\n\n\ndef print_response(result, command):\n '''Print the query results beautifully.\n According to different results, it prints different\n format of fixed-width columns.\n \n Parameters\n ----------\n result : list\n list of tuples that represent the query result\n command : string\n the command strings input by users\n\n Returns\n -------\n None\n '''\n col = len(result[0])\n row6 = \" {name1:<20s} {name2:<12s} {loc1:<20s} {rate1:1.1f} {rate2:.0%} {loc2:<20s} \".format\n row3rating = \" {name1:<20s} {name2:<20s} {rate1:1.1f} \".format\n row3cocoa = \" {name1:<20s} {name2:<20s} {rate1:.0%} \".format\n row3num = \" {name1:<20s} {name2:<20s} {rate1:5n} \".format\n 
row2rating = \" {name1:<20s} {rate1:1.1f} \".format\n row2cocoa = \" {name1:<20s} {rate1:.0%} \".format\n row2num = \" {name1:<20s} {rate1:5n} \".format\n\n if col == 6:\n for r in result:\n print(row6(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n name2=r[1] if len(r[1]) < 12 else r[1][0:9] + '...',\n loc1=r[2] if len(r[2]) < 20 else r[2][0:17] + '...',\n rate1=r[3],\n rate2=r[4],\n loc2=r[5] if len(r[5]) < 20 else r[5][0:17] + '...'))\n elif col == 3:\n if 'ratings' in command:\n for r in result:\n print(row3rating(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n name2=r[1] if len(r[1]) < 20 else r[1][0:17] + '...',\n rate1=r[2]))\n elif 'cocoa' in command:\n for r in result:\n print(row3cocoa(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n name2=r[1] if len(r[1]) < 20 else r[1][0:17] + '...',\n rate1=r[2]))\n elif 'number_of_bars' in command:\n for r in result:\n print(row3num(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n name2=r[1] if len(r[1]) < 20 else r[1][0:17] + '...',\n rate1=r[2]))\n else:\n for r in result:\n print(row3rating(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n name2=r[1] if len(r[1]) < 20 else r[1][0:17] + '...',\n rate1=r[2]))\n else:\n if 'ratings' in command:\n for r in result:\n print(row2rating(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n rate1=r[1]))\n elif 'cocoa' in command:\n for r in result:\n print(row2cocoa(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n rate1=r[1]))\n elif 'number_of_bars' in command:\n for r in result:\n print(row2num(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n rate1=r[1]))\n else:\n for r in result:\n print(row2rating(name1=r[0] if len(r[0]) < 20 else r[0][0:17] + '...',\n rate1=r[1]))\n\n\ndef plot_bar(result, command):\n '''Plot the query results in a bar chart.\n According to different results, build up\n a different bar chart.\n \n Parameters\n ----------\n result : list\n list of tuples that represent the query result\n command : string\n the command strings input by users\n\n Returns\n -------\n None\n '''\n x_val = [r[0] for r in result]\n y_val = []\n splitted = command.split(' ')\n if splitted[0] == 'bars':\n if 'ratings' in command:\n y_val = [r[3] for r in result]\n elif 'cocoa' in command:\n y_val = [r[4] for r in result]\n else:\n y_val = [r[3] for r in result]\n elif splitted[0] == 'companies' or splitted[0] == 'countries':\n y_val = [r[2] for r in result]\n else:\n y_val = [r[1] for r in result]\n bar_data = go.Bar(x=x_val, y=y_val)\n fig = go.Figure(data=bar_data)\n fig.show()\n return\n\n\ndef check_command(command):\n '''Check if the user inputs a valid command\n \n Parameters\n ----------\n command : string\n the command strings input by users\n\n Returns\n -------\n bool :\n True if the command is valid. 
False otherwise.\n '''\n splitted = command.split(' ')\n possible = ['none', 'country', 'region', 'sell', 'source', 'ratings', 'cocoa','number_of_bars', 'top', 'bottom', 'barplot']\n if len(splitted) == 0 or (splitted[0] != 'bars' and splitted[0] != 'companies' and splitted[0] != 'countries' and splitted[0] != 'regions'):\n return False\n \n for i in range(1, len(splitted)):\n cur = splitted[i]\n if cur.isnumeric():\n continue\n if splitted[0] == 'bars' and 'number_of_bars' in cur:\n return False\n if splitted[0] == 'companies' and ('sell' in cur or 'source' in cur):\n return False\n if splitted[0] == 'countries' and 'country' in cur:\n return False\n if splitted[0] == 'regions' and ('none' in cur or 'country' in cur or 'region' in cur):\n return False\n flag = False\n for p in possible:\n if p in cur:\n flag = True\n if not flag:\n return False\n \n\n return True\n\n\ndef load_help_text():\n '''Return the help text for users.\n \n Parameters\n ----------\n None\n\n Returns\n -------\n string :\n string contains the help list from the file.\n '''\n with open('Proj3Help.txt') as f:\n return f.read()\n\n# Part 2 & 3: Implement interactive prompt and plotting. We've started for you!\n\n\ndef interactive_prompt():\n '''Allow a user to interactively input \n commands and to nicely format the results\n for presentation.\n \n Parameters\n ----------\n None\n\n Returns\n -------\n None\n '''\n help_text = load_help_text()\n response = ''\n while response != 'exit':\n response = input('Enter a command: ')\n\n if response == 'help':\n print(help_text)\n continue\n elif response == 'exit':\n print('bye')\n break\n else:\n if check_command(response):\n if 'barplot' in response:\n plot_bar(process_command(response), response)\n print (' ')\n else:\n print_response(process_command(response), response)\n print (' ')\n else:\n print('Command not recognized: ' + response + '\\n')\n\n\n# Make sure nothing runs or prints out when this file is run as a\n# module/library\nif __name__ == \"__main__\":\n interactive_prompt()\n","sub_path":"proj3_choc.py","file_name":"proj3_choc.py","file_ext":"py","file_size_in_byte":14139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"177613927","text":"import tensorflow as tf\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom numpy import asarray\r\nimport cv2\r\nimport time\r\nfrom win10toast import ToastNotifier\r\n\r\nasian = 2\r\nwhite = 0\r\nblack = 4\r\nbrown = 1\r\n\r\nraces = {\r\n 2:\"asian\",\r\n 0:\"white\",\r\n 1:\"brown\",\r\n 4:\"black\",\r\n 3:\"south-east asian\"\r\n}\r\n\r\ncomments = {\r\n \"white\":[\r\n \"pasty ass white boi\",\r\n \"go back to the wiggles concert\",\r\n \"u must be gay\",\r\n \"cracker boy\",\r\n \"go chocke on some spice\"\r\n ],\r\n \"asian\":[\r\n \"zipper head\",\r\n \"slit eyes\"\r\n ],\r\n \"brown\":[\r\n \"go eat some curry u currymuncher\",\r\n \"okey dokey\"\r\n ],\r\n \"black\":[\r\n \"THE N WORD\",\r\n \"eat sum kfc\"\r\n ],\r\n \"south-east asian\":[\r\n \"ur weird\",\r\n \"stp being weird\",\r\n ]\r\n}\r\n\r\n\r\ndef load_model(name):\r\n model = tf.keras.models.load_model(name)\r\n\r\n #model.summary()\r\n\r\n return model\r\n\r\n\r\ndef predict(model, image):\r\n # load the image and convert into\r\n # numpy array\r\n img = Image.open(image)\r\n \r\n # asarray() class is used to convert\r\n # PIL images into NumPy arrays\r\n numpydata = asarray(img)\r\n\r\n numpydata = np.resize(numpydata, (1, 48, 48, 1))\r\n\r\n prediction = 
model.predict(numpydata)\r\n\r\n print(prediction)\r\n\r\n return prediction\r\n\r\ndef main():\r\n model = load_model('model_ethnicity.h5')\r\n\r\n camera_port = 0\r\n camera = cv2.VideoCapture(camera_port)\r\n time.sleep(0.1) # If you don't wait, the image will be dark\r\n return_value, image = camera.read()\r\n cv2.imwrite(\"test.png\", image)\r\n del(camera)\r\n\r\n model = load_model('model_ethnicity.h5')\r\n prediction = predict(model, \"test.png\")[0]\r\n race = get_race(prediction)\r\n\r\n model = load_model('model_gender.h5')\r\n prediction = predict(model, \"test.png\")[0]\r\n gender = get_gender(prediction)\r\n \r\n\r\n toaster = ToastNotifier()\r\n #toaster.show_toast(race, comments[race][0]) \r\n toaster.show_toast(race, gender) \r\n\r\ndef get_race(prediction):\r\n max = 0\r\n for k in range(0, 5):\r\n if prediction[k] >= max:\r\n num = k\r\n max = prediction[k]\r\n print(max)\r\n print(num)\r\n return races[num]\r\n\r\ndef get_gender(prediction):\r\n if prediction[0] > prediction[1]:\r\n return \"female\"\r\n else:\r\n return \"male\"\r\n\r\nmain()\r\n\r\n#print(get_gender(predict(load_model(\"model_ethnicity.h5\"), \"blackman.jpg\")[0]))","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293824809","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n# Chrome のオプションを設定する\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headless')\n\n# Selenium Server に接続する\ndriver = webdriver.Remote(\n command_executor='http://local.selenium:4444/wd/hub',\n desired_capabilities=options.to_capabilities(),\n options=options,\n)\nwidth = 1200\nheight = 900\ndriver.set_window_size(width, height)\n\n# Selenium 経由でブラウザを操作する\ndriver.get('https://tech.manafukurou.com')\nprint(driver.current_url)\n\n\n# 2. 例として記事一覧の 4 ページ目に移動する\ndriver.find_element(By.XPATH, '//*[@id=\"list\"]/a[4]').click()\nprint(driver.current_url)\n\n\n#3. 
画面の幅をコンテンツの幅と合わせてスクリーンショットをとる\nw = driver.execute_script('return document.body.scrollWidth')\nh = driver.execute_script('return document.body.scrollHeight')\ndriver.set_window_size(w, h)\ndriver.save_screenshot('screenshot.png')\n\n\n# ブラウザを終了する\ndriver.quit()\n\n\n","sub_path":"python/script/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568972358","text":"import psycopg2\nconn = psycopg2.connect(dbname=\"hospital_db\", host=\"localhost\", user=\"keyurpatel\", password=\"root\")\ncur = conn.cursor()\n\nwhile True:\n print(\"Please enter Patient First Name: \")\n first_name = str(input())\n\n\n print(\"Please enter Patient Last Name: \")\n last_name = str(input())\n\n cur.execute ('''\n SELECT \n DISTINCT CONCAT(p.first_name,' ',p.last_name) AS patient, \n CONCAT(d.first_name,' ',d.last_name) AS Doctor,d.email,d.phone \n FROM visit AS v\n JOIN doctor AS d ON d.doctor_id = v.doctor_id\n JOIN patient AS p ON p.patient_id = v.patient_id\n WHERE p.first_name=%s AND p.last_name= %s\n ''',(first_name,last_name));\n rows = cur.fetchall()\n # print(f\"Patient Visited : \", \"\\n\", rows)\n\n row_count=len(rows)\n print(' Patient Name ', ' Doctor Name', ' Email',' Contact'),\n print('-----------------------------------------------------------------------------------------------------------')\n i=0\n while ip): \r\n b=str(b);a=str(a)\r\n if dirigido==False:Arista2.A[a,b,]=[]\r\n if dirigido==True: Arista2.A[a,b,'Directed',]=[] \r\n return Arista2.A\r\n\r\ndef aristasGeografico(n,r, dirigido=boolean):\r\n for i in range(1,n+1):\r\n Arista2.x[i]=randrange(1,10)\r\n Arista2.y[i]=randrange(1,10)\r\n \r\n for j in range(1,n):\r\n for k in range(1,n):\r\n d=sqrt((pow((Arista2.x[j]-Arista2.x[k]),2))+(pow((Arista2.y[j]-Arista2.y[k]),2))) \r\n \r\n if d.real=0: \r\n print(y)\r\n \r\n if cadena[1]==c2[1]: print('contun')\r\n","sub_path":"Arista5.py","file_name":"Arista5.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"387701691","text":"import os\nimport sys\n\ninfile = open('./tab-separated_value_files/Duplications_full.tsv', 'r')\ninfile2 = open('starting_gene_in_blocks.txt', 'r')\noutfile = open('duplicated_blocks.tsv', 'w')\n\nDuplications = infile.readlines()\nStartOfGeneBlocks = infile2.readlines() #['Zizania_palustris_FUN_018861-T1', 'Zizania_palustris_FUN_001795-T1']\n\noutfile.write('Orthogroup' + '\\t' + 'Genes 1' + '\\t' + 'Genes 2' + '\\n')\n\nfor gene in StartOfGeneBlocks:\n print(gene)\n firstline = True\n for line in Duplications:\n if firstline:\n firstline = False\n continue\n line.strip()\n line = line.split('\\t')\n genes1 = line[5].split(',')\n genes2 = line[6].split(',')\n if gene in genes1:\n outfile.write(line[0] + '\\t' + line[5] + '\\t' + line[6] + '\\n')\n elif gene in genes2:\n outfile.write(line[0] + '\\t' + line[5] + '\\t' + line[6] + '\\n')\n\noutfile.close()\n","sub_path":"assorted_scripts/find_blocks_for_KaKs.py","file_name":"find_blocks_for_KaKs.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"225491281","text":"#!/usr/bin/python\n\nimport sys\nimport configparser\n\nswitchlist = sys.argv[1]\nswitchimages = sys.argv[2]\n\nconfig = configparser.ConfigParser()\nconfig.read(switchimages)\n\nfor sectionname in config.sections():\n section = 
config[sectionname]\n print('sectionname:', sectionname)\n print(' '*7, 'image:', section['image'])\n print(' '*7, 'md5:', section['md5'])\n \n","sub_path":"trainings/log/detail/2019-10-28/SwitchZeug/michi-switch-upgrade-von-ini.py","file_name":"michi-switch-upgrade-von-ini.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"236152911","text":"#Taking input from the command line\ncodon = input('Input your codon:')\n\ncodonList = ['UAA', 'UAG', 'UGA']\n\n#Sample Switch statement using if and elif\nif codon == \"AUG\":\n print('This codon is the start codon.')\nelif codon in codonList:\n print('This codon is not the start codon.')\nelse:\n print('None of the above')\n","sub_path":"codons.py","file_name":"codons.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"362232013","text":"# Project Euler\n# Problem 32 - Pandigital Products\n# Copyright (c) Project Chang. All rights reserved.\n# Author: Sammy Chang\n#\n# https://github.com/schang1146/project_euler\n\nimport time\nfrom itertools import permutations\n\ndef factor(n):\n # factor finds all factors of 'n' by iterating from 1 to sqrt(n)\n\n factors = []\n for i in range(1,int(n**(1/2)) + 1):\n if n % i == 0:\n factors.append(i)\n\n for num in factors[::-1]:\n factors.append(n//num)\n\n return sorted(list(set(factors)))\n\n\ndef main():\n data = {}\n prod_length = 5\n numbers = [str(x) for x in range(1, 10)]\n while prod_length > 0:\n for k in permutations(numbers, prod_length):\n i = int(''.join(k[0:prod_length]))\n if '0' not in str(i):\n factors = factor(i)\n if len(factors) % 2 != 0:\n factors = factors[0:len(factors)//2 + 1] + factors[len(factors)//2:len(factors)]\n\n for j in range(len(factors)//2):\n if '123456789' == ''.join(sorted(str(i) + str(factors[j]) + str(factors[-j-1]))):\n data[i] = True\n break\n\n prod_length -= 1\n\n answer = 0\n for number in data:\n answer += number\n return answer\n\n\nif __name__ == \"__main__\":\n startTime = time.time()\n print('Answer:', main())\n print('runtime:', time.time()-startTime, 'sec')\n","sub_path":"python/p032.py","file_name":"p032.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"78232272","text":"import pygame\nimport random\n\nALTO=400\nANCHO=600\nROJO=(255,0,0)\nBLANCO=(255,255,255)\nAZUL=(59,131,189)\nVERDE=(0,255,0)\ncentro=[ANCHO/2,ALTO/2]\n\nclass Jugador(pygame.sprite.Sprite):\n def __init__(self,an,al):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.Surface([an,al])\n self.image.fill(ROJO)\n self.rect=self.image.get_rect()\n self.var_x=0\n self.var_y=0\n def update(self):\n lp=pygame.mouse.get_pos()\n self.rect.x=lp[0]-30\n #self.rect.x+=self.var_x\n #self.rect.y+=self.var_y\n #if self.rect.x>=ANCHO-self.rect[2]:\n # self.var_x=-6\n #if self.rect.x<=0:\n # self.var_x=6\n if self.rect.y>=ALTO-self.rect[3]:\n self.var_y=-4\n if self.rect.y<=0:\n self.var_y=4\nclass Final(pygame.sprite.Sprite):\n def __init__(self,an,al):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.Surface([an,al])\n self.image.fill(BLANCO)\n self.rect=self.image.get_rect()\n\nclass Bloque(pygame.sprite.Sprite):\n def __init__(self,an,al):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.Surface([an,al])\n self.image.fill(VERDE)\n self.rect=self.image.get_rect()\n\nclass 
Rival(pygame.sprite.Sprite):\n def __init__(self,an,al):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.Surface([an,al])\n self.image.fill(AZUL)\n self.rect=self.image.get_rect()\n self.var_x=2\n self.var_y=2\n def update(self):\n #self.rect.x+=self.var_x\n self.rect.y+=self.var_y\n if self.rect.x>=ANCHO-self.rect[2]:\n self.var_x=-2\n if self.rect.x<=0:\n self.var_x=2\n if self.rect.y>=ALTO-self.rect[3]-30:\n self.var_y=-2\n if self.rect.y<=0:\n self.var_y=2\n\nclass Proyectil(pygame.sprite.Sprite):\n def __init__(self,an,al):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.Surface([an,al])\n self.image.fill(VERDE)\n self.rect=self.image.get_rect()\n self.var_x=2\n self.var_y=-2\n def update(self):\n #self.rect.x+=self.var_x\n self.rect.y+=self.var_y\n if self.rect.x>=ANCHO-self.rect[2]:\n self.var_x=-2\n if self.rect.x<=0:\n self.var_x=2\n if self.rect.y>=ALTO-self.rect[3]-30:\n self.var_y=-2\n if self.rect.y<=0:\n self.var_y=2\n\n\n\nif __name__=='__main__':\n pygame.init()\n pantalla=pygame.display.set_mode([ANCHO,ALTO])\n pantalla.fill(BLANCO)\n fuente=pygame.font.Font(None,36)\n jp=Jugador(70,10)\n bar=Final(600,40)\n bar1=Final(600,1)\n bola=Rival(10,10)\n tmp=0\n jugadores=pygame.sprite.Group()\n general=pygame.sprite.Group()\n balas=pygame.sprite.Group()\n general.add(bola)\n general.add(jp)\n general.add(bar)\n general.add(bar1)\n jugadores.add(jp)\n\n reloj=pygame.time.Clock()\n ptos=0\n pas=0\n riv=0\n vidas=0\n jp.rect.x=100\n jp.rect.y=340\n bar.rect.x=0\n bar.rect.y=360\n bar1.rect.x=0\n bar1.rect.y=0\n rivales=pygame.sprite.Group()\n bloques=pygame.sprite.Group()\n rivales.add(bola)\n #for i in range(6):\n #r=Bloque(40,20)\n #r.rect.x=random.randrange(10, ANCHO-20)\n #r.rect.y=random.randrange(0, 200)\n #general.add(r)\n #bloques.add(r)\n for i in range(1):\n r=Rival(5,5)\n r.rect.x=random.randrange(10, ANCHO-20)\n r.rect.y=random.randrange(-400, 0)\n rivales.add(r)\n general.add(r)\n general.draw(pantalla)\n pygame.display.flip()\n fin=False\n while not fin:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n fin=True\n if event.type==pygame.KEYDOWN:\n if event.key==pygame.K_RIGHT:\n jp.var_x=4\n if event.key==pygame.K_LEFT:\n jp.var_x=-4\n #if event.key==pygame.K_UP:\n # jp.var_y=-5\n #if event.key==pygame.K_DOWN:\n # jp.var_y=5\n if event.key==pygame.K_SPACE:\n #jp.var_x=0\n b=Proyectil(10,10)\n b.rect.x=jp.rect.x+40\n b.rect.y=jp.rect.y\n balas.add(b)\n general.add(b)\n #jp.var_y=0\n #ls_col=pygame.sprite.spritecollide(jp,rivales,True)\n lb_col=pygame.sprite.spritecollide(bar,rivales,True)\n lr_col=pygame.sprite.spritecollide(bola,bloques,True)\n lb_col=pygame.sprite.groupcollide(balas,rivales,True,True)\n lb_col=pygame.sprite.spritecollide(bar1,balas,True)\n lt_col=pygame.sprite.groupcollide(rivales,jugadores,False,True)\n for elemento in lb_col:\n vidas-=1\n pas+=1\n #for elemento in ls_col:\n #ptos+=1\n #pas+=1\n #elemento.var_y=-2\n for elemento in lr_col:\n ptos+=1\n pas+=1\n bola.var_y=-bola.var_y\n #print elemento\n if tmp==0:\n r=Rival(20,20)\n r.rect.x=random.randrange(10, ANCHO-20)\n r.rect.y=random.randrange(-400, 0)\n rivales.add(r)\n general.add(r)\n tmp=100\n else:\n tmp-=1\n general.update()\n pantalla.fill(BLANCO)\n general.draw(pantalla)\n pygame.display.flip()\n reloj.tick(60)","sub_path":"Proyectiles/Proyectiles.py","file_name":"Proyectiles.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"472378945","text":"from 
bangtal import *\nfrom enum import Enum\n\nsetGameOption(GameOption.ROOM_TITLE, False)\nsetGameOption(GameOption.INVENTORY_BUTTON, False)\nsetGameOption(GameOption.MESSAGE_BOX_BUTTON, False)\n\nscene = Scene('오델로','images/background.png')\n\n#상태함수\nclass State(Enum):\n BLANK = 0\n POSSIBLE = 1\n BLACK = 2\n WHITE = 3\n\n#턴함수 \nclass Turn(Enum):\n BLACK = 1\n WHITE = 2\nturn = Turn.BLACK\n\n#상태함수\ndef setState(x,y,s):\n object = board[y][x]\n object.state = s\n if s == State.BLANK:\n object.setImage(\"images/blank.png\")\n elif s == State.BLACK:\n object.setImage(\"images/black.png\")\n elif s == State.WHITE:\n object.setImage(\"images/white.png\")\n elif turn == Turn.BLACK:\n object.setImage(\"images/black possible.png\")\n else:\n object.setImage(\"images/white possible.png\")\n\n#상태함수2\ndef stone_onMouseAction(x,y):\n global turn\n\n object = board[y][x]\n if object.state == State.POSSIBLE:\n if turn == Turn.BLACK:\n setState(x,y,State.BLACK)\n reverse_xy(x, y)\n turn = Turn.WHITE\n else:\n setState(x,y,State.WHITE)\n reverse_xy(x, y)\n turn = Turn.BLACK \n \n if not setPossible():\n if turn == Turn.BLACK: turn = Turn.WHITE\n else: turn = Turn.BLACK\n\n if not setPossible():\n scorecheck(x,y) \n if countB > countW:\n showMessage(\"BLACK WIN\")\n elif countB < countW:\n showMessage(\"WHITE WIN\")\n else:\n showMessage(\"draw\")\n \n \n \n\n#놓을수 있는 경우의 수 체크\ndef setPossible_xy_dir(x, y, dx, dy):\n if turn == Turn.BLACK:\n mine = State.BLACK\n other = State.WHITE\n else:\n mine = State.WHITE\n other = State.BLACK\n\n possible = False\n while True:\n x = x + dx\n y = y + dy\n\n #보드에서 벗어나지 않게\n if x < 0 or x > 7: return False\n if y < 0 or y > 7: return False\n\n object = board[y][x]\n if object.state == other:\n possible = True\n elif object.state == mine:\n return possible\n else: \n return False\n\n\n##8방향체크\ndef setPossible_xy(x, y):\n object = board[y][x]\n if object.state == State.BLACK: return False\n if object.state == State.WHITE: return False\n setState(x, y, State.BLANK)\n\n if setPossible_xy_dir(x,y,0,1): return True\n if setPossible_xy_dir(x,y,1,1): return True\n if setPossible_xy_dir(x,y,1,0): return True\n if setPossible_xy_dir(x,y,1,-1): return True\n if setPossible_xy_dir(x,y,0,-1): return True\n if setPossible_xy_dir(x,y,-1,-1): return True\n if setPossible_xy_dir(x,y,-1,0): return True\n if setPossible_xy_dir(x,y,-1,1): return True\n return False\n\ndef reverse_xy_dir(x, y, dx, dy):\n if turn == Turn.BLACK:\n mine = State.BLACK\n other = State.WHITE\n else:\n mine = State.WHITE\n other = State.BLACK\n\n possible = False\n while True:\n x = x + dx\n y = y + dy\n\n #보드에서 벗어나지 않게\n if x < 0 or x > 7: return \n if y < 0 or y > 7: return \n\n object = board[y][x]\n if object.state == other:\n possible = True\n elif object.state == mine:\n if possible:\n while True:\n x = x - dx\n y = y - dy\n\n object = board[y][x]\n if object.state == other:\n setState(x, y, mine)\n else: return\n\n else: return\n \n\n\n#뒤집기\ndef reverse_xy(x, y):\n reverse_xy_dir(x,y,0,1)\n reverse_xy_dir(x,y,1,1)\n reverse_xy_dir(x,y,1,0)\n reverse_xy_dir(x,y,1,-1)\n reverse_xy_dir(x,y,0,-1)\n reverse_xy_dir(x,y,-1,-1)\n reverse_xy_dir(x,y,-1,0)\n reverse_xy_dir(x,y,-1,1)\n\n\n\n##체크\ndef setPossible():\n possible = False\n for y in range(8):\n for x in range(8):\n if setPossible_xy(x, y):\n setState(x,y,State.POSSIBLE)\n possible = True\n return possible \n\n\n#보드, 돌 위치\nboard = []\nfor y in range(8):\n board.append([])\n for x in range(8):\n object = Object(\"images/blank.png\")\n object.locate(scene, 40 + 
x * 80,40 + y * 80)\n object.show()\n object.onMouseAction = lambda mx, my, action, ix = x, iy = y: stone_onMouseAction(ix, iy)\n object.state = State.BLANK\n board[y].append(object)\n \nsetState(3, 3,State.BLACK)\nsetState(4, 4,State.BLACK)\nsetState(3, 4,State.WHITE)\nsetState(4, 3,State.WHITE)\n\nsetPossible()\n\n#점수\nscore1 = Object(\"images/L0.png\")\nscore2 = Object(\"images/L0.png\")\nscore3 = Object(\"images/L0.png\")\nscore4 = Object(\"images/L0.png\")\n\ncountB,countW= 0,0\ncountB = int(countB)\ncountW = int(countW) \n\n\n\ndef setScore1(s):\n if s == 0:\n score1.setImage(\"images/L0.png\")\n elif s == 1:\n score1.setImage(\"images/L1.png\")\n elif s == 2:\n score1.setImage(\"images/L2.png\")\n elif s == 3:\n score1.setImage(\"images/L3.png\")\n elif s == 4:\n score1.setImage(\"images/L4.png\")\n elif s == 5:\n score1.setImage(\"images/L5.png\")\n elif s == 6:\n score1.setImage(\"images/L6.png\")\n elif s == 7:\n score1.setImage(\"images/L7.png\")\n elif s == 8:\n score1.setImage(\"images/L8.png\")\n elif s == 9:\n score1.setImage(\"images/L9.png\")\n\ndef setScore2(s):\n if s == 0:\n score2.setImage(\"images/L0.png\")\n elif s == 1:\n score2.setImage(\"images/L1.png\")\n elif s == 2:\n score2.setImage(\"images/L2.png\")\n elif s == 3:\n score2.setImage(\"images/L3.png\")\n elif s == 4:\n score2.setImage(\"images/L4.png\")\n elif s == 5:\n score2.setImage(\"images/L5.png\")\n elif s == 6:\n score2.setImage(\"images/L6.png\")\n elif s == 7:\n score2.setImage(\"images/L7.png\")\n elif s == 8:\n score2.setImage(\"images/L8.png\")\n elif s == 9:\n score2.setImage(\"images/L9.png\")\n\ndef setScore3(s):\n if s == 0:\n score3.setImage(\"images/L0.png\")\n elif s == 1:\n score3.setImage(\"images/L1.png\")\n elif s == 2:\n score3.setImage(\"images/L2.png\")\n elif s == 3:\n score3.setImage(\"images/L3.png\")\n elif s == 4:\n score3.setImage(\"images/L4.png\")\n elif s == 5:\n score3.setImage(\"images/L5.png\")\n elif s == 6:\n score3.setImage(\"images/L6.png\")\n elif s == 7:\n score3.setImage(\"images/L7.png\")\n elif s == 8:\n score3.setImage(\"images/L8.png\")\n elif s == 9:\n score3.setImage(\"images/L9.png\")\n\ndef setScore4(s):\n if s == 0:\n score4.setImage(\"images/L0.png\")\n elif s == 1:\n score4.setImage(\"images/L1.png\")\n elif s == 2:\n score4.setImage(\"images/L2.png\")\n elif s == 3:\n score4.setImage(\"images/L3.png\")\n elif s == 4:\n score4.setImage(\"images/L4.png\")\n elif s == 5:\n score4.setImage(\"images/L5.png\")\n elif s == 6:\n score4.setImage(\"images/L6.png\")\n elif s == 7:\n score4.setImage(\"images/L7.png\")\n elif s == 8:\n score4.setImage(\"images/L8.png\")\n elif s == 9:\n score4.setImage(\"images/L9.png\")\n \n\ndef scorecheck(x,y):\n global countB\n global countW\n for y in range(8):\n for x in range(8):\n object = board[y][x]\n if object.state == State.BLACK:\n countB+=1\n else:\n countW+=1\n \n if countB < 10:\n for i in range(10):\n if countB == i:\n setScore1(countB)\n score1.locate(scene, 750, 220)\n score1.show()\n \n elif countB >= 10:\n for i in range(10):\n if countB//10 == i:\n setScore1(countB//10)\n score1.locate(scene, 750, 220)\n score1.show()\n \n for i in range(10):\n if countB%10 == i:\n setScore2(countB%10)\n score2.locate(scene, 830, 220)\n score2.show()\n \n if countW < 10:\n for i in range(10):\n if countW == i:\n setScore3(countW)\n score3.locate(scene, 1070, 220)\n score3.show()\n \n elif countW >= 10:\n for i in range(10):\n if countW//10 == i:\n setScore3(countW//10)\n score3.locate(scene, 1070, 220)\n score3.show()\n \n for 
i in range(10):\n if countW%10 == i:\n setScore4(countW%10)\n score4.locate(scene, 1150, 220)\n score4.show()\n \n\n\n\nstartGame(scene)\n","sub_path":"오델로.py","file_name":"오델로.py","file_ext":"py","file_size_in_byte":8808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"513705822","text":"# : Harvey Chang\n# : chnme40cs@gmail.com\n# main function is used to make all configurations and making results:\nimport numpy as np\nimport tensorflow as tf\nimport dataset as D\nfrom matplotlib import pyplot as plt\nimport train.trainer as trainer\n\n\ndef main():\n config = dict()\n # mode\n config['plant'] = 'pid'\n config['mode'] = 'train'\n config['continue'] = True\n\n # changing part:\n config['time_step'] = 1 # predict in segment\n config['training_epochs'] = 600\n config['batch_size'] = 64 if config['plant'] == 'arc' else 128 # arc: 64| pid: 128\n config['learning_rate'] = 1e-3\n # 2: config['learning_rate'] = 1e-3 \n # 1: config['learning_rate'] = 5e-3 | first \n # arc: 64| pid: 128 # arc: 1e-3| pid: 1e-2\n config['scales'] = [10.0, 3e2, 4e4, 1e6, 8e4] \n\n config['dim'] = 4\n config['out_dim'] = 1\n \n config['diff'] = True # if differentiate inside\n # params for signals:\n config['file_path'] = 'matlab_id/data/{}/sample'.format(config['plant']) \n config['val_path'] = 'matlab_id/data/{}/sample'.format(config['plant']) \n\n # log structure\n config['save'] = True\n if (config['mode'] == 'train' or config['mode'] == 'res_train') and (not config['continue']):\n config['restore'] = False\n else:\n config['restore'] = True\n\n # directory for tensorboard\n config['board_dir'] = 'train_log/log'\n\n # read datas:\n mode = config['mode']\n dataX, dataY = D.read_data(config['file_path'], config)\n\n mytrainer = trainer.Trainer(config)\n mytrainer.add_data(dataX, dataY)\n\n if config['mode'] == 'train':\n mytrainer.train()\n elif config['mode'] == 'test':\n mytrainer.test()\n elif config['mode'] == 'implement':\n mytrainer.implement()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"290926843","text":"#!/usr/bin/env python\nimport rospy\nimport robot_eup\nfrom robot_eup.msg import RobotType\n\n#############################################\n\ndef main_loop(robot):\n\n robot.say(text='Welcome to the restaurant. How many people?')\n robot.display_message(\n message='Welcome to the restaurant. 
How many people?',\n duration=3)\n command = robot.wait_for_speech()\n robot.say(text='Okay, come with me')\n robot.move(x=0.5, y= 0.0, theta=0.5, duration=4)\n\n\n#############################################\n\nif __name__ == '__main__':\n rospy.init_node('robot_eup_dialog')\n robot = robot_eup.RobotFactory().build(RobotType.TURTLEBOT)\n robot.start_robot()\n while not rospy.is_shutdown():\n main_loop(robot)\n","sub_path":"robot_eup_samples/scripts/workshop/dialog_rahil.py","file_name":"dialog_rahil.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"524814453","text":"import datetime\nimport functools\nimport os\nimport re\n\nfrom flask import Flask\nfrom flask import url_for, redirect, render_template, abort\nfrom flask import session, request, g\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy.exc\n# import passlib.hash\n\n__version__ = \"2.1.2\"\n\n\napp = Flask(__name__)\n# setup.py imports __version__ from this module, but it won't have NOODLZ_SETTINGS set. NOODLZ_SETTINGS_IGNORE gives us a workaround for CI.\nif 'NOODLZ_SETTINGS' in os.environ or 'NOODLZ_SETTINGS_IGNORE' not in os.environ:\n\tapp.config.from_envvar('NOODLZ_SETTINGS')\napp.config['RE_USER'] = re.compile(app.config.get('RE_USER', '^[A-Za-z_][A-Za-z0-9-_]{,31}$'))\ndb = SQLAlchemy(app)\nGLOBAL_PARAMS = {'version': __version__}\n\n\nclass User(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(32), unique=True, nullable=False)\n\tpass_hash = db.Column(db.String(128), nullable=False)\n\n\nclass Destination(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(128), unique=True, nullable=False)\n\n\nclass Item(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.Text(), nullable=False)\n\ttag = db.Column(db.String(16), default=None, nullable=True)\n\tprice = db.Column(db.Numeric(9, scale=2), nullable=False)\n\thistorical = db.Column(db.Boolean(), default=False, nullable=False)\n\tdestination_id = db.Column(db.Integer, db.ForeignKey('destination.id'))\n\tdestination = db.relationship('Destination', backref=db.backref('items', lazy=True))\n\n\nclass Trip(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tdate = db.Column(db.Date(), nullable=False)\n\tclosed = db.Column(db.Boolean(), default=False, nullable=False)\n\tdestination_id = db.Column(db.Integer, db.ForeignKey('destination.id'))\n\tdestination = db.relationship('Destination')\n\tuser_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\tuser = db.relationship('User', backref=db.backref('trips', lazy=True))\n\t__table_args__ = (db.UniqueConstraint('user_id', 'date', 'destination_id'),)\n\n\tdef get_items_grouped(self):\n\t\ttrip_items = sorted(set(o.item for o in self.orders), key=lambda i: i.id)\n\t\treturn [{\"item\": item, \"users\": [o.user for o in self.orders if o.item == item]} for item in trip_items]\n\n\tdef get_user_item_count(self, user, item):\n\t\treturn len([o for o in self.orders if o.user == user and o.item == item])\n\n\tdef get_item_users(self, item):\n\t\treturn [o.user for o in self.orders if o.item == item]\n\n\nclass Order(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tsettled = db.Column(db.Boolean(), default=False, nullable=False)\n\titem_id = db.Column(db.Integer, db.ForeignKey('item.id'))\n\titem = db.relationship('Item')\n\ttrip_id = db.Column(db.Integer, db.ForeignKey('trip.id'))\n\ttrip = 
db.relationship('Trip', backref=db.backref('orders', lazy=True))\n\tuser_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\tuser = db.relationship('User', backref=db.backref('orders', lazy=True))\n\n\ndef parse_date(date_str):\n\treturn datetime.datetime.strptime(date_str, '%Y-%m-%d').date()\n\n\ndef parse_bool(b):\n\tif b in (True, 1, 'true', 'yes', 'on', '1'):\n\t\treturn True\n\telif b in (False, 0, 'false', 'no', 'off', '0'):\n\t\treturn False\n\traise ValueError(\"Not a valid boolean\")\n\n\ndef now():\n\treturn datetime.datetime.now().date()\n\n\n@app.route(\"/\")\ndef index():\n\treturn redirect(url_for(\"date_show\", date=now().isoformat()))\n\n\n@app.route(\"/favicon.ico\")\ndef favicon():\n\treturn app.send_static_file('favicon.ico')\n\n\ndef fullpath(request):\n\tfp = request.path\n\tif request.query_string:\n\t\tfp += '?' + str(request.query_string, 'utf-8')\n\tprint(fp)\n\treturn fp\n\n\ndef require_user(f):\n\t@functools.wraps(f)\n\tdef wrapper(*args, **kwargs):\n\t\tif 'user_id' not in session:\n\t\t\treturn render_template('login.html', version=__version__, redirect=fullpath(request))\n\t\telse:\n\t\t\tg.user = User.query.filter_by(id=session['user_id']).first()\n\t\t\tif g.user is None:\n\t\t\t\tabort(500, \"Your account doesn't exist anymore.\")\n\t\t\treturn f(*args, **kwargs)\n\treturn wrapper\n\n\n@app.route(\"/login\", methods=['POST'])\ndef login():\n\tif not app.config['RE_USER'].match(request.form[\"user\"]):\n\t\tabort(400, \"Invalid username.\")\n\tuser = User.query.filter_by(name=request.form[\"user\"]).first()\n\tif user is None: # or not passlib.hash.bcrypt.verify(request.form[\"pass\"], user.pass_hash):\n\t\tabort(403, \"Invalid username or password\")\n\tsession['user_id'] = user.id\n\treturn redirect(request.args.get('redirect', url_for('date_show', date=now().isoformat())))\n\n\n@app.route(\"/logout\", methods=['GET', 'POST'])\ndef logout():\n\tif 'user_id' in session:\n\t\tdel session['user_id']\n\treturn redirect(request.args.get('redirect', url_for('date_show', date=now().isoformat())))\n\n\n@app.route(\"/terms\", methods=['GET'])\ndef terms():\n\treturn render_template('terms.html', version=__version__)\n\n\n@app.route(\"//\", methods=[\"GET\"])\n@require_user\ndef date_show(date):\n\tdate = parse_date(date)\n\tif date.weekday() != 0:\n\t\treturn render_template(\"notmonday.html\", version=__version__)\n\ttrips = Trip.query.filter_by(date=date).all()\n\tdestinations = Destination.query.all()\n\n\treturn render_template(\"date.html\",\n\t\t**GLOBAL_PARAMS,\n\t\tuser=g.user,\n\t\tdate=date,\n\t\tnext_date=date + datetime.timedelta(days=7),\n\t\tprev_date=date + datetime.timedelta(days=-7),\n\t\ttrips=trips,\n\t\tdestinations=destinations,\n\t\tmsg=request.args.get(\"msg\"),\n\t\tmsg_severity=request.args.get(\"msg_severity\"),\n\t)\n\n\n@app.route(\"//\", methods=[\"POST\"])\n@require_user\ndef date_submit_trip(date):\n\tdestination = Destination.query.filter_by(id=request.form[\"destination\"]).first()\n\ttrip = Trip(user=g.user, destination=destination, date=parse_date(date))\n\tdb.session.add(trip)\n\ttry:\n\t\tdb.session.commit()\n\t\treturn redirect(url_for(\"date_show\", date=date))\n\texcept sqlalchemy.exc.IntegrityError:\n\t\treturn redirect(url_for(\"date_show\", date=date, msg=\"You've already added a trip to that destination!\", msg_severity=\"error\"))\n\n\n@app.route(\"/trip//order\", methods=[\"POST\"])\n@require_user\ndef trip_submit_order(trip_id):\n\ttrip = Trip.query.filter_by(id=trip_id).first()\n\tif trip.closed:\n\t\tabort(400, 
\"This trip is already closed.\")\n\tfor item_id, count in request.form.to_dict().items():\n\t\tif not item_id.startswith(\"item-\"):\n\t\t\tcontinue\n\t\titem_id = item_id.replace(\"item-\", \"\", 1)\n\t\tcount = int(count)\n\n\t\titem = Item.query.filter_by(id=item_id).first()\n\t\tif item.historical and count != 0:\n\t\t\tabort(400, \"That item is not orderable any more.\")\n\n\t\tif count > int(app.config.get('MAX_ORDER_COUNT', 16)):\n\t\t\tabort(400, \"You can't order that many items. You can thank the person that ordered 65535 drinks once.\")\n\t\tif count < 0:\n\t\t\tabort(400, \"You can't order a negative number of items. What does that even mean?\")\n\n\t\torders = Order.query.filter_by(trip=trip, item=item, user=g.user).all()\n\t\tif len(orders) > count:\n\t\t\tfor order in orders[count:]:\n\t\t\t\tdb.session.delete(order)\n\t\telif len(orders) < count:\n\t\t\tfor i in range(len(orders), count):\n\t\t\t\tdb.session.add(Order(trip=trip, item=item, user=g.user, settled=item.price <= 0 or trip.user==g.user))\n\tdb.session.commit()\n\n\treturn redirect(url_for(\"date_show\", date=trip.date, msg=\"Order accepted!\", msg_severity='success'))\n\n\n@app.route(\"/trip//close\", methods=[\"POST\"])\n@require_user\ndef trip_close(trip_id):\n\ttrip = Trip.query.filter_by(id=trip_id).first()\n\tif g.user != trip.user:\n\t\tabort(403, \"You can't close someone else's trip.\")\n\ttrip.closed = True\n\tdb.session.add(trip)\n\tdb.session.commit()\n\treturn redirect(url_for(\"trip_show\", trip_id=trip_id))\n\n\n@app.route(\"/trip/\")\n@require_user\ndef trip_show(trip_id):\n\ttrip = Trip.query.filter_by(id=trip_id).first()\n\tif g.user != trip.user:\n\t\tabort(403, \"You can't read someone else's order list.\")\n\ttrip_items = trip.get_items_grouped()\n\ttotal = sum(o[\"item\"].price * len(o[\"users\"]) for o in trip_items)\n\treturn render_template(\"trip.html\",\n\t\t**GLOBAL_PARAMS,\n\t\tuser=g.user,\n\t\ttrip=trip,\n\t\ttrip_items=trip_items,\n\t\tshow_users=\"users\" in request.args,\n\t\ttotal=total,\n\t)\n\n\n@app.route(\"/settle\")\n@require_user\ndef settle_show():\n\t# All orders that were ordered by us, but not bought by us\n\tquery_out = db.session.query(Order).filter(Order.user == g.user, Order.trip.has(Trip.user != g.user))\n\t# All orders that were not ordered by us, but were bought by us\n\tquery_in = db.session.query(Order).filter(Order.user != g.user, Order.trip.has(Trip.user == g.user))\n\n\tfiltered = False\n\tif 'trip' in request.args:\n\t\tquery_out = query_out.filter(Order.trip_id.in_(request.args.getlist('trip')))\n\t\tquery_in = query_in.filter(Order.trip_id.in_(request.args.getlist('trip')))\n\t\tfiltered = True\n\tfor after in request.args.getlist('after'):\n\t\tquery_out = query_out.filter(Order.date > after)\n\t\tquery_in = query_in.filter(Order.date > after)\n\t\tfiltered = True\n\tfor since in request.args.getlist('since'):\n\t\tquery_out = query_out.filter(Order.date >= since)\n\t\tquery_in = query_in.filter(Order.date >= since)\n\t\tfiltered = True\n\tfor before in request.args.getlist('before'):\n\t\tquery_out = query_out.filter(Order.date < before)\n\t\tquery_in = query_in.filter(Order.date < before)\n\t\tfiltered = True\n\tfor until in request.args.getlist('until'):\n\t\tquery_out = query_out.filter(Order.date <= until)\n\t\tquery_in = query_in.filter(Order.date <= until)\n\t\tfiltered = True\n\tif 'with' in request.args:\n\t\t# for outgoing (ordered by us), check the Trip's user\n\t\tquery_out = query_out.filter(Order.trip.has(Trip.user_id.in_(map(int, 
request.args.getlist('with')))))\n\t\t# For incoming (ordered from us), check the Order's user\n\t\tquery_in = query_in.filter(Order.user_id.in_(map(int, request.args.getlist('with'))))\n\t\tfiltered = True\n\tif 'settled' in request.args:\n\t\tfilter_settled = parse_bool(request.args['settled'])\n\t\tquery_out = query_out.filter(Order.settled == filter_settled)\n\t\tquery_in = query_in.filter(Order.settled == filter_settled)\n\t\tfiltered = True\n\n\treturn render_template(\"settle.html\",\n\t\t**GLOBAL_PARAMS,\n\t\tuser=g.user,\n\t\toutgoing=query_out.all(),\n\t\tincoming=query_in.all(),\n\t\tfiltered=filtered,\n\t)\n\n\n@app.route(\"/settle\", methods=[\"POST\"])\n@require_user\ndef settle_update():\n\tform = request.form.to_dict()\n\torders_form = {}\n\tfor key, value in form.items():\n\t\tif key.startswith(\"old-\"):\n\t\t\torder_id = int(key.replace(\"old-\", \"\", 1))\n\t\t\torders_form[order_id] = (value == \"on\", form.get(f\"order-{order_id}\", \"off\") == \"on\")\n\torders_db = db.session.query(Order).filter(Order.id.in_(orders_form)).all()\n\tfor order in orders_db:\n\t\told_state, new_state = orders_form[order.id]\n\t\tif new_state != old_state:\n\t\t\torder.settled = new_state\n\t\t\tdb.session.add(order)\n\tdb.session.commit()\n\treturn redirect(url_for(\"settle_show\"))\n","sub_path":"noodlz/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"540617846","text":"# Invoer\nm = int(input('Eerste serienummer?: '))\n\n# Format\nn, grootsteSerieNummer = 0, 0\nwhile m > 0:\n n += 1\n grootsteSerieNummer = max(m, grootsteSerieNummer)\n m = int(input('Volgende serienummer?: '))\nschatting = round((((n + 1) * grootsteSerieNummer) / n) - 1)\n\n# Uitvoer\nprint('Het aantal geproduceerde tanks wordt geschat op ' + str(schatting) + '.')\n","sub_path":"Uitbreiding/07b + Iteraties - While-lus/Duitse tanks.py","file_name":"Duitse tanks.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"534191432","text":"# def ask_ok(prompt, retries=4, reminder='Please try again!'):\n# while True:\n# ok = input(prompt)\n# # print(prompt, retries, reminder)\n# # print(ok,type(ok))\n# # if ok in ('y', 'ye', 'yes'):\n# if ok == 'yes':\n# # print(\"True\")\n# return True\n# # if ok in ('n', 'no', 'nop', 'nope'):\n# if ok == 'no':\n# # print(\"False\")\n# # return False\n# break\n# retries = retries - 1\n# if retries < 0:\n# raise ValueError('invalid user response')\n# print(reminder)\n#\n#\n# ask_ok(\"no\")\n# print(\"hello world\")\n#\n#\n#\n# # def mytest(a,b=2):\n# # while True:\n# # if a>0:\n# # print(a)\n# # return True\n# # else:\n# # print(\"abs(a):\",abs(a))\n# # return False\n# # a = a-1\n# # if a < 0:\n# # print(\"hello\")\n# # print(\"---\")\n# #\n# # mytest(3)\n\ns = \"\"\nn = 5\nwhile n > 0:\n n -= 1\n if (n % 2) == 0:\n continue\n a = ['foo', 'bar', 'baz']\n\n while a:\n s += str(n) + a.pop(0)\n print(s)\n if len(a) < 2:\n break\n\nprint(s)","sub_path":"learn/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"628508896","text":"import os\nimport logging\nimport platform\nfrom zipfile import ZipFile\nfrom io import BytesIO\nfrom time import sleep\nimport requests\nfrom selenium import webdriver\n\ndef make_executable(path):\n if platform.system() 
!= 'win32':\n mode = os.stat(path).st_mode\n mode |= (mode & 0o444) >> 2 # copy R bits to X\n os.chmod(path, mode)\n\ndef update_chromedriver(version=''):\n if not version:\n version = requests.get('https://chromedriver.storage.googleapis.com/LATEST_RELEASE').text.strip()\n arch = {'Linux': 'linux64', 'Darwin': 'mac64', 'Windows': 'win32'}[platform.system()] # these are the only options available\n logging.info('%s detected', arch)\n url = 'https://chromedriver.storage.googleapis.com/{}/chromedriver_{}.zip'.format(version, arch)\n logging.info('downloading...')\n zip_file = ZipFile(BytesIO(requests.get(url).content))\n for name in zip_file.namelist():\n if name.startswith('chromedriver'):\n logging.info('unpacking...')\n with open(name, 'wb') as out:\n out.write(zip_file.read(name))\n logging.info('ready')\n break\n\ndef get_driver(headless=False):\n options = webdriver.ChromeOptions()\n options.add_argument('log-level=3')\n options.add_argument(\"user-data-dir=profile/\")\n if headless:\n options.add_argument('headless')\n options.add_argument('disable-gpu')\n\n # assume chrome is installed. anything else is out of scope\n # http://chromedriver.chromium.org/downloads/version-selection\n if not (os.path.isfile('chromedriver') or os.path.isfile('chromedriver.exe')): # bootstrap self\n logging.info('chromedriver not found')\n update_chromedriver() # just get newest, dont worry about automatically matching chrome yet\n make_executable('./chromedriver')\n\n driver = webdriver.Chrome('./chromedriver', chrome_options=options) # assumes our version of chromedriver works with our version of chrome\n chrome_version = driver.capabilities[({'version', 'browserVersion'} & driver.capabilities.keys()).pop()] #1\n CD_version = driver.capabilities['chrome']['chromedriverVersion'].split()[0]\n version_check_url = 'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_{}'.format(chrome_version.rpartition('.')[0]) #2\n matching_CD_version = requests.get(version_check_url).text.strip() #3\n if CD_version != matching_CD_version: #5\n driver.quit() # need to release the file lock\n logging.info('have chromedriver {}. 
attempting to update to {}'.format(CD_version, matching_CD_version))\n update_chromedriver(matching_CD_version) #4\n driver = webdriver.Chrome('./chromedriver', chrome_options=options) # reopen with fresh new chromedriver\n\n return driver\n\n\n\ndef test():\n try:\n driver = None\n logging.info('get driver...')\n driver = get_driver()\n driver.implicitly_wait(5)\n driver.set_window_size(1024, 768)\n driver.get('https://www.google.com')\n sleep(2)\n\n finally:\n if driver:\n logging.info('cleanup')\n driver.quit()\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(message)s',\n datefmt='%H:%M:%S')\n test()\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"590838072","text":"from django.shortcuts import render,redirect\n#chamar o UserCreationForm que é 1 model feito para cadastrar usuario já\nfrom django.contrib.auth.forms import UserCreationForm #por padrao tem o esse form porem para adicionar mais campos de cadastro vc precisa adicionar um arquivo form e fazer um novo form q herda desse\n#pegar a login_url no settings\nfrom django.conf import settings\nfrom accounts.forms import RegisterForm\n# Cadastro de Usuarios\n\ndef register(request):\n\tif request.method == 'POST':\n\t\tform = RegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect(settings.LOGIN_URL)\n\telse:\n\t\tform = RegisterForm()\n\tcontext = {\n\t\t'form': form\n\t}\t\n\treturn render(request,'accounts/register.html',context)","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"115699759","text":"\"\"\"\n Pygame base template for opening a window\n\n Sample Python/Pygame Programs\n Simpson College Computer Science\n http://programarcadegames.com/\n http://simpson.edu/computer-science/\n\n Explanation video: http://youtu.be/vRB_983kUMc\n\"\"\"\n\nimport pygame\nimport random\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREY = (127, 127, 127)\nORANGE = (255,165,0)\nPINK= (255,192,203)\nPURPLE = (160, 32, 140)\nBROWN = (165, 42, 42)\nCORAL = (255, 127, 80)\nGOLD = (255, 215, 0)\nIVORY = (255, 255, 240)\nLAVENDER = (230, 230, 250)\nLTBLUE = (173, 216, 230)\nMINT = (245, 255, 250)\nTURQUOISE = (64, 224, 208)\n \ncolors = [BLACK, WHITE, RED, BLUE, GREY, ORANGE, GREEN, PINK, PURPLE, BROWN, CORAL, GOLD, IVORY, LAVENDER, LTBLUE, MINT, TURQUOISE]\n\nclength = len(colors)\n\npygame.init()\n\n# Set the width and height of the screen [width, height]\nSCREEN_WIDTH = 700\nSCREEN_HEIGHT = 500\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\npygame.display.set_caption(\"Bouncing Ball Game\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n\n# WRITE YOUR CODE HERE\nx = 350\ny = 250\n\ndx = random.randint(-10,10)\ndy = random.randint(-10,10)\n\nrad = random.randint(20, 80)\nindex = random.randint(0, clength - 1)\ncol = colors[index]\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tdone = True\n\n\n\n\t# --- Game logic should go here\n\n\t\n\t# --- Screen-clearing 
code goes here\n\n # Here, we clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n\tscreen.fill(BLACK)\n\n # --- Drawing code should go here\n\tmpos = pygame.mouse.get_pos()\n\tpygame.draw.circle(screen, col, mpos, rad)\n\tx += dx\n\ty += dy\n\t\n\tif x > SCREEN_WIDTH or x < 0:\n\t\tdx = dx * -1\n\t\tindex = random.randint(0, clength - 1)\n\t\tcol = colors[index]\n\tif y < 0 or y > SCREEN_HEIGHT:\n\t\tdy = dy * -1\n\t\tindex = random.randint(0, clength - 1)\n\t\tcol = colors[index]\n\n\n # --- Go ahead and update the screen with what we've drawn.\n\tpygame.display.flip()\n\n # --- Limit to 60 frames per second\n\tclock.tick(60)\n\n# Close the window and quit.\npygame.quit()\nexit() # Needed when using IDLE\n","sub_path":"clickball.py","file_name":"clickball.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499610385","text":"import datetime\nimport feedparser\nimport json\nimport urllib2\nimport urllib\nfrom flask import Flask\nfrom flask import make_response\nfrom flask import render_template\nfrom flask import request\n\napp = Flask(__name__)\n\n# global variables\n\nRSS_FEEDS = {'bbc': 'http://feeds.bbci.co.uk/news/rss.xml',\n\t\t\t'cnn': 'http://rss.cnn.com/rss/cnn_us.rss',\n\t\t\t'fox': 'http://feeds.foxnews.com/foxnews/latest',\n\t\t\t'bi': 'http://www.businessinsider.in/rss_section_feeds/2147477994.cms',\n\t\t\t}\n\nDEFAULTS = {'publication': 'bbc',\n\t\t\t'city': 'London, UK',\n\t\t\t'currency_from': 'GBP',\n\t\t\t'currency_to': 'USD'}\n\n# apis\napi_url = ('http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=9b1e4abecd10e78d497ccf0a0768b5f5')\n\nCURRENCY_URL = ('http://openexchangerates.org//api/latest.json?app_id=a0ea5ac1e5e4420f8120d503b7737b7b')\n\n\n@app.route(\"/\")\ndef home():\n\t# get customised headlines, based on our user input or default\n\tpublication = get_value_with_fallback(\"publication\")\n\tarticles = get_news(publication)\n\n\t# get customised weather based on user input of default\n\tcity = get_value_with_fallback(\"city\")\n\tweather = get_weather(city)\n\n\t# get cutomised currency based on user input or default\n\tcurrency_from = get_value_with_fallback(\"currency_from\")\n\tcurrency_to = get_value_with_fallback(\"currency_to\")\n\trate, currencies = get_rate(currency_from, currency_to)\n\n\t# save cookies and return template\n\tresponse = make_response(render_template(\"home.html\",\n\t\tarticles=articles,\n\t\tweather=weather, currency_from=currency_from,\n\t\tcurrency_to=currency_to, rate=rate,\n\t\tcurrencies=sorted(currencies)))\n\texpires = datetime.datetime.now() + datetime.timedelta(days=365)\n\tresponse.set_cookie(\"publication\", publication, expires=expires)\n\tresponse.set_cookie(\"city\", city, expires=expires)\n\tresponse.set_cookie(\"currency_from\", currency_from, expires=expires)\n\tresponse.set_cookie(\"currency_to\", currency_to, expires=expires)\n\treturn response\n\n\ndef get_news(query):\n\t# check to see if request has key(publication)\n\tif not query or query.lower() not in RSS_FEEDS:\n\t\tpublication = DEFAULTS['publication']\n\telse:\n\t\tpublication = query.lower()\n\t# parse information from publication\n\tfeed = feedparser.parse(RSS_FEEDS[publication])\n\t# return the news feed\n\treturn feed['entries']\n\n@app.route(\"/\")\n\n\ndef 
get_weather(query):\n\t# store the url with key\n\t\n\t# filters to no spaces ex %20 in url\n\tquery = urllib.quote(query)\n\turl = api_url.format(query)\n\t# load data over HTTP into a python string ( future add exception handling)\n\tdata = urllib2.urlopen(url).read()\n\t# convert JSON into a python dictionary\n\tparsed = json.loads(data)\n\t# build up a simple python dictionary based on the JSON returned\n\tweather = None\n\tif parsed.get(\"weather\"):\n\t\tweather = {\"description\":\n\t\t\t\t\tparsed[\"weather\"][0][\"description\"],\n\t\t\t\t\t\"temperature\":parsed[\"main\"][\"temp\"],\n\t\t\t\t\t\"city\":parsed[\"name\"],\n\t\t\t\t\t\"country\": parsed['sys']['country']\n\t\t\t\t\t}\n\treturn weather\n\ndef get_rate(frm, to):\n\t# store values from currency api\n\tall_currency = urllib2.urlopen(CURRENCY_URL).read()\n\n\tparsed = json.loads(all_currency).get('rates')\n\tfrm_rate = parsed.get(frm.upper())\n\tto_rate = parsed.get(to.upper())\n\treturn (to_rate / frm_rate, parsed.keys())\n\n\ndef get_value_with_fallback(key):\n\tif request.args.get(key):\n\t\treturn request.args.get(key)\n\tif request.cookies.get(key):\n\t\treturn request.cookies.get(key)\n\treturn DEFAULTS[key]\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\tapp.run(port=5000, debug=True)\n","sub_path":"headlines.py","file_name":"headlines.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"353699211","text":"# coding: utf-8\n\"\"\"\nDump hyper parameters to json file.\n\nusage: tojson.py [options] \n\noptions:\n --hparams= Hyper parameters [default: ].\n --preset= Path of preset parameters (json).\n -h, --help Show help message.\n\"\"\"\nfrom docopt import docopt\n\nimport sys\nimport os\nfrom os.path import dirname, join, basename, splitext\nimport json\n\nfrom hparams import hparams, hparams_debug_string\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n preset = args[\"--preset\"]\n output_json_path = args[\"\"]\n\n os.makedirs(dirname(output_json_path), exist_ok=True)\n\n # Load preset if specified\n if preset is not None:\n with open(preset) as f:\n hparams.parse_json(f.read())\n # Override hyper parameters\n hparams.parse(args[\"--hparams\"])\n assert hparams.name == \"wavenet_vocoder\"\n print(hparams_debug_string())\n\n j = hparams.values()\n\n # for compat legacy\n for k in [\"preset\", \"presets\"]:\n if k in j:\n del j[k]\n\n with open(output_json_path, \"w\") as f:\n json.dump(j, f, indent=2)\n sys.exit(0)\n","sub_path":"tojson.py","file_name":"tojson.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528304875","text":"\nfrom datasets.dataset import *\nfrom datasets.sampler import *\nfrom datasets.transforms import *\nimport torch\nimport torch.utils.data as Data\n\ndata_root={\n 'Market1501':'data/market',\n 'VC':'data/vc',\n 'P-DESTRE':'data/Pdata',\n 'Real28':'data/real28',\n 'Duke':'data/duke'\n}\ndataset_type={\n 'Market1501':Market1501,\n 'VC':VC_Clothes,\n 'P-DESTRE':PDataset,\n 'Real28':Real28,\n 'Duke':Duke\n\n}\n\n\ndef train_collate_fn(batch):\n imgs, pids, camid,path,date = zip(*batch)\n pids = torch.tensor(pids, dtype=torch.int64)\n return torch.stack(imgs, dim=0), pids\n\n###对于验证集而言,为了提高验证的真实性,我们应该防止同一摄像头的图片进入验证(同一摄像头相当于数据泄露)\ndef val_collate_fn_date(batch):\n imgs, pids, camids, path,date= zip(*batch)\n return torch.stack(imgs, dim=0),pids, camids,date\n\n\ndef make_dataloader(dataset, 
cfg, pid_add=0):\n train_transform = build_transforms(cfg, training=True)\n test_transform = build_transforms(cfg, training=False)\n data = dataset\n num_classes = data.num_train_pids\n train_set=ImageDataset(data.train,train_transform,pid_add)\n val_set = ImageDataset(data.query + data.gallery, test_transform)\n val_collate = val_collate_fn_date\n train_loader = Data.DataLoader(train_set, batch_size=cfg['train_bs'],\n sampler=DateRandomIdentitySampler(data.train,\n cfg['train_bs'], cfg['train_K_instances']),\n num_workers=cfg['num_workers'],\n collate_fn=train_collate_fn)\n val_loader = Data.DataLoader(\n val_set, batch_size=cfg['test_bs'], shuffle=False, num_workers=cfg['num_workers'],\n collate_fn=val_collate\n )\n return train_loader, val_loader, len(data.query), num_classes\n\n\n\ndef MAKE_DATALOADER(cfg):\n train_names = cfg['train_data']\n val_name = cfg['val_data']\n val_loader_dict = {}\n train_loaders = []\n all_num_class = 0\n for name in train_names:\n root = data_root[name]\n dataset=dataset_type[name](root)\n train_loader, val_loader, num_query, num_classes = make_dataloader(dataset, cfg, all_num_class)\n train_loaders.append(train_loader)\n val_loader_dict[name] = [val_loader, num_query]\n all_num_class += num_classes\n val_loader, num_query = val_loader_dict[val_name]\n return train_loaders, val_loader, num_query, all_num_class\n","sub_path":"image_baseline/datasets/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"8354590","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1.3'\n\nsetup(name='taras.recipe.distutils',\n version=version,\n description=\"Recipe for zc.buildout that downloads one or multiple distutils-archives and installs them without requiring for zc.distutils to be installed.\",\n long_description=\"\"\"\\\n\"\"\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Plugins',\n 'Framework :: Buildout',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Installation/Setup',\n 'License :: OSI Approved :: BSD License',\n ],\n keywords='distutils buildout',\n author='Taras Mankovski',\n author_email='tarasm@gmail.com',\n url='http://github.com/taras/taras.recipe.distutils',\n license='BSD License',\n packages=find_packages('src'),\n package_dir = {'': 'src'},\n namespace_packages = ['taras', 'taras.recipe'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n ],\n entry_points={'zc.buildout':\n ['default = taras.recipe.distutils:Recipe']\n },\n)\n","sub_path":"pypi_install_script/taras.recipe.distutils-0.1.3final.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"185419558","text":"# Copyright 2018 - Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport itertools\n\nfrom oslo_utils import uuidutils\n\nfrom vitrage.common.constants import EdgeProperties\nfrom vitrage.common.constants import VertexProperties as VProps\nfrom vitrage.graph import Direction\nfrom vitrage.graph.driver.networkx_graph import NXGraph\nfrom vitrage.graph import Edge\nfrom vitrage.graph import Vertex\nfrom vitrage.tests.mocks import utils\n\nRESOURCES_PATH = utils.get_resources_dir() + '/mock_configurations'\n\n\nclass GraphGenerator(object):\n def __init__(self,\n num_of_networks=2,\n num_of_zones_per_cluster=2,\n num_of_hosts_per_zone=2,\n num_of_zabbix_alarms_per_host=2,\n num_of_instances_per_host=2,\n num_of_ports_per_instance=2,\n num_of_volumes_per_instance=2,\n num_of_vitrage_alarms_per_instance=2,\n num_of_tripleo_controllers=2,\n num_of_zabbix_alarms_per_controller=2):\n self._num_of_networks = num_of_networks\n self._num_of_zones_per_cluster = num_of_zones_per_cluster\n self._num_of_hosts_per_zone = num_of_hosts_per_zone\n self._num_of_zabbix_alarms_per_host = num_of_zabbix_alarms_per_host\n self._num_of_instances_per_host = num_of_instances_per_host\n self._num_of_ports_per_instance = num_of_ports_per_instance\n self._num_of_volumes_per_instance = num_of_volumes_per_instance\n self._num_of_vitrage_alarms_per_instance = \\\n num_of_vitrage_alarms_per_instance\n self._num_of_tripleo_controllers = num_of_tripleo_controllers\n self._num_of_zabbix_alarms_per_controller = \\\n num_of_zabbix_alarms_per_controller\n\n def create_graph(self):\n graph = NXGraph()\n v1 = self._file_to_vertex('openstack-cluster.json')\n graph.add_vertex(v1)\n\n networks = self._create_n_vertices(graph,\n self._num_of_networks,\n 'neutron.network.json')\n zones = self._create_n_neighbors(graph,\n self._num_of_zones_per_cluster,\n [v1],\n 'nova.zone.json',\n 'contains.json')\n hosts = self._create_n_neighbors(graph,\n self._num_of_hosts_per_zone,\n zones,\n 'nova.host.json',\n 'contains.json')\n self._create_n_neighbors(graph,\n self._num_of_zabbix_alarms_per_host,\n hosts,\n 'zabbix.json',\n 'on.json',\n Direction.IN)\n instances = self._create_n_neighbors(graph,\n self._num_of_instances_per_host,\n hosts,\n 'nova.instance.json',\n 'contains.json')\n ports = self._create_n_neighbors(graph,\n self._num_of_ports_per_instance,\n instances,\n 'neutron.port.json',\n 'attached.json',\n direction=Direction.IN)\n\n self._round_robin_edges(graph, networks, ports, 'contains.json')\n\n self._create_n_neighbors(graph,\n self._num_of_volumes_per_instance,\n instances,\n 'cinder.volume.json',\n 'attached.json',\n Direction.IN)\n self._create_n_neighbors(graph,\n self._num_of_vitrage_alarms_per_instance,\n instances,\n 'vitrage.alarm.json',\n 'on.json',\n Direction.IN)\n\n # Also create non connected components:\n tripleo_controller = \\\n self._create_n_vertices(graph,\n self._num_of_tripleo_controllers,\n 'tripleo.controller.json')\n self._create_n_neighbors(graph,\n self._num_of_zabbix_alarms_per_controller,\n tripleo_controller,\n 'zabbix.json',\n 'on.json',\n Direction.IN)\n return graph\n\n def _create_n_vertices(self, g, n, props_file):\n created_vertices = []\n for i in range(n):\n v = self._file_to_vertex(props_file, i)\n created_vertices.append(v)\n g.add_vertex(v)\n return created_vertices\n\n def _create_n_neighbors(self, g, n, source_v_list,\n neighbor_props_file, neighbor_edge_props_file,\n direction=Direction.OUT):\n created_vertices = []\n for source_v in source_v_list:\n 
for i in range(n):\n v = self._file_to_vertex(neighbor_props_file, i)\n v[VProps.NAME] = source_v[VProps.NAME] + \"-\" + v[VProps.NAME]\n created_vertices.append(v)\n g.add_vertex(v)\n if direction == Direction.OUT:\n g.add_edge(self._file_to_edge(neighbor_edge_props_file,\n source_v.vertex_id,\n v.vertex_id))\n else:\n g.add_edge(\n self._file_to_edge(neighbor_edge_props_file,\n v.vertex_id,\n source_v.vertex_id))\n return created_vertices\n\n def _round_robin_edges(self,\n graph,\n source_vertices,\n target_vertices,\n edge_props_file):\n round_robin_source_vertices = itertools.cycle(source_vertices)\n for v in target_vertices:\n source_v = next(round_robin_source_vertices)\n graph.add_edge(self._file_to_edge(edge_props_file,\n source_v.vertex_id,\n v.vertex_id))\n\n def _file_to_vertex(self, relative_path, index=0):\n full_path = RESOURCES_PATH + \"/vertices/\"\n props = utils.load_specs(relative_path, full_path)\n if props.get(VProps.ID):\n props[VProps.ID] = uuidutils.generate_uuid()\n props[VProps.NAME] = \"%s-%s\" % (props[VProps.VITRAGE_TYPE], str(index))\n props[VProps.VITRAGE_ID] = uuidutils.generate_uuid()\n return Vertex(props[VProps.VITRAGE_ID], props)\n\n @staticmethod\n def _file_to_edge(relative_path, source_id, target_id):\n full_path = RESOURCES_PATH + \"/edges/\"\n props = utils.load_specs(relative_path, full_path)\n return Edge(source_id, target_id,\n props[EdgeProperties.RELATIONSHIP_TYPE],\n props)\n","sub_path":"vitrage/tests/mocks/graph_generator.py","file_name":"graph_generator.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122937146","text":"# MVPN dataset: cope4(scene)\nimport os, time\nimport numpy as np\nimport itertools as it\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom model_settings import sub\n\n# Device configuration\nROI_dir = '/gsfs0/data/fangmg/MVPD/data/ROIs/ses-movie/run_' \n\n# Implement the data loader.\nclass Scene_Dataset(Dataset):\n \"\"\"cope4(scene) dataset.\"\"\"\n def __init__(self, ROIs=[], GMs=[]):\n 'Initialization'\n self.ROIs = []\n self.GMs = []\n\n def get_train(self, this_run=0, total_run=0):\n NULL = True # dataset is empty\n for run in it.chain(range(1,this_run), range(this_run+1,total_run+1)):\n PPA_data = np.load(ROI_dir + str(run) + '_ROIs/' + sub + '_cope4/PPA_80vox.npy')\n TOS_data = np.load(ROI_dir + str(run) + '_ROIs/' + sub + '_cope4/TOS_80vox.npy')\n RSP_data = np.load(ROI_dir + str(run) + '_ROIs/' + sub + '_cope4/RSP_80vox.npy')\n GM_data_run = np.load(ROI_dir + str(run) + '_ROIs/' + sub + '_cope4/GM_vox.npy')\n ROI_data_run = np.concatenate([PPA_data, TOS_data], 1)\n ROI_data_run = np.concatenate([ROI_data_run, RSP_data], 1)\n if NULL:\n ROI_data = ROI_data_run\n GM_data = GM_data_run\n NULL = False\n else: \n ROI_data = np.concatenate([ROI_data, ROI_data_run], 0) \n GM_data = np.concatenate([GM_data, GM_data_run], 0)\n \n # BatchNorm: dataset size modulo batch size is equal to 1 \n num_data = np.shape(ROI_data)[0]\n del_idx = np.random.randint(0, num_data)\n ROI_data = np.delete(ROI_data, del_idx, 0)\n GM_data = np.delete(GM_data, del_idx, 0)\n \n self.ROIs = torch.from_numpy(ROI_data)\n self.GMs = torch.from_numpy(GM_data)\n self.ROIs = self.ROIs.type(torch.FloatTensor)\n self.GMs = self.GMs.type(torch.FloatTensor)\n \n def get_test(self, this_run=0, total_run=0): \n PPA_data = np.load(ROI_dir + str(this_run) + '_ROIs/' + sub + '_cope4/PPA_80vox.npy')\n TOS_data = 
np.load(ROI_dir + str(this_run) + '_ROIs/' + sub + '_cope4/TOS_80vox.npy')\n RSP_data = np.load(ROI_dir + str(this_run) + '_ROIs/' + sub + '_cope4/RSP_80vox.npy')\n ROI_data = np.concatenate([PPA_data, TOS_data], 1)\n ROI_data = np.concatenate([ROI_data, RSP_data], 1)\n GM_data = np.load(ROI_dir + str(this_run) + '_ROIs/' + sub + '_cope4/GM_vox.npy') \n 'Convert ndarrays in sample to Tensors'\n self.ROIs = torch.from_numpy(ROI_data)\n self.GMs = torch.from_numpy(GM_data)\n self.ROIs = self.ROIs.type(torch.FloatTensor)\n self.GMs = self.GMs.type(torch.FloatTensor)\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.ROIs)\n \n def __getitem__(self, idx):\n 'Generates one sample of data'\n ROI = self.ROIs[idx]\n GM = self.GMs[idx]\n sample = {'ROI': ROI, 'GM': GM}\n return sample\n\n","sub_path":"model/scene_dataset.py","file_name":"scene_dataset.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"583543343","text":"from django.urls import path\nfrom . import views\n\nurlpatterns=[\n path('',views.home,name='home'),\n path(r'^submit/$',views.submit,name='submit'),\n path(r'^signup/$',views.signup, name='signup'),\n path(r'^login/$',views.login, name='login'),\n path(r'^logout/$',views.logout, name='logout'),\n path(r'^home/$',views.dashboard, name='dashboard')\n]","sub_path":"skin front end/skin_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"332138419","text":"import numpy as np\nimport pandas as pd\npd.set_option('display.unicode.ambiguous_as_wide', True)\npd.set_option('display.unicode.east_asian_width', True)\ndateparse = lambda dates: pd.datetime.strptime(dates, '%Y/%m/%d')\n\npath = \"D:\\\\Users\\\\Corly\\\\Documents\\\\study\\\\Python\\\\study\\\\量化投资创新设计\\\\test\\\\data.csv\"\nfd = pd.read_csv(path,\n header=0,\n index_col=0,\n parse_dates=['trade_date'],\n date_parser=dateparse)\nrate_fd = fd.diff(1) / fd.shift(1)\n\nprint(rate_fd.head())\n\nprint(rate_fd.describe())\n\nprint(fd.cov())\n\nprint(fd.corr())\n","sub_path":"college_course/量化投资创新设计/test/060817107_刘楚胤量化投资/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"562960158","text":"# -*- coding: utf-8 -*-\n\nimport os, sys, qgis.utils\nfrom qgis import core, gui, utils\nfrom qgis.core import *\nfrom PyQt4.QtCore import *\nfrom qgis.core import *\nfrom qgis.utils import iface\nfrom qgis.core import QgsVectorLayer, QgsMapLayerRegistry\nfrom qgis.analysis import QgsRasterCalculator, QgsRasterCalculatorEntry\nfrom subprocess import call\nimport math\nimport commonLibrary\nimport processing\nfrom urllib2 import urlopen\nfrom ftplib import FTP\nimport shutil\nimport re\nimport datetime\nimport psycopg2\nfrom osgeo import gdal\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport matplotlib.cbook as cbook\nimport matplotlib.ticker as ticker\n\n# ДОПУСК ПО ТОЧНОСТИ\neps = 0.001\nmodisGridEps = 17000\n\n\ndef createTempVectorLayerByFeature(vectorFeature, geometryType, tempVectorLayerFullPath):\n # geometryType = 'Point', 'Polyline', 'Polygon'\n fields = QgsFields()\n fields.append(QgsField(\"id\", QVariant.Int))\n\n if geometryType == 'Point':\n writer = QgsVectorFileWriter(tempVectorLayerFullPath, \"UTF-8\", fields, 
QGis.WKBPoint, None, \"ESRI Shapefile\")\n elif geometryType == 'Polyline':\n writer = QgsVectorFileWriter(tempVectorLayerFullPath, \"UTF-8\", fields, QGis.WKBLineString, None,\n \"ESRI Shapefile\")\n elif geometryType == 'Polygon':\n writer = QgsVectorFileWriter(tempVectorLayerFullPath, \"UTF-8\", fields, QGis.WKBPolygon, None, \"ESRI Shapefile\")\n else:\n return\n\n if writer.hasError() != QgsVectorFileWriter.NoError:\n #print \"Error when creating shapefile: \", writer.hasError()\n return\n\n writer.addFeature(vectorFeature)\n\n del writer\n\ndef saveVectorLayerToSHP (vectorLayer, shpFullPath):\n error = QgsVectorFileWriter.writeAsVectorFormat(vectorLayer, shpFullPath, \"UTF-8\", None, \"ESRI Shapefile\")\n return error\n\n\"\"\"\nОБРАБОТКА РАСТРОВ\n\"\"\"\n\n\ndef createBinaryRasterByComparingTwoRasters(rasterLayer1, rasterLayer2, tempLayerFullPath, option):\n \"\"\"\n Записывает в tempLayer бинарный одноканальный растр, в котором значение \"1\" имеют те пиксели,\n которые у rasterLayer1 большие, равные или меньшие (в зависимости от option), чем у\n rasterLayer2.\n\n\n\n :param rasterLayer1: Первый растровый слой\n :param rasterLayer2: Второй растровый слой\n :param tempLayerFullPath: Путь до временного файла\n :param option: Параметр, как сравнивать растры. Допустимые значения '>', '<', '=', '>=', '<='\n \"\"\"\n entries = []\n # Добавляем в растровый калькулятор первый растр\n rCalcObj1 = QgsRasterCalculatorEntry()\n rCalcObj1.ref = 'rCalcObj1@1'\n rCalcObj1.raster = rasterLayer1\n rCalcObj1.bandNumber = 1\n entries.append(rCalcObj1)\n # Добавляем в растровый калькулятор второй растр\n rCalcObj2 = QgsRasterCalculatorEntry()\n rCalcObj2.ref = 'rCalcObj2@1'\n rCalcObj2.raster = rasterLayer2\n rCalcObj2.bandNumber = 1\n entries.append(rCalcObj2)\n\n # print 'rCalcObj1 ' + str(option) + ' rCalcObj2'\n calc = QgsRasterCalculator('rCalcObj1@1 ' + str(option) + ' rCalcObj2@1', tempLayerFullPath, 'GTiff',\n rasterLayer2.extent(),\n rasterLayer2.width(), rasterLayer2.height(), entries)\n\n calc.processCalculation()\n\n\ndef createBinaryRasterByValue(rasterLayer, value, tempLayerFullPath, option):\n \"\"\"\n Записывает в tempLayer бинарный одноканальный растр, в зависимости от\n option, со значением \"1\" на месте пикселей больших, равных или меньших, чем value\n Используется калькулятор растров qgis\n\n\n :param rasterLayer: Исходный растровый слой\n :param value: Значение\n :param tempLayerFullPath: путь до временного файла\n :param option: Параметр. 
Если равен '<', то пиксели со значением меньше value станут 1, аналогично '>' и '=',\n '>=', '<='.\n\n \"\"\"\n entries = []\n rCalcObj = QgsRasterCalculatorEntry()\n rCalcObj.ref = 'rCalcObj@1'\n rCalcObj.raster = rasterLayer\n rCalcObj.bandNumber = 1\n entries.append(rCalcObj)\n calc = QgsRasterCalculator('rCalcObj@1 ' + str(option) + ' ' + str(value), tempLayerFullPath, 'GTiff',\n rasterLayer.extent(),\n rasterLayer.width(), rasterLayer.height(), entries)\n calc.processCalculation()\n\ndef rasterToVectorGDAL(rasterLayerFullPath, tempLayerFullPath):\n \"\"\"\n Конвертирует растровый слой в векторный через утилиту gdal_polygonize\n\n\n :param rasterLayerFullPath: Полный путь до растрового слоя\n :param tempLayerFullPath: Полный путь до результирующего векторного слоя\n \"\"\"\n\n gcmd = 'gdal_polygonize.bat \"' + rasterLayerFullPath + '\" -f \"ESRI Shapefile\" \"' + tempLayerFullPath + '\" DN' # Формируем команду\n CREATE_NO_WINDOW = 0x08000000 # Не разрешаем появиться командной строке GDAL\n call(gcmd, creationflags=CREATE_NO_WINDOW)\n\n\ndef reprojectLayerGDAL(rasterLayerFullPath, finalCRSEPSG, reprojectedLayerFullPath):\n gcmd = 'gdalwarp -overwrite -co COMPRESS=PACKBITS ' + '-t_srs EPSG:' + str(\n finalCRSEPSG) + ' -multi -of Gtiff \"' + rasterLayerFullPath + '\" \"' + reprojectedLayerFullPath + '\"'\n CREATE_NO_WINDOW = 0x08000000 # Не разрешаем появиться командной строке GDAL\n call(gcmd, creationflags=CREATE_NO_WINDOW)\n\n\ndef mergeRastersGDAL(rasterLayer1FullPath, rasterLayer2FullPath, mergedRasterLayerFullPath):\n \"\"\"\n ОБъединяет два растровых файла в один (не поканально) через утилиту gdal_merge\n\n :param rasterLayer1FullPath: Полный путь до первого растрового файла\n :param rasterLayer2FullPath: Полный путь до второго растрового файла\n :param mergedRasterLayerFullPath: Полный путь до результирующего растровго файла\n \"\"\"\n\n gcmd = 'gdal_merge.bat -co COMPRESS=PACKBITS -of GTiff -o \"' + mergedRasterLayerFullPath + '\" \"' + rasterLayer1FullPath + '\" \"' + rasterLayer2FullPath + '\"'\n CREATE_NO_WINDOW = 0x08000000 # Не разрешаем появиться командной строке GDAL\n call(gcmd, creationflags=CREATE_NO_WINDOW)\n\n\ndef cutRasterByVectorMaskGDAL(rasterLayerFullPath, vectorLayerFullPath, cuttedRasterLayerFullPath):\n gcmd = 'gdalwarp -q -dstnodata nodata -cutline \"' + vectorLayerFullPath + '\" -crop_to_cutline -of GTiff \"' + rasterLayerFullPath + '\" \"' + cuttedRasterLayerFullPath + '\"'\n CREATE_NO_WINDOW = 0x08000000 # Не разрешаем появиться командной строке GDAL\n call(gcmd, creationflags=CREATE_NO_WINDOW)\n\n# ------------------------------------------- #\n# ------------------------------------------- #\n# -----PostGIS Raster & MODIS Processing----- #\n# ------------------------------------------- #\n# ------------------------------------------- #\n\ndef getEPSGCodeFromLayer(layer):\n \"\"\"\n Возвращает EPSG-код пользовательск��го слоя (только число)\n\n :param layer: Пользовательский слой\n :return: Код (число)\n \"\"\"\n authid = layer.crs().authid()\n crs = ''\n nums = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n for symb in authid:\n if symb in nums:\n crs += symb\n return crs\n\n\ndef loadRasterToPostGIS(rasterFullPath, dbOptions, schemeName, tableName):\n \"\"\"\n Загружает растр из файловой системы в базу данных с указанными опциями.\n Использует для загрузки raster2pgsql.exe и psql.exe, предполагается, что они со всеми библиотеками лежат в папке\n с модулем в директории pgsql\n\n :param rasterFullPath: Полный путь до растра\n :param dbOptions: Опции 
подключения к базе данных: ['host','port','database','user','password']\n :param schemeName: Название схемы, куда класть\n :param tableName: Название таблицы, в которой будет храниться растр\n \"\"\"\n rasterLayer = QgsRasterLayer(rasterFullPath)\n rasterCrs = getEPSGCodeFromLayer(rasterLayer)\n pluginPath = os.path.dirname(os.path.abspath(__file__))\n\n cmd = 'SET PGPASSWORD=' + dbOptions[4]\n os.system(cmd)\n\n cmd = '\"' + pluginPath + '\\\\pgsql\\\\raster2pgsql.exe\" -s ' + rasterCrs + ' -I -C -M \"' + rasterFullPath + '\" -F \"' + \\\n schemeName + '\".\"' + tableName + '\" | \"' + pluginPath + '\\\\pgsql\\\\psql.exe\" -h ' + dbOptions[0] + ' -p ' + \\\n dbOptions[1] + ' -d ' + dbOptions[2] + ' -U ' + dbOptions[3]\n\n # Пишем строку в батничек и запускаем его\n newBatFile = open(pluginPath+'\\\\temp\\\\newBat.bat','w')\n newBatFile.write(cmd)\n newBatFile.close()\n #call(pluginPath+'\\\\temp\\\\newBat.bat')\n cmd = '\"' + pluginPath+'\\\\temp\\\\newBat.bat\"'\n #print cmd\n os.system(cmd)\n\ndef convertHDFToGTiff(HDFRasterLayerFullPath, HDFDataSetName, HDFType, GTiffRasterLayerFullPath):\n rasterLayerString = HDFType + ':\"' + HDFRasterLayerFullPath + '\":' + HDFDataSetName\n rasterLayer = QgsRasterLayer(rasterLayerString)\n\n provider = rasterLayer.dataProvider()\n pipe = QgsRasterPipe()\n pipe.set(provider.clone())\n\n extent = rasterLayer.extent()\n nCols = rasterLayer.width()\n nRows = rasterLayer.height()\n crs = rasterLayer.crs()\n file_writer = QgsRasterFileWriter(GTiffRasterLayerFullPath)\n file_writer.writeRaster(pipe, nCols, nRows, extent, crs)\n\n\ndef getRasterLayerFromPostGIS(dbOptions, schemeName, tableName):\n \"\"\"\n Возвращает QgsRasterLayer из указанной таблицы в PgSql\n\n :param dbOptions: Опции подключения к базе данных: ['host','port','database','user','password']\n :param schemeName: Название схемы, куда класть\n :param tableName: Название таблицы, в которой будет храниться растр\n :return: QgsRasterLayer\n \"\"\"\n cmd = 'PG: dbname=' + dbOptions[2] + ' host=' + dbOptions[0] + ' user=' + dbOptions[3] + ' password=' + \\\n dbOptions[4] + ' port=' + dbOptions[1] + ' mode=2 schema=' + schemeName + ' column=rast table=' + tableName\n\n nameString = tableName\n rasterLayer = QgsRasterLayer(cmd, nameString)\n\n return rasterLayer\n\ndef getVectorLayerFromPostGIS (dbOptions, schemeName, tableName):\n uri = QgsDataSourceURI()\n uri.setConnection(dbOptions[0], dbOptions[1], dbOptions[2], dbOptions[3], dbOptions[4])\n uri.setDataSource(schemeName, tableName, \"geom\")\n vectorLayer = QgsVectorLayer(uri.uri(), tableName, \"postgres\")\n return vectorLayer\n\ndef downloadMOD10A2ForGivenDateAndPlace(userDate, h, v, rasterLayerFullPath):\n \"\"\"\n Скачивает в указанное место MODIS MOD10A2 за выбранную дату. При успехе возвращает 1. 
Иначе код ошибки от 2 до 5\n\n :param userDate: Дата в формате yyyy.mm.dd (2015.05.17)\n :param h:\n :param v:\n :param rasterLayerFullPath:\n :return:\n \"\"\"\n try:\n ftp = FTP('n5eil01u.ecs.nsidc.org')\n ftp.login()\n except:\n return 2 # Неполадки с подключением\n\n try:\n ftp.cwd('SAN/MOST/MOD10A2.005')\n ftp.cwd(userDate)\n except:\n return 3 # Недоступна дата\n\n if len(str(h)) == 1:\n h = '0' + str(h)\n if len(str(v)) == 1:\n v = '0' + str(v)\n\n hvString = 'h' + str(h) + 'v' + str(v)\n\n pathString = 'ftp://n5eil01u.ecs.nsidc.org/SAN/MOST/MOD10A2.005/' + userDate + '/'\n\n try:\n files = ftp.nlst()\n except:\n return 4 # Недоступен список файлов\n\n for file in files:\n if (file[-3:] == 'hdf') and (file.find(hvString) != -1):\n pathString += file\n try:\n req = urlopen(pathString)\n dist = open(rasterLayerFullPath, 'wb')\n shutil.copyfileobj(req, dist)\n dist.close()\n except:\n return 5 # Невозможно загрузить файл\n\n return 1\n\n\ndef getMODIShvFromPointCoordinates(x, y, crsEPSG):\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\modis\\\\'\n sourceCRS = QgsCoordinateReferenceSystem(int(crsEPSG))\n\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n\n xform = QgsCoordinateTransform(sourceCRS, sinCRS)\n sinX, sinY = xform.transform(x, y)\n\n modisGridLayer = QgsVectorLayer(tempPath + 'modis_sinusoidal_grid_world.shp', 'MODIS GRID', 'ogr')\n modisGridFeatures = modisGridLayer.getFeatures()\n h = 0\n v = 0\n\n for modisGridFeature in modisGridFeatures:\n geom = modisGridFeature.geometry()\n bbox = geom.boundingBox()\n if sinX >= bbox.xMinimum() and sinX <= bbox.xMaximum():\n if sinY >= bbox.yMinimum() and sinY <= bbox.yMaximum():\n h = modisGridFeature[1]\n v = modisGridFeature[2]\n return h, v\n\n return h, v\n\n\ndef getMODIShvListFromPolygonFeature(polygonFeature, crsEPSG):\n geom = polygonFeature.geometry()\n bbox = geom.boundingBox()\n sourceCRS = QgsCoordinateReferenceSystem(crsEPSG)\n # print crsEPSG\n\n coordListX = [bbox.xMinimum() - modisGridEps, bbox.xMaximum() + modisGridEps]\n coordListY = [bbox.yMinimum() - modisGridEps, bbox.yMaximum() + modisGridEps]\n # print coordListX, coordListY\n hvList = []\n\n for coordinateX in coordListX:\n for coordinateY in coordListY:\n h, v = getMODIShvFromPointCoordinates(coordinateX, coordinateY, crsEPSG)\n if [h, v] not in hvList:\n hvList.append([h, v])\n\n return hvList\n\n\ndef getCurrentCanvasCenterInSelectedCRS(destCRSEPSG):\n canvas = iface.mapCanvas()\n sourceCRSEPSG = canvas.mapRenderer().destinationCrs().authid()\n sourceCRS = QgsCoordinateReferenceSystem(sourceCRSEPSG)\n destCRS = QgsCoordinateReferenceSystem(destCRSEPSG)\n e = iface.mapCanvas().extent()\n centerX = (e.xMaximum() + e.xMinimum()) / 2\n centerY = (e.yMaximum() + e.yMinimum()) / 2\n xform = QgsCoordinateTransform(sourceCRS, destCRS)\n x, y = xform.transform(centerX, centerY)\n return x, y\n\n\ndef convertPointCoordinatesToCRS(x, y, sCRS, dCRS):\n sourceCRS = QgsCoordinateReferenceSystem(sCRS)\n destCRS = QgsCoordinateReferenceSystem(dCRS)\n xform = QgsCoordinateTransform(sourceCRS, destCRS)\n xNew, yNew = xform.transform(x, y)\n return xNew, yNew\n\ndef convertVectorFeatureFromCRSToCRS (vectorFeature, geometryType, sourceCRS, destCRS):\n\n xform = QgsCoordinateTransform(sourceCRS, destCRS)\n if geometryType == 'Polygon':\n geometry = vectorFeature.geometry().asPolygon()\n newGeometry = [[]]\n for coordinates in geometry[0]:\n x, y = 
xform.transform(coordinates[0],coordinates[1])\n newGeometry[0].append(QgsPoint(x,y))\n fet = QgsFeature()\n fet.setGeometry(QgsGeometry.fromPolygon(newGeometry))\n return fet\n # Написать для линий и точек!\n\n\n\ndef getListOfDatesFromInterval(startDate, endDate):\n dList = []\n sD = startDate.split('.')\n eD = endDate.split('.')\n d1 = datetime.date(int(sD[0]), int(sD[1]), int(sD[2]))\n d2 = datetime.date(int(eD[0]), int(eD[1]), int(eD[2]))\n delta = d2 - d1\n for i in range(delta.days + 1):\n # print str(d1 + datetime.timedelta(days=i))\n dList.append(str(d1 + datetime.timedelta(days=i)))\n return dList\n\n\ndef getHVFromString(inputString):\n h = inputString[1:3]\n v = inputString[4:6]\n return h, v\n\n\ndef HVtoString(h, v):\n if len(str(h)) == 1:\n hNew = '0' + str(h)\n else:\n hNew = str(h)\n if len(str(v)) == 1:\n vNew = '0' + str(v)\n else:\n vNew = str(v)\n\n hvString = 'h' + str(hNew) + 'v' + str(vNew)\n return hvString\n\n\ndef getHVDataFromTableName(inputString):\n h, v = getHVFromString(inputString)\n year = inputString[7:11]\n month = inputString[12:14]\n day = inputString[15:17]\n dataDict = {'h': h, 'v': v, 'year': year, 'month': month, 'day': day}\n return dataDict\n\ndef getTableNameFromHVDate(h,v,year,month,day):\n hvString = HVtoString(h,v)\n if len(str(month)) == 1:\n newMonth = '0' + str(month)\n else:\n newMonth = str(month)\n\n if len(str(day)) == 1:\n newDay = '0' + str(day)\n else:\n newDay = str(day)\n\n DateString = str(year) + '_' + str(newMonth) + '_' + str(newDay)\n resultString = hvString + '_' + DateString\n return resultString\n\ndef getTableNameFromObjectNameAndDate (objectName, year, month, day):\n translit = commonLibrary.transliterateString(objectName)\n if len(str(month)) == 1:\n newMonth = '0' + str(month)\n else:\n newMonth = str(month)\n\n if len(str(day)) == 1:\n newDay = '0' + str(day)\n else:\n newDay = str(day)\n DateString = str(year) + '_' + str(newMonth) + '_' + str(newDay)\n resultString = translit + '_' + DateString\n return resultString\n\ndef checkMOD10A2OrigExistingInDatabase(dboptions, dict, h, v, date):\n # Дата уже в формате yyyy_mm_dd\n DBHost = dboptions[0]\n DBPort = int(dboptions[1])\n DBName = dboptions[2]\n DBUser = dboptions[3]\n DBPassword = dboptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n scheme = dict['db_original_mod10a2_scheme']\n tableName = HVtoString(h, v) + '_' + str(date)\n\n query = 'SELECT EXISTS( SELECT * FROM information_schema.tables WHERE table_schema = \\'' + scheme + '\\' AND table_name = \\'' + tableName + '\\');'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n recievedData = cursor.fetchone()\n return recievedData[0]\n\ndef checkMOD10A2ObjectMaskExistingInDatabase(dboptions,configs,objectName,year,month,day):\n DBHost = dboptions[0]\n DBPort = int(dboptions[1])\n DBName = dboptions[2]\n DBUser = dboptions[3]\n DBPassword = dboptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n scheme = configs['db_object_masks_mod10a2_scheme']\n translitName = commonLibrary.transliterateString(objectName)\n tableName = translitName + '_' + str(year) +'_' + str(month) + '_' + str(day)\n query = 'SELECT EXISTS( SELECT * FROM information_schema.tables WHERE table_schema = \\'' + scheme + '\\' AND table_name = \\'' + tableName + '\\');'\n 
cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n recievedData = cursor.fetchone()\n return recievedData[0]\n pass\n\ndef rasterUniqueValuesCount(inputRaster, roundValuesToNDigits=None):\n # load raster\n gdalData = gdal.Open(str(inputRaster))\n\n # get width and heights of the raster\n xsize = gdalData.RasterXSize\n ysize = gdalData.RasterYSize\n\n # get number of bands\n bands = gdalData.RasterCount\n\n uniqueValuesDicts = []\n\n # process the raster\n for i in xrange(1, bands + 1):\n band_i = gdalData.GetRasterBand(i)\n raster = band_i.ReadAsArray()\n\n # create dictionary for unique values count\n count = {}\n\n # count unique values for the given band\n for col in range(xsize):\n for row in range(ysize):\n cell_value = raster[row, col]\n\n # check if cell_value is NaN\n if math.isnan(cell_value):\n cell_value = 'Null'\n\n # round floats if needed\n elif roundValuesToNDigits:\n try:\n cell_value = round(cell_value, int(roundValuesToNDigits))\n except:\n cell_value = round(cell_value)\n\n # add cell_value to dictionary\n try:\n count[cell_value] += 1\n except:\n count[cell_value] = 1\n\n uniqueValuesDicts.append(count)\n return uniqueValuesDicts\n\n\ndef getMOD10A2UniqueValuesDict(rasterLayer):\n uniqueValuesDict = rasterUniqueValuesCount(rasterLayer)[0]\n overageCount = 0\n badCount = 0\n lakeIceCount = 0\n\n badValuesList = [0, 1, 11, 50, 254]\n lakeIceValuesList = [100]\n\n for key in uniqueValuesDict:\n if int(key) in badValuesList:\n badCount += uniqueValuesDict[key]\n if int(key) in lakeIceValuesList:\n lakeIceCount += uniqueValuesDict[key]\n overageCount += uniqueValuesDict[key]\n\n MOD10A2UniqueValuesDict = {'bad': badCount, 'lake_ice': lakeIceCount, 'overage': overageCount}\n return MOD10A2UniqueValuesDict\n\ndef getMOD10A2LakeUniqueValuesDict(rasterLayer):\n uniqueValuesDict = rasterUniqueValuesCount(rasterLayer)[0]\n overageCount = 0\n badCount = 0\n lakeIceCount = 0\n\n badValuesList = [1, 11, 50, 254]\n lakeIceValuesList = [100,200]\n\n for key in uniqueValuesDict:\n if int(key) in badValuesList:\n badCount += uniqueValuesDict[key]\n if int(key) in lakeIceValuesList:\n lakeIceCount += uniqueValuesDict[key]\n if int(key) != 255 and int(key) != 0:\n overageCount += uniqueValuesDict[key]\n\n MOD10A2UniqueValuesDict = {'bad': badCount, 'lake_ice': lakeIceCount, 'overage': overageCount}\n return MOD10A2UniqueValuesDict\n\n\ndef getRasterLayerBasicStatistics(rasterLayer):\n metadata = rasterLayer.metadata()\n splitted = metadata.split('\\n')\n statsDict = {}\n maximum = -1\n minimum = -1\n stDev = -1\n mean = -1\n\n for item in splitted:\n if item[0:19] == 'STATISTICS_MAXIMUM=':\n maximum = item[19:len(item) - 4]\n if item[0:19] == 'STATISTICS_MINIMUM=':\n minimum = item[19:len(item) - 4]\n if item[0:18] == 'STATISTICS_STDDEV=':\n stDev = item[18:len(item) - 4]\n if item[0:16] == 'STATISTICS_MEAN=':\n mean = item[16:len(item) - 4]\n\n statsDict['maximum'] = float(maximum)\n statsDict['minimum'] = float(minimum)\n statsDict['stdev'] = float(stDev)\n statsDict['mean'] = float(mean)\n return statsDict\n\n\ndef getRasterFileBasicStatistics(rasterLayerFullPath):\n rasterLayer = QgsRasterLayer(rasterLayerFullPath)\n statsDict = getRasterLayerBasicStatistics(rasterLayer)\n return statsDict\n\n\ndef loadMOD10A2StatisticsToPostGIS(rasterFullPath, h, v, date, dbOptions, configDict):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n 
user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n statsDict = getRasterFileBasicStatistics(rasterFullPath)\n MOD10A2StatsDict = getMOD10A2UniqueValuesDict(rasterFullPath)\n\n scheme = configDict['db_mod10a2_statistics_scheme']\n tableName = configDict['db_mod10a2_statistics_table']\n\n query = 'INSERT INTO ' + scheme + '.' + tableName + ' (h,v,date,stats_max,stats_min,stats_mean,stats_stdev,bad_count,overage_count,lake_ice_count) VALUES (' + str(\n h) + ',' + str(v) + ',\\'' + str(date) + '\\',' + str(statsDict['maximum']) + ',' + str(statsDict['minimum']) + ',' + str(statsDict['mean']) + ',' + str(\n statsDict['stdev']) + ',' + str(MOD10A2StatsDict['bad']) + ',' + str(MOD10A2StatsDict['overage']) + ',' + str(MOD10A2StatsDict[\n 'lake_ice']) + ');'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n db.commit()\n except:\n return 3\n\ndef loadMOD10A2ObjectMaskStatisticsToPostGIS (rasterFullPath, date, objectName, dbOptions, configDict):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n statsDict = getRasterFileBasicStatistics(rasterFullPath)\n objectStatsDict = getMOD10A2LakeUniqueValuesDict(rasterFullPath)\n\n scheme = configDict['db_mask_object_mod10a2_statistics_scheme']\n tableName = configDict['db_mask_object_mod10a2_statistics_table']\n\n query = 'INSERT INTO ' + scheme + '.' + tableName + ' (object,date,stats_max,stats_min,stats_mean,stats_stdev,bad_count,overage_count,lake_ice_count) VALUES (\\'' + str(\n objectName) + '\\',\\'' + str(date) + '\\',' + str(statsDict['maximum']) + ',' + str(statsDict['minimum']) + ',' + str(statsDict['mean']) + ',' + str(\n statsDict['stdev']) + ',' + str(objectStatsDict['bad']) + ',' + str(objectStatsDict['overage']) + ',' + str(objectStatsDict[\n 'lake_ice']) + ');'\n #print query\n cursor = db.cursor()\n try:\n cursor.execute(query)\n db.commit()\n except:\n return 3\n\ndef generateLakeIceLayerForRasterLayerAndWriteToPostGIS (dbOptions, configs, rasterLayer, objectName, year, month, day, vectorLayerFullPath):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\temp\\\\'\n translit = commonLibrary.transliterateString(objectName)\n tempRasterFileName = translit + '_' + str(year) + '_' + str(month) + '_' + str(day) + '_temp.tif'\n tempRasterFileName2 = translit + '_' + str(year) + '_' + str(month) + '_' + str(day) + '_temp2.tif'\n tempVectorFileName = translit + '_' + str(year) + '_' + str(month) + '_' + str(day) + '_lakeIce.shp'\n tempVectorFileName2 = translit + '_' + str(year) + '_' + str(month) + '_' + str(day) + '_lakeIce2.shp'\n createBinaryRasterByValue(rasterLayer,100,tempPath + tempRasterFileName,'=')\n createBinaryRasterByValue(rasterLayer,200,tempPath + tempRasterFileName2,'=')\n\n #cutRasterByVectorMaskGDAL(tempPath + tempRasterFileName,tempPath + 'vectorTempMask.shp',tempPath + tempRasterFileName2)\n rasterToVectorGDAL(tempPath + tempRasterFileName,tempPath + tempVectorFileName)\n rasterToVectorGDAL(tempPath + 
tempRasterFileName2,tempPath + tempVectorFileName2)\n\n layerName = translit + '_' + str(year) + '_' + str(month) + '_' + str(day)\n\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n\n destCRS = QgsCoordinateReferenceSystem()\n destCRS.createFromProj4(\"+proj=laea +lat_0=90 +lon_0=0 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs\")\n\n memoryLayerString = \"Polygon?crs=\" + sinCRS.authid()\n memoryLayer = QgsVectorLayer(memoryLayerString, layerName, \"memory\")\n\n vectorLayer = QgsVectorLayer(tempPath + tempVectorFileName,layerName,'ogr')\n vectorLayer2 = QgsVectorLayer(tempPath + tempVectorFileName2,layerName,'ogr')\n\n memoryLayerDataProvider = memoryLayer.dataProvider()\n memoryLayerDataProvider.addAttributes([QgsField(\"ID\", QVariant.Int),\n QgsField(\"ICE\", QVariant.Double),\n QgsField(\"AREA\", QVariant.Double)])\n memoryLayer.updateFields()\n physycalFeatures = vectorLayer.getFeatures()\n i = 0\n for physycalFeature in physycalFeatures:\n if physycalFeature[0] == 1:\n memoryFeature = physycalFeature\n\n # Конвертируем в равновеликую проекцию\n convertedMemoryFeature = convertVectorFeatureFromCRSToCRS(memoryFeature,'Polygon',sinCRS,destCRS)\n\n geom = convertedMemoryFeature.geometry()\n memoryFeature.setAttributes([i, 100, geom.area() / 1000000])\n memoryLayerDataProvider.addFeatures([memoryFeature])\n i += 1\n\n physycalFeatures = vectorLayer2.getFeatures()\n for physycalFeature in physycalFeatures:\n if physycalFeature[0] == 1:\n memoryFeature = physycalFeature\n\n # Конвертируем в равновеликую проекцию\n convertedMemoryFeature = convertVectorFeatureFromCRSToCRS(memoryFeature,'Polygon',sinCRS,destCRS)\n\n geom = convertedMemoryFeature.geometry()\n memoryFeature.setAttributes([i, 100, geom.area() / 1000000])\n memoryLayerDataProvider.addFeatures([memoryFeature])\n i += 1\n\n memoryLayer.commitChanges()\n vectorLayer = None\n vectorLayer2 = None\n\n schema = configs['db_lake_ice_vectors_scheme']\n\n # Экспорт векторного слоя в postgis\n uri = 'dbname=' + dbOptions[2] + ' host=' + dbOptions[0] + ' user=' + dbOptions[3] + ' password=' + \\\n dbOptions[4] + ' port=' + dbOptions[1] + ' key=gid type=POLYGON table=\\'' + schema + '\\'.\\'' + layerName + '\\' (geom) sql='\n\n crs = None\n # layer - QGIS vector layer\n error = QgsVectorLayerImport.importLayer(memoryLayer, uri, \"postgres\", crs, False, False)\n if error[0] != 0:\n #print error[1]\n pass\n #iface.messageBar().pushMessage(u'Error', error[1], QgsMessageBar.CRITICAL, 5)\n memoryLayer = None\n\n\ndef getAllMOD10A2OriginImageryFromPostGIS (dbOptions, configDict):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n schema = configDict['db_original_mod10a2_scheme']\n\n query = 'SELECT table_name FROM information_schema.tables WHERE table_schema = \\'' + schema + '\\';'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n\n imageryList = []\n recievedData = cursor.fetchall()\n for data in recievedData:\n imageryList.append(data[0])\n return imageryList\n\n\n\ndef getAllMOD10A2ObjectMasksFromPostGIS (dbOptions, configDict, objectName):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = 
dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n schema = configDict['db_object_masks_mod10a2_scheme']\n\n translitName = commonLibrary.transliterateString(objectName)\n query = 'SELECT DISTINCT table_name FROM information_schema.tables WHERE table_name LIKE \\'' + translitName + '%\\';'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n imageryList = []\n recievedData = cursor.fetchall()\n for data in recievedData:\n imageryList.append(data[0])\n return imageryList\n\ndef getDateFromMOD10A2ObjectMaskTableName (TableName):\n year = TableName[-10:-6]\n month = TableName[-5:-3]\n day = TableName[-2:]\n dateDict = {'year':year,'month':month,'day':day}\n return dateDict\n\ndef getHVDateListsFromOriginMOD10A2TableNames (imageryList):\n HVDateList = []\n for imagery in imageryList:\n HVDateElement = []\n HVDateDict = getHVDataFromTableName(imagery)\n HVDateList.append([HVDateDict['h'],HVDateDict['v'],HVDateDict['year'],HVDateDict['month'],HVDateDict['day']])\n #HVDateList.append(HVDateElement)\n return HVDateList\n\ndef getMOD10A2OriginImageryStatsByHVDate (dbOptions, configDict, h, v, date):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n schema = configDict['db_mod10a2_statistics_scheme']\n tableName = configDict['db_mod10a2_statistics_table']\n\n query = 'SELECT stats_max, stats_min, stats_mean, stats_stdev, bad_count, overage_count, lake_ice_count FROM ' + schema + '.' + tableName + ' WHERE h = ' + str(h) + ' AND v = ' + str(v) + ' AND date = \\'' + str(date) + '\\'' ';'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n\n recievedData = cursor.fetchall()\n statsDict = {'maximum':recievedData[0][0],'minimum':recievedData[0][1],'mean':recievedData[0][2],'stdev':recievedData[0][3],'bad_count':recievedData[0][4],'overage_count':recievedData[0][5],'lake_ice':recievedData[0][6]}\n return statsDict\n\ndef getMOD10A2ObjectMaskStatsByObjectNameAndDate (dbOptions, configDict, objectName, year, month, day):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n translitName = commonLibrary.transliterateString(objectName)\n schema = configDict['db_mask_object_mod10a2_statistics_scheme']\n tableName = configDict['db_mask_object_mod10a2_statistics_table']\n date = str(year) + '-' + str(month) + '-' + str(day)\n query = 'SELECT stats_max, stats_min, stats_mean, stats_stdev, bad_count, overage_count, lake_ice_count FROM ' + schema + '.' 
+ tableName + ' WHERE object = \\'' + str(translitName) + '\\' AND date = \\'' + str(date) + '\\'' ';'\n #print query\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n\n recievedData = cursor.fetchall()\n statsDict = {'maximum':recievedData[0][0],'minimum':recievedData[0][1],'mean':recievedData[0][2],'stdev':recievedData[0][3],'bad_count':recievedData[0][4],'overage_count':recievedData[0][5],'lake_ice':recievedData[0][6]}\n return statsDict\n\n\ndef addToProjectOriginMOD10A2ImageryByHVDate (dbOptions, configDict, h,v, year, month, day):\n # Растр скачивается и добавляется в проект\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\downloadedRasters\\\\'\n tempRasterName = tempPath + 'Orig_h' + str(h) + 'v' + str(v) + '_' + str(year) + '_' + str(month) + '_' + str(day)\n LayerName = 'h' + str(h) + 'v' + str(v) + '_' + str(year) + '_' + str(month) + '_' + str(day)\n schema = configDict['db_original_mod10a2_scheme']\n tableName = getTableNameFromHVDate(h,v,year,month,day)\n rasterLayer = getRasterLayerFromPostGIS(dbOptions, schema, tableName)\n saveRasterLayerToPathGeoTiff(rasterLayer, tempRasterName)\n rasterPhysLayer = QgsRasterLayer(tempRasterName,LayerName)\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n rasterPhysLayer.setCrs(sinCRS)\n QgsMapLayerRegistry.instance().addMapLayer(rasterPhysLayer)\n\ndef addToProjectLakeIceVectorMaskByObjectNameAndDate (dbOptions, configDict, objectName, year, month, day):\n schema = configDict['db_lake_ice_vectors_scheme']\n tableName = getTableNameFromObjectNameAndDate(objectName,year,month,day)\n vectorLayer = getVectorLayerFromPostGIS(dbOptions,schema,tableName)\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n vectorLayer.setCrs(sinCRS)\n vectorLayer.setLayerName(u'Озёрный лёд (' + objectName + ' ' + str(year) + '/' + str(month) + '/' + str(day) +u')')\n QgsMapLayerRegistry.instance().addMapLayer(vectorLayer)\n\ndef getLakeIceVectorMaskByObjectNameAndDate(dbOptions, configDict, objectName, year, month, day):\n schema = configDict['db_lake_ice_vectors_scheme']\n tableName = getTableNameFromObjectNameAndDate(objectName,year,month,day)\n vectorLayer = getVectorLayerFromPostGIS(dbOptions,schema,tableName)\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n vectorLayer.setCrs(sinCRS)\n return vectorLayer\n\ndef addToProjectMOD10A2ObjectMaskByObjectNameAndDate (dbOptions, configDict, objectName, year, month, day):\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\downloadedRasters\\\\'\n tempRasterName = tempPath + commonLibrary.transliterateString(objectName) + '_' + str(year) + '_' + str(month) + '_' + str(day)\n LayerName = objectName + '_' + str(year) + '_' + str(month) + '_' + str(day)\n schema = configDict['db_object_masks_mod10a2_scheme']\n tableName = getTableNameFromObjectNameAndDate(objectName,year,month,day)\n rasterLayer = getRasterLayerFromPostGIS(dbOptions, schema, tableName)\n\n saveRasterLayerToPathGeoTiff(rasterLayer, tempRasterName)\n rasterPhysLayer = QgsRasterLayer(tempRasterName,LayerName)\n\n sinCRS = QgsCoordinateReferenceSystem()\n sinCRS.createFromProj4(\"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs\")\n 
rasterPhysLayer.setCrs(sinCRS)\n QgsMapLayerRegistry.instance().addMapLayer(rasterPhysLayer)\n\ndef saveRasterLayerToPathGeoTiff (rasterLayer, outputFullPath):\n provider = rasterLayer.dataProvider()\n pipe = QgsRasterPipe()\n pipe.set(provider.clone())\n\n extent = rasterLayer.extent()\n nCols = rasterLayer.width()\n nRows = rasterLayer.height()\n crs = rasterLayer.crs()\n\n file_writer = QgsRasterFileWriter(outputFullPath)\n file_writer.writeRaster(pipe, nCols, nRows, extent, crs)\n\n\n\ndef mergeListOfOriginMOD10A2RastersByHVListAndDate (dboptions, configs, HVList, year, month, day, outputFullPath):\n # Скачать все нужные исходники из БД и вернуть скленный растр\n\n schema = configs['db_original_mod10a2_scheme']\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\temp\\\\'\n rastersList = []\n for HV in HVList:\n tableName = getTableNameFromHVDate(int(HV[0]),int(HV[1]),year,month,day)\n rasterLayer = getRasterLayerFromPostGIS(dboptions,schema,tableName)\n rastersList.append(tempPath+tableName+'.tif')\n saveRasterLayerToPathGeoTiff(rasterLayer,tempPath+tableName+'.tif')\n\n i = 0\n\n if len(rastersList) == 2:\n mergeRastersGDAL(rastersList[0],rastersList[1],outputFullPath)\n else:\n while i < len(rastersList):\n if i == 0:\n tempMergedPath = tempPath + 'tempMerge' + str(i) + '.tif'\n mergeRastersGDAL(rastersList[0],rastersList[1],tempMergedPath)\n i += 1\n continue\n elif i == len(rastersList)-1:\n #tempMergedPath = tempPath + 'tempMerge' + str(i) + '.tif'\n mergeRastersGDAL(tempPath+'tempMerge' + str(i-1),rastersList[i],outputFullPath)\n i += 1\n continue\n else:\n tempMergedPath = tempPath + 'tempMerge' + str(i) + '.tif'\n mergeRastersGDAL(tempPath+'tempMerge' + str(i-1),rastersList[i],tempMergedPath)\n i += 1\n continue\n\n\n#########################################################\n################# СЕЗОННЫЙ МОНИТОРИНГ ###################\n#########################################################\n\ndef getListOfAvailableDatesForObjectMask (dbOptions, configDict, objectName):\n itemsList = getAllMOD10A2ObjectMasksFromPostGIS (dbOptions, configDict,objectName)\n datesList = []\n for item in itemsList:\n datesDict = getDateFromMOD10A2ObjectMaskTableName(item)\n curDate = datesDict['year'] + '-' + datesDict['month'] + '-' + datesDict['day']\n datesList.append(curDate)\n return datesList\n\ndef getIceAreaForMOD10A2MaskObjectByObjectNameAndDate (dbOptions, configs, objectName, year, month, day):\n schema = configs['db_lake_ice_vectors_scheme']\n translitName = commonLibrary.transliterateString(objectName)\n tableName = translitName + '_' + str(year) +'_' + str(month) + '_' + str(day)\n vectorLayer = getVectorLayerFromPostGIS(dbOptions,schema,tableName)\n iter = vectorLayer.getFeatures()\n sumArea = 0\n for feature in iter:\n sumArea += feature[3]\n return sumArea\n\ndef generatePlotByDatesAndNumbers(datesList,numbersList,objectName):\n #fig, ax = plt.subplots()\n titleText = datesList[0] + ' - ' + datesList[len(datesList)-1]\n dateObjList = []\n for date in datesList:\n curYear = date[0:4]\n curMonth = date[5:7]\n curDay = date[8:10]\n dateObjList.append(datetime.datetime(int(curYear),int(curMonth),int(curDay)))\n\n #ax.plot(dateObjList, numbersList, 'o-')\n #fig.autofmt_xdate()\n N = len(dateObjList)\n ind = np.arange(N) # the evenly spaced plot indices\n\n def format_date(x, pos=None):\n thisind = np.clip(int(x+0.5), 0, N-1)\n return dateObjList[thisind].strftime('%Y-%m-%d')\n\n fig, ax = plt.subplots()\n ax.plot(ind, numbersList, 'o-')\n 
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))\n fig.autofmt_xdate()\n fig.canvas.set_window_title(titleText)\n plt.show()\n pass\n\ndef differenceBetweenTwoPolygonLayers(vectorLayer1, vectorLayer2, outputVectorLayerFullPath):\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\temp\\\\'\n error1 = saveVectorLayerToSHP(vectorLayer1,tempPath + 'temp_vector_saving1.shp')\n error2 = saveVectorLayerToSHP(vectorLayer2,tempPath + 'temp_vector_saving2.shp')\n try:\n processing.runalg(\"qgis:difference\",tempPath + 'temp_vector_saving1.shp',tempPath + 'temp_vector_saving2.shp',outputVectorLayerFullPath)\n except:\n return 2\n error1 = None\n error2 = None\n return 1\n\ndef twoSidesDifferenceBetweenTwoPolygonLayers(vectorLayer1, vectorLayer2, memoryLayer):\n tempPath = os.path.dirname(os.path.abspath(__file__)) + '\\\\temp\\\\'\n error1 = saveVectorLayerToSHP(vectorLayer1,tempPath + 'temp_vector_saving1.shp')\n error2 = saveVectorLayerToSHP(vectorLayer2,tempPath + 'temp_vector_saving2.shp')\n tempVectorLayer1 = QgsVectorLayer(tempPath + 'temp_vector_saving1.shp','1','ogr')\n tempVectorLayer2 = QgsVectorLayer(tempPath + 'temp_vector_saving2.shp','2','ogr')\n QgsMapLayerRegistry.instance().addMapLayer(tempVectorLayer1)\n QgsMapLayerRegistry.instance().addMapLayer(tempVectorLayer2)\n\n try:\n processing.runalg(\"qgis:difference\",tempVectorLayer1,tempVectorLayer2,tempPath+'diff_vector_start_end.shp')\n processing.runalg(\"qgis:difference\",tempVectorLayer2,tempVectorLayer1,tempPath+'diff_vector_end_start.shp')\n except:\n return 2\n QgsMapLayerRegistry.instance().removeMapLayer(tempVectorLayer1.id())\n QgsMapLayerRegistry.instance().removeMapLayer(tempVectorLayer2.id())\n\n error1 = None\n error2 = None\n\n startEndVectorLayer = QgsVectorLayer(tempPath+'diff_vector_start_end.shp','1','ogr')\n endStartVectorLayer = QgsVectorLayer(tempPath+'diff_vector_end_start.shp','1','ogr')\n QgsMapLayerRegistry.instance().addMapLayer(startEndVectorLayer)\n QgsMapLayerRegistry.instance().addMapLayer(endStartVectorLayer)\n\n # Копируем всё в memory-слой\n memoryLayerDataProvider = memoryLayer.dataProvider()\n memoryLayerDataProvider.addAttributes([QgsField(\"ID\", QVariant.Int),\n QgsField(\"ICE\", QVariant.Double),\n QgsField(\"AREA\", QVariant.Double),\n QgsField(\"TYPE\", QVariant.String)])\n memoryLayer.updateFields()\n physycalFeatures = startEndVectorLayer.getFeatures()\n i = 0\n for physycalFeature in physycalFeatures:\n memoryFeature = physycalFeature\n memoryFeature.setAttributes([i,physycalFeature[1],physycalFeature[2],'Decrease'])\n memoryLayerDataProvider.addFeatures([memoryFeature])\n i += 1\n\n physycalFeatures = endStartVectorLayer.getFeatures()\n for physycalFeature in physycalFeatures:\n memoryFeature = physycalFeature\n memoryFeature.setAttributes([i,physycalFeature[1],physycalFeature[2],'Increase'])\n memoryLayerDataProvider.addFeatures([memoryFeature])\n i += 1\n\n QgsMapLayerRegistry.instance().removeMapLayer(startEndVectorLayer.id())\n QgsMapLayerRegistry.instance().removeMapLayer(endStartVectorLayer.id())\n\n memoryLayer.commitChanges()\n\n return 1\n\ndef checkDatabaseSchemeExistance (dbOptions, schemeName):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n query = 'SELECT EXISTS(SELECT 1 FROM information_schema.schemata 
WHERE schema_name = \\'' + schemeName + '\\');'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n recievedData = cursor.fetchone()\n return recievedData[0]\n\ndef checkDatabaseTableExistance (dbOptions, schemeName, tableName):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n query = 'SELECT EXISTS( SELECT * FROM information_schema.tables WHERE table_schema = \\'' + schemeName + '\\' AND table_name = \\'' + tableName + '\\');'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n recievedData = cursor.fetchone()\n return recievedData[0]\n\n pass\n\ndef checkDatabaseFieldExistance (dbOptions, schemeName, tableName, fieldName):\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n query = 'SELECT EXISTS (SELECT column_name FROM information_schema.columns WHERE table_name = \\'' + tableName + '\\' AND column_name = \\'' + fieldName + '\\');'\n cursor = db.cursor()\n try:\n cursor.execute(query)\n except:\n return 3\n recievedData = cursor.fetchone()\n return recievedData[0]\n pass\n\ndef checkFieldsForMODISStatsTables(dbOptions,configs,param):\n # param - либо 'Origin' либо 'Objects'\n DBHost = dbOptions[0]\n DBPort = int(dbOptions[1])\n DBName = dbOptions[2]\n DBUser = dbOptions[3]\n DBPassword = dbOptions[4]\n\n try:\n db = psycopg2.connect(\n database=DBName,\n user=DBUser,\n password=DBPassword,\n host=DBHost,\n port=DBPort\n )\n except psycopg2.Error as e:\n return 2\n\n fieldsListObject = ['stats_max','stats_min','stats_mean','stats_stdev','bad_count','overage_count','lake_ice_count','object','date']\n fieldsListOrigin = ['stats_max','stats_min','stats_mean','stats_stdev','bad_count','overage_count','lake_ice_count','h','v','date']\n\n if param == 'Origin':\n schemaName = configs['db_mod10a2_statistics_scheme']\n tableName = configs['db_mod10a2_statistics_table']\n for field in fieldsListOrigin:\n if not checkDatabaseFieldExistance(dbOptions,schemaName,tableName,field):\n return field\n\n if param == 'Objects':\n schemaName = configs['db_mask_object_mod10a2_statistics_scheme']\n tableName = configs['db_mask_object_mod10a2_statistics_table']\n for field in fieldsListObject:\n if not checkDatabaseFieldExistance(dbOptions,schemaName,tableName,field):\n return field\n\n return 1","sub_path":"rusloModisLibrary.py","file_name":"rusloModisLibrary.py","file_ext":"py","file_size_in_byte":49747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"105697570","text":"import logging\nimport posixpath\n\nimport os\nimport oss2\n\nfrom django.core.files.storage import Storage\nfrom django.utils import timezone\nfrom django.utils.deconstruct import deconstructible\nfrom oss2.exceptions import NotFound\n\nfrom aliyunoss.conf import alioss_settings\nfrom aliyunoss.storage_file import OSSStorageFile\n\nlogger = logging.getLogger(__name__)\n\n\n@deconstructible\nclass OssStorage(Storage):\n \"\"\"Aliyun Open Storage Service\"\"\"\n\n def __init__(self, bucket_name=alioss_settings.oss_storage_bucket_name,\n 
access_key_id=alioss_settings.oss_access_key_id,\n access_key_secret=alioss_settings.oss_access_key_secret,\n bucket_prefix=alioss_settings.oss_storage_bucket_prefix,\n endpoint=alioss_settings.oss_storage_endpoint\n ):\n super(OssStorage, self).__init__()\n self._bucket_name = bucket_name\n self._endpoint = endpoint\n if not self._bucket_name or not self._endpoint:\n raise ValueError(\"OSS_STORAGE_DEFAULT_BUCKET_NAME or OSS_STORAGE_DEFAULT_ENDPOINT must be set\")\n\n self._access_key_id = access_key_id\n self._access_key_secret = access_key_secret\n if self._access_key_id and self._access_key_secret:\n self._oss_auth = oss2.Auth(self._access_key_id, self._access_key_secret)\n else:\n self._oss_auth = oss2.AnonymousAuth()\n\n self._service = oss2.Service(self._oss_auth, self._endpoint)\n\n self._bucket = oss2.Bucket(\n self._oss_auth, '%s%s' % ('http://', self._endpoint), self._bucket_name)\n\n self._bucket_prefix = bucket_prefix\n\n @staticmethod\n def _clean_name(name):\n # Useful for windows' paths\n clean_name = posixpath.normpath(name).replace('\\\\', '/')\n\n # os.path.normpath() can strip trailing slashes so we implement\n # a workaround here.\n if name.endswith('/') and not clean_name.endswith('/'):\n # Add a trailing slash as it was stripped.\n return clean_name + '/'\n else:\n return clean_name\n\n def _normalize_name(self, name):\n return os.path.join(self._bucket_prefix, name)\n\n def _open(self, name, mode='rb'):\n return OSSStorageFile(name, self, mode)\n\n def _save(self, name, content):\n cleaned_name = self._clean_name(name)\n name = self._normalize_name(cleaned_name)\n\n if hasattr(content, 'open'):\n # Since Django 1.6, content should be a instance\n # of `django.core.files.File`\n content.open()\n\n if hasattr(content, 'chunks'):\n content_str = b''.join(chunk for chunk in content.chunks())\n else:\n content_str = content.read()\n\n self._put_file(name, content_str)\n content.close()\n return cleaned_name\n\n def _put_file(self, name, content):\n return self._bucket.put_object(name, content)\n\n def _read(self, name):\n cleaned_name = self._clean_name(name)\n name = self._normalize_name(cleaned_name)\n return self._bucket.get_object(name, byte_range=None, headers=None, progress_callback=None, process=None).read()\n\n def modified_time(self, name):\n head_object_result = self._get_head_object(name)\n return timezone.datetime.fromtimestamp(head_object_result.last_modified)\n\n def delete(self, name):\n name = self._normalize_name(self._clean_name(name))\n return self._bucket.delete_object(name)\n\n def _get_head_object(self, name):\n name = self._normalize_name(self._clean_name(name))\n return self._bucket.head_object(name)\n\n def accessed_time(self, name):\n pass\n\n def exists(self, name):\n try:\n self._get_head_object(name)\n except NotFound:\n return False\n else:\n return True\n\n def path(self, name):\n \"\"\"\n Returns a local filesystem path where the file can be retrieved using\n Python's built-in open() function. 
Storage systems that can't be\n accessed using open() should *not* implement this method.\n \"\"\"\n raise NotImplementedError(\"This backend doesn't support absolute paths.\")\n\n def url(self, name):\n return 'http://%s.%s/%s/%s' % (self._bucket_name, self._endpoint, self._bucket_prefix, name)\n\n def size(self, name):\n return self._get_head_object(name).content_length\n\n def created_time(self, name):\n pass\n\n def listdir(self, path):\n cleaned_path = self._clean_name(path)\n path = self._normalize_name(cleaned_path)\n if path and not path.endswith('/'):\n path += '/'\n\n files = []\n dirs = []\n for i in oss2.ObjectIterator(self._bucket, prefix=path, delimiter='/'):\n if i.is_prefix():\n dirs.append(os.path.basename(os.path.dirname(i.key)))\n else:\n files.append(os.path.basename(i.key))\n return dirs, files\n\noss_storage = OssStorage()\n\n","sub_path":"src/apps/aliyunoss/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"53643514","text":"from setuptools import setup, find_packages\nimport os, sys\n\n\ninstall_requires = [\n \"setuptools\",\n 'pytest'\n]\n\npy_version = sys.version_info[:2]\nPY3 = py_version[0] == 3\n\nif not PY3:\n raise RuntimeError('On Python 3')\n\nversion = \"0.0.1\"\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\n\n\nsetup(\n name=\"find_japanese\",\n version=version,\n description=('This is a find find_japanese'),\n long_description=README,\n keywords=[\"japanese\", \"regular expression\"],\n license=\"Apache License, Version 2.0\",\n author=\"Sato Shun\",\n author_email=\"shun.sato1@gmail.com\",\n url=\"http://satoshun.github.com/\",\n install_requires = install_requires,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n)\n","sub_path":"pypi_install_script/find_japanese-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605232433","text":"import glob, os, datetime, sys, getopt\n\nargv =sys.argv[1:]\n\npath = \"p:\"\n\nopt, arg = getopt.getopt(argv, path)\n\n# print(arg)\n\nexit_file = \"procedures_list_\" +str(datetime.date.today())+\".txt\"\nf = open(exit_file, \"a\")\nos.chdir(arg[0])\n\nfor file in glob.glob(\"*.sql\"):\n with open(file, 'r') as file1:\n for line in file1:\n if \"ALTER PROCEDURE \" in line:\n line_1 = line.replace(\"ALTER PROCEDURE \", '')\n f.write(line_1)\n\n\n if \"CREATE PROCEDURE\" in line:\n line_2 = line.replace(\"CREATE PROCEDURE \", '')\n f.write(line_2)","sub_path":"code directory test/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"152968989","text":"# -*- coding: utf-8 -*-\n'''\nMain file that runs the tornado server for the bot.\n\nRequires the HOOK_SECRET_KEY and GITHUB_TOKEN environment variables to be set.\nOptionally, set the PORT environment variable as well. Default is ``8080``.\n\nThis requires a minimum version of Python 3.4. 
However, please note that this\nproject has only been tested with Python 3.6.\n'''\n\n# Import Python libs\nimport hmac\nimport hashlib\nimport json\nimport os\nimport sys\n\n# Import Tornado libs\nfrom tornado import gen\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpclient\n\n# Import Tamarack libs\nimport tamarack.event_processor\n\nHOOK_SECRET_KEY = os.environ.get('HOOK_SECRET_KEY')\nGITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')\n\n\nclass EventHandler(tornado.web.RequestHandler):\n '''\n Main handler for the \"/events\" endpoint\n '''\n def data_received(self, chunk):\n pass\n\n @gen.coroutine\n def post(self):\n if not validate_github_signature(self.request):\n raise tornado.web.HTTPError(401)\n\n data = json.loads(self.request.body)\n yield tamarack.event_processor.handle_event(\n data, GITHUB_TOKEN\n )\n\n\ndef make_app():\n return tornado.web.Application([\n ('/events', EventHandler),\n ])\n\n\ndef validate_github_signature(request):\n sha_type, gh_sig = request.headers.get('X-Hub-Signature').split('=')\n if sha_type != 'sha1':\n return False\n\n mac = hmac.new(HOOK_SECRET_KEY.encode('utf-8'),\n msg=request.body, digestmod=hashlib.sha1)\n return hmac.compare_digest(mac.hexdigest(), gh_sig)\n\n\ndef _check_env_vars():\n check_ok = True\n\n if HOOK_SECRET_KEY is None:\n check_ok = False\n print('The bot was started without a WebHook Secret Key.')\n print('Please set the HOOK_SECRET_KEY environment variable.')\n print('To get started:')\n print('(1) Create a secret token.')\n print('(2) Set the token in the \"Secret\" field of the bot\\'s '\n 'GitHub WebHook settings page.')\n print('(3) Click \"Update WebHook\".')\n print('(4) Set the token as an environment variable: '\n '\"export HOOK_SECRET_KEY=your_secret_key\".')\n\n if GITHUB_TOKEN is None:\n check_ok = False\n print('The bot was started without a GitHub authentication token.')\n print('Please set the GITHUB_TOKEN environment variable: '\n '\"export GITHUB_TOKEN=your_token\".')\n\n return check_ok\n\n\nif __name__ == '__main__':\n if _check_env_vars() is False:\n sys.exit()\n\n port = os.environ.get('PORT')\n if port is None:\n port = 8080\n print('No PORT setting found. 
Using default at \\'{0}\\'.'.format(port))\n print('Starting Tamarack server.')\n print('Listening on port \\'{0}\\'.'.format(port))\n app = make_app()\n app.listen(port)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"tamarack/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563383680","text":"\"\"\"\nIndicator registry that loads and stores indicators from a path containing yaml\nfiles.\n\"\"\"\n\nfrom collections import defaultdict\nfrom os import listdir\nfrom os.path import isfile, join\nimport yaml\nfrom yaml.scanner import ScannerError\n\nfrom .model import Indicator\n\n\nclass RegistryException(Exception):\n pass\n\n\nclass Registry(object):\n def __init__(self, context):\n self._context = context\n self._indicators = defaultdict(dict)\n\n def load_from_path(self, path):\n try:\n file_paths = [\n join(path, file_name) for file_name in listdir(path)\n if isfile(join(path, file_name))]\n except OSError as e:\n raise RegistryException(e)\n for file_path in file_paths:\n try:\n self._load_from_file_path(file_path)\n except (TypeError, ValueError) as e:\n self._indicators = defaultdict(dict)\n raise RegistryException(\n \"File '{0}' contains malformed indicators ({1})\".format(\n file_path, e))\n except ScannerError as e:\n self._indicators = defaultdict(dict)\n raise RegistryException(\n \"File '{0}' is not a valid yaml file ({1})\".format(\n file_path, e))\n\n def _add_indicator_from_dict(self, indicator_dict):\n try:\n indicator = Indicator.instance_from_dict(\n indicator_dict, self._context)\n self._indicators[indicator.service][indicator.id] = indicator\n except (TypeError, ValueError) as e:\n raise e\n\n def _load_from_file_path(self, path):\n with open(path) as source_file:\n for document in yaml.load_all(source_file):\n self._add_indicator_from_dict(document)\n\n def __getitem__(self, item):\n return self._indicators[item]\n\n def __iter__(self):\n return (key for key in self._indicators)\n\n def values(self):\n return self._indicators.values()\n","sub_path":"genweb/core/indicators/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"535377742","text":"# -*- coding: utf-8 -*-\n#############################################################################\n## 网络类,接收网络数据,即接收图片数组,其实为cv2传来的 nuppy.array / bgr array\n## 将收到的 array 数据放在类属性 raw_img_arr 里,\n#############################################################################\nimport cv2\nimport struct\nimport numpy as np\nfrom datetime import datetime\nimport socket\nimport time\nimport threading\nimport os\n\nclass Network(object):\n # 类属性\n ImgName = \"\"\n raw_img_arr = 0\n\n def __init__(self):\n self.address = ((\"127.0.0.1\",8080)) # 默认地址\n self.savePath = \"data/receive\" # 存储图片的路径\n\n def set_address(self,_address):\n self.address = _address\n\n def create_connnect(self,_address):\n # 创建连接\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP\n s.connect(_address)\n self.socketObj = s\n print(\"创建连接,地址为:\",self.address)\n\n def send_a_message(self):\n try:\n self.socketObj.send(b\"ok\")\n print(\"发送成功!\")\n except Exception as e:\n print(\"出现异常,即将关闭连接:\",e)\n self.close_connection()\n\n def receice_a_message(self):\n # 连接网络\n s = self.socketObj\n try:\n bin_stream = s.recv(4)\n stream_len = int.from_bytes(bin_stream,byteorder='big')\n 
print(\"接收字节数: \",stream_len)\n bin_stream=b''\n while len(bin_stream) != stream_len:\n bin_stream += s.recv(2048)\n # 解析二进制流\n w = np.frombuffer(bin_stream, dtype=np.uint32, count=1, offset=0)[0]\n h = np.frombuffer(bin_stream, dtype=np.uint32, count=1, offset=4)[0]\n img_arr = np.frombuffer(bin_stream, dtype=np.uint8, count=w*h*3, offset=8)\n img_arr.resize(w,h,3)\n Network.raw_img_arr = img_arr\n # img_name = datetime.now().date().strftime(\"%y%m%d\")+datetime.now().time().strftime(\"_%H%M%S\")+\".jpg\"\n # img_path = os.path.join(self.savePath,img_name)\n # cv2.imwrite(img_path,img_arr)\n # Network.ImgName = img_path\n print(\"接收图片大小: \",img_arr.shape)\n print(\"接收图片时间: \",str(datetime.now()))\n except Exception:\n print(\"出现异常,即将关闭连接:\",Exception)\n self.close_connection()\n\n def close_connection(self):\n self.socketObj.close()\n\n # def connection(self):\n # # 开启多线程接接收数据\n # try:\n # t1 = threading.Thread(target=self.__connect_to_internet,args=(self.address,))\n # t1.start()\n # except Exception as e:\n # print(\"连接错误,错误原因:\\n\",e)\n\nif __name__ == '__main__':\n net = Network()\n # net.connection()\n net.create_connnect(net.address)\n net.send_a_message()\n net.receice_a_message()\n time.sleep(2)\n net.send_a_message()\n net.receice_a_message()\n net.close_connection()\n","sub_path":"src/monitor/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"317197555","text":"\"\"\"\"Angelo Ortiz Vega\"\"\"\n#Entradas: Numero Entero\n#Salidas: Sumatoria del Numero\n#Restricciones: El Numero debe de ser Entero mayor a 1\n\n#Ejemplo de Corrida: 5: 1+2+3+4+5 = 15\n\n\ndef sumatoria(num):\n if isinstance(num, int) and (num > 0):\n print(\"La sumatoria del Numero y sus anteriores es: \", sumatoria_aux(num))\n else:\n return -1\n\ndef sumatoria_aux(num):\n #print(num, end=\"+\")\n if (num == 0):\n #print(num, end=\"=\")\n return 0\n else:\n return num + sumatoria_aux(num - 1)\n\n\n\n\n\n\n","sub_path":"a.Recursión de Pila/11.sumatoria.py","file_name":"11.sumatoria.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"567561357","text":"from collections import deque\n\nn = int(input())\ngrafo = []\nfor x in range(n+1):\n grafo.append([])\n\nfor x in range(n-1):\n a,b = map(int,input().split())\n grafo[a].append(b)\n grafo[b].append(a)\n\ndef bfs(grafo,s):\n q = deque()\n dist = []\n for inf in range(n+5):\n dist.append(-1)\n q.insert(0,s)\n dist[s] = 0\n distance = -1\n global maior \n maior = 0\n while len(q) != 0:\n popado = q.pop()\n distance += 1\n for adj in grafo[popado]:\n if dist[adj] == -1:\n q.insert(0,adj)\n dist[adj] = dist[popado] + 1\n maior = popado\n return dist\n\n\nbfs(grafo,1)\ncaminhos = bfs(grafo,maior)\nmaxdis = 0\n\nfor i in range(len(caminhos)):\n if caminhos[i] > maxdis:\n maxdis = caminhos[i]\n\n\nprint(maxdis)\n","sub_path":"diametro_de_uma_arvore.py","file_name":"diametro_de_uma_arvore.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247181140","text":"#!/usr/bin/env python3\nimport sys\nimport numpy as np\n\nfin1=open(\"../grid_sym_Al.dat\",\"r\")\nfin1.readline()\nfin1.readline()\nL=[];N=[]\nfor i in range(3):\n line=fin1.readline().split()\n L.append(float(line[i]))\nfor i in range(4):\n fin1.readline()\nline=fin1.readline().split()\nfor i in 
range(3):\n N.append(int(line[i]))\ndata1=[]\nfor line in fin1:\n ll=line.split()\n for i in range(len(ll)):\n data1.append(float(ll[i]))\nfin1.close()\n\nfin2=open(\"../grid_sym_Sm.dat\",\"r\")\ndata2=[]\nfor i in range(10):\n fin2.readline()\nfor line in fin2:\n ll=line.split()\n for i in range(len(ll)):\n data2.append(float(ll[i]))\nfin2.close()\n\nprint(\"Data Read!\")\nR=[[[0 for k in range(N[2])] for j in range(N[1])] for i in range(N[0])]\nfor k in range(N[2]):\n for j in range(N[1]):\n for i in range(N[0]):\n np = k*N[1]*N[2] + j*N[2] + i\n R[i][j][k]=data1[np]+data2[np]\n\n\nfin=open(\"../grid.dat\",\"r\")\ndata=[]\nfor i in range(10):\n fin.readline()\nfor line in fin:\n ll=line.split()\n for i in range(len(ll)):\n data.append(float(ll[i]))\nfin.close()\nR_real=[[[0 for k in range(N[2])] for j in range(N[1])] for i in range(N[0])]\nR_diff=[[[0 for k in range(N[2])] for j in range(N[1])] for i in range(N[0])]\nfor k in range(N[2]):\n for j in range(N[1]):\n for i in range(N[0]):\n np = k*N[1]*N[2] + j*N[2] + i\n R_real[i][j][k]=data[np]\n R_diff[i][j][k]=R_real[i][j][k]-R[i][j][k]\n\nfout=open(\"grid_diff.dat\",\"w+\")\nprint(\"CHG\",file=fout)\nprint(1.0,file=fout)\nprint(L[0],0,0,file=fout)\nprint(0,L[0],0,file=fout)\nprint(0,0,L[0],file=fout)\nprint(\"1\\nDirect\\n0.5 0.5 0.5\",file=fout)\nprint(\" \",file=fout)\nprint(N[0],N[1],N[2],file=fout)\nnline=5\ncount=0\nfor iz in range(N[2]):\n for iy in range(N[1]):\n for ix in range(N[0]):\n print(R_diff[ix][iy][iz],end=\" \",file=fout)\n count+=1\n if ( count%nline ==0):\n print(\" \",file=fout)\nfout.close()\n\n\nfout=open(\"x_proj.dat\",\"w+\")\nfor i in range(N[0]):\n Rsum=0\n for j in range(N[1]):\n for k in range(N[2]):\n Rsum += R_diff[i][j][k]\n print(i*L[0]/N[0],Rsum*L[1]*L[2]*L[0]/N[0]/N[1]/N[2],file=fout)\nfout.close()\n\nfout=open(\"y_proj.dat\",\"w+\")\nfor j in range(N[1]):\n Rsum=0\n for i in range(N[0]):\n for k in range(N[2]):\n Rsum += R_diff[i][j][k]\n print(j*L[1]/N[1],Rsum*L[0]*L[2]*L[1]/N[1]/N[0]/N[2],file=fout)\nfout.close()\n\nfout=open(\"z_proj.dat\",\"w+\")\nfor k in range(N[2]):\n Rsum=0\n for i in range(N[0]):\n for j in range(N[1]):\n Rsum += R_diff[i][j][k]\n print(k*L[2]/N[2],Rsum*L[0]*L[1]*L[2]/N[2]/N[0]/N[1],file=fout)\nfout.close()\n","sub_path":"diff_fit_superposed/grid_sum.py","file_name":"grid_sum.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"59704904","text":"import tensorflow as tf\r\nfrom os import sys\r\n\r\nsys.path.insert(0,\"C:/Users/bbk/Documents/tensorflow/programme_mnist/functions\")\r\nfrom apply_activation import apply_activation\r\n\r\n\r\ndef create_layer(nb_of_perceptrons, activation_function, input_layer):\r\n\tnb_of_input_nodes = input_layer.get_shape().as_list()[1]\r\n\tweights = tf.Variable(tf.zeros([nb_of_input_nodes,nb_of_perceptrons]))\r\n\tbias = tf.Variable(tf.zeros([nb_of_perceptrons]))\r\n\tlinear_combination = tf.matmul(input_layer,weights) + bias\r\n\ty = apply_activation(activation_function = activation_function, linear_combination = linear_combination)\r\n\treturn(y)\r\n\r\n","sub_path":"tensorflow/programme_mnist/functions/create_layer.py","file_name":"create_layer.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"562462820","text":"#!/usr/bin/python3\n\nimport inspect\nimport os\nimport sys\n\nfrom optparse import OptionParser\n\n\ndef error(message, exit_code=1):\n 
sys.stderr.write('pyvim: %s\\n' % message)\n    sys.exit(exit_code)\n\n\ndef get_object(path):\n    parts = path.split('.')\n\n    module = parts[0]\n    remaining = parts[1:]\n\n    if not remaining:\n        return __import__(module)\n\n    module = __import__(module, fromlist=[remaining[0]])\n\n    obj = getattr(module, remaining.pop(0))\n\n    while remaining:\n        obj = getattr(obj, remaining.pop(0))\n    return obj\n\n\ndef import_path_to_file_path(path):\n    try:\n        obj = get_object(path)\n    except (AttributeError, ImportError) as e:\n        error('error opening %r: %s' % (path, str(e)))\n\n    try:\n        _source, lineno = inspect.findsource(obj)\n    except TypeError:\n        try:\n            _source, lineno = inspect.findsource(obj.__class__)\n        except TypeError as e:\n            error('error opening %r: %s' % (path, str(e)))\n    except IOError as e:\n        # allow opening an empty module (e.g. __init__.py)\n        if inspect.ismodule(obj):\n            sourcefile = inspect.getsourcefile(obj)\n            if sourcefile:\n                lineno = 0\n                e = None\n\n        if e:\n            error('error opening %r: %s' % (path, str(e)))\n\n    try:\n        path = inspect.getsourcefile(obj)\n    except TypeError:\n        try:\n            path = inspect.getsourcefile(obj.__class__)\n        except TypeError as e:\n            error('error opening %r: %s' % (path, str(e)))\n    except IOError:\n        path = obj.__file__\n\n    if lineno:\n        additional_args = ['+%d' % (lineno+1)]\n    else:\n        additional_args = []\n\n    dirname, basename = os.path.split(path)\n\n    return dirname, basename, additional_args\n\n\ndef get_editor():\n    editor = os.environ.get('EDITOR', None)\n    if editor:\n        return editor\n\n    editor = '/usr/bin/xdg-open'\n    if os.path.isfile(editor):\n        return editor\n\n    error(\"$EDITOR not configured and no suitable fallback found.\")\n\n\ndef main():\n    parser = OptionParser()\n    parser.add_option(\n        \"-d\", \"--print-dir\",\n        dest=\"print_dir\",\n        action='store_true',\n        help=\"Print directory to standard out rather than opening the file.\")\n\n    parser.add_option(\n        \"-l\", \"--print-path\",\n        dest=\"print_path\",\n        action='store_true',\n        help=\"Print absolute file path to standard out rather than opening the file.\")\n\n    parser.add_option(\n        \"-a\", \"--absolute-open\",\n        dest=\"absolute_open\",\n        action='store_true',\n        help=\"Open the file using the absolute path rather than changing to the parent directory.\")\n\n    parser.add_option(\n        \"-v\", \"--verbose\",\n        dest=\"verbose\",\n        action='store_true',\n        help=\"Display commands before they're run.\")\n\n    options, args = parser.parse_args()\n\n    if not args:\n        parser.error(\"No import path supplied\")\n\n    dirname, basename, additional_args = import_path_to_file_path(args[0])\n\n    if options.print_dir:\n        print(dirname)\n        return\n\n    if options.print_path:\n        print(os.path.join(dirname, basename))\n        return\n\n    editor = get_editor()\n\n    # this allows you to do :e . 
in vim to see the rest of the source code.\n    if options.absolute_open:\n        path = os.path.join(dirname, basename)\n    else:\n        if options.verbose:\n            print('cd %s' % dirname)\n        os.chdir(dirname)\n        path = basename\n\n    command = [editor, path] + additional_args + args[1:]\n\n    if options.verbose:\n        print(' '.join(command))\n    os.execvp(editor, command)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"bin/pyopen.py","file_name":"pyopen.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"538569643","text":"from __future__ import absolute_import\n\nfrom celery import Celery\nfrom celery import task\n\ncelery = Celery('tasks', backend='amqp', )\ncelery.config_from_object('celeryconfig')\n\n# Optional configuration, see the application user guide.\ncelery.conf.update(\n    CELERY_TASK_RESULT_EXPIRES=3600,\n)\n\n\n# Tasks for competitions\n# Currently stubs to test functionality\n@task\ndef validate_submission(url):\n    \"\"\"\n    Will validate the format of a submission.\n    \"\"\"\n    return url\n\n@task\ndef evaluate_submission(url):\n    # evaluate(inputdir, standard, outputdir)\n    return url\n\n\n# For starting the process\nif __name__ == '__main__':\n    celery.start()\n","sub_path":"codalab/compsrv/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"595546274","text":"class Solution(object):\n    def mySqrt(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        \"\"\"\n        Approach using binary search\n        sqrt(10) => consider only n/2 elements [ 1,2,3,4,5]\n        now do binary search in these elements with target as n, we need to find lower bound in sorted array\n        if square doesn't exist\n        \n        \"\"\"\n        if x == 1:\n            return 1\n        \n        start, end = 1, x //2\n        \n        # modified lower bound code \n        while start < end:\n            \n            mid = ( start + end + 1) // 2\n            \n            if x >= mid * mid: # if mid is lower or equal to target just mark it to start as it might be our solution\n                start = mid\n            else:\n                end = mid -1 # if mid is larger than target, keep going to left to meet the target\n        return end","sub_path":"sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"423471193","text":"#! 
/usr/bin/python\r\n\r\n\"\"\"\r\n Program: ch07_05_pickle_read.py\r\n Function: An exploration of reading a pickle file.\r\n\"\"\"\r\n\r\n\r\n\r\ntry:\r\n import pickle as pickle\r\nexcept:\r\n import pickle\r\nimport sys\r\nimport pprint\r\n\r\ntry:\r\n file_pickle = open( \"pickle_1.txt\", \"rb\" )\r\nexcept:\r\n print(\"Could not open pickle_1.txt for binary read\", \\\r\n file=sys.stderr)\r\n exit(1)\r\n\r\nlist_out = []\r\nwhile True:\r\n try:\r\n list_out.append( pickle.load(file_pickle) )\r\n except:\r\n break\r\n\r\n\r\nfor i in range(0, len(list_out)):\r\n pprint.pprint(list_out[i])\r\n# print( list_out[i] )\r\n\r\nif list_out[-1]['d'] is list_out[-1]['e']:\r\n\tprint(\"d is e\")\r\n\r\nif list_out[-1]['d'] == list_out[-1]['e']:\r\n\tprint(\"d == e\")\r\nprint(\"That's all folks!\")\r\n\r\n \r\n \r\n","sub_path":"Ch08_io/Pickle/ch07_05_pickle_read.py","file_name":"ch07_05_pickle_read.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"129548170","text":"import os\nimport io\nimport re\nimport sys\nimport subprocess\nfrom threading import Thread\n\n# compatibility with python 2/3\ntry:\n from queue import Queue, Empty # python 3\n unicode = str\nexcept ImportError:\n from Queue import Queue, Empty # python 2\n\nclass ProcessExecution(object):\n \"\"\"\n Executes a process.\n \"\"\"\n\n # regex: any alpha numeric, underscore and dash characters are allowed.\n __validateShellArgRegex = re.compile(r\"^[\\w_-]*$\")\n\n def __init__(self, args, env=None, shell=True, cwd=None, stdout=sys.stdout, stderr=subprocess.STDOUT):\n \"\"\"\n Create a ProcessExecution object.\n\n The constructor signature tries mimic the features available by subprocess.Popen.\n \"\"\"\n if env is None:\n env = dict(os.environ)\n\n self.__stdoutContent = ''\n self.__stderrContent = ''\n self.__shell = shell\n self.__cwd = cwd\n\n self.__setArgs(args)\n self.__setEnv(env)\n self.__setStdout(stdout)\n self.__setStderr(stderr)\n self.__createProcess()\n\n def isShell(self):\n \"\"\"\n Return the process should run through a shell session.\n \"\"\"\n return self.__shell\n\n def env(self):\n \"\"\"\n Return the environment for the process.\n \"\"\"\n return self.__env\n\n def cwd(self):\n \"\"\"\n Return the current working directory used to launch the process.\n \"\"\"\n return self.__cwd\n\n def args(self):\n \"\"\"\n Return a list of arguments used by the process.\n \"\"\"\n return self.__args\n\n def stdout(self):\n \"\"\"\n Return the stdout stream.\n \"\"\"\n return self.__stdout\n\n def stderr(self):\n \"\"\"\n Return the stderr stream.\n \"\"\"\n return self.__stderr\n\n def stdoutContent(self):\n \"\"\"\n Return a string containing stdout messages.\n \"\"\"\n return self.__stdoutContent\n\n def stderrContent(self):\n \"\"\"\n Return a string containing stderr messages.\n \"\"\"\n return self.__stderrContent\n\n def executionSuccess(self):\n \"\"\"\n Return a boolean if the execution has been successfully.\n \"\"\"\n return self.exitStatus() == 0\n\n def exitStatus(self):\n \"\"\"\n Return the exist status about the process.\n \"\"\"\n return self.__process.returncode\n\n def pid(self):\n \"\"\"\n Return the process id.\n \"\"\"\n return self.__process.pid\n\n def execute(self):\n \"\"\"\n Execute the process.\n \"\"\"\n # stdout queue\n stdoutQueue = Queue()\n stdoutThread = Thread(\n target=self.__readStreamToQueue,\n args=(self.__process.stdout, stdoutQueue)\n )\n stdoutThread.daemon = True # thread dies with the 
program\n stdoutThread.start()\n\n # stderr queue\n stderrQueue = None\n stderrThread = None\n if self.__stderr is not subprocess.STDOUT:\n stderrQueue = Queue()\n stderrThread = Thread(\n target=self.__readStreamToQueue,\n args=(self.__process.stderr, stderrQueue)\n )\n stderrThread.daemon = True # thread dies with the program\n stderrThread.start()\n\n # retrieving value from queue\n while stdoutThread.is_alive() or (stderrThread is not None and stderrThread.is_alive()):\n # stdout\n stdoutValue = self.__queryStreamValueFromQueue(stdoutQueue)\n if stdoutValue is not None:\n # required for python2\n if isinstance(self.__stdout, io.StringIO):\n stdoutValue = unicode(stdoutValue) if sys.version_info[0] == 2 else stdoutValue\n\n self.__stdout.write(stdoutValue)\n self.__stdout.flush()\n self.__stdoutContent += stdoutValue\n\n # stderr\n if stderrQueue is None:\n continue\n\n stderrValue = self.__queryStreamValueFromQueue(stderrQueue)\n if stderrValue is not None:\n # required for python2\n if isinstance(self.__stderr, io.StringIO):\n stderrValue = unicode(stderrValue) if sys.version_info[0] == 2 else stderrValue\n\n self.__stderr.write(stderrValue)\n self.__stderr.flush()\n self.__stderrContent += stderrValue\n\n self.__process.wait()\n\n def __setStdout(self, stream):\n \"\"\"\n Set the stdout stream.\n \"\"\"\n self.__stdout = stream\n\n def __setStderr(self, stream):\n \"\"\"\n Set the stderr stream.\n \"\"\"\n self.__stderr = stream\n\n def __setArgs(self, args):\n \"\"\"\n Set a list of arguments that should be used by the process.\n \"\"\"\n assert isinstance(args, (list, tuple)), \"Invalid args list!\"\n\n self.__args = list(args)\n\n def __setEnv(self, env):\n \"\"\"\n Set the environment for the process.\n \"\"\"\n self.__env = dict(env)\n\n def __createProcess(self):\n \"\"\"\n Create a process that later should be executed through {@link run}.\n \"\"\"\n stderrStream = subprocess.STDOUT if self.__stderr is subprocess.STDOUT else subprocess.PIPE\n\n executableArgs = ' '.join(self.__sanitizeShellArgs(self.args())) if self.isShell() else self.args()\n self.__process = subprocess.Popen(\n executableArgs,\n bufsize=1,\n close_fds='posix' in sys.builtin_module_names,\n stdout=subprocess.PIPE,\n stderr=stderrStream,\n shell=self.isShell(),\n env=self.env(),\n cwd=self.cwd()\n )\n\n @classmethod\n def __readStreamToQueue(cls, outStream, queue):\n \"\"\"\n Read the stream and add its contents to a queue.\n \"\"\"\n for line in iter(outStream.readline, b''):\n queue.put(line)\n outStream.close()\n\n @classmethod\n def __queryStreamValueFromQueue(cls, queue):\n \"\"\"\n Return the stream value from the queue or None in case the queue is empty.\n \"\"\"\n result = None\n try:\n line = queue.get_nowait()\n except Empty:\n pass\n else:\n if not isinstance(line, str):\n line = line.decode(\"utf_8\", errors=\"ignore\")\n result = line\n\n return result\n\n @staticmethod\n def __sanitizeShellArgs(args):\n \"\"\"\n Sanitize shell args by escaping shell special characters.\n \"\"\"\n result = []\n\n for index, arg in enumerate(args):\n arg = str(arg)\n\n # we need to avoid to escape the first argument otherwise, it will be\n # interpreted as string rather than a command.\n if index == 0 or ProcessExecution.__validateShellArgRegex.match(arg):\n result.append(arg)\n else:\n result.append('\"{}\"'.format(arg.replace('\"', '\\\\\"')))\n\n return 
result\n","sub_path":"src/lib/kombi/ProcessExecution.py","file_name":"ProcessExecution.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"525165659","text":"from __future__ import print_function\nimport re\nimport sys\nimport time\n\nfrom btree import BTree\nfrom hashtable import HashTable\n\nbtree = BTree(10)\nhashtable = HashTable()\n\ndef create_btree():\n print('Creating BTree Lexicon, m = 10')\n with open(sys.argv[2], 'r') as dictionary:\n for word in dictionary:\n btree.insert(re.sub('\\W+', '', word).lower())\n\ndef spell_check_btree():\n print('Spell Checking {0}'.format(sys.argv[1]))\n misspelled = 0\n start = time.time()\n with open(sys.argv[1], 'r') as file:\n words = re.sub('\\W+', ' ', file.read()).split(' ')\n for i in range(len(words)):\n if len(words[i]) > 0:\n found = btree.search(words[i].lower().strip())\n if not found:\n misspelled += 1\n end = time.time()\n print('There are {0} misspelled words in {1}'.format(misspelled, sys.argv[1]))\n print('Time Elapsed: {0}'.format(end - start))\n frequent = btree.find_max()\n print('Highest Frequency Word: {0}'.format(frequent.word))\n print('Count: {0}'.format(frequent.count))\n print('')\n\ndef remove_words_btree():\n print('Removing words from BTree Lexicon, m = 10')\n with open(sys.argv[3], 'r') as dictionary:\n for word in dictionary:\n btree.remove(re.sub('\\W+', '', word).lower())\n\ndef create_hashtable():\n print('Creating HashTable Lexicon')\n with open(sys.argv[2], 'r') as dictionary:\n for word in dictionary:\n hashtable.insert(re.sub('\\W+', '', word).lower(), 0)\n\ndef spell_check_hashtable():\n print('Spell Checking {0}'.format(sys.argv[1]))\n misspelled = 0\n start = time.time()\n with open(sys.argv[1], 'r') as file:\n words = re.sub('\\W+', ' ', file.read()).split(' ')\n for i in range(len(words)):\n if len(words[i]) > 0:\n found = hashtable.search(words[i].lower().strip())\n if not found:\n misspelled += 1\n end = time.time()\n print('There are {0} misspelled words in {1}'.format(misspelled, sys.argv[1]))\n print('Time Elapsed: {0}'.format(end - start))\n frequent = hashtable.find_max()\n print('Highest Frequency Word: {0}'.format(frequent.key))\n print('Count: {0}'.format(frequent.value))\n print('')\n\ndef remove_words_hashtable():\n print('Removing words from HashTable Lexicon')\n with open(sys.argv[3], 'r') as dictionary:\n for word in dictionary:\n hashtable.remove(re.sub('\\W+', '', word).lower())\n\nif __name__ == '__main__':\n create_btree()\n spell_check_btree()\n\n create_hashtable()\n spell_check_hashtable()\n\n remove_words_btree()\n spell_check_btree()\n\n remove_words_hashtable()\n hashtable.reset_values()\n spell_check_hashtable()\n","sub_path":"georgetown/datastructures/spellchecker++/spellchecker++.py","file_name":"spellchecker++.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"395819619","text":"import requests\r\nimport base64\r\nimport json\r\nimport sys\r\n\r\nbaseurl = 'http://s0w1.zpdt.local:2050'\r\nurl = baseurl + '/services/GILLJSRV/GetDepartments'\r\nauthstr = 'Basic ' + base64.b64encode(b'MYUSER:xxxxxxxx').decode()\r\nreqhdrs = {\r\n 'Authorization':authstr,\r\n 'Content-Type':'application/json'\r\n}\r\nresponse = requests.post(url,headers=reqhdrs)\r\n\r\nprint('Status code = ',response.status_code)\r\nprint('Returned headers:')\r\nfor name in response.headers:\r\n\tprint(' ',name,': 
',response.headers[name])\r\nprint('Returned data:')\r\njson.dump(response.json(),sys.stdout,sort_keys=False,indent=4);","sub_path":"driveGetDepartments.py","file_name":"driveGetDepartments.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"385897404","text":"from py2gmsh import (Mesh, Entity, Field)\nimport axifem\nimport os\nfrom scipy.optimize import minimize,NonlinearConstraint,differential_evolution\nimport numpy as np\nimport time\nh_innan = 1200\nT_inf_innan = 95\nT_inf_utan = 18\nh_utan = 45\nk = 16\nt_veggur = 0.003\nd = 0.003\na = 0.01\nL = 0.01\nd1 = 0.0005\nd2 = 0.0005\nV_sk = np.pi*(a/2)**2*t_veggur+np.pi*(d/2)**2*(L)\nprint('initial volume: {}'.format(V_sk))\n\ntolerance = 1e-10 #tolerance for volume\n# create Mesh class instance\nmy_mesh = Mesh()\ni = 0\n\ndef objective(x, sign=-1.0):\n my_mesh = Mesh()\n d = x[0]\n a = x[1]\n L = x[2]\n if volume(x)>10*V_sk:\n d1=0.05\n elif volume(x)>2*V_sk:\n d1=0.005\n else:\n d1=0.0005\n \n\n filename = 'my_mesh'\n # create points\n p1 = Entity.Point([0.,0., 0.,d1]) #fyrsti punktur neðri vinstri\n # add point to mesh\n my_mesh.addEntity(p1) \n #create more points\n p2 = Entity.Point([0.,a/2, 0.,d1])#2. punktur efri vinstri\n my_mesh.addEntity(p2)\n p3 = Entity.Point([t_veggur,a/2, 0.,d1])#3. punktur efri hægri\n my_mesh.addEntity(p3)\n\n p4 = Entity.Point([t_veggur, (a-d)/2+d-a/2, 0.,d1])#4. punktur niður frá efri hægri\n my_mesh.addEntity(p4)\n\n p5 = Entity.Point([t_veggur+L,(a-d)/2+d-a/2,0.,d1])#5.punktur endi á ribbu efri\n my_mesh.addEntity(p5)\n p6 = Entity.Point([t_veggur+L,0.,0.,d1])#6. punktur endi á ribbu neðri\n my_mesh.addEntity(p6)\n # create curves\n l1 = Entity.Curve([p1, p2]) #innri bein lína upp\n l2 = Entity.Curve([p2, p3]) # efri hlið einangrun\n l3 = Entity.Curve([p3, p4]) # ytri bein lína upp\n l4 = Entity.Curve([p4, p5]) #ribba bein lína upp\n l5 = Entity.Curve([p5, p6]) #ribba endi\n l6 = Entity.Curve([p6, p1]) #ribba bein lína niðri\n\n my_mesh.addEntities([l1, l2, l3, l4, l5, l6])\n\n\n ll1 = Entity.CurveLoop([l1, l2, l3, l4, l5, l6], mesh=my_mesh)\n\n\n\n s1 = Entity.PlaneSurface([ll1], mesh=my_mesh)\n\n\n\n\n g1 = Entity.PhysicalGroup(name='innri')\n g2 = Entity.PhysicalGroup(name='ytri')\n g3 = Entity.PhysicalGroup(name='ribba')\n g4 = Entity.PhysicalGroup(name='einangrun')\n my_mesh.addEntities([g1, g2, g3, g4])\n g1.addEntities([l1])\n g2.addEntities([l3,l4,l5,l6])\n g4.addEntities([l2])\n g3.addEntities([s1])\n # set max element size\n #my_mesh.Options.Mesh.CharacteristicLengthMax = 0.1\n\n # adding Coherence option\n my_mesh.Coherence = True\n # write the geofile\n #os.system('rm .geo')\n try:\n my_mesh.writeGeo('{}.geo'.format(filename))\n os.system('gmsh {}.geo -2 -o {}.msh'.format(filename,filename))\n except:\n return -0.1\n #os.system('gmsh my_mesh.geo')\n try:\n xu, y, tri, T, V, q = axifem.axiHeatCond('{}.msh'.format(filename), \\\n {'ribba':k}, {'ytri':(h_utan,-h_utan*T_inf_utan),'innri':(h_innan,-h_innan*T_inf_innan),'einangrun':(0,0)})\n print(sign*q['ytri'][1])\n except:\n return -0.1\n return sign*q['ytri'][1]\n\n\ndef volume(x):\n d = x[0]\n a = x[1]\n L = x[2]\n v=np.pi*(a/2)**2*t_veggur+np.pi*(d/2)**2*(L)\n \n print(v)\n return v\n\ndef volume1(x):\n d = x[0]\n a = x[1]\n L = x[2]\n v=np.pi*(a/2)**2*t_veggur+np.pi*(d/2)**2*(L)\n \n print(v)\n #return v-V_sk+tolerance\n return v\n\ndef volume2(x):\n d = x[0]\n a = x[1]\n L = x[2]\n v=np.pi*(a/2)**2*t_veggur+np.pi*(d/2)**2*(L)\n \n print(v)\n 
#return V_sk-v+tolerance\n return V_sk-v+tolerance\n\ndef constraint4(x): #a ekki minna en 2*d\n d = x[0]\n a = x[1]\n return a-2*d\n\n\n\n\n\nbounds = [[0.00025,0.1],[0.,0.1],[0.,0.1]]\n\nx0 = [0.003,0.01,0.01]\ncons=()\n#lcst = LinearConstraint(co,lb,ub)\nfor factor in range(len(bounds)):\n lower, upper = bounds[factor]\n l = {'type': 'ineq',\n 'fun': lambda x, lb=lower, i=factor: x[i] - lb}\n u = {'type': 'ineq',\n 'fun': lambda x, ub=upper, i=factor: ub - x[i]}\n cons+=(l,)\n cons+=(u,)\nconstraint = {'type': 'ineq', 'fun': volume1}\ncons+=(constraint,)\nconstraint = {'type': 'ineq', 'fun': volume2}\ncons+=(constraint,)\nconstraint = {'type': 'ineq','fun': constraint4}\ncons+=(constraint,)\n\n\n\n\n#print(vte)\nprint('initial volume: {}'.format(V_sk))\n#sol = differential_evolution(objective,bounds,constraints = (nlc1,nlc3,nlc4))\nsol = minimize(objective,x0,method='COBYLA', constraints = cons)\na = sol['x'][1]\nprint(a)\n#sol = minimize(objective,x0,method='SLSQP',options={'gtol': 1e-6, 'disp': True}, constraints = cons)\nprint(sol)\n\nprint('initial volume: {}'.format(V_sk))\nx, y, tri, T, V, q = axifem.axiHeatCond('my_mesh.msh', \\\n {'ribba':k}, {'ytri':(h_utan,-h_utan*T_inf_utan),'innri':(h_innan,-h_innan*T_inf_innan),'einangrun':(0,0)})\nmy_mesh = Mesh()\n\n# create points\np1 = Entity.Point([0.,0., 0.,d1]) #fyrsti punktur neðri vinstri\n# add point to mesh\nmy_mesh.addEntity(p1) \n#create more points\np2 = Entity.Point([0.,a/2, 0.,d1])#2. punktur efri vinstri\nmy_mesh.addEntity(p2)\np3 = Entity.Point([t_veggur,a/2, 0.,d1])#3. punktur efri hægri\nmy_mesh.addEntity(p3)\n\np4 = Entity.Point([t_veggur, 0., 0.,d1])#4. punktur niður frá efri hægri\nmy_mesh.addEntity(p4)\n\n# create curves\nl1 = Entity.Curve([p1, p2]) #innri bein lína upp\nl2 = Entity.Curve([p2, p3]) # efri hlið einangrun\nl3 = Entity.Curve([p3, p4]) # ytri bein lína upp\nl4 = Entity.Curve([p4, p1]) #neðri lína\n\nmy_mesh.addEntities([l1, l2, l3, l4])\n\n\nll1 = Entity.CurveLoop([l1, l2, l3, l4], mesh=my_mesh)\n\n\n\ns1 = Entity.PlaneSurface([ll1], mesh=my_mesh)\n\n\n\n\ng1 = Entity.PhysicalGroup(name='innri')\ng2 = Entity.PhysicalGroup(name='ytri')\ng3 = Entity.PhysicalGroup(name='ribba')\ng4 = Entity.PhysicalGroup(name='einangrun')\nmy_mesh.addEntities([g1, g2, g3, g4])\ng1.addEntities([l1])\ng2.addEntities([l3,l4])\ng4.addEntities([l2])\ng3.addEntities([s1])\n# set max element size\n#my_mesh.Options.Mesh.CharacteristicLengthMax = 0.1\n\n# adding Coherence option\nmy_mesh.Coherence = True\n# write the geofile\nmy_mesh.writeGeo('my_mesh.geo')\nos.system('gmsh my_mesh.geo -2 -o my_mesh.msh')\n#os.system('gmsh my_mesh.geo')\n\nx1, y1, tri1, T1, V1, q1 = axifem.axiHeatCond('my_mesh.msh', \\\n {'ribba':k}, {'ytri':(h_utan,-h_utan*T_inf_utan),'innri':(h_innan,-h_innan*T_inf_innan),'einangrun':(0,0)})\n\nvirkni = q['ytri'][1]/q1['ytri'][1]\n\nfrom matplotlib.pyplot import *\nprint('Ribbuvirkni {}:'.format(virkni))\nprint('Rúmmál {}'.format(V['ribba']))\nprint('Varmaflutningur: {:g}'.format(q['ytri'][1]))\nprint('Hámarkshitastig: {:g}'.format(max(T)))\nprint('Lágmarkshitastig: {:g}'.format(min(T)))\nfigure(figsize=(16,3))\ntricontourf(x,y,tri,T,20)\ntricontourf(x,-y,tri,T,20)\ncolorbar()\n\ntext(0.005,0.003,'Varmaflutningur: {:g}W'.format(q['ytri'][1]))\ntext(0.005,0.004,'Ribbuvirkni: {:g}'.format(virkni))\naxis('equal')\ntitle('Bestuð ribba')\nxlabel('Lengd [m]')\nylabel('Hæð [m]')\ntext(0.02,0.005,'Hitastig í 
'+r'$^{\\circ}$'+'C')\nshow()\n\n\n","sub_path":"varmaflutningsfraedi/tolulegt_verkefni/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"394920093","text":"import os\nimport time\nimport math\nimport curses\n\n\"\"\"\n _ _ _ _ \n __ _ _ __ __ _ _ __ | |__ (_) ___ __ _| | __ _____ _ __ ___(_) ___ _ __ \n / _` | '__/ _` | '_ \\| '_ \\| |/ __/ _` | | \\ \\ / / _ \\ '__/ __| |/ _ \\| '_ \\ \n| (_| | | | (_| | |_) | | | | | (_| (_| | | \\ V / __/ | \\__ \\ | (_) | | | |\n \\__, |_| \\__,_| .__/|_| |_|_|\\___\\__,_|_| \\_/ \\___|_| |___/_|\\___/|_| |_|\n |___/ |_| \n\n\"\"\"\n\n\nclass Colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n BLACKBG = '\\33[40m'\n REDBG = '\\33[41m'\n GREENBG = '\\33[42m'\n YELLOWBG = '\\33[43m'\n BLUEBG = '\\33[44m'\n VIOLETBG = '\\33[45m'\n BEIGEBG = '\\33[46m'\n WHITEBG = '\\33[47m'\n\n GREYBG = '\\33[100m'\n REDBG2 = '\\33[101m'\n GREENBG2 = '\\33[102m'\n YELLOWBG2 = '\\33[103m'\n BLUEBG2 = '\\33[104m'\n VIOLETBG2 = '\\33[105m'\n BEIGEBG2 = '\\33[106m'\n WHITEBG2 = '\\33[107m'\n\n\nWALL = Colors.REDBG + ' ' + Colors.ENDC\nSTART = Colors.WHITEBG + ' ' + Colors.ENDC\nEND = Colors.WHITEBG + ' ' + Colors.ENDC\n\n\nclass Node:\n def __init__(self, x, y, maze, pred=None, cost=0):\n self.x = x\n self.y = y\n self.pred = pred\n self.cost = cost\n self.costN = cost + 1\n\n self.conns = []\n\n if maze[self.x - 1][self.y] != '#':\n self.conns.append((self.x - 1, self.y))\n if maze[self.x][self.y+1] != '#':\n self.conns.append((self.x, self.y + 1))\n if maze[self.x + 1][self.y] != '#':\n self.conns.append((self.x + 1, self.y))\n if maze[self.x][self.y - 1] != '#':\n self.conns.append((self.x, self.y - 1))\n\n self.len = len(self.conns)\n\n def __eq__(self, o):\n return o.x == self.x and o.y == self.y\n\n def __lt__(self, other):\n return self.cost < other.cost\n\n def __le__(self, other):\n return self.cost <= other.cost\n\n def get_next(self, maze):\n if self.len:\n self.len -= 1\n x, y = self.conns.pop()\n return Node(x, y, maze, pred=self, cost=self.costN)\n\n\ndef setup():\n stdscr = curses.initscr()\n curses.start_color()\n curses.noecho()\n curses.cbreak()\n stdscr.keypad(1)\n curses.curs_set(0)\n return stdscr\n\n\ndef main(maze, start_pos, end_pos, loop_delay, loop_back_delay=0.005):\n stdscr = setup()\n MAX_X, MAX_Y = stdscr.getmaxyx()\n main_window = curses.newwin(MAX_X, MAX_Y, 0, 0)\n\n next_node = Node(start_pos[0], start_pos[1], maze)\n start_pos = next_node\n sortedList = [next_node, Node(-1, -1, maze, cost=math.inf)]\n\n # TODO: move to setup and define a setup for all curses programmes\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_RED)\n curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_GREEN)\n curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_CYAN)\n curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLUE)\n curses.init_pair(5, curses.COLOR_GREEN, curses.COLOR_GREEN)\n\n for i_line in range(0, len(maze)):\n for i_char in range(0, len(maze[i_line])):\n if maze[i_line][i_char] == '#':\n main_window.addstr(i_line, i_char, ' ', curses.color_pair(1))\n elif maze[i_line][i_char] == 'S':\n main_window.addstr(i_line, i_char, ' ', curses.color_pair(2))\n elif maze[i_line][i_char] == 'E':\n main_window.addstr(i_line, i_char, ' ', curses.color_pair(2))\n\n main_window.refresh()\n\n def 
printList():\n str_list = \"[\"\n for node in sortedList:\n str_list += str(node) + \",\"\n str_list = str_list[:-1] + \"]\"\n print(str_list)\n\n def printMaze():\n for line in maze:\n line_str = \"\"\n for char in line:\n if char == '#':\n line_str += WALL\n elif char == 'S':\n line_str += START\n elif char == 'E':\n line_str += END\n else:\n line_str += char\n print(line_str)\n\n def insert_node(new_node: Node):\n i = len(sortedList)//2\n leng = len(sortedList)\n for n in range(0, i):\n if new_node.cost < sortedList[i].cost:\n if sortedList[i-1].cost <= new_node.cost:\n break\n else:\n i = i // 2\n else:\n if new_node.cost <= sortedList[i+1].cost:\n i += 1\n break\n else:\n i += (leng - i) // 2\n\n sortedList.insert(i, new_node)\n\n #os.system(\"clear\")\n #printMaze()\n\n main_window.getch()\n time.sleep(1)\n\n start = time.time()\n unique_dic = {}\n while maze[next_node.x][next_node.y] != 'E':\n next_node = None\n\n i = 1\n while i < len(sortedList) - 1:\n if not sortedList[i].len:\n main_window.addstr(sortedList[i].x, sortedList[i].y, ' ', curses.color_pair(4))\n del sortedList[i]\n else:\n i += 1\n\n i = 0\n while next_node is None:\n next_node = sortedList[i].get_next(maze)\n i += 1\n\n old_n = unique_dic.get((next_node.x, next_node.y))\n if old_n:\n if next_node.cost < old_n.cost:\n sortedList.remove(next_node)\n insert_node(next_node)\n unique_dic[(next_node.x, next_node.y)] = next_node\n else:\n insert_node(next_node)\n unique_dic[(next_node.x, next_node.y)] = next_node\n\n if maze[next_node.x][next_node.y] != 'E' and maze[next_node.x][next_node.y] != 'S':\n if next_node.len and not old_n:\n main_window.addstr(next_node.x, next_node.y, ' ', curses.color_pair(3))\n main_window.refresh()\n\n time.sleep(loop_delay)\n\n end_time = time.time()\n length = 0\n while next_node != start_pos:\n length += 1\n next_node = next_node.pred\n main_window.addstr(next_node.x, next_node.y, ' ', curses.color_pair(5))\n main_window.refresh()\n time.sleep(loop_back_delay)\n\n main_window.getch()\n time.sleep(2)\n curses.endwin()\n print(\"solved maze in \" + str(end_time - start) + \" seconds: \" + str(length) + \" length\")\n","sub_path":"solver/curses/solverIterativ_curses.py","file_name":"solverIterativ_curses.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"401058267","text":"import numpy as np\nfrom wk5_GMM_CNN import spectrum_crop\nfrom wk5_GMM_CNN import load_data\nfrom wk5_GMM_CNN import cepstrum_crop\n\n\ndef feature_extractor_KNN(train_dict, test_dict, batch_num):\n keys = ['0', '1', '2']\n n_class = len(keys)\n test_ys = np.empty((0, n_class))\n test_xs = []\n train_out_dict = {'0': None, '1': None, '2': None}\n for key in keys:\n xs = train_dict[key]\n txs = test_dict[key]\n c = int(key)\n label = np.zeros(n_class)\n label[c] = 1\n\n train_xs = spectrum_crop(xs, batch_num, n_total=0)\n # train_xs = cepstrum_crop(xs, batch_num)\n\n train_xs = np.array(train_xs)\n\n temp_test_xs = spectrum_crop(txs, batch_num, n_total=0)\n # temp_test_xs = cepstrum_crop(txs, batch_num)\n\n train_out_dict[key] = train_xs\n temp_test_ys = np.tile(label, (len(temp_test_xs), 1))\n test_xs += temp_test_xs\n test_ys = np.vstack((test_ys, temp_test_ys))\n test_xs = np.array(test_xs)\n return train_out_dict, test_xs, test_ys\n\n\ndef knnro(train_xs, train_ys, n_class, txs):\n k = 20\n reject_rate = 15\n reject_label = n_class\n inner_product = np.matmul(txs, np.transpose(train_xs))\n product_index = 
np.argsort(-inner_product) # 降序排列\n product_index = product_index[0][:k]\n nearest_label = [train_ys[i] for i in product_index]\n count_list = [nearest_label.count(j) for j in range(n_class)]\n count_arr = np.array(count_list)\n max_index = np.argmax(count_arr)\n predict = reject_label\n if count_arr[max_index] >= reject_rate:\n predict = max_index\n return predict\n\n\ndef train_jdknnro(train_dict_in, test_dict, batch_num=20):\n n_class = len(train_dict_in)\n train_dict, test_xs, test_ys = feature_extractor_KNN(train_dict_in, test_dict, batch_num)\n\n # 取训练数据中类别数最少的数量作为训练集的size\n size_list = [train_dict[key].shape[0] for key in train_dict]\n size = min(size_list)\n print(size)\n size = 1000\n # 构造训练集\n feature_size = test_xs.shape[1]\n xs = np.empty((0, feature_size))\n xs_y = []\n for key in train_dict:\n c = int(key)\n temp_xs = train_dict[key]\n np.random.shuffle(temp_xs)\n ref_txs = temp_xs[0]\n inner_product = np.matmul(ref_txs, np.transpose(temp_xs))\n product_index = np.argsort(-inner_product) # 降序排列\n selected_xs = [temp_xs[product_index[i]] for i in range(0, temp_xs.shape[0], 10)]\n selected_xs = np.array(selected_xs)\n np.random.shuffle(selected_xs)\n xs = np.vstack((xs, selected_xs[:size]))\n # xs = np.vstack((xs, temp_xs[:size]))\n xs_y += ([c] * size)\n\n sample_num = test_xs.shape[0]\n knnro_list = []\n for i in range(sample_num):\n txs = test_xs[i]\n txs = np.reshape(txs, (1, feature_size))\n knnro_result = knnro(xs, xs_y, n_class, txs)\n knnro_list.append(knnro_result)\n reject_num = 0\n total = 0\n correct = 0\n confusion_mat = np.zeros((n_class,n_class))\n for i in range(batch_num, test_xs.shape[0]):\n current_knnro = knnro_list[i-20:i]\n count_list = [current_knnro.count(j) for j in range(n_class+1)]\n count_arr = np.array(count_list)\n valid_num = sum(count_list[:n_class])\n sorted_index = np.argsort(-count_arr)\n ground = np.argmax(test_ys[i])\n # predict = n_class\n if sorted_index[0] != n_class:\n predict = sorted_index[0]\n elif count_arr[sorted_index[1]] == 0\\\n or count_arr[sorted_index[1]]/valid_num < 0.5: # batch拒识别率\n reject_num += 1\n continue\n else:\n predict = sorted_index[1]\n\n if predict != n_class and predict == ground:\n correct += 1\n total += 1\n confusion_mat[ground, predict] += 1\n\n print('reject num:', reject_num)\n print('recog num:', total)\n print('GMM test accuracy: ', round(correct / total, 3))\n print(confusion_mat)\n total_sample = np.sum(confusion_mat, 1)\n acc_list = []\n for i in range(0, n_class):\n acc = confusion_mat[i, i] / total_sample[i]\n acc_list.append(acc)\n print('label ', i, 'acc = ', acc)\n return acc_list\n\n\nif __name__ == '__main__':\n batch_num = 20\n n_round = 50\n acc_arr_knn = np.empty((0, 3))\n # n_total = 2000\n #\n dict = {'0': '', '1': '', '2': ''}\n # dict[\"0\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet_wk3/beakedwhale\"\n # dict[\"1\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet_wk3/pilot\"\n # dict[\"2\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet_wk3/rissos\"\n\n # dict[\"0\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet12_filtered/Melon\"\n # dict[\"1\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet12_filtered/Spinner\"\n # dict[\"2\"] = \"/home/fish/ROBB/CNN_click/click/CNNDet12_filtered/Tt\"\n\n dict[\"0\"] = \"/home/fish/ROBB/CNN_click/click/Xiamen/bottlenose\"\n dict[\"1\"] = \"/home/fish/ROBB/CNN_click/click/Xiamen/chinesewhite\"\n dict[\"2\"] = \"/home/fish/ROBB/CNN_click/click/Xiamen/Neomeris\"\n\n print(dict)\n for i in range(n_round):\n print('=================round %d=================' % i)\n 
train_dict, test_dict = load_data(dict)\n print('=========================JDKNNRO %d===========================' % i)\n acc_knn = train_jdknnro(train_dict, test_dict, batch_num=batch_num)\n acc_arr_knn = np.vstack((acc_arr_knn, acc_knn))\n\n print('knn:')\n acc_mean_knn = np.mean(acc_arr_knn, 0)\n acc_std_knn = np.std(acc_arr_knn, 0)\n # acc_arr /= n_round\n print(acc_mean_knn)\n print(acc_std_knn)","sub_path":"JDKNNRO.py","file_name":"JDKNNRO.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"416007561","text":"from bs4 import BeautifulSoup\r\nfrom bs4.element import Comment\r\nimport urllib.request\r\nimport re\r\n\r\n\r\n# This code will help to parse out the plain text from the html https://stackoverflow.com/questions/1936466/beautifulsoup-grab-visible-webpage-text\r\ndef vis_tag(element):\r\n #discard invisible html because it does not matter to this project\r\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\r\n return False\r\n #return visible\r\n if isinstance(element, Comment):\r\n return False\r\n return True\r\n\r\ndef html_to_text(body):\r\n #get the body of html\r\n soup = BeautifulSoup(body, 'html.parser')\r\n #find text\r\n texts = soup.findAll(text=True)\r\n #apply above filter to html\r\n visible_texts = filter(vis_tag, texts) \r\n return u\" \".join(t.strip() for t in visible_texts)\r\n\r\ndef text_from_cards(body):\r\n soup = BeautifulSoup(body, 'html.parser')\r\n cards = soup.findAll(\"div\", {\"class\": \"card\"})\r\n #texts = soup.findAll(text=True)\r\n visible_texts = filter(vis_tag, cards) \r\n return u\" \".join(t.strip() for t in visible_texts)\r\n\r\n\r\n\r\ndef SafeQuestions(listOfFlags,Questions):\r\n for word in listOfFlags:\r\n if word in Questions:#Not safe!\r\n return False\r\n return True #No flag word found\r\n\r\n# followed tutorial to find this. 
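# Alternative sketch, not part of the original script: the same user-agent
# spoofing can be done with urllib.request.Request instead of subclassing
# FancyURLopener (which newer Python 3 releases mark as deprecated). The
# fetch_html name and the "Mozilla/5.0" value are illustrative assumptions.
import urllib.request

def fetch_html(url, user_agent="Mozilla/5.0"):
    # Some sites reject the default Python user agent, so send a browser-like one.
    req = urllib.request.Request(url, headers={"User-Agent": user_agent})
    with urllib.request.urlopen(req) as resp:
        return resp.read()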
\r\n# This was a hurdle because it seems some webpages will and wont be accessed with beautiful soup\r\nclass AppURLopener(urllib.request.FancyURLopener):\r\n version = \"Mozilla/5.0\"\r\n\r\n#This is used to make a person object for each person the fastpeoplesearch returned\r\n#since we have to investigate all possibilities we found this the easiest way to keep everything straight\r\nclass Person:\r\n def __init__(self, name,age,link,family,aliases,addresses, mothersMaidenNames):\r\n self.name = name\r\n self.age = age\r\n self.link = link\r\n self.family = family\r\n self.aliases = aliases\r\n self.addresses = addresses\r\n self.mothersMaidenNames = mothersMaidenNames\r\n def get_age(self):\r\n return(self.age)\r\n def get_name(self):\r\n return(self.name)\r\n def get_link(self):\r\n return(self.link)\r\n def get_family(self):\r\n return(self.family)\r\n def get_aliases(self):\r\n return(self.aliases)\r\n def get_addresses(self):\r\n return(self.addresses)\r\n def get_Mothers_Maiden_Name(self):\r\n return(self.mothersMaidenNames)\r\n\r\n\r\n\r\n#formats the url to include input from the user\r\ndef formatURL(url):\r\n base = 'https://www.fastpeoplesearch.com'\r\n base += url\r\n return base\r\n\r\n#get high level details on each person returned\r\ndef highLevelDetails(url):\r\n #initialize new opener\r\n opener = AppURLopener()\r\n #read contents returned\r\n html = opener.open(url).read()\r\n #get the soup\r\n soup = BeautifulSoup(html, 'html.parser')\r\n #grab the more in depth details link\r\n links = soup.findAll(\"a\", {\"class\": \"btn btn-primary link-to-details\"}, href=True)\r\n h3s = soup.findAll(\"h3\")\r\n listOfNames = []\r\n listOfAges = []\r\n #get everyones full name and age\r\n for element in h3s:\r\n if \"Full Name\" in str(element):\r\n listOfNames += element\r\n if \"Age\" in str(element):\r\n age = str(element).replace(\"
\",\"\").replace(\"
\",\"\").replace(\"\",\"\").replace(\"\",\"\")\r\n listOfAges.append(age)\r\n #print(\"list of ages\", listOfAges[0])\r\n listOfLinks = []\r\n for link in links:\r\n newLink = formatURL(link['href'])\r\n listOfLinks.append(newLink)\r\n #i = 0\r\n # i dont think this ends up getting used but too scared to take out at this point\r\n addresses = soup.findAll(\"a\", {\"title\": re.compile('^Search people living at')}, text=True)\r\n listOfAddresses = []\r\n for address in addresses:\r\n listOfAddresses += address.contents\r\n \r\n\r\n#checkFurtherDetails(listoflinks[0])\r\n\r\n #print(listOfAddresses)\r\n i = 0\r\n #build list of family members\r\n for person in listOfNames:\r\n ##print(\"**********BEGGINING OF PERSON***********\")\r\n ##print(person, listOfAges[i], listOfLinks[i], listOfAddresses[i])\r\n \r\n familyMembers,aliases,newListOfAddresses,maidenNames = checkFurtherDetails(listOfLinks[i])\r\n ##print(\"**********RELATIVES***********\")\r\n \r\n formattedFamily = []\r\n for member in familyMembers:\r\n formattedFamily.append(member[:-2])\r\n #print(member[:-2])\r\n ##print(formattedFamily)\r\n ##print(\"**********POTENTIAL MOTHERS MAIDEN NAMES***********\") \r\n #mothersMaidenNames=[] \r\n #for member in familyMembers:\r\n # if member[:-2].split()[1] not in mothersMaidenNames:\r\n # #print(member[:-2].split()[1])\r\n # mothersMaidenNames.append(member[:-2].split()[1])\r\n #print(\"Maiden Names////////////////////////////////////////////////\")\r\n\r\n #print(mothersMaidenNames)\r\n ##print(\"**********End OF PERSON***********\")\r\n\r\n\r\n i +=1\r\n return listOfLinks, listOfNames, listOfAges\r\n\r\n# the last function checked high level but returned a link to drill into each person found\r\ndef checkFurtherDetails(url):\r\n #initialize opener\r\n opener = AppURLopener()\r\n #open the link to the person\r\n html = opener.open(url).read()\r\n #get the soup\r\n soup = BeautifulSoup(html, 'html.parser')\r\n #find all relatives\r\n relatives = soup.findAll(\"div\", {\"id\": \"relative-links\"})\r\n #or relative in relatives:\r\n para = relatives[0].find('p')\r\n spans = para.findAll('span')\r\n listOfRevs = []\r\n for people in spans:\r\n listOfRevs += people.contents\r\n #print(people.contents)\r\n formattedFamily = []\r\n for member in listOfRevs:\r\n formattedFamily.append(member[:-2])\r\n\r\n mothersMaidenNames=[] \r\n for member in formattedFamily:\r\n if member.split()[1] not in mothersMaidenNames:\r\n #print(member[:-2].split()[1])\r\n mothersMaidenNames.append(member.split()[1])\r\n spouse = soup.find(\"div\", {\"id\":\"aka-links\"})\r\n ##print(\"**********ALIASES***********\")\r\n listOfAliases = []\r\n aliases = spouse.find('p').contents\r\n for alias in aliases:\r\n alias = str(alias).strip('\\t\\r\\n')\r\n listOfAliases.append(alias)\r\n listOfAliases = listOfAliases[0].split(\" • \")\r\n ##print(listOfAliases)\r\n\r\n addresses = soup.findAll(\"a\", {\"title\": re.compile('^Search people who live at')})\r\n listOfAddresses = []\r\n for address in addresses:\r\n listOfAddresses += address.contents\r\n #print(address.contents)\r\n \r\n #print(addresses)\r\n addresses = soup.findAll(\"div\", {\"class\": \"detail-box-address\"})\r\n ##print(\"**********ADDRESSES***********\")\r\n #listOfAddresses = listOfAddresses[0:3]\r\n #print(listOfAddresses)\r\n i=0\r\n newListOfAddresses = []\r\n for address in listOfAddresses:\r\n address = str(address).strip('\\t\\r\\n')\r\n if \"
\" not in address:\r\n newListOfAddresses.append(address)\r\n #print(address)\r\n ##print(newListOfAddresses)\r\n return(formattedFamily, listOfAliases, newListOfAddresses,mothersMaidenNames)\r\n\r\n\r\ndef publicInformation(first, last, zipcode):\r\n #first = \"Angela\"\r\n #last = \"Tucker\"\r\n #zipcode = \"74012\"\r\n\r\n url = \"https://www.fastpeoplesearch.com/name/\" + first.lower() + \"-\" + last.lower() + \"_\" + zipcode\r\n print(url)\r\n links, names, ages = highLevelDetails(url)\r\n\r\n peopleFound = []\r\n i =0\r\n for name in names:\r\n print(\"_________________BEGIN NEW OUTPUT___________\")\r\n #print(name, \"\\n\", ages[i], \"\\n\", links[i], \"\\n\")\r\n familyMembers,aliases,newListOfAddresses,mothersMaidenNames = checkFurtherDetails(links[i])\r\n\r\n #mothersMaidenNames=[] \r\n #for member in familyMembers:\r\n # if member.split()[1] not in mothersMaidenNames:\r\n #print(member[:-2].split()[1])\r\n # mothersMaidenNames.append(member.split()[1])\r\n #mothersMaidenNames.append(member[:-2].split()[1])\r\n #print(\"Family- \", familyMembers, \"\\nAliases- \", aliases, \"\\nAddresses\", newListOfAddresses, \"\\nPotential Mother's Maiden Names- \", mothersMaidenNames)\r\n tempPerson = Person(name, ages[i], links[i], familyMembers, aliases, newListOfAddresses, mothersMaidenNames)\r\n #print(type(name), type(ages[i]), type(links[i]), type(familyMembers), type(aliases), type(newListOfAddresses), type(mothersMaidenNames))\r\n\r\n peopleFound.append(tempPerson)\r\n #print( peopleFound[i].get_name(), \"\\n\",\r\n # peopleFound[i].get_age(), \"\\n\",\r\n # peopleFound[i].get_addresses(), \"\\n\", \r\n # peopleFound[i].get_aliases(), \"\\n\",\r\n # peopleFound[i].get_family(),\"\\n\",\r\n # peopleFound[i].get_link(),\"\\n\",\r\n # peopleFound[i].get_Mothers_Maiden_Name())\r\n i += 1\r\n\r\n\r\n return peopleFound\r\n\r\n\r\n\r\n#publicInformation(\"Steven\", \"Tucker\", \"74012\")\r\n\r\n\r\n","sub_path":"publicInfoSearch.py","file_name":"publicInfoSearch.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291807458","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the CNN architecture\nclass Net(nn.Module):\n ### TODO: choose an architecture, and complete the class\n def __init__(self):\n super(Net, self).__init__()\n ## Define layers of a CNN\n self.conv1 = nn.Conv2d(3, 3, kernel_size=7, stride=2, padding=1)\n ## (-1, 3, 110, 110)\n self.conv2 = nn.Conv2d(3, 3, kernel_size=5, stride=2, padding=1)\n ## (-1, 3, 54, 54)\n self.conv3 = nn.Conv2d(3, 6, kernel_size=5, stride=2, padding=1)\n ## (-1, 6, 26, 26)\n self.conv4 = nn.Conv2d(6, 9, kernel_size=3, stride=2, padding=1)\n ## (-1, 9, 13, 13)\n self.fc1 = nn.Linear(9 * 13 * 13, 1000)\n self.fc2 = nn.Linear(1000, 500) \n self.fc3 = nn.Linear(500, 133)\n \n def forward(self, x):\n ## Define forward behavior\n x = x.view((-1, 3, 224, 224))\n x = F.relu(self.conv1(x))\n # print (x.shape) # (-1, 3, 110, 110)\n x = F.relu(self.conv2(x))\n # print (x.shape) # (-1, 3, 54, 54)\n x = F.relu(self.conv3(x))\n # print (x.shape) # (-1, 6, 26, 26)\n x = F.relu(self.conv4(x))\n # print (x.shape) # (-1, 9, 13, 13)\n fc_input_dim = x.size(1) * x.size(2) * x.size(3)\n# print (fc_input_dim)\n x = x.view(-1, fc_input_dim)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n#-#-# You so NOT have to modify the code below this line. 
#-#-#\n\n# instantiate the CNN\nmodel_scratch = Net()\n\n# move tensors to GPU if CUDA is available\nif use_cuda:\n model_scratch.cuda()\n\n\n\n##################################################################### 2\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# define the CNN architecture\nclass Net(nn.Module):\n ### TODO: choose an architecture, and complete the class\n def __init__(self):\n super(Net, self).__init__()\n ## Define layers of a CNN\n self.conv0 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=1)\n self.conv2 = nn.Conv2d(6, 9, kernel_size=5, stride=2, padding=1)\n self.conv3 = nn.Conv2d(9, 12, kernel_size=3, stride=2, padding=1)\n self.fc1 = nn.Linear(12 * 7 * 7, 200)\n self.fc2 = nn.Linear(200, 500) #output layer\n self.fc3 = nn.Linear(500, 133)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x):\n ## Define forward behavior\n x = x.view(-1, 3, 64, 64)\n x = self.pool(F.relu(self.conv0(x)))\n # print(x.shape) \n x = F.relu(self.conv1(x))\n # print (x.shape) # -1, 6, 30, 30\n x = F.relu(self.conv2(x))\n # print (x.shape) # -1, 9, 14, 14\n x = F.relu(self.conv3(x))\n # print (x.shape) # -1, 12, 7, 7\n fc_input_dim = x.size(1) * x.size(2) * x.size(3)\n # print (fc_input_dim)\n x = x.view(-1, fc_input_dim)\n x = F.relu(self.fc1(x))\n x = self.drop(x)\n x = F.relu(self.fc2(x))\n x = self.drop(x)\n x = self.fc3(x)\n return x\n\n#-#-# You so NOT have to modify the code below this line. #-#-#\n\n# instantiate the CNN\nmodel_scratch = Net()\n\n# move tensors to GPU if CUDA is available\nif use_cuda:\n model_scratch.cuda()\n\n\n\n\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# define the CNN architecture\nclass Net(nn.Module):\n ### TODO: choose an architecture, and complete the class\n def __init__(self):\n super(Net, self).__init__()\n ## Define layers of a CNN\n self.conv1 = nn.Conv2d(3, 8, 5, padding=2)\n self.conv2 = nn.Conv2d(8, 16, 5, padding=2)\n self.conv3 = nn.Conv2d(16, 32, 5, padding=2)\n self.conv4 = nn.Conv2d(32, 64, 5, padding=2)\n self.conv5 = nn.Conv2d(64, 64, 5, padding=2)\n # self.conv6 = nn.Conv2d(64, 128, 3, padding=1)\n # self.conv7 = nn.Conv2d(128, 128, 3, padding=1)\n self.pool = nn.MaxPool2d(2)\n self.fc1 = nn.Linear(4096, 1000)\n self.fc2 = nn.Linear(1000, 500)\n self.fc3 = nn.Linear(500, 133)\n self.drop = nn.Dropout(0.5)\n\n \n\n \n def forward(self, x):\n ## Define forward behavior\n x = x.view(-1, 3, 256, 256)\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = self.pool(F.relu(self.conv3(x)))\n x = self.pool(F.relu(self.conv4(x)))\n x = self.pool(F.relu(self.conv5(x)))\n # print (x.shape) # -1, 64, 8, 8\n fc_input_dim = x.size(1) * x.size(2) * x.size(3)\n # print (fc_input_dim)\n x = x.view(-1, fc_input_dim)\n x = F.relu(self.fc1(x))\n x = self.drop(x)\n x = F.relu(self.fc2(x))\n x = self.drop(x)\n x = self.fc3(x)\n return x\n\n#-#-# You so NOT have to modify the code below this line. 
#-#-#\n\n# instantiate the CNN\nmodel_scratch = Net()\n\n# move tensors to GPU if CUDA is available\nif use_cuda:\n model_scratch.cuda()\n\n\n##################################################################\nimport os\nimport numpy as np\nimport PIL\nfrom torchvision import datasets\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch\n\ndef get_dataloader(batch_size, image_size):\n \n data_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor()\n ])\n\n train_dataset = datasets.ImageFolder(\n root='dog_images/train',\n transform=data_transform\n )\n valid_dataset = datasets.ImageFolder(\n root='dog_images/valid',\n transform=data_transform\n )\n test_dataset = datasets.ImageFolder(\n root='dog_images/test',\n transform=data_transform\n )\n\n train_dataset_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=True, num_workers=4\n )\n valid_dateset_loader = torch.utils.data.DataLoader(\n valid_dataset, batch_size=batch_size, shuffle=True, num_workers=4\n )\n test_dataset_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=batch_size, shuffle=True, num_workers=4\n )\n\n loaders_scratch = {\n 'train': train_dataset_loader,\n 'valid': valid_dateset_loader,\n 'test': test_dataset_loader\n }\n\n return loaders_scratch\n\n#### neural network\nclass Net(nn.Module):\n ### TODO: choose an architecture, and complete the class\n def __init__(self):\n super(Net, self).__init__()\n ## Define layers of a CNN\n self.conv0 = nn.Conv2d(3, 3, kernel_size=5, stride=1, padding=1)\n self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=1)\n self.conv2 = nn.Conv2d(6, 12, kernel_size=3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(12, 24, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv2d(3, 3, kernel_size=5, stride=1, padding=1)\n self.fc1 = nn.Linear(24 * 3 * 3, 200)\n self.fc2 = nn.Linear(200, 500) #output layer\n self.fc3 = nn.Linear(500, 133)\n self.drop = nn.Dropout(0.3)\n self.pool = nn.MaxPool2d(2, 2)\n \n def forward(self, x):\n ## Define forward behavior\n x = x.view(-1, 3, 130, 130)\n x = self.pool(F.relu(self.conv4(x)))\n # print(x.shape, \"expect -1, 3, 64, 64\")\n x = self.pool(F.relu(self.conv0(x)))\n # print(x.shape, \"expect -1, 3, 31, 31\")\n x = self.pool(F.relu(self.conv1(x)))\n # print(x.shape, \"expect -1, 6, 14, 14\") \n x = self.pool(F.relu(self.conv2(x)))\n # print(x.shape, \"expect -1, 12, 7, 7\") \n x = self.pool(F.relu(self.conv3(x)))\n # print(x.shape, \"expect -1, 24, 3, 3\") \n fc_input_dim = x.size(1) * x.size(2) * x.size(3)\n x = x.view(-1, fc_input_dim)\n x = F.relu(self.fc1(x))\n x = self.drop(x)\n x = F.relu(self.fc2(x))\n x = self.drop(x)\n x = self.fc3(x)\n return x\n\ndef train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):\n print (use_cuda)\n \"\"\"returns trained model\"\"\"\n # initialize tracker for minimum validation loss\n valid_loss_min = np.Inf \n \n for epoch in range(1, n_epochs+1):\n # initialize variables to monitor training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n ###################\n # train the model #\n ###################\n model.train()\n for batch_idx, (data, target) in enumerate(loaders['train']):\n # move to GPU\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n ## find the loss and update the model parameters accordingly\n optimizer.zero_grad()\n outputs = model(data)\n loss = criterion(outputs, 
target)\n loss.backward()\n optimizer.step()\n \n ## record the average training loss, using something like\n train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))\n if batch_idx % 20 == 0:\n print (train_loss)\n \n ###################### \n # validate the model #\n ######################\n model.eval()\n for batch_idx, (data, target) in enumerate(loaders['valid']):\n # move to GPU\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n ## update the average validation loss\n outputs = model(data)\n loss = criterion(outputs, target)\n valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))\n\n\n # print training/validation statistics \n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\n epoch, \n train_loss,\n valid_loss\n ))\n print('Minimum validation loss : {}'.format(valid_loss_min))\n \n ## TODO: save the model if validation loss has decreased\n if valid_loss < valid_loss_min:\n valid_loss_min = valid_loss\n torch.save(model.state_dict(), save_path)\n \n \n # return trained model\n return model\n\n\nif __name__ == '__main__':\n\n batch_size = 64\n use_cuda = torch.cuda.is_available()\n print ('use_cuda: {}'.format(use_cuda))\n loaders_scratch = get_dataloader(batch_size, 130)\n model_scratch = Net()\n if use_cuda:\n\n model_scratch.cuda()\n\n #test block:\n for _, sample in enumerate(loaders_scratch['train']):\n test = model_scratch(sample[0].cuda())\n print(test.shape)\n break\n\n criterion_scratch = nn.CrossEntropyLoss()\n optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.07)\n\n # train the model\n model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, \n criterion_scratch, use_cuda, 'model_scratch.pt')\n\n # load the model that got the best validation accuracy\n # model_scratch.load_state_dict(torch.load('model_scratch.pt'))","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":11137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"416233561","text":"import os\n\nimport cv2\nimport depthai as dai\nimport numpy as np\nfrom model_api import AgeGenderLabel, BaseInferenceModel, Label\n\nfrom .face_processing import FaceProcessor, pad_img, wait_for_results\n\n\nclass InferenceModel(BaseInferenceModel):\n def __init__(\n self,\n model_path: str,\n model_name: str = \"\",\n model_description: str = \"\",\n threshold: float = 0.1,\n area_threshold: float = 0.6,\n **kwargs,\n ):\n super().__init__(model_path, model_name, model_description, **kwargs)\n self.area_threshold = area_threshold\n self.threshold = threshold\n self.classes = [\"female\", \"male\"]\n self.face_processor = FaceProcessor(threshold)\n self.input_width, self.input_height = (\n 62,\n 62,\n )\n\n def preprocess(self, data):\n face_bboxes = self.get_faces(data)\n preprocessed_data = []\n preprocessed_bboxes = []\n if face_bboxes == [[]]:\n return None\n for i, img in enumerate(data):\n areas = [\n (bbox.y2 - bbox.y1) * (bbox.x2 - bbox.x1) for bbox in face_bboxes[i]\n ]\n max_area = max(areas)\n preprocessed_img = []\n img_bboxes = []\n img = np.array(img)[:, :, ::-1]\n for j, face_bbox in enumerate(face_bboxes[i]):\n if areas[j] < self.area_threshold * max_area:\n continue\n cropped_face = img[\n int(face_bbox.y1) : int(face_bbox.y2),\n int(face_bbox.x1) : int(face_bbox.x2),\n ]\n height, width, _ = cropped_face.shape\n if self.input_height / self.input_width < height / width:\n scale = self.input_height / height\n else:\n scale 
= self.input_width / width\n\n scaled_img = cv2.resize(\n cropped_face,\n (0, 0),\n fx=scale,\n fy=scale,\n interpolation=cv2.INTER_CUBIC,\n )\n padded_img, pad = pad_img(\n scaled_img, (0, 0, 0), [self.input_height, self.input_width],\n )\n\n padded_img = padded_img.transpose((2, 0, 1))\n planar_img = padded_img.flatten().astype(np.float32)\n preprocessed_img.append(planar_img)\n img_bboxes.append(face_bbox)\n preprocessed_data.append(preprocessed_img)\n preprocessed_bboxes.append(img_bboxes)\n\n return [preprocessed_data, preprocessed_bboxes]\n\n def postprocess(self, predictions):\n postprocessed_result = []\n\n for result, face_bboxes in zip(predictions[0], predictions[1]):\n image_predictions = []\n for face_bbox, face_result in zip(face_bboxes, result):\n age, gender_score = [\n face_result.getLayerFp16(tensor.name)\n for tensor in face_result.getAllLayers()\n ]\n gender_idx = np.argsort(gender_score)\n image_predictions.append(\n AgeGenderLabel(\n bbox=face_bbox,\n age=int(age[0] * 100),\n gender=[\n Label(\n score=gender_score[idx], class_name=self.classes[idx],\n )\n for idx in reversed(gender_idx)\n ],\n ),\n )\n postprocessed_result.append(image_predictions)\n\n return postprocessed_result\n\n def create_pipeline(self, model_blob):\n self.pipeline = dai.Pipeline()\n\n face_detector_in = self.pipeline.createXLinkIn()\n face_detector_in.setStreamName(\"face_detector_in\")\n\n face_detector = self.pipeline.createNeuralNetwork()\n face_detector.setBlobPath(model_blob[\"detector\"])\n\n face_detector_out = self.pipeline.createXLinkOut()\n face_detector_out.setStreamName(\"face_detector_out\")\n\n age_gender_in = self.pipeline.createXLinkIn()\n age_gender_in.setStreamName(\"age_gender_in\")\n\n age_gender_nn = self.pipeline.createNeuralNetwork()\n age_gender_nn.setBlobPath(model_blob[\"age_gender\"])\n\n age_gender_out = self.pipeline.createXLinkOut()\n age_gender_out.setStreamName(\"age_gender_out\")\n\n face_detector_in.out.link(face_detector.input)\n face_detector.out.link(face_detector_out.input)\n age_gender_in.out.link(age_gender_nn.input)\n age_gender_nn.out.link(age_gender_out.input)\n\n def model_load(self):\n\n model_blob = {\n \"detector\": os.path.join(self.model_path, \"stage_1.blob\"),\n \"age_gender\": os.path.join(self.model_path, \"stage_2.blob\"),\n }\n\n self.create_pipeline(model_blob)\n\n self.oak_device = dai.Device(self.pipeline)\n self.oak_device.startPipeline()\n\n self.face_detector_in = self.oak_device.getInputQueue(\"face_detector_in\")\n self.face_detector_out = self.oak_device.getOutputQueue(\"face_detector_out\")\n self.age_gender_in = self.oak_device.getInputQueue(\"age_gender_in\")\n self.age_gender_out = self.oak_device.getOutputQueue(\"age_gender_out\")\n\n def forward(self, data, stage=\"age-gender\"):\n results = []\n for sample in data[0]:\n sample_results = []\n for face in sample:\n nn_data = dai.NNData()\n nn_data.setLayer(\"data\", face)\n self.age_gender_in.send(nn_data)\n assert wait_for_results(self.age_gender_out)\n sample_results.append(self.age_gender_out.get())\n results.append(sample_results)\n data[0] = results\n return data\n\n def process_sample(self, image):\n data = self.preprocess([image])\n if data is None:\n return []\n output = self.forward(data)\n results = self.postprocess(output)\n return results[0]\n\n def get_faces(self, data):\n preprocessed_data = self.face_processor.preprocess(data)\n face_output = self.face_processor.forward(\n self.face_detector_in, self.face_detector_out, preprocessed_data,\n )\n face_bboxes = 
self.face_processor.postprocess(face_output)\n return face_bboxes\n\n def add_cam_to_pipeline(self, width, height):\n cam = self.pipeline.createColorCamera()\n cam.setPreviewSize(width, height)\n cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n cam.setInterleaved(False)\n cam.setCamId(0)\n cam_out = self.pipeline.createXLinkOut()\n cam_out.setStreamName(\"cam_out\")\n cam.preview.link(cam_out.input)\n\n del self.oak_device\n\n self.oak_device = dai.Device(self.pipeline)\n self.oak_device.startPipeline()\n\n cam_queue = self.oak_device.getOutputQueue(\"cam_out\", 1, True)\n self.face_detector_in = self.oak_device.getInputQueue(\"face_detector_in\")\n self.face_detector_out = self.oak_device.getOutputQueue(\"face_detector_out\")\n self.age_gender_in = self.oak_device.getInputQueue(\"age_gender_in\")\n self.age_gender_out = self.oak_device.getOutputQueue(\"age_gender_out\")\n\n return cam_queue\n","sub_path":"age_gender_recognition_retail/age_gender_recognition_retail/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"110884117","text":"\"\"\" Beaker backend for redis\n\"\"\"\n\nimport pickle\nimport logging\n\nfrom redis import Redis\nfrom beaker.container import NamespaceManager, Container\nfrom beaker.util import verify_directory\n\nlog = logging.getLogger(__name__)\n\n\nclass RedisBackend(NamespaceManager):\n \"\"\" Beaker backend for redis\n\n :param str hkey: name for redis hkey for store data\n (default: `sessions`)\n \"\"\"\n hkey = 'sessions'\n\n def __init__(\n self,\n namespace,\n url,\n data_dir=None,\n lock_dir=None,\n hkey='sessions',\n **params):\n\n super().__init__(namespace)\n\n if lock_dir:\n self.lock_dir = lock_dir\n elif data_dir:\n self.lock_dir = data_dir + \"/container_tcd_lock\"\n else:\n self.lock_dir = None\n\n if self.lock_dir:\n verify_directory(self.lock_dir)\n\n self.hkey = hkey\n\n host, port_db = url.split(':', 1)\n port, db = (int(i) for i in port_db.split('/', 1))\n\n self.db = Redis(host=host, port=port, db=db)\n\n log.debug('sessions redis backend setuped: namespace={}'.format(namespace))\n\n def __contains__(self, key):\n return self.db.hexists(self.hkey, self.format_key(key))\n\n def __getitem__(self, key):\n data = self.db.hget(self.hkey, self.format_key(key))\n\n if data is not None:\n return pickle.loads(data)\n else:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n key = self.format_key(key)\n log.debug('session set key ({}: \"{!r}\")'.format(key, value))\n self.db.hset(self.hkey, key, pickle.dumps(value))\n\n def __delitem__(self, key):\n key = self.format_key(key)\n self.db.hdel(self.hkey, key)\n\n def format_key(self, key):\n return '{} {}'.format(self.namespace, key)\n\n def do_remove(self):\n log.debug('remove sessions')\n self.db.delete(self.hkey)\n\n def keys(self):\n raise self.db.hkeys(self.hkey)\n\n\nclass RedisContainer(Container):\n namespace_manager = RedisBackend\n","sub_path":"beaker_redis.py","file_name":"beaker_redis.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"217817219","text":"'''\nStatement\nIn mathematics, the factorial of an integer n, denoted by n! is the following product:\n\nn! = 1 × 2 × … × n\n\nFor the given integer n calculate the value\n\n1! + 2! + 3! + ... + n!\n\nTry to discover the solution that uses only one for-loop. 
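For instance, a single loop can keep a running factorial and add it to a
total on every pass (an illustrative sketch with invented variable names,
separate from the solution given after this statement):

    total = 0
    factorial = 1
    for i in range(1, int(input()) + 1):
        factorial *= i      # i! built incrementally from (i-1)!
        total += factorial  # accumulates 1! + 2! + ... + i!
    print(total)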
And don't use math module in this exercise.\n\nExample input\n4\n\nExample output\n33\n'''\nimport math\n\nsum = 0\n\nfor i in range(1, int(input()) + 1):\n sum += math.factorial(i)\n\nprint(sum)\n","sub_path":"Python_Challenge_115/4/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"456335501","text":"import pandas as pd\r\nimport random\r\n#from scipy.\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ntrainSet = pd.read_csv('Train.csv') #, index_col='Item_Identifier') # 导入数据\r\ntestSet = pd.read_csv('Test.csv') #, index_col='Item_Identifier') # 导入数据\r\n\r\ntrainSet['source'] = 'train'\r\ntestSet['source'] = 'test'\r\n\r\n#print(trainSet.shape)\r\n#missdata = trainSet.apply(lambda x: sum(x.isnull()), axis=0) # 计算缺失值\r\n#print(missdata) # 显示缺失值\r\n#print(trainSet.apply(lambda x:len(x.unique())))\r\n#\r\n#print(testSet.shape)\r\n#missdata = testSet.apply(lambda x: sum(x.isnull()), axis=0) # 计算缺失值\r\n#print(missdata) # 显示缺失值\r\n#print(testSet.apply(lambda x:len(x.unique())))\r\n#\r\ndataSet = pd.concat([trainSet, testSet], ignore_index=True) # 合并训练数据和测试数据\r\n#print(dataSet.shape)\r\n#missdata = dataSet.apply(lambda x: sum(x.isnull()), axis=0) # 计算缺失值\r\n#print(missdata) # 显示缺失值\r\n#print(dataSet.apply(lambda x:len(x.unique())))\r\n#\r\n#print(dataSet.describe())\r\n#\r\n##Filter categorical variables\r\n#categorical_columns = [x for x in dataSet.columns if dataSet.dtypes[x]=='object']\r\n##Exclude ID cols and source:\r\n#categorical_columns = [x for x in categorical_columns if x not in ['Item_Identifier', 'Outlet_Identifier', 'source']]\r\n#\r\n#for col in categorical_columns:\r\n# print('\\nFrequency of Categories for varible %s' % col)\r\n# print(dataSet[col].value_counts())\r\n#\r\n#STEP2, DATA CLEANING\r\n#计算分组平均数,pivot-table默认聚合类型\r\n#item_avg_weight = dataSet.pivot_table(values='Item_Weight', index='Item_Identifier')\r\n##标记缺失值'Item_Weight'\r\n#miss_bool = dataSet['Item_Weight'].isnull()\r\n#print('#total miss data is #%d' % sum(miss_bool))\r\n##填充缺失值\r\n#dataSet.ix[miss_bool, 'Item_Weight'] = dataSet.ix[miss_bool, 'Item_Identifier'].apply(lambda x: item_avg_weight[x])\r\n#miss_bool = dataSet['Item_Weight'].isnull()\r\n#print('#total miss data is #%d' % sum(miss_bool))\r\n\r\n#outlet_size_mode = dataSet.pivot_table(values='Outlet_Size', columns='Outlet_Type',aggfunc=(lambda x:mode(x).mode[0]) )\r\n#print(outl#et_size_mode)\r\n\r\n#miss_bool = dataSet['Outlet_Size'].isnull()\r\n#print(dataSet.ix[miss_bool, ['Outlet_Size', 'Outlet_Type']])\r\n'''\r\nOutlet_Size的图形分析,\r\nOutlet_Size和Outlet_Type,以及Outlet_Location_Type有关\r\n\r\noutlet_size = dataSet[['Outlet_Size', 'Outlet_Type', 'Outlet_Location_Type']]\r\noutlet_size = outlet_size.dropna()\r\noutlet_size.ix[outlet_size['Outlet_Size']=='Small', 'Outlet_Size'] = 1.0\r\noutlet_size.ix[outlet_size['Outlet_Size']=='Medium', 'Outlet_Size'] = 2.0\r\noutlet_size.ix[outlet_size['Outlet_Size']=='High', 'Outlet_Size'] = 3.0\r\n\r\noutlet_size.ix[outlet_size['Outlet_Type']=='Grocery Store', 'Outlet_Type'] = 1.0\r\noutlet_size.ix[outlet_size['Outlet_Type']=='Supermarket Type1', 'Outlet_Type'] = 2.0\r\noutlet_size.ix[outlet_size['Outlet_Type']=='Supermarket Type2', 'Outlet_Type'] = 3.0\r\noutlet_size.ix[outlet_size['Outlet_Type']=='Supermarket Type3', 'Outlet_Type'] = 4.0\r\n\r\noutlet_size.ix[outlet_size['Outlet_Location_Type']=='Tier 1', 'Outlet_Location_Type'] = 
'r'\r\noutlet_size.ix[outlet_size['Outlet_Location_Type']=='Tier 2', 'Outlet_Location_Type'] = 'g'\r\noutlet_size.ix[outlet_size['Outlet_Location_Type']=='Tier 3', 'Outlet_Location_Type'] = 'b'\r\n\r\nT = outlet_size['Outlet_Location_Type']\r\n\r\n\r\nfor x in outlet_size.index:\r\n outlet_size.ix[x, 'Outlet_Size'] = outlet_size.ix[x, 'Outlet_Size'] + random.uniform(-0.3,0.3)\r\n outlet_size.ix[x, 'Outlet_Type'] = outlet_size.ix[x, 'Outlet_Type'] + random.uniform(-0.3, 0.3)\r\n\r\nplt.scatter(outlet_size['Outlet_Type'], outlet_size['Outlet_Size'], c=np.array(T), alpha=0.2)\r\nplt.show()\r\n'''\r\nmiss_bool = dataSet['Outlet_Size'].isnull()\r\nprint('#Total miss data is %d'% sum(miss_bool))\r\n\r\ndataSet.ix[miss_bool, 'Outlet_Size'] = 'Small'\r\nmiss_bool = dataSet['Outlet_Size'].isnull()\r\nprint('#Total miss data is %d' % sum(miss_bool))","sub_path":"01bigmart/bigmart.py","file_name":"bigmart.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"609677621","text":"from datetime import datetime\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import messages\nfrom django.views.decorators.http import require_POST\nfrom json import dumps\nfrom .models import Task\nfrom .forms import TaskForm\nfrom .common import get_task_status_id, TaskStatuses\n\n\ndef _get_tasks(status=None):\n q = Task.objects.filter(\n status=status) if status in TaskStatuses.keys() else Task.objects.all()\n return q.order_by('-created')\n\n\ndef index(request):\n return render(request, 'todo/index.html', {'tasks': _get_tasks()})\n\n\ndef task_list(request, task_status):\n ctx = {'tasks': _get_tasks(get_task_status_id(task_status))}\n return render(request, 'todo/index.html', ctx)\n\n\n@require_POST\ndef update_task(request):\n task_id = request.POST['task_id']\n if not task_id:\n raise ValueError('task_id not specified')\n\n t = Task.objects.get(pk=task_id)\n stat = request.POST['status']\n t.status = True if stat and stat.lower() == 'true' else False\n t.completed = datetime.utcnow() if t.status else None\n t.save()\n\n return HttpResponse(dumps({'r': 'success'}),\n content_type='application/json')\n\n\ndef new_task(request):\n if request.method == 'POST':\n task_form = TaskForm(request.POST)\n\n if task_form.is_valid():\n task = Task(**task_form.cleaned_data)\n task.created = datetime.utcnow()\n task.save()\n messages.add_message(request, messages.SUCCESS, 'New task added.')\n return HttpResponseRedirect('/')\n else:\n task_form = TaskForm()\n return render(request, 'todo/task_form.html',\n {'form': task_form, 'req': request})\n\n\ndef task_edit(request, task_id):\n task = Task.objects.get(pk=task_id)\n if request.method == 'GET':\n task_form = TaskForm(instance=task)\n task_form.is_updating = True\n\n if request.method == 'POST':\n task_form = TaskForm(request.POST)\n if task_form.is_valid():\n task.title = task_form.cleaned_data['title']\n task.text = task_form.cleaned_data['text']\n task.priority = task_form.cleaned_data['priority']\n task.save()\n return HttpResponseRedirect('/')\n\n return render(request, 'todo/task_form.html',\n {'form': task_form, 'req': request})\n","sub_path":"djtodo/djtodo/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432797439","text":"# -*- coding: utf-8 -*-\nfrom base import Base\nimport 
numpy as np\nimport numpy.linalg as la\nimport cv2\n\n\nclass Training(Base):\n def __init__(self):\n Base.__init__(self)\n\n self.points_img1 = [(110, 47), (89, 54), (77, 61), (76, 78), (73, 109),\n (68, 123), (66, 142), (64, 154), (62, 161), (74, 157),\n (84, 157), (96, 159), (110, 168), (112, 155), (112, 141),\n (115, 123), (116, 110), (118, 96), (121, 77), (123, 64)]\n\n self.points_img2 = [(108, 52), (90, 54), (74, 60), (77, 79), (74, 103),\n (68, 124), (69, 137), (65, 154), (63, 165), (59, 180),\n (70, 175), (82, 173), (102, 173), (110, 177), (109, 164),\n (110, 149), (112, 132), (113, 111), (115, 87), (119, 69)]\n\n self.points_img3 = [(112, 45), (97, 52), (80, 59), (75, 78), (75, 92),\n (74, 105), (72, 118), (70, 132), (65, 143), (79, 137),\n (88, 138), (103, 144), (111, 153), (113, 141), (113, 128),\n (118, 104), (124, 90), (126, 80), (123, 67), (120, 56)]\n\n self.points_img4 = [(113, 45), (96, 54), (82, 58), (78, 69), (75, 83),\n (75, 98), (72, 107), (71, 118), (69, 137), (64, 147),\n (62, 160), (75, 143), (88, 153), (102, 157), (110, 164),\n (111, 152), (112, 137), (116, 112), (121, 90), (123, 74)]\n\n self.points_img5 = [(115, 44), (94, 53), (79, 60), (73, 76), (76, 93),\n (73, 108), (72, 126), (66, 141), (63, 159), (76, 152),\n (89, 152), (103, 159), (110, 164), (111, 150), (113, 136),\n (114, 121), (115, 98), (123, 83), (124, 67), (123, 59)]\n\n self.avg_shape = [self.points_img1,\n self.points_img2,\n self.points_img3,\n self.points_img4,\n self.points_img5]\n\n def average_shape(self):\n return (\n (np.array(self.points_img1) +\n np.array(self.points_img2) +\n np.array(self.points_img3) +\n np.array(self.points_img4) +\n np.array(self.points_img5)) / 5).tolist()\n\n # Gray levels for each point of the training images\n def gp(self, k, n_points=20):\n pass\n image_number = 1\n images = []\n for img in range(5):\n if image_number > 9:\n img_name = str(image_number) + \"_treino.png\"\n else:\n img_name = \"0\" + str(image_number) + \"_treino.png\"\n\n points = self.normal(self.avg_shape[img], img_name, k)\n images.append(points)\n\n image_number = image_number + 1\n\n normal_points_img1 = images[0]\n normal_points_img2 = images[1]\n normal_points_img3 = images[2]\n normal_points_img4 = images[3]\n normal_points_img5 = images[4]\n\n l_avg_point = []\n l_avg = []\n for i in range(n_points):\n point_value_img1 = normal_points_img1[i]\n point_value_img2 = normal_points_img2[i]\n point_value_img3 = normal_points_img3[i]\n point_value_img4 = normal_points_img4[i]\n point_value_img5 = normal_points_img5[i]\n\n for j in range(len(point_value_img1)):\n avg = (point_value_img1[j][1][0] +\n point_value_img2[j][1][0] +\n point_value_img3[j][1][0] +\n point_value_img4[j][1][0] +\n point_value_img5[j][1][0]) / 5\n l_avg_point.append(avg)\n l_avg.append(l_avg_point)\n l_avg_point = []\n\n return l_avg\n\n # s = (1/(s - 1))*E(xi - xp)*(xi - xp)T\n def cov(self, xp, fv):\n # (xi - xp)*(xi - xp)T\n m_img1 = np.dot(\n (np.array(self.points_img1) - xp),\n ((np.array(self.points_img1) - xp).T)\n )\n m_img2 = np.dot(\n (np.array(self.points_img2) - xp),\n ((np.array(self.points_img2) - xp).T)\n )\n m_img3 = np.dot(\n (np.array(self.points_img3) - xp),\n ((np.array(self.points_img3) - xp).T)\n )\n m_img4 = np.dot(\n (np.array(self.points_img4) - xp),\n ((np.array(self.points_img4) - xp).T)\n )\n m_img5 = np.dot(\n (np.array(self.points_img5) - xp),\n ((np.array(self.points_img5) - xp).T)\n )\n\n # 1/(s - 1) * E (somatorio)\n m_cov = (m_img1 + m_img2 + m_img3 + m_img4 + m_img5) / 4\n\n # Obtem 
autovalores e autovetores\n eigenvalues, eigenvectors = la.eig(m_cov)\n # print(\"autovalores => \", eigenvalues)\n # print(\"autovetores => \", eigenvectors)\n\n eigenvectors_t = np.array(eigenvectors).T\n\n # Obtem índices para ordenação decrescente dos autovalores\n index = np.argsort(eigenvalues)[::-1]\n # autovalores_dec = eigenvalues[index]\n # autovetores_dec = eigenvectors[index]\n\n # Define 5 PC\n matrix_eigenvectors = []\n for i in range(5):\n index_PC = index[i]\n matrix_eigenvectors.append(eigenvectors_t[index_PC])\n\n # print(\"index => \", index)\n # print(\"autovalores_dec => \", autovalores_dec)\n # print(\"autovetores_dec => \", autovetores_dec)\n\n # print(\"matrix_eigenvectors => \", np.array(matrix_eigenvectors))\n # print(\"type matrix_eigenvectors => \", type(np.array(matrix_eigenvectors)))\n # print(\"matrix_eigenvectors.T => \", np.array(matrix_eigenvectors).T)\n # print(\"type matrix_eigenvectors.T => \", type(np.array(matrix_eigenvectors).T))\n\n return np.array(matrix_eigenvectors), np.array(matrix_eigenvectors).T\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"110635648","text":"#!/usr/bin/python3\n\n\"\"\"\n== Лото ==\n\nПравила игры в лото.\n\nИгра ведется с помощью специальных карточек, на которых отмечены числа, \nи фишек (бочонков) с цифрами.\n\nКоличество бочонков — 90 штук (с цифрами от 1 до 90).\n\nКаждая карточка содержит 3 строки по 9 клеток. В каждой строке по 5 случайных цифр, \nрасположенных по возрастанию. Все цифры в карточке уникальны. Пример карточки:\n\n--------------------------\n 9 43 62 74 90\n 2 27 75 78 82\n 41 56 63 76 86 \n--------------------------\n\nВ игре 2 игрока: пользователь и компьютер. Каждому в начале выдается \nслучайная карточка. \n\nКаждый ход выбирается один случайный бочонок и выводится на экран.\nТакже выводятся карточка игрока и карточка компьютера.\n\nПользователю предлагается зачеркнуть цифру на карточке или продолжить.\nЕсли игрок выбрал \"зачеркнуть\":\n\tЕсли цифра есть на карточке - она зачеркивается и игра продолжается.\n\tЕсли цифры на карточке нет - игрок проигрывает и игра завершается.\nЕсли игрок выбрал \"продолжить\":\n\tЕсли цифра есть на карточке - игрок проигрывает и игра завершается.\n\tЕсли цифры на карточке нет - игра продолжается.\n\t\nПобеждает тот, кто первый закроет все числа на своей карточке.\n\nПример одного хода:\n\nНовый бочонок: 70 (осталось 76)\n------ Ваша карточка -----\n 6 7 49 57 58\n 14 26 - 78 85\n23 33 38 48 71 \n--------------------------\n-- Карточка ко��пьютера ---\n 7 87 - 14 11 \n 16 49 55 88 77 \n 15 20 - 76 -\n--------------------------\nЗачеркнуть цифру? 
(y/n)\n\nПодсказка: каждый следующий случайный бочонок из мешка удобно получать \nс помощью функции-генератора.\n\nПодсказка: для работы с псевдослучайными числами удобно использовать \nмодуль random: http://docs.python.org/3/library/random.html\n\n\"\"\"\n\nimport random\nimport sys\n\n\n# Так и не получилось прикрутить проверку ряда на == 'X'\n# Вернее в какой-то момент получилось, захотел сделать красивее - и доломал :(\n\nclass Gambler(object):\n def __init__(self):\n self.card = Card()\n self.points = 0\n\n def cross(self, number=None):\n if self.check_number(number):\n print('''Crossed out!\n AI is playing...''')\n self.points += 1\n self.card.update(number)\n else:\n print('''No such number in your card!\n ~~~GAME OVER!~~~\n ''')\n sys.exit()\n\n def next(self, number=None):\n if self.check_number(number):\n print('''You missed the number! Be more attentive next time!\n ~~~GAME OVER!~~~\n ''')\n sys.exit()\n else:\n pass\n\n def check_number(self, number):\n line = self.card.store_card()\n for seq in line:\n for el in seq:\n if el == number:\n return True\n return False\n\n\nclass Human(Gambler):\n def __init__(self):\n super().__init__()\n self.card.print_new_card()\n\n def make_turn(self, number=None):\n print('**** YOUR CARD ****')\n self.card.show_card()\n print(''' Getting number!\n *rolling*\n ******* \n ***** \n ***''')\n print('Number is: ', number)\n print('''What now?\n \"x\" to cross out the number\n \"n\" to draw next number \n \"q\" to end the game''')\n\n turn = input('')\n if turn == 'x':\n self.cross(number)\n if turn == 'n':\n self.next(number)\n if turn == 'q':\n print('NO WINNER TODAY! Thanks for playing and have a nice day!')\n sys.exit()\n while turn not in ('x', 'n', 'q'):\n turn = input('Invalid input!')\n\n\nclass Computer(Gambler):\n def __init__(self):\n super().__init__()\n self.card.print_new_card()\n\n def make_turn(self, number=None):\n print('***** AI CARD *****')\n if self.check_number(number):\n self.points += 1\n self.card.update(number)\n self.card.show_card()\n\n\nclass Card(object):\n def __init__(self):\n self.all_cards = [x for x in range(1, 91)]\n self.numbers_max = 15\n self.card = self.print_new_card()\n\n def print_new_card(self):\n c = 0\n self.random_numbers = random.sample(self.all_cards, self.numbers_max)\n self.card = [['' for x in range(9)] for i in range(3)]\n for i in range(len(self.card)):\n for j in range(len(self.card[i])):\n if j <= 4:\n self.card[i][j] = self.random_numbers[c]\n c += 1\n for i in range(len(self.card)):\n random.shuffle(self.card[i])\n return self.card\n\n def store_card(self, number=None):\n self.c = self.card[:]\n if self.update(number):\n self.c = self.update(number)\n return self.c\n\n # def check_row(self):\n # self.c = self.card[:]\n # for x in self.c:\n # if list(filter(None, x))\n #\n #\n # if all(list(filter(None, list))) == 'X':\n # return True\n\n # def check_equal(self):\n # list =\n # return list[1:] == list[:-1]\n\n\n def show_card(self):\n for i in self.store_card():\n print(' '.join(map(str, i)))\n print('*******************')\n\n def update(self, number):\n for seq in self.card:\n for el in range(len(seq)):\n if seq[el] == number:\n seq[el] = 'X'\n return self.card\n\n\nclass NumbersBank(object):\n def __init__(self):\n self.pool = [x for x in range(1, 91)]\n\n def roll(self):\n pick = random.choice(self.pool)\n self.pool.remove(pick)\n return pick\n\n\nif __name__ == '__main__':\n print('LOTTERY 5/90! v. 1.0')\n answer = input('Wanna play? 
Type \"y\" or \"n\":')\n if answer == 'n':\n print('Maybe another time! See you in Vegas!')\n sys.exit()\n while answer not in ('y', 'n'):\n answer = input('Invalid input!')\n\n bank = NumbersBank()\n player = Human()\n AI = Computer()\n card = Card()\n\n while True:\n num = bank.roll()\n player.make_turn(num)\n AI.make_turn(num)\n print('Your points: ', player.points)\n print('AI points: ', AI.points)\n # if card.check_row():\n # print('Row crossed out! Game finished!')\n # sys.exit()\n if player.points == 5:\n print('YOU WON!')\n sys.exit()\n elif AI.points == 5:\n print('AI WON!')\n sys.exit()\n","sub_path":"lesson07/home_work/loto.py","file_name":"loto.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"47949813","text":"import subprocess\n\n#ptpcam --show-property=0x5001\n\n# Get Battery\nproc = subprocess.Popen([\"ptpcam\", \"--show-property=0x5001\"], stdout=subprocess.PIPE)\n(out, err) = proc.communicate()\n\n# Parse result\nbattery = int(out[50:len(out) - 1])\nprint('Battery Level: ' + str(battery))\n\n\n# Check video mode\nproc = subprocess.Popen([\"ptpcam\", \"--show-property=0x5013\"], stdout=subprocess.PIPE)\n(out, err) = proc.communicate()\n\nmode = int(out[57:len(out) - 10])\n\nisVideoMode = mode==8002\n\n\nprint(isVideoMode)\n","sub_path":"rpiScripts/ricoh/ricohTest.py","file_name":"ricohTest.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"59725500","text":"import json\n\nVIDEO_URL = 'https://cdnapisec.kaltura.com/html5/html5lib/v2.36/mwEmbedFrame.php/p/1829221/uiconf_id/28733761/entry_id/%s?wid=_1829221&iframeembed=true&playerId=kaltura_player&entry_id=%s'\n\nwith open(\"primo.json\", encoding='utf-8') as f:\n d = json.load(f)\n\ndocs = d['SEGMENTS']['JAGROOT']['RESULT']['DOCSET']['DOC']\n\nrecords = []\nfor i, d in enumerate(docs):\n records.append(d['PrimoNMBib']['record']['display'])\n\nclean_records = []\n\n\ndef parse_record(record):\n d = {'system_id': record['lds08']}\n d['year'] = ''.join(\n [_ for _ in record.get('creationdate', record.get('lds04', '')) if\n ord(_) >= 48 and ord(_) <= 57])\n d['series'] = record.get('ispartof', record.get('lds09', ''))\n d['episode'] = record.get('lds31', '')[\n record.get('lds31', '').find(\n \"פרק\") + 4:record.get('lds31', '').find(\n \":\")].strip()\n d['title'] = record.get('lds31', '')[\n record.get('lds31', '').find(\":\") + 1:].strip()\n d['full_name'] = record.get('title')\n if record.get('language') == 'heb':\n d['language'] = 'עברית'\n elif record.get('language') == 'ara':\n d['language'] = 'ערבית'\n else:\n d['language'] = record.get('language')\n d['synopsys'] = record.get('description',\n record.get('lds03', ''))\n d['audience'] = record.get('lds23')\n d['genres'] = [_.strip() for _ in\n record.get('subject', '').split(sep=';')]\n d['primo_url'] = record.get('lds21')\n entry_id = record['lds41'][record['lds41'].find('entry_id/') + 9:]\n entry_id = entry_id[:entry_id.find('/')].strip()\n d['entry_id'] = entry_id\n return d\n\n\nfor i, record in enumerate(records):\n if \"videoid=hadar\" in record['lds42']:\n continue\n clean_records.append(parse_record(record))\n\nwith open(\"assets_file.json\", \"w\", encoding='utf-8') as f:\n d = json.dump(clean_records, f, indent=2)\n\nprint(\"OK\", 
len(clean_records))\n","sub_path":"backend/parse_assets.py","file_name":"parse_assets.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"636364316","text":"import numpy as np\nimport scipy.stats as stats\nimport subprocess\nimport sys\nimport os\n\nsys.path.insert(1, os.path.dirname(__file__))\nfrom core import SimpleLogger\n\n\nhas_rpy2 = False\ne = subprocess.call('which R', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\ntry:\n import rpy2\n import rfunc\n if e==0:\n has_rpy2 = True\nexcept:\n pass\nif not has_rpy2:\n print(\"Warning: 'rfunc' cannot be imported. R and the 'rpy2' Python package are needed.\")\n\n\ndef calculate_qvalues(res_df, fdr=0.05, qvalue_lambda=None, logger=None):\n \"\"\"Annotate permutation results with q-values, p-value threshold\"\"\"\n if logger is None:\n logger = SimpleLogger()\n\n logger.write('Computing q-values')\n logger.write(' * Number of phenotypes tested: {}'.format(res_df.shape[0]))\n logger.write(' * Correlation between Beta-approximated and empirical p-values: : {:.4f}'.format(\n stats.pearsonr(res_df['pval_perm'], res_df['pval_beta'])[0]))\n\n # calculate q-values\n if qvalue_lambda is None:\n qval, pi0 = rfunc.qvalue(res_df['pval_beta'])\n else:\n logger.write(' * Calculating q-values with lambda = {:.3f}'.format(qvalue_lambda))\n qval, pi0 = rfunc.qvalue(res_df['pval_beta'], qvalue_lambda)\n res_df['qval'] = qval\n logger.write(' * Proportion of significant phenotypes (1-pi0): {:.2f}'.format(1 - pi0))\n logger.write(' * QTL phenotypes @ FDR {:.2f}: {}'.format(fdr, np.sum(res_df['qval']<=fdr)))\n\n # determine global min(p) significance threshold and calculate nominal p-value threshold for each gene\n ub = res_df.loc[res_df['qval']>fdr, 'pval_beta'].sort_values()[0]\n lb = res_df.loc[res_df['qval']<=fdr, 'pval_beta'].sort_values()[-1]\n pthreshold = (lb+ub)/2\n logger.write(' * min p-value threshold @ FDR {}: {:.6g}'.format(fdr, pthreshold))\n res_df['pval_nominal_threshold'] = stats.beta.ppf(pthreshold, res_df['beta_shape1'], res_df['beta_shape2'])\n\n\ndef annotate_genes(gene_df, annotation_gtf, lookup_df=None):\n \"\"\"\n Add gene and variant annotations (e.g., gene_name, rs_id, etc.) 
to gene-level output\n\n gene_df: output from map_cis()\n annotation_gtf: gene annotation in GTF format\n lookup_df: DataFrame with variant annotations, indexed by 'variant_id'\n \"\"\"\n gene_dict = {}\n print('['+datetime.now().strftime(\"%b %d %H:%M:%S\")+'] Adding gene and variant annotations', flush=True)\n print(' * parsing GTF', flush=True)\n with open(annotation_gtf) as gtf:\n for row in gtf:\n row = row.strip().split('\\t')\n if row[0][0]=='#' or row[2]!='gene': continue\n # get gene_id and gene_name from attributes\n attr = dict([i.split() for i in row[8].replace('\"','').split(';') if i!=''])\n # gene_name, gene_chr, gene_start, gene_end, strand\n gene_dict[attr['gene_id']] = [attr['gene_name'], row[0], row[3], row[4], row[6]]\n\n print(' * annotating genes', flush=True)\n if 'group_id' in gene_df:\n gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df['group_id']],\n columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],\n index=gene_df.index)\n else:\n gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df.index],\n columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],\n index=gene_df.index)\n gene_df = pd.concat([gene_info, gene_df], axis=1)\n assert np.all(gene_df.index==gene_info.index)\n\n col_order = ['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand',\n 'num_var', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df', 'variant_id', 'tss_distance']\n if lookup_df is not None:\n print(' * adding variant annotations from lookup table', flush=True)\n gene_df = gene_df.join(lookup_df, on='variant_id') # add variant information\n col_order += list(lookup_df.columns)\n col_order += ['ma_samples', 'ma_count', 'maf', 'ref_factor',\n 'pval_nominal', 'slope', 'slope_se', 'pval_perm', 'pval_beta']\n if 'group_id' in gene_df:\n col_order += ['group_id', 'group_size']\n col_order += ['qval', 'pval_nominal_threshold']\n gene_df = gene_df[col_order]\n print('done.', flush=True)\n return gene_df\n\n\ndef get_significant_pairs(res_df, nominal_prefix, fdr=0.05):\n \"\"\"Significant variant-phenotype pairs based on nominal p-value threshold for each phenotype\"\"\"\n print('['+datetime.now().strftime(\"%b %d %H:%M:%S\")+'] tensorQTL: filtering significant variant-phenotype pairs', flush=True)\n assert 'qval' in res_df\n\n # significant phenotypes (apply FDR threshold)\n df = res_df.loc[res_df['qval']<=fdr, ['pval_nominal_threshold', 'pval_nominal', 'pval_beta']].copy()\n df.rename(columns={'pval_nominal': 'min_pval_nominal'}, inplace=True)\n signif_phenotype_ids = set(df.index)\n threshold_dict = df['pval_nominal_threshold'].to_dict()\n\n nominal_files = {os.path.basename(i).split('.')[-2]:i for i in glob.glob(nominal_prefix+'*.parquet')}\n chroms = sorted(nominal_files.keys(), key=lambda x: int(x.replace('chr','').replace('X','100')))\n signif_df = []\n for k,c in enumerate(chroms, 1):\n print(' * parsing significant variant-phenotype pairs for chr. 
{}/{}'.format(k, len(chroms)), end='\\r', flush=True)\n nominal_df = pd.read_parquet(nominal_files[c])\n nominal_df = nominal_df[nominal_df['phenotype_id'].isin(signif_phenotype_ids)]\n\n m = nominal_df['pval_nominal']balance:\n\t\t\treturn 'insufficient funds'\n\t\telse:\n\t\t\tbalance-=amount\n\t\treturn balance\n\treturn withdraw\n\nwithdraw=make_withdraw(100)\nwithdraw(50)\nwithdraw(25)\nwithdraw(60)","sub_path":"exercise/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"138250709","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimport time\r\nimport serial\r\nimport high_level_three as cntrl\r\n\r\n#arduino = serial.Serial('/dev/serial0',9600) # Establish the connection on a specific port\r\narduino = serial.Serial('/dev/ttyACM0',9600)\r\ntime.sleep(2) #wait for connection to be established\r\n\r\n# DAMN_ITS_OFFSET\r\n# calibration parameters from checkboard\r\n\r\ncamera_Matrix = np.array([[800.7466328584435, 0.0, 335.3685359582425],\r\n [0.0, 804.1665602634919, 213.2964152084579],\r\n [0.0, 0.0, 1.0]])\r\n\r\ndist_Coff = np.array([0.5446652394728176, -5.5233881377989045,\r\n -0.014441057933817907, 0.008332787614176447,\r\n 15.6171842384956675])\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 640) # set frame width\r\ncap.set(4, 480) # set frame height\r\ncap.set(cv2.CAP_PROP_AUTOFOCUS, 0) # disabling autofocus\r\n\r\n# dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_1000)\r\n\r\ndictionary = aruco.getPredefinedDictionary(aruco.DICT_6X6_250)\r\n\r\n# dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL)\r\n\r\nboard = aruco.GridBoard_create(5, 7, 0.033, 0.006, dictionary)\r\n\r\narucoParams = aruco.DetectorParameters_create() # detector with default parameter created\r\narucoParams.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX\r\nprint(arucoParams.cornerRefinementMethod)\r\nmarkerLength = 0.05 # side length of the printed tag is 0.05m.\r\nids = np.zeros((1,35),'uint8')\r\nwhile True:\r\n\r\n # Capture frame-by-frame\r\n\r\n (ret, frame) = cap.read()\r\n\r\n # frame = frame[300:640,100:560]\r\n\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # cv2.imshow('original', frame)\r\n\r\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\r\n #cv2.imshow('grayed', gray)\r\n\r\n # ret3,gray = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n # cv2.imshow(\"thresholded\", gray)\r\n\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=arucoParams) # Detect aruco\r\n aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedImgPoints)\r\n if ids is not None: # if the aruco marker detected\r\n\r\n # rvec, tvec, objpoints = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_Matrix, dist_Coff) # For a single marker\r\n # board = aruco.Board_create(objpoints,dictionary,ids)\r\n imgWithAruco = aruco.drawDetectedMarkers(frame, corners, ids,(0, 255, 0))\r\n retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, camera_Matrix, dist_Coff)\r\n # imgWithAruco = aruco.drawAxis(frame, camera_Matrix, dist_Coff, rvec, tvec, 0.1)\r\n # imgWithArucwo = aruco.drawAxis(imgWithAruco, camera_Matrix, dist_Coff, rvec, tvec, 10) #balash weghet nazar abo balash katar meno # axis length 100 can be changed according to your requirement\r\n\r\n x_dist = round(tvec[0][0] * 100, 1)\r\n y_dist = 
round(tvec[1][0] * 100, 1)\r\n z_dist = round(tvec[2][0] * 100, 1)\r\n x_rot = round(rvec[0][0], 2)\r\n y_rot = round(rvec[1][0], 2)\r\n z_rot = round(rvec[2][0], 2)\r\n pose = np.array([x_dist,z_dist,y_rot])\r\n speeds=cntrl.high_level_control('product',pose,0,offset=0,kp = 0.35,ka=0.65,kb=-0.06) #0.65 ka hanzwdddd kb=0.037 ka=0.3\r\n print(speeds)\r\n wheels_s=cntrl.inverse(speeds,14.8,5)\r\n #print(wheels_s)\r\n wheels_spds_rounded = np.rint(wheels_s)\r\n wheels_spds_rint = wheels_spds_rounded.astype(int)\r\n arduino.write(bytes(chr(wheels_spds_rint[0]),\"utf-8\"))\r\n arduino.write(bytes(chr(wheels_spds_rint[1]),\"utf-8\"))\r\n else:\r\n arduino.write(bytes(chr(0),\"utf-8\"))\r\n arduino.write(bytes(chr(0),\"utf-8\"))\r\n\r\n\r\n cv2.imshow('withtag', frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"aruco_Board_weza_1.py","file_name":"aruco_Board_weza_1.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"244543483","text":"import sys\nfrom wallpaper_switch import Ui_Wallpaper # Timer2为ui对于py文件的名字\nfrom PyQt5 import QtCore, QtWidgets\n\n\n\n\n# pyinstaller -F -w \"App.py\"\n\nclass MyPyQT_Form(QtWidgets.QWidget,Ui_Wallpaper):\n # 下面这个方法自动执行,相当于初始化,但是可以自定义一个初始化函数 initUI()\n def __init__(self):\n super(MyPyQT_Form,self).__init__()\n self.setupUi(self)\n self.initUI() ## 此处给出了调用一般函数的方法\n\n def initUI(self): # 定义初始化界面的方法\n # ----------信号连接自定义的槽---------\n self.pushButton_start.clicked.connect(self.btn_start_click)\n\n def btn_start_click(self):\n # 粘贴程序的地方\n import random\n import urllib.request\n import requests\n import os.path\n import ctypes\n import time\n from bs4 import BeautifulSoup\n\n def get_img_url():\n\n try:\n print(\"---------- 正在获取下载链接 ----------\")\n num = random.randint(1, 26000)\n url = \"http://www.netbian.com/desk/{}.htm\".format(num)\n r = requests.get(url)\n demo = r.text\n soup = BeautifulSoup(demo, \"html.parser\")\n piclist = []\n for link in soup.find_all('img'):\n link_list = link.get('src')\n if link_list != None:\n piclist.append(link_list)\n img_url = piclist[2]\n print('img_url:', img_url)\n except:\n img_url = \"http://pic.netbian.com/uploads/allimg/190824/212516-15666531161ade.jpg\"\n return img_url\n\n def save_img(img_url, dirname):\n try:\n if not os.path.exists(dirname):\n print('文件夹', dirname, '不存在,重新建立')\n # os.mkdir(dirname)\n os.makedirs(dirname)\n # 获得图片文件名,包括后缀\n tt = time.strftime(\"%Y%m%d-%H%M\", time.localtime())\n basename = tt + \".jpg\"\n # 拼接目录与文件名,得到图片路径\n filepath = os.path.join(dirname, basename)\n # 下载图片,并保存到文件夹中\n print(\"---------- downloading ----------\")\n urllib.request.urlretrieve(img_url, filepath)\n except:\n pass\n\n print(\"Save\", filepath, \"successfully!\")\n\n return filepath\n\n def set_img_as_wallpaper(filepath):\n print(\"---------- 正在设置壁纸中 ---------\")\n ctypes.windll.user32.SystemParametersInfoW(20, 0, filepath, 3)\n\n def main():\n dirname = \"C:/wallpaper_switch/\" # 图片要被保存在的位置\n img_url = get_img_url()\n filepath = save_img(img_url, dirname) # 图片文件的的路径\n print(filepath)\n set_img_as_wallpaper(filepath)\n\n main()\n\n\nif __name__ == '__main__': # 四句话:继承-实例化-显示-退出\n\n app = QtWidgets.QApplication(sys.argv)\n main_form = MyPyQT_Form() # 实例化,类的名字,可更改等号前面名字 MyPyQT_Form()继承自Ui_Form\n main_form.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) # 窗口置顶\n main_form.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint) # 
禁止窗口最大化\n main_form.setFixedSize(main_form.width(), main_form.height()) # 禁止拉伸窗口\n main_form.show()\n sys.exit(app.exec_())","sub_path":"RunAsApp/Wallpaper_switch_App/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"487808909","text":"from snap import TNEANet\nimport re\n\n'''\nScript for parsing .gml file to SNAP TNEANet network.\n'''\n\ndef read_gml(path):\n with open(path, 'r') as f:\n line = f.readline()\n while line:\n line = line.strip()\n if line.startswith('graph'):\n graph = _read_gml_graph(f)\n return graph\n else:\n line = f.readline()\n raise Exception(\"Missing top level graph element\")\n\n\ndef _read_gml_graph(file):\n directed = False\n comment = ''\n id = 0\n graph = TNEANet().New()\n line = file.readline()\n while line:\n line = line.strip()\n if line.startswith(\"comment\"):\n comment = _read_gml_comment(line)\n elif line.startswith(\"id\"):\n id = _read_gml_int_id(line)\n elif line.startswith(\"label\"):\n label = _read_gml_label(line)\n elif line.startswith(\"directed\"):\n directed = _read_gml_directed(line)\n elif line.startswith(\"node\"):\n graph = _read_gml_node(file, graph)\n elif line.startswith(\"edge\"):\n graph = _read_gml_edge(file, graph)\n elif line.startswith(\"]\"):\n return graph\n else:\n pass\n line = file.readline()\n raise Exception(\"Unexpected end of file.\")\n\ndef _read_gml_directed(line):\n return line.strip().split(' ')[-1] == '1'\n\ndef _read_gml_comment(line):\n return line.strip().split('\"')[-2]\n\ndef _read_gml_int_id(line):\n return int(line.strip().split(' ')[-1])\n\ndef _read_gml_label(line):\n return line.strip().split('\"')[-2]\n\ndef _read_gml_attribute(line):\n attr, val = line.strip().split(' ')\n if val[0] == '\"' and val[-1] == '\"':\n return attr,val.strip('\"'),\"str\"\n else:\n if re.match(\"^-?\\d+?\\.\\d+?$\", val):\n return attr,float(val),\"float\"\n elif re.match(\"^-?\\d+?$\", val):\n return attr,int(val),\"int\"\n else:\n raise Exception(\"Invalid attribute type. 
Viable types: string, int, float\")\n\n\ndef _read_gml_node(file, graph):\n line = file.readline()\n while line:\n line = line.strip()\n if line.startswith('id'):\n id = _read_gml_int_id(line)\n graph.AddNode(id)\n nodei = graph.GetNI(id)\n elif line.startswith('label'):\n label = _read_gml_label(line)\n graph.AddStrAttrDatN(nodei, label, \"label\")\n elif line.startswith(\"]\"):\n return graph\n elif line == \"[\":\n # Start of node attribute is in separate line.\n pass\n else:\n attr, val, type = _read_gml_attribute(line)\n if type == \"str\":\n graph.AddStrAttrDatN(nodei, val, attr)\n elif type == \"int\":\n graph.AddIntAttrDatN(nodei, val, attr)\n elif type == \"float\":\n graph.AddIntAttrDatN(nodei, val, attr)\n else:\n raise Exception(\"Inconsistency between _read_gml_node and _read_gml_attribute.\")\n line = file.readline()\n raise Exception(\"Unexpected end of file.\")\n\ndef _read_gml_edge(file, graph):\n line = file.readline()\n edgeAttrs = []\n sourceId = None\n targetId = None\n while line:\n line = line.strip()\n if line.startswith(\"source\"):\n sourceId = _read_gml_int_id(line)\n elif line.startswith(\"target\"):\n targetId = _read_gml_int_id(line)\n elif line == \"[\":\n # Start of edge in separate line.\n pass\n elif line.startswith(\"]\"):\n if sourceId is None or targetId is None:\n raise Exception(\"Missing sourceId or targetId for an edge.\")\n edgeid = graph.AddEdge(sourceId, targetId)\n for attr, val, type in edgeAttrs:\n if type == \"str\":\n graph.AddStrAttrDatE(edgeid, val, attr)\n elif type == \"int\":\n graph.AddIntAttrDatE(edgeid, val, attr)\n elif type == \"float\":\n graph.AddIntAttrDatE(edgeid, val, attr)\n else:\n raise Exception(\"Inconsistency between _read_gml_edge and _read_gml_attribute.\")\n return graph\n else:\n attr, val, type = _read_gml_attribute(line)\n edgeAttrs.append((attr, val, type))\n line = file.readline()\n","sub_path":"load_gml.py","file_name":"load_gml.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"575314040","text":"#this is a new python file\n#it's purpose is to test for primes\n\ndef isPrime(a):\n for x in range(2, sqrt(a)):\n if a%x == 0:\n return False\n\n return True\n \n","sub_path":"isprime.py","file_name":"isprime.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"523569045","text":"#!/usr/bin/env python\n# poker_calc.py - Poker odds calculator by Sergey 2013\n\n\"\"\"\nPoker odds calculator\n\nThe problem uses No-limit Holdem Poker game rules. It takes players hands as an\ninput and returns odds for each player winning (or tie).\n\nEach hand consists of two characters one for rank: 2..9,T,J,Q,K,A,* and suits:\ns,h,d,c,*. 
Where * represents any other type of card from available cards\n\nSample test(s)\ninput_str\n\nAsAh ****\n\nOutput\n\n82%\n\n\"\"\"\n\n# Standard modules\nimport unittest\nimport sys\nimport os\nimport argparse\nimport re\n\n# Additional modules\n\n\n###############################################################################\n# Card Class\n###############################################################################\n\n\nclass Card:\n \"\"\" Playing Card representation \"\"\"\n\n # Rank\n T = 10\n J = 11\n Q = 12\n K = 13\n A = 14\n ANY = 1000\n\n cardranks = {\"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8,\n \"9\": 9, \"T\": T, \"J\": J, \"Q\": Q, \"K\": K, \"A\": A, \"X\": ANY}\n\n # Suits\n SPADES = 0\n HEARTS = 1\n DIAMONDS = 2\n CLUBS = 3\n cardsuits = {\"s\": SPADES, \"h\": HEARTS, \"d\": DIAMONDS, \"c\": CLUBS, \"x\": ANY}\n\n def __init__(self, rank, suit):\n \"\"\" Default constructor \"\"\"\n self.rank = rank\n self.suit = suit\n\n def __eq__(self, other):\n return self.rank == other.rank and self.suit == other.suit\n\n def __str__(self):\n return Card.rank_to_str(self.rank) + Card.suit_to_str(self.suit)\n\n @staticmethod\n def from_str(s):\n \"\"\" Decoding an input string \"\"\"\n card_match = re.match(\"(\\S)(\\S)\", s)\n if card_match:\n (rank_str, suit_str) = card_match.group(1, 2)\n else:\n assert False, \"Wrong card format %s\" % s\n\n rank = Card.str_to_rank(rank_str)\n suit = Card.str_to_suit(suit_str)\n\n return Card(rank, suit)\n\n @staticmethod\n def str_to_rank(s):\n assert s in Card.cardranks, \"Wrong card rank %s\" % s\n return Card.cardranks[s]\n\n @staticmethod\n def rank_to_str(d):\n for s, rank in Card.cardranks.items():\n if (rank == d):\n return s\n assert False, \"Wrong card rank %d\" % d\n\n @staticmethod\n def str_to_suit(s):\n assert s in Card.cardsuits, \"Wrong card suit %s\" % s\n return Card.cardsuits[s]\n\n @staticmethod\n def suit_to_str(d):\n for s, suit in Card.cardsuits.items():\n if (suit == d):\n return s\n assert False, \"Wrong card rank %d\" % d\n\n def is_match(self, other_card):\n return (self.rank == other_card.rank and self.suit == other_card.suit)\n\n###############################################################################\n# Holdem Hand Class\n###############################################################################\n\n\nclass HoldemHand:\n \"\"\" Contains a Texas Holdem Hand representation \"\"\"\n\n def __init__(self, card1, card2):\n \"\"\" Default constructor \"\"\"\n self.cards = [card1, card2]\n\n def __eq__(self, other):\n return ((self.cards[0] == other.cards[0] and\n self.cards[1] == other.cards[1]) or\n (self.cards[0] == other.cards[1] and\n self.cards[1] == other.cards[0]))\n\n def __str__(self):\n return str(self.cards[0]) + str(self.cards[1])\n\n @staticmethod\n def from_str(s):\n \"\"\" Decoding an input string \"\"\"\n hand_match = re.match(\"(\\S\\S)(\\S\\S)\", s)\n if hand_match:\n (card1_str, card2_str) = hand_match.group(1, 2)\n else:\n assert False, \"Wrong hand format %s\" % s\n return HoldemHand(Card.from_str(card1_str), Card.from_str(card2_str))\n\n\n###############################################################################\n# Holdem Board Class\n###############################################################################\n\n\nclass HoldemBoard:\n \"\"\" Contains a Texas Holdem Board representation \"\"\"\n\n def __init__(self, card1, card2):\n \"\"\" Default constructor \"\"\"\n self.cards = [card1, card2]\n\n def __str__(self):\n return self.cards[0] + 
self.cards[1]\n\n @staticmethod\n def from_str(s):\n \"\"\" Decoding an input string \"\"\"\n\n\n###############################################################################\n# Executable code\n###############################################################################\n\ndef main():\n\n # Parsing arguments\n parser = argparse.ArgumentParser(description=\"\")\n args = parser.parse_args()\n\n # Sandbox\n\n\n###############################################################################\n# Unit Tests\n###############################################################################\n\nclass unitTests(unittest.TestCase):\n\n def test_Card__basic_tests(self):\n \"\"\" Testing Card class \"\"\"\n twosp = Card.from_str(\"2s\")\n self.assertEqual(twosp.rank, 2)\n self.assertEqual(twosp.suit, Card.SPADES)\n self.assertEqual(str(twosp), \"2s\")\n\n aceh = Card.from_str(\"Ah\")\n self.assertEqual(aceh.rank, Card.A)\n self.assertEqual(aceh.suit, Card.HEARTS)\n\n anyc = Card.from_str(\"Xx\")\n self.assertEqual(anyc.suit, Card.ANY)\n self.assertEqual(str(anyc), \"Xx\")\n\n def test_HoldemHand__basic_tests(self):\n \"\"\" Testing Hand class \"\"\"\n aces = HoldemHand.from_str(\"AsAc\")\n self.assertEqual(str(aces), \"AsAc\")\n\n def test_HoldemBoard__basic_tests(self):\n \"\"\" Testing Board class \"\"\"\n aces = HoldemBoard.from_str(\"AsAc\")\n\n\nif __name__ == '__main__':\n if sys.argv[-1] == \"-ut\":\n unittest.main(argv=[\" \"])\n main()\n","sub_path":"wip/poker_calc.py","file_name":"poker_calc.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"403039370","text":"import itertools\nimport pathlib\nfrom dataclasses import dataclass, asdict\nfrom pathlib import Path\nfrom typing import Union, Dict, List\n\nimport yaml\n\nfrom pybm.exceptions import PybmError\nfrom pybm.mixins import StateMixin\nfrom pybm.specs import CoreGroup, BuilderGroup, RunnerGroup, GitGroup, \\\n ReporterGroup\nfrom pybm.util.imports import import_from_module\n\n__all__ = [\"PybmConfig\",\n \"get_builder_class\",\n \"get_runner_class\",\n \"get_runner_requirements\",\n \"get_reporter_class\",\n \"get_all_names\",\n \"get_all_keys\"]\n\nDescriptions = Dict[str, str]\n\n\n@dataclass\nclass PybmConfig(StateMixin):\n core: CoreGroup = CoreGroup()\n git: GitGroup = GitGroup()\n builder: BuilderGroup = BuilderGroup()\n runner: RunnerGroup = RunnerGroup()\n reporter: ReporterGroup = ReporterGroup()\n\n @classmethod\n def load(cls, path: Union[str, pathlib.Path]):\n if isinstance(path, str):\n path = Path(path)\n if not path.exists() or not path.is_file():\n raise PybmError(f\"Configuration file {path} does not exist. 
\"\n f\"Make sure to run `pybm init` before using pybm \"\n f\"to set up environments or run benchmarks.\")\n with open(path, \"r\") as config_file:\n spec = yaml.load(config_file, Loader=yaml.FullLoader)\n\n return PybmConfig(\n core=CoreGroup(**spec[\"core\"]),\n git=GitGroup(**spec[\"git\"]),\n runner=RunnerGroup(**spec[\"runner\"]),\n builder=BuilderGroup(**spec[\"builder\"]),\n reporter=ReporterGroup(**spec[\"reporter\"]))\n\n def to_dict(self):\n return {\"core\": asdict(self.core),\n \"git\": asdict(self.git),\n \"runner\": asdict(self.runner),\n \"builder\": asdict(self.builder),\n \"reporter\": asdict(self.reporter)}\n\n def save(self, path: Union[str, pathlib.Path]):\n with open(path, \"w\") as config_file:\n yaml.dump(self.to_dict(), config_file)\n\n def describe(self, attr):\n current = self.get_value(attr)\n group, name = attr.split(\".\")\n annotations: Dict[str, type] = self.get_value(group +\n \".__annotations__\")\n value_type = annotations[name].__name__\n print(f\"Describing configuration option {attr!r}.\")\n print(f\"Value type: {value_type}\")\n print(f\"Current value: {current!r}\")\n print(description_db[group][name] or\n f\"No description available for {group} attribute {name}.\")\n\n\ndef get_builder_class(config: PybmConfig):\n class_name = import_from_module(config.get_value(\"builder.className\"))\n return class_name(config)\n\n\ndef get_runner_class(config: PybmConfig):\n class_name = import_from_module(config.get_value(\"runner.className\"))\n return class_name(config)\n\n\ndef get_reporter_class(config: PybmConfig):\n class_name = import_from_module(config.get_value(\"reporter.className\"))\n return class_name(config)\n\n\ndef get_runner_requirements(config: PybmConfig):\n return get_runner_class(config).required_packages\n\n\ndef get_all_names(cls) -> List[str]:\n return [k for k in vars(cls).keys() if not k.startswith(\"_\")]\n\n\ndef get_all_keys(config: PybmConfig) -> List[str]:\n groups = get_all_names(config)\n names = [get_all_names(group) for group in groups]\n return list(itertools.chain.from_iterable(names))\n\n\ndescription_db: Dict[str, Descriptions] = {\n \"core\": {\n \"datetimeFormatter\": \"Datetime format string used to format \"\n \"timestamps for environment creation and \"\n \"modification. For a comprehensive list of \"\n \"identifiers and options, check the Python \"\n \"standard library documentation on \"\n \"datetime.strftime: \"\n \"https://docs.python.org/3/library/\"\n \"datetime.html#strftime-strptime-behavior.\",\n \"defaultLevel\": \"Default level to be used in pybm logging.\",\n \"logFile\": \"Name of the log file to write debug logs to, like `pip \"\n \"install` or `git worktree` command outputs.\",\n \"loggingFormatter\": \"Formatter string used to format logs in pybm. \"\n \"For a comprehensive list of identifiers and \"\n \"options, check the Python standard library \"\n \"documentation on logging formatters: \"\n \"https://docs.python.org/3/library/\"\n \"logging.html#formatter-objects.\",\n },\n \"git\": {\n \"createWorktreeInParentDirectory\": \"Whether to create worktrees \"\n \"in the parent directory of \"\n \"your git repository by default. 
\"\n \"Some IDEs may get confused \"\n \"when you initialize another \"\n \"git worktree inside your main \"\n \"repository, so this option \"\n \"provides a way to keep your main \"\n \"repo folder clean without having \"\n \"to explicitly type \\\"../my-dir\\\" \"\n \"every time you create a git \"\n \"worktree.\",\n },\n \"builder\": {\n \"className\": \"Name of the builder class used in pybm to build \"\n \"virtual Python environments. If you want to supply \"\n \"your own custom builder class, set this value to \"\n \"point to your custom subclass of \"\n \"pybm.builders.PythonEnvBuilder.\",\n \"homeDirectory\": \"Optional home directory containing pre-built \"\n \"virtual environments. The default for pybm is to \"\n \"create the virtual environment directly into \"\n \"the new git worktree, but you can also choose \"\n \"to link existing environments as subdirectories \"\n \"of this location.\",\n \"localWheelCaches\": \"A string of local directories separated by \"\n \"colons (\\\":\\\"), like a Unix PATH variable,\"\n \"containing prebuilt wheels for Python packages. \"\n \"Set this if you request a package that has no \"\n \"wheels for your Python version or architecture \"\n \"available, and have to build target-specific \"\n \"wheels yourself.\",\n \"persistentPipInstallOptions\": \"Comma-separated list of options \"\n \"passed to `pip install` in a \"\n \"pip-based builder. Set this if you \"\n \"use a number of `pip install` \"\n \"options consistently, and do not want \"\n \"to type them out in every call to \"\n \"`pybm env install`.\",\n \"persistentPipUninstallOptions\": \"Comma-separated list of options \"\n \"passed to `pip uninstall` in a \"\n \"pip-based builder. Set this if you \"\n \"use a number of `pip uninstall` \"\n \"options consistently, and do not \"\n \"want to type them out in every \"\n \"call to `pybm env uninstall`.\",\n \"persistentVenvOptions\": \"Comma-separated list of options \"\n \"for virtual environment creation in a \"\n \"builder using venv. Set this if you \"\n \"use a number of `python -m venv` \"\n \"options consistently, and do not want \"\n \"to type them out in every call to \"\n \"`pybm env create`.\",\n },\n \"runner\": {\n \"className\": \"Name of the runner class used in pybm to run \"\n \"benchmarks inside Python virtual environments. If you \"\n \"want to supply your own custom runner class, set this \"\n \"value to point to your custom subclass of \"\n \"pybm.runners.BenchmarkRunner.\",\n \"failFast\": \"\",\n \"numRepetitions\": \"\",\n \"contextProviders\": \"\",\n \"GoogleBenchmarkWithRandomInterleaving\": \"\",\n \"GoogleBenchmarkSaveAggregatesOnly\": \"\",\n },\n \"reporter\": {\n \"className\": \"\",\n \"resultDirectory\": \"\",\n \"targetTimeUnit\": \"\",\n \"significantDigits\": \"\",\n }\n}\n","sub_path":"pybm/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577427748","text":"import requests\nimport json\n\nclass btcCalc (object):\n\n def getBitcoinPrice(self):\n url = 'https://www.bitstamp.net/api/ticker/'\n try:\n r = requests.get(url)\n self.priceFloat = json.loads(r.text)['last']\n return self.priceFloat\n except requests.ConnectionError:\n print (\"Error querying Bitstamp API\")\n\n def bitcoin_to_cad(self):\n\n bitcoinPrice = self.getBitcoinPrice()\n\n print(\"How much BTC would you like to calculate?\")\n\n btc = input()\n amount = float(btc) * float(self.priceFloat)\n\n print(\"... 
is equivalent to \" + \"$\" + str(amount))\n\n def cad_to_bitcoin(self):\n\n bitcoinPrice = self.getBitcoinPrice()\n\n print(\"How much money would you like to convert?\")\n\n cad = input()\n amount = float(cad) / float(self.priceFloat)\n\n print(\"... is equivalent to \" + str(amount) + \"btc.\")\n\n def main(self):\n\n self.getBitcoinPrice()\n\n while True:\n print(\"Enter '1' to convert BTC to CAD.\")\n print(\"Enter '2' to convert CAD to BTC.\")\n\n x = int(input())\n\n if x is 1:\n self.bitcoin_to_cad()\n print(\"Would you like to convert again?\")\n print(\"Enter '1' for another conversion.\")\n print(\"Enter '2' to exit the program\")\n\n y = int(input())\n\n if y is 1:\n self.main()\n elif y is 2:\n break\n return\n\n elif x is 2:\n self.cad_to_bitcoin()\n\n print(\"Would you like to convert again?\")\n print(\"Enter '1' for another conversion.\")\n print(\"Enter '2' to exit the program\")\n\n y = int(input())\n\n if y is 1:\n self.main()\n elif y is 2:\n break\n return\n\n else:\n print(\"I'm sorry that's an invalid entry.\")\n\n\nbtcCalc().main()","sub_path":"Python/btcToCAD.py","file_name":"btcToCAD.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"208168101","text":"import random\nimport sys\nimport subprocess\nimport mimetypes\nimport array as arr\nimport os\nimport numpy as np\nimport time\nimport datetime\nimport helper\n\nfrom os import listdir\nfrom os.path import join, isdir, realpath\nfrom optparse import OptionParser\n\n\ndef main(inputdir, outputdir):\n\n print(\"optimizing...\")\n (current_best, obj1, obj2) = optimize()\n print(\"Fitness ({},{}). {} files were selected.\".format(obj1, obj2, current_best.count(1)))\n print(\"saving files...\")\n num = 0\n for val in current_best:\n if val == 1:\n filename = join(inputdir, helper.dict_id_filename[num])\n filename = filename[0:filename.find(\".cov\")]\n if (subprocess.call([\"cp\", filename, join(helper.this_dir_path, outputdir)])==1):\n raise Exception(\"fatal error!\")\n num += 1\n\n \n \n## \n# MOSA (Multi-Objective Simulated Annealing) optimization\n##\n\ndef optimize():\n random.seed(helper.seed)\n ## initial individual\n ind = [1] * helper.num_files\n a_orig, b_orig = eval(selection=ind, file_index=-1) # evaluate original fitness\n ## one round of optimization\n search_order = list(range(helper.num_files)) ## list 0..numFiles-1\n random.shuffle(search_order) ## shuffle elements in this list\n ## create best individual\n current_best = ind.copy()\n num = 0.0\n for x in search_order:\n print(\" {}% completed\\r\".format(round(100*num/helper.num_files, 2)), end='')\n num += 1\n current_best[x] = 0 # set to 0\n a_mod, b_mod = eval(selection=current_best, file_index=x)\n # print(\"({},{})\".format(a_mod, b_mod))\n if (a_orig == a_mod and b_mod < b_orig) :\n # local optimum\n a_orig = a_mod\n b_orig = b_mod\n else: # undo\n current_best[x] = 1\n increment_counters(x) \n return (current_best, a_orig, b_orig)\n \n######################## \n# evaluate fitness\n########################\n\n## caching results for efficiency!\nbranch_counters = None\n\ndef increment_counters(file_index):\n for b in helper.dict_coverage[file_index]:\n branch_counters[b] = branch_counters[b] + 1\n\ndef decrement_counters(file_index):\n for b in helper.dict_coverage[file_index]:\n branch_counters[b] = branch_counters[b] - 1\n\ndef eval(selection, file_index):\n global branch_counters\n if (branch_counters == None):\n branch_counters = [0] * 
helper.num_branches\n ## initialize counters\n for index in range(len(selection)):\n increment_counters(index)\n else:\n decrement_counters(file_index)\n\n # number of coverage branches is the same as number of non-zero counters \n num_covered_branches = len(branch_counters) - branch_counters.count(0)\n # number of covered branches\n objective1 = 1 - num_covered_branches / helper.num_branches\n # number of tests selected\n objective2 = selection.count(1) / helper.num_files\n\n return (objective1, objective2)\n","sub_path":"__promising_seeds__/mosa.py","file_name":"mosa.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293546311","text":"# importing all required package library\nfrom keras.preprocessing import image\nfrom keras.models import model_from_json, load_model\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nimport numpy as np\n\n\n\n# A class for image prediction based on ResNet pre-trained model\nclass ResNetPredict:\n def __init__(self):\n self.image = None\n self.model = None\n self.result_value = None\n\n def load_model(self):\n #self.model = ResNet50(weights=\"imagenet\")\n # Model reconstruction from JSON file\n\n with open('model_architecture.json', 'r') as f:\n self.model = model_from_json(f.read())\n\n # Load weights into the new model\n self.model.load_weights('model_weights.h5')\n\n\n def prepare_image(self, img, target):\n if img.mode != \"RGB\":\n img = img.convert(\"RGB\")\n img = img.resize(target)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n return x\n\n\n def predict(self, img):\n prediction = self.model.predict(img)\n\n print()\n print('****************************')\n print(prediction)\n print(prediction[0])\n index = self.getIndex(prediction[0])\n label = self.getLabel(index)\n print(index)\n print(label)\n print('****************************')\n return label\n\n def getLabel(self, index):\n letras = ['a', 'b', 'c', 'd', 'e', 'f', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 't', 'u', 'v', 'w', 'y']\n label = letras[index]\n return label\n\n def getIndex(self, array):\n n = len(array)\n for i in range(n+1):\n if array[i] == 1:\n return i\n return -1\n","sub_path":"backend-trained-model/ResNetPredict.py","file_name":"ResNetPredict.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"345732310","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\n\nfrom .forms import NewEntryForm\nfrom .forms import NewSearchForm\n\nfrom . 
import util\n\nfrom random import choice\n\nimport markdown2\n\n\ndef index(request):\n \"\"\"\n request: is a HTTP request\n This returns an HTTP response for the following url:\n index is in the urls.py\n \"\"\"\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries(),\n \"search_form\": NewSearchForm()\n })\n\n\ndef render_wiki(request, wiki_name):\n \"\"\"\n request: request object\n wiki_name: The name of the entry to be rendered\n \"\"\"\n if not util.entry_exists(wiki_name):\n return error_reply(request, wiki_name)\n\n context = {\n \"wiki_title\": wiki_name,\n \"wiki_body\": markdown2.markdown(util.get_entry(wiki_name))\n }\n\n return render(request, \"encyclopedia/wiki_layout.html\", context)\n\n\ndef search(request):\n \"\"\"\n search view:\n -Renders a wiki if the request string matches (case sensitive) with an entry filename\n -If there isn't a matching file, it returns a list with matching substrings.\n -If the post request is not valid, it renders the index\n request: Http request with the POST form with a \"searchField\" name.\n \"\"\"\n if request.method == \"POST\":\n form = NewSearchForm(request.POST)\n if form.is_valid():\n search_string = form.cleaned_data[\"searchField\"]\n\n if util.entry_exists(search_string):\n return render_wiki(request, search_string)\n else:\n return view_similar_results(request, search_string)\n else:\n return render(request, \"encyclopedia/index.html\")\n\ndef view_similar_results(request, search_string):\n \"\"\"\n renders a list with substring matching filenames of the search_string.\n request: Request object.\n search_string: the search string.\n \"\"\"\n similar_entries = util.similar_results(search_string)\n\n return render(request, \"encyclopedia/search_results.html\", {\n \"entry_size\": len(similar_entries),\n \"is_one_result\": bool(len(similar_entries) == 1),\n \"entries\": similar_entries\n })\n\n\ndef error_reply(request, wiki_name):\n \"\"\"\n Renders a 404 page with a custom message.\n wiki_name: String of the missing entry.\n \"\"\"\n return render(request, \"encyclopedia/entry_not_found.html\", {\n \"wiki_title\": f\"{wiki_name} not found\",\n \"entry_name\": wiki_name\n }\n )\n\n\ndef view_error404(request, exception):\n \"\"\"\n Default 404 page for production\n exception: 404 exception, not used, but needed to be used as the default 404 page\n \"\"\"\n print(exception)\n return error_reply(request, \"404\")\n\n\ndef edit_entry(request):\n \"\"\"\n View that edits an entry.\n It uses the same template as add_entry()\n \"\"\"\n entry = request.GET.get(\"entry\")\n\n if not util.entry_exists(entry):\n return error_reply(request, entry)\n\n body = \"\\n\".join(util.get_entry(entry).split(\"\\n\")[2:])\n form = NewEntryForm(initial={'body': body})\n\n if request.POST.get(\"done\") == \"True\":\n util.save_entry(entry, request.POST.get(\"body\"))\n return render_wiki(request, entry)\n\n context = {\n \"form\": form,\n \"is_edit_page\": True,\n \"title_edit\": entry\n }\n\n if request.POST.get(\"preview\") == \"True\":\n body = markdown2.markdown(request.POST['body'])\n\n context[\"preview\"] = True\n context[\"md_title\"] = entry\n context[\"md_content_as_HTML\"] = body\n context[\"form\"] = NewEntryForm(initial={'body': request.POST.get(\"body\")})\n\n return render(request, \"encyclopedia/new_page.html\", context)\n\n\ndef add_entry(request):\n \"\"\"\n View to add a new entry.\n \"\"\"\n form = NewEntryForm(request.POST or None)\n\n context = {\n \"form\": form,\n }\n\n if 
request.POST.get(\"title\") is None:\n return render(request, \"encyclopedia/new_page.html\", {\n \"form\": form\n })\n if form.is_valid() and request.POST.get(\"done\") == \"True\":\n util.save_entry(request.POST.get(\"title\"), request.POST.get(\"body\"))\n return render_wiki(request, request.POST.get(\"title\"))\n\n if form.is_valid() and request.POST.get(\"preview\") == \"True\":\n context[\"preview\"] = True\n context[\"md_title\"] = form.cleaned_data[\"title\"]\n context[\"md_content_as_HTML\"] = markdown2.markdown(form.cleaned_data['body'])\n\n if not form.is_valid():\n context[\"feedback\"] = True\n\n return render(request, \"encyclopedia/new_page.html\", context)\n\n\ndef view_random_entry(request):\n \"\"\"\n View for /random\n render a random entry.\n \"\"\"\n return redirect(f\"wiki/{choice(util.list_entries())}\")\n","sub_path":"wiki/encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"561620217","text":"from bit import wif_to_key\n\n# replace with your private key (do not send this to students)\nkey = wif_to_key(\"cQpxiiQHueFJgK3EaHKvDqAE7cm8gWKMrSj3ZPMMHkDr7v5gbkQW\")\n\n# replace with student addresses\naddresses = [\"mn9CfkoXJpkCMkPbRcBfUphso7QaDmBmgz\", \"mv4rnyY3Su5gjcDNzbMLKBQkBicCtHUtFB\"]\n\noutputs = []\n\nfor address in addresses:\n outputs.append((address, 0.001, \"btc\"))\n\nprint(key.send(outputs))\n","sub_path":"19-Blockchain-Python/2/Activities/01-Stu_Wallet_Check/Solved/multi-output-testnet-tx.py","file_name":"multi-output-testnet-tx.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"101045352","text":"import pyautogui \nfrom time import sleep\n#exbir informação \npyautogui.alert('O Codigo Vai comerça !! 
Teamo Thially')\n# 2 abrir o google\npyautogui.moveTo(x=34, y=197)\npyautogui.click(clicks=1)\nsleep(3)\n# 4 digitar o link do instagram\npyautogui.write('https://www.instagram.com/')\npyautogui.press('enter')\n# 5 pegar a url de um link de uma # \npyautogui.moveTo(x=366, y=84)\npyautogui.click(clicks=1)\n# 6 entra na pag\npyautogui.write('https://www.instagram.com/explore/tags/fitness/')\npyautogui.press('enter')\nsleep(3)\n# 7 apertar em uma foto\npyautogui.moveTo(x=541, y=486)\npyautogui.click(clicks=1)\nsleep(3)\n# 8 deixar o like e proxima foto\nfor c in range(30):\n pyautogui.moveTo(x=668, y=597)\n sleep(1)\n pyautogui.click(clicks=2)\n pyautogui.moveTo(x=1364, y=592)\n pyautogui.click(clicks=1)\n sleep(3)\n","sub_path":"BotForLike.py","file_name":"BotForLike.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"598587254","text":"\"\"\"\nterub_stn_multisyn_test.py\n\"\"\"\n\nimport os\nimport nest\nimport unittest\nimport numpy as np\nfrom pynestml.frontend.pynestml_frontend import to_nest, install_nest\nfrom numpy.random import rand\nfrom os.path import join\n\nnp.random.seed(4)\n\ndirectories = ['../models', 'resources', 'target']\nfor i in directories:\n if not os.path.exists(i):\n os.makedirs(i)\n\ntry:\n import matplotlib.pyplot as plt\n TEST_PLOTS = True\nexcept:\n TEST_PLOTS = False\n\n\nclass NestSTNExpTest(unittest.TestCase):\n\n def test_terub_stn_multisyn(self):\n\n model_name = \"terub_stn_multisyn\"\n\n if not os.path.exists(\"target\"):\n os.makedirs(\"target\")\n\n input_path = os.path.join(os.path.realpath(os.path.join(\n os.path.dirname(__file__), \"../models\", \"{}.nestml\".format(model_name))))\n target_path = \"target\"\n module_name = '{}_module'.format(model_name)\n nest_path = \"/home/abolfazl/prog/nest-build/\"\n suffix = '_nestml'\n\n if 1: #! compile\n to_nest(input_path=input_path,\n target_path=target_path,\n logging_level=\"INFO\",\n suffix=suffix,\n module_name=module_name)\n\n install_nest(target_path, nest_path)\n\n nest.Install(module_name)\n model = \"{}_nestml\".format(model_name)\n\n dt = 0.01\n t_simulation = 1500.0\n nest.SetKernelStatus({\"resolution\": dt})\n\n neuron1 = nest.Create(model, 1)\n neuron2 = nest.Create(model, 1)\n # parameters = nest.GetDefaults(model)\n\n # if 0:\n # for i in parameters:\n # print(i, parameters[i])\n\n for neuron in neuron1+neuron2:\n nest.SetStatus([neuron], {'I_e': 0.0 + rand() * 20.0 - 20})\n nest.SetStatus([neuron], {'V_m': -65.0 + rand() * 10. 
- 5.})\n \n nest.Connect(neuron1, neuron2, syn_spec={\"receptor_type\": 1}) # AMPA\n nest.Connect(neuron1, neuron2, syn_spec={\"receptor_type\": 2}) # NMDA\n nest.Connect(neuron1, neuron2, syn_spec={\"receptor_type\": 3}) # GABAA\n nest.Connect(neuron1, neuron2, syn_spec={\"receptor_type\": 4}) # GABAB\n\n record_from = [\"V_m\", \"I_syn_ampa\", \"I_syn_nmda\",\n \"I_syn_gaba_a\", \"I_syn_gaba_b\"]\n\n multimeter = nest.Create(\"multimeter\", 2)\n nest.SetStatus(multimeter, {\"withtime\": True,\n \"record_from\": record_from,\n \"interval\": dt})\n spikedetector = nest.Create(\"spike_detector\",\n params={\"withgid\": True,\n \"withtime\": True})\n nest.Connect(multimeter, neuron1+neuron2, \"one_to_one\")\n nest.Connect(neuron1+neuron2, spikedetector)\n nest.Simulate(t_simulation)\n\n dSD = nest.GetStatus(spikedetector, keys='events')[0]\n spikes = dSD['senders']\n\n firing_rate = len(spikes) / t_simulation * 1000\n print(\"firing rate is \", firing_rate / 2)\n\n def plot_data(index=[0]):\n\n fig, ax = plt.subplots(3, figsize=(10, 6), sharex=True)\n for i in index:\n dmm = nest.GetStatus(multimeter, keys='events')[i]\n Voltages = dmm[\"V_m\"]\n tv = dmm[\"times\"]\n ax[0].plot(tv, Voltages, lw=1, label=str(i+1))\n\n labels = [\"ampa\", \"nmda\", \"gaba_a\", \"gaba_b\"]\n j = 0\n dmm = nest.GetStatus(multimeter) [1]\n tv = dmm['events'][\"times\"]\n for i in record_from[1:]:\n g = dmm[\"events\"][i]\n ax[1].plot(tv, g, lw=2, label=labels[j])\n j += 1\n \n dSD = nest.GetStatus(spikedetector, keys='events')[0]\n spikes = dSD['senders']\n ts = dSD[\"times\"]\n\n ax[2].plot(ts, spikes, 'ko', ms=3)\n ax[2].set_xlabel(\"Time [ms]\")\n ax[2].set_xlim(0, t_simulation)\n ax[2].set_ylabel(\"Spikes\")\n ax[0].set_title(\"recording from PSP\")\n ax[0].set_ylabel(\"v [ms]\")\n ax[1].set_ylabel(\"I_syn\")\n ax[1].legend(frameon=False, loc=\"upper right\")\n ax[0].legend()\n\n\n plt.savefig(join(\"resources\", \"terub_stn_multisyn.png\"), dpi=150)\n # plt.show()\n\n plot_data(index=[0, 1])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"terman2002/NEST/test_files/terub_stn_multisyn_test.py","file_name":"terub_stn_multisyn_test.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553264987","text":"import logging\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', filename='logging_example.log', filemode='w',\n level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\nlogger.warning('warning')\nlogger.info('info')\nlogger.debug('bedug')\n","sub_path":"logging_example.py","file_name":"logging_example.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"608018527","text":"print(\"Hi\")\n\nimport requests\nimport re\nimport os\nimport uuid\n\nclass Updater:\n def __init__(self, target_link):\n self.target_link = target_link\n\n def checkUpdate(self, link):\n os.chdir(\"C:\\\\teleport\\\\Code\\\\Python Updating Concept\\\\\")\n r = requests.get(link, allow_redirects=True)\n filename = self.getFileName(link)\n open(filename, \"wb\").write(r.content)\n f = open(filename, \"r\")\n f1 = open(\"updatebranchLOCAL.txt\", \"r\")\n\n if str(f.readline()) == str(f1.readline()):\n print(f.readline(), \"Up to date\")\n return False\n else:\n print(f.readline(), \"Different than (local): \" + f1.readline())\n return True\n\n def update(self):\n os.chdir(\"C:\\\\teleport\\\\Code\\\\Python 
Updating Concept\\\\updated\")\n\n r = requests.get(self.target_link, allow_redirects=True)\n filename = self.getFileName(self.target_link)\n open(filename, \"wb\").write(r.content)\n\n def updateChanges(self):\n os.chdir(\"C:\\\\teleport\\\\Code\\\\Python Updating Concept\\\\\")\n\n pathtonewfile = \"updated/updater.py\"\n a = open(pathtonewfile, 'r')\n newContents = a.read()\n a.close()\n\n with open(\"updater.py\", \"w\") as f:\n f.write(newContents)\n f.close()\n\n\n def getFileName(self, url):\n fname = url.split(\"/\")\n fname = fname[len(fname)-1]\n return fname\n\n def uid():\n return uuid.uuid1()\n\n\nu = Updater(\"https://raw.githubusercontent.com/electricSoda/python-updater/main/updater.py\")\ncheck = u.checkUpdate(\"https://raw.githubusercontent.com/electricSoda/python-updater/main/updatebranch.txt\")\n\nif check:\n u.update()\n u.updateChanges()\n","sub_path":"backup/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"635906016","text":"#########################################################################\n#### Ted Brundage, M.S.E., Princeton University\n#### Advisor: Barbara Engelhardt\n####\n#### Code: PM Greedy and Sampling Predictor Generator\n####\n#### Last updated: 4/28/16\n####\n#### Notes and disclaimers:\n#### - Use only numpy.ndarray, not numpy.matrix to avoid any confusion\n#### - If something is a column vector - MAKE IT A COLUMN VECTOR. Makes \n#### manipulation annoying, but it keeps matrix algebra logical. \n####\n#########################################################################\n\n\n#########################################################################\n###\n### IMPORTS\n###\n\nimport os\nimport sys\nimport time\nfrom copy import deepcopy as dc\nimport datetime\n\nmainpath = \"/Users/Ted/__Engelhardt/Engelhardt_DPP\"\nsys.path.append(os.path.abspath(mainpath))\n\nimport numpy as np\nimport pickle\nimport Utils.DPPutils as DPPutils\nimport ThetaOptimizers.VariationalLearning as VL\nimport GammaSelectors.Greedy as Greedy\nimport GammaSelectors.Sampling as Sampling\nimport Predictor as Predictor\nimport warnings\n\n\n#########################################################################\n\nsetStart = int(sys.argv[1])\nsetFinal = int(sys.argv[2])\n\ndataFolders = ['n_025','n_050','n_075','n_100','n_150','n_200','n_400']\nTthetas = [1000,5000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,25000]\nKappas = [2,4,10]\n\nfor i in range(setStart,setFinal):\n setDir = 'Set%02d/' % i\n for df in dataFolders:\n currentDir = '%s%s/' % (setDir,df)\n X_tr = np.load('%sX_tr.npy' % currentDir)\n y_tr = np.load('%sy_tr.npy' % currentDir)\n\n for kappa in Kappas:\n for T in Tthetas:\n try:\n logDir = '%sVL_Kappa_%02d_Theta_%03d/' % (currentDir,kappa,T)\n if T == Tthetas[0]:\n VL_theta = VL.VL(X_tr,y_tr,max_T=T,kappa=kappa,verbose=False,dir=logDir)\n else:\n VL_theta = VL.VL(VL_theta,max_T=T,kappa=kappa,verbose=False,dir=logDir)\n except: \n break\n\n ###########################\n ## Make Greedy Predictor ##\n ###########################\n \n #############################\n ## Make Sampling Predictor ##\n #############################\n try:\n sampling_gamma = Sampling.Sampling(VL_theta)\n sampling_predictor = Predictor.Predictor(X_tr,y_tr,sampling_gamma.gamma, c=sampling_gamma.c)\n pickle.dump(sampling_predictor, open('%sVL_Kappa%02d_Theta%03d_Sampling.p' % (logDir,kappa,T),'wb'))\n except:\n greedy_gamma = 
Greedy.Greedy(VL_theta)\n greedy_predictor = Predictor.Predictor(X_tr,y_tr,greedy_gamma.gamma,c=greedy_gamma.c)\n pickle.dump(greedy_predictor, open('%sVL_Kappa%02d_Theta%03d_Greedy.p' % (logDir,kappa,T),'wb'))\n\n","sub_path":"SimulationTests/VL_GreedySampling.py","file_name":"VL_GreedySampling.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"14125638","text":"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tensorflow lowercasing operation for UTF8 strings.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\n\nfrom tensorflow.python.framework import load_library\nfrom tensorflow.python.platform import resource_loader\ngen_normalize_ops = load_library.load_op_library(resource_loader.get_path_to_datafile('_normalize_ops.so'))\n\n\n# pylint: disable=redefined-builtin\ndef case_fold_utf8(input, name=None):\n \"\"\"Applies case folding to every UTF-8 string in the input.\n\n The input is a `Tensor` or `RaggedTensor` of any shape, and the resulting\n output has the same shape as the input. Note that NFKC normalization is\n implicitly applied to the strings.\n\n For example:\n\n ```python\n >>> case_fold_utf8(['The Quick-Brown',\n ... 'CAT jumped over',\n ... 'the lazy dog !! ']\n tf.Tensor(['the quick-brown' 'cat jumped over' 'the lazy dog !! '],\n shape=(3,), dtype=string)\n ```\n\n Args:\n input: A `Tensor` or `RaggedTensor` of UTF-8 encoded strings.\n name: The name for this op (optional).\n\n Returns:\n A `Tensor` or `RaggedTensor` of type string, with case-folded contents.\n \"\"\"\n with ops.name_scope(name, \"CaseFoldUTF8\", [input]):\n input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n input, dtype=dtypes.string)\n if ragged_tensor.is_ragged(input_tensor):\n result = gen_normalize_ops.case_fold_utf8(input_tensor.flat_values)\n return input_tensor.with_flat_values(result)\n else:\n return gen_normalize_ops.case_fold_utf8(input_tensor)\n\n\n# pylint: disable=redefined-builtin)\ndef normalize_utf8(input, normalization_form=\"NFKC\", name=None):\n \"\"\"Normalizes each UTF-8 string in the input tensor using the specified rule.\n\n See http://unicode.org/reports/tr15/\n\n Args:\n input: A `Tensor` or `RaggedTensor` of type string. (Must be UTF-8.)\n normalization_form: One of the following string values ('NFC', 'NFKC',\n 'NFD', 'NFKD'). 
Default is 'NFKC'.\n name: The name for this op (optional).\n\n Returns:\n A `Tensor` or `RaggedTensor` of type string, with normalized contents.\n \"\"\"\n with ops.name_scope(name, \"NormalizeUTF8\", [input]):\n input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n input, dtype=dtypes.string)\n if ragged_tensor.is_ragged(input_tensor):\n result = gen_normalize_ops.normalize_utf8(input_tensor.flat_values,\n normalization_form)\n return input_tensor.with_flat_values(result)\n else:\n return gen_normalize_ops.normalize_utf8(input_tensor, normalization_form)\n","sub_path":"tensorflow_text/python/ops/normalize_ops.py","file_name":"normalize_ops.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"115796931","text":"from rest_framework import generics\nfrom django.db import models\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom movies.models import Movie, Actor\nfrom .serializers import (\n MovieListSerializer,\n MovieDetailSerializer,\n ReviewCreateSerializer,\n CreateRatingSerializer,\n ActorListSerializer,\n ActorDetailSerializer,)\nfrom .service import get_client_ip\n\n\nclass MovieListView(APIView):\n \"\"\"Вывод списка фильмов\"\"\"\n\n def get(self, request):\n movies = Movie.objects.filter(draft=False).annotate(\n rating_user=models.Count(\"ratings\", filter=models.Q(ratings__ip=get_client_ip(request)))).annotate(\n middle_star=models.Sum(models.F('ratings__star')) / models.Count(models.F('ratings'))\n )\n serializer = MovieListSerializer(movies, many=True)\n return Response(serializer.data)\n\n\nclass MovieDetailView(APIView):\n \"\"\"Вывод фильма\"\"\"\n\n def get(self, request, pk):\n movie = Movie.objects.get(id=pk, draft=False)\n serializer = MovieDetailSerializer(movie)\n return Response(serializer.data)\n\n\nclass ReviewCreateView(APIView):\n \"\"\"Добавление отзыва к фильму\"\"\"\n\n def post(self, request):\n review = ReviewCreateSerializer(data=request.data)\n if review.is_valid():\n review.save()\n return Response(status=201)\n\n\nclass AddStarRatingView(APIView):\n \"\"\"Добавление рейтинга фильму\"\"\"\n\n def post(self, request):\n serializer = CreateRatingSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(ip=get_client_ip(request))\n return Response(status=201)\n else:\n return Response(status=400)\n\n\nclass ActorsListView(generics.ListAPIView):\n \"\"\"Вывод списка актеров\"\"\"\n queryset = Actor.objects.all()\n serializer_class = ActorListSerializer\n\n\nclass ActorsDetailView(generics.RetrieveAPIView):\n \"\"\"Вывод списка актеров или режисеров\"\"\"\n queryset = Actor.objects.all()\n serializer_class = ActorDetailSerializer\n","sub_path":"movies/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"25124958","text":"\"\"\"\n v2.0 IpPool\n # 1. align IpPool's 'save' parameter 'True' to create a 'pool.txt' to stored pickled ip\n 2. 
IpPool(target_site_url, ip_number, foreign).give_me_ip() will return a list of valid ip\n e.g: ['http://host:port', 'http://host2:port2', .....]\n\"\"\"\n\nfrom bs4 import BeautifulSoup as Bs\nimport requests\nimport random\nimport re\n\n_re_ip = re.compile(r'^\\d{1,3}(\\.\\d{1,3}){3}$')\n\nua = [\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',\n 'Opera/9.25 (Windows NT 5.1; U; en)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',\n 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',\n 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',\n \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) \"\n \"Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\",\n \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \",\n]\n\n\nclass IpPool(object):\n def __init__(self, target_site_url, *, ip_number=4, foreign=True):\n if foreign:\n self.url = 'http://www.xicidaili.com/wn/'\n else:\n self.url = 'http://www.xicidaili.com/nn/'\n\n self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36'}\n\n if 'http' not in target_site_url:\n target_site_url = 'http://' + target_site_url\n self.target_site = target_site_url\n self.ip_number = ip_number\n self.new_ip = self.get_ip()\n self.valid_ip = self.choose_valid_ip()\n\n def give_me_ip(self):\n # method to give you valid proxies list\n # [ ('ip:port'), ...... ]\n return self.valid_ip\n\n def get_ip(self):\n print('开始获取ip...')\n host_list = []\n port_list = []\n html = requests.get(self.url, headers=self.headers)\n soup = Bs(html.text, 'lxml')\n for i in soup.find_all(string=_re_ip):\n host_list.append(i)\n port_list.append(i.parent.next_sibling.next_sibling.string)\n ip = ['%s:%s' % (host, port) for host, port in zip(host_list, port_list)]\n return ip\n\n def choose_valid_ip(self):\n valid_ip = []\n for ip in self.new_ip:\n try:\n header = {'User-Agent': random.choice(ua)}\n\n proxy = {'http': 'http://' + ip}\n page = requests.get(self.target_site, allow_redirects=False, timeout=3, proxies=proxy,\n headers=header)\n print(ip)\n # print('THIS WORK')\n valid_ip.append('http://' + ip)\n if len(valid_ip) >= self.ip_number:\n break\n except:\n print('[%s] not work ..... check another one' % ip)\n return valid_ip\n\n\nif __name__ == '__main__':\n ips = IpPool('http://91.t9l.space/forumdisplay.php?fid=19&page=1', foreign=True).give_me_ip()\n print(ips)\n '''\n header = {'User-Agent': random.choice(ua),\n 'Referer': 'https://www.douban.com/',\n 'Host': 'book.douban.com'}\n cookies = {\n 'cookie': '__ads_session=VPH0JhNv0wgn+j8uTwA=; domain=.douban.com; path=/'\n }\n page = requests.get('https://book.douban.com/subject/26895253/', timeout=2,\n headers=header)\n print(page.status_code)'''\n","sub_path":"v2/Ip_pool_foreign.py","file_name":"Ip_pool_foreign.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"461147602","text":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n#\nimport time\nimport uuid\n\nfrom azure_monitor.protocol import LiveMetricEnvelope\nfrom azure_monitor.utils import azure_monitor_context\n\nDEFAULT_LIVEMETRICS_ENDPOINT = \"https://rt.services.visualstudio.com\"\nLIVE_METRICS_SUBSCRIBED_HEADER = \"x-ms-qps-subscribed\"\nLIVE_METRICS_TRANSMISSION_TIME_HEADER = \"x-ms-qps-transmission-time\"\nSTREAM_ID = str(uuid.uuid4())\n\n\ndef create_metric_envelope(instrumentation_key: str):\n envelope = LiveMetricEnvelope(\n documents=None,\n instance=azure_monitor_context.get(\"ai.cloud.roleInstance\"),\n instrumentation_key=instrumentation_key,\n invariant_version=1, # 1 -> v1 QPS protocol,\n machine_name=azure_monitor_context.get(\"ai.device.id\"),\n metrics=None,\n stream_id=STREAM_ID,\n timestamp=\"/Date({0})/\".format(str(int(time.time()) * 1000)),\n version=azure_monitor_context.get(\"ai.internal.sdkVersion\"),\n )\n return envelope\n","sub_path":"azure_monitor/src/azure_monitor/sdk/auto_collection/live_metrics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259140995","text":"import numpy as np\n\ndef calc_score_variance(true_labels, var_fs):\n uncertainties = var_fs \n assert true_labels.shape[0] == uncertainties.shape[0]\n\n binary_labels = np.where(true_labels > 100, 1, 0) #live: 0, spoof: 1\n tnr95, da, roc = calc_paper_results(binary_labels, uncertainties) \n print(\"TNR95: {}\\nDA : {}\\nROC : {}\".format(tnr95, da, roc))\n return binary_labels, uncertainties, [tnr95, da, roc]\n\n\ndef calc_auroc(binary_labels, uncertainties):\n from sklearn import metrics\n fpr, tpr, thresholds = metrics.roc_curve(binary_labels, uncertainties)\n auc = metrics.auc(fpr, tpr)\n return auc\n\ndef calc_paper_results(binary_labels, uncertainties):\n N = np.shape(binary_labels)[0]\n auroc = calc_auroc(binary_labels, uncertainties)\n\n order = np.argsort(uncertainties)\n uncertainties = uncertainties[order]\n binary_labels = binary_labels[order]\n print(N)\n\n fnrs = np.empty(N)\n fprs = np.empty(N)\n tprs = np.empty(N)\n for T in range(1, N+1):\n fp = np.sum(binary_labels[:T])\n tp = T - fp\n tn = np.sum(binary_labels[T:])\n fn = (N - T) - tn\n fnrs[T-1] = fn / (fn + tp)\n fprs[T-1] = fp / (fp + tn)\n tprs[T-1] = tp / (tp + fn)\n idx95TPR = np.argmin(np.where(tprs < 0.95, 100, tprs))\n tnr95tpr = 1.0 - fprs[idx95TPR]\n d_acc = 1.0 - np.amin(np.add(fprs, fnrs) * 0.5) # live:spoof = 50:50\n\n return tnr95tpr, d_acc, auroc\n","sub_path":"eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"584759202","text":"\n\nfrom xai.brain.wordbase.nouns._deed import _DEED\n\n#calss header\nclass _DEEDS(_DEED, ):\n\tdef __init__(self,): \n\t\t_DEED.__init__(self)\n\t\tself.name = \"DEEDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"deed\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_deeds.py","file_name":"_deeds.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391290413","text":"import argparse\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.common.keys import Keys\nfrom 
selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\n\n# 实例化出一个chrome浏览器\nchrome_options = Options()\n\n# 基础设置\nprefs = {\n # 禁止加载图片\n \"profile.managed_default_content_settings.images\": 2,\n # 禁用css\n 'permissions.default.stylesheet': 2,\n # 禁止弹窗\n 'profile.default_content_setting_values': {'notifications': 2}\n}\n\n\n# # chrome_options.add_experimental_option(\"prefs\", prefs)\n# chrome_options.add_experimental_option('useAutomationExtension', False)\n\n\n# 设置为开发者模式\nchrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\n#\n# # 浏览器异常忽略\nchrome_options.add_argument('--ignore-certificate-errors')\nchrome_options.add_argument('--ignore-ssl-errors')\nchrome_options.add_argument('--ignore-certificate-errors-spki-list')\nchrome_options.add_argument('--allow-insecure-localhost')\n#\n# # 懒加载模式,不等待页面加载完毕\ncapa = DesiredCapabilities.CHROME\ncapa[\"pageLoadStrategy\"] = \"none\"\n\ndriver = webdriver.Chrome(\n options=chrome_options,\n # desired_capabilities=capa\n )\n# desired_capabilities=capa\n\n\n\n\ndriver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n \"source\": \"\"\"\n Object.defineProperty(navigator, 'webdriver', {\n get: () => undefined\n })\n \"\"\"\n})\ndriver.execute_cdp_cmd(\"Network.enable\", {})\ndriver.execute_cdp_cmd(\"Network.setExtraHTTPHeaders\", {\"headers\": {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36\"}})\n\ndriver.get(\"\")\n# # 设置浏览器窗口的位置和大小\n# driver.set_window_position(20, 40)\n# driver.set_window_size(1200, 800)\n","sub_path":"面试/裁判文书.py","file_name":"裁判文书.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"261379805","text":"\"\"\"\n\nKill Plex paused video transcoding streams and receive notification.\n\nPlexPy > Settings > Notification Agents > Scripts > Bell icon:\n [X] Notify on playback pause\n\nPlexPy > Settings > Notification Agents > Scripts > Gear icon:\n Playback Pause: kill_trans_pause.py\n\n\"\"\"\n\nimport sys\nimport requests\nfrom plexapi.server import PlexServer\n\n\n## EDIT THESE SETTINGS ##\nPLEX_URL = 'http://localhost:32400'\nPLEX_TOKEN = 'xxxxx'\nPLEXPY_APIKEY = 'xxxxx' # Your PlexPy API key\nPLEXPY_URL = 'http://localhost:8181/' # Your PlexPy URL\n\nKILL_MESSAGE = 'This stream has ended due to being paused and transcoding.'\n\nUSER_IGNORE = ('') # ('Username','User2')\n\nSUBJECT_TEXT = \"Killed Paused Transcoded Stream.\"\nBODY_TEXT = \"Killed {user}'s paused transcoded stream of {title}.\"\n\nAGENT_ID = 14 # Notification agent ID for PlexPy\n# Find Notification agent ID here:\n# https://github.com/JonnyWong16/plexpy/blob/master/API.md#notify\n\n##/EDIT THESE SETTINGS ##\n\nsess = requests.Session()\nsess.verify = False\nplex = PlexServer(PLEX_URL, PLEX_TOKEN, session=sess)\n\ndef send_notification(subject_text, body_text):\n # Send the notification through PlexPy\n payload = {'apikey': PLEXPY_APIKEY,\n 'cmd': 'notify',\n 'agent_id': AGENT_ID,\n 'subject': subject_text,\n 'body': body_text}\n\n try:\n r = requests.post(PLEXPY_URL.rstrip('/') + '/api/v2', params=payload)\n response = r.json()\n\n if response['response']['result'] == 'success':\n sys.stdout.write(\"Successfully sent PlexPy notification.\")\n else:\n raise Exception(response['response']['message'])\n except Exception as e:\n 
sys.stderr.write(\"PlexPy API 'notify' request failed: {0}.\".format(e))\n return None\n\n\nfor session in plex.sessions():\n username = session.usernames[0]\n state = session.players[0].state\n video_decision = session.transcodeSessions[0].videoDecision\n title = (session.grandparentTitle + ' - ' if session.type == 'episode' else '') + session.title\n\n if video_decision == 'transcode' and state == 'paused' and username not in USER_IGNORE:\n sys.stdout.write(\"Killing {user}'s stream of {title}.\".format(user=username, title=title))\n session.stop(reason=KILL_MESSAGE)\n send_notification(SUBJECT_TEXT, BODY_TEXT.format(user=username, title=title))\n","sub_path":"killstream/kill_trans_pause_notify.py","file_name":"kill_trans_pause_notify.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"84442190","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom raiden.utils import make_address, get_contract_path, privatekey_to_address\nfrom raiden.network.discovery import ContractDiscovery\n\n\n@pytest.mark.parametrize('number_of_nodes', [1])\ndef test_endpointregistry(private_keys, blockchain_services):\n chain = blockchain_services.blockchain_services[0]\n my_address = privatekey_to_address(private_keys[0])\n\n endpointregistry_address = chain.deploy_contract(\n 'EndpointRegistry',\n get_contract_path('EndpointRegistry.sol'),\n )\n discovery_proxy = chain.discovery(endpointregistry_address)\n\n contract_discovery = ContractDiscovery(my_address, discovery_proxy)\n\n unregistered_address = make_address()\n\n # get should raise for unregistered addresses\n with pytest.raises(KeyError):\n contract_discovery.get(my_address)\n\n with pytest.raises(KeyError):\n contract_discovery.get(unregistered_address)\n\n assert contract_discovery.nodeid_by_host_port(('127.0.0.1', 44444)) is None\n\n contract_discovery.register(my_address, '127.0.0.1', 44444)\n\n assert contract_discovery.nodeid_by_host_port(('127.0.0.1', 44444)) == my_address\n assert contract_discovery.get(my_address) == ('127.0.0.1', 44444)\n\n contract_discovery.register(my_address, '127.0.0.1', 88888)\n\n assert contract_discovery.nodeid_by_host_port(('127.0.0.1', 88888)) == my_address\n assert contract_discovery.get(my_address) == ('127.0.0.1', 88888)\n\n with pytest.raises(KeyError):\n contract_discovery.get(unregistered_address)\n","sub_path":"raiden/raiden/tests/integration/test_endpointregistry.py","file_name":"test_endpointregistry.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"305944329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n########################################################################\n#\n# Copyright (c) 2021 Baidu.com, Inc. 
All Rights Reserved\n#\n# File: a.py\n# Author: suweiyue(suweiyue@baidu.com)\n# Date: 2021/04/13 21:58:37\n#\n########################################################################\n\"\"\"\n Comment.\n\"\"\"\n\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\n\n\ntrain_hrt = np.load(\"./dataset/wikikg90m_kddcup2021/processed/train_hrt.npy\", mmap_mode=\"r\")\ndata = [dict() for _ in range(1315)]\n\nfor h, r, t in tqdm(train_hrt):\n if not h in data[r]:\n data[r][h] = 1\n else:\n data[r][h] += 1\n\nfor r in range(1315):\n r_sum = sum(data[r].values())\n for h in data[r]:\n data[r][h] /= r_sum\n\n\npickle.dump(data, open(\"feature_output/r2h_prob.pkl\", \"wb\"))\n","sub_path":"examples/kddcup2021/WikiKG90M/feature/walk_probability/r2h.py","file_name":"r2h.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315215078","text":"\n#its a data structure\n#un-ordered key value pair\ndictionary = {\n 'a':1,\n 'b':2\n}\n\nprint(dictionary['a'])\nprint(dictionary)\n\n#Lists and dictionary\nmy_list = [{\n 'a':[1,2,3],\n 'b':2\n},\n{\n 'c':[4,5,6],\n 'd':2\n}\n]\nprint(my_list[0]['a'][2])\n\n#dict has no order\n#List is ordered\n#each data struct has pros and cons\n\n#keys cannot be changed\n\ndict_1 = {\n 123: [1, 2, 3],\n True: 'Hello',\n 'is_Magic': True,\n 123: \"New values\",\n}\n\nprint(dict_1[123])\n\nprint(dict_1[True])\n\n#key cannnot be list, key must be immutable\n\nuser = {\n 'basket' : [1, 2, 3],\n \"greet\" : 'Hello'\n}\n\nprint(user.get('basket'))\nprint(user.get('age'))\nprint(user.get('age',55))\n\n#keys cant be expression(shud be variable)\nuser_2 = dict(name='Jay')\nprint(user_2)\n\n#################\nuser = {\n 'basket' : [1, 2, 3],\n \"greet\" : 'Hello',\n 'age' : 25\n}\n\nprint('size' in user)\n\nprint('age' in user.keys())\nprint(25 in user.values())\n\nuser2= user.copy()\nuser.clear()\nprint(user)\nprint(user2)\n\n#print(user2.pop('age'))\nprint(user2.popitem()) #pops any item\nprint(user2)\n\nprint(user2.update({'age':26}))\nprint(user2)\n\n\n\n\n\n\n\n","sub_path":"Basics/Dictionary.py","file_name":"Dictionary.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"302258658","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('project', '0002_project_members'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='taskmaker',\n name='status',\n field=models.CharField(default=b'undone', max_length=30),\n ),\n ]\n","sub_path":"project/migrations/0003_taskmaker_status.py","file_name":"0003_taskmaker_status.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"69393676","text":"import sc2reader\nimport numpy as np\nimport subprocess\nimport json\nimport sys\nimport os\n\ndef main():\n try:\n file_sc2replay = sys.argv[1]\n except:\n print('No replay file.')\n return\n\n # load replay in another format\n replay = sc2reader.load_replay(file_sc2replay)\n\n # List of Devs/Mods\n list_godspeak_handles = [\"1-S2-1-1790706\",\n \"1-S2-1-5862849\",\n \"1-S2-1-10501340\",\n \"1-S2-1-586913\",\n \"1-S2-1-575623\",\n \"1-S2-1-5947630\",\n \"2-S2-1-5202779\",\n \"1-S2-1-619412\",\n \"2-S2-1-2049961\",\n \"1-S2-1-5133855\",\n \"3-S2-1-364257\",\n 
\"3-S2-1-4522456\",\n \"3-S2-1-5159369\",\n \"3-S2-1-4674764\",\n \"1-S2-1-4815582\",\n \"2-S2-1-4072182\",\n \"1-S2-1-5862849\",\n \"1-S2-1-10501340\",\n \"1-S2-1-586913\",\n \"1-S2-1-3270554\",\n \"3-S2-1-5691919\",\n \"2-S2-1-6580713\",\n \"3-S2-1-3830671\",\n ]\n\n # Get player names\n list_player_name = [data['name'] for data in replay.raw_data['replay.initData']['user_initial_data'][:12]]\n list_player_clan = [data['clan_tag'] for data in replay.raw_data['replay.initData']['user_initial_data'][:12]]\n\n # Get player handles\n list_player_handles = [data['toon_handle'] for data in replay.raw_data['replay.initData']['lobby_state']['slots'][:12]]\n\n # Get player colors\n list_color_map = ['red', 'blue', 'teal', 'purp', 'yell', 'oj', 'grn', 'lp', 'N/A', 'grey', 'dg', 'brwn',\n 'N/A', 'blk', 'pink']\n list_player_color_id = [slot['colorPref'] for slot in replay.raw_data['replay.initData']['lobby_state']['slots']][:12]\n list_player_color_txt = [list_color_map[id - 1] for id in list_player_color_id]\n\n # Get role assignments\n key_role = {}\n key_role['CaptainUpgrade'] = 'Cap'\n key_role['ChiefMaitanenceOfficerUpgrade2222'] = 'Maj'\n key_role['ChiefMaitanenceOfficerUpgrade22222'] = 'Sgt'\n key_role['ChiefMaitanenceOfficerUpgrade222222'] = 'Doc'\n key_role['ChiefMaitanenceOfficerUpgrade2222222'] = 'LT'\n key_role['ChiefMaitanenceOfficerUpgrade2222223'] = 'Eng'\n key_role['SecurityOfficer'] = 'Off'\n key_role['ChiefMaitanenceOfficerUpgrade2'] = 'SG'\n key_role['ChiefMaitanenceOfficerUpgrade22'] = 'DSM'\n\n list_player_role = ['Unknown'] * 12\n list_event_role_assn = [event for event in replay.events if\n event.name == 'UpgradeCompleteEvent' and event.upgrade_type_name in key_role.keys()]\n for event in list_event_role_assn:\n try:\n list_player_role[event.player.sid] = key_role[event.upgrade_type_name]\n except:\n pass\n\n # Display player list\n print('\\nPlayer List:')\n for ii in range(len(list_player_name)):\n tmp_metadata = ('[#%2d] [%-15s] [%3s] [%6s] ' % (ii+1, list_player_handles[ii], list_player_role[ii], list_player_color_txt[ii])).encode('utf-8')\n try:\n if len(list_player_clan[ii]) > 0:\n tmp_playername = ('<%s> %s'%(list_player_clan[ii],list_player_name[ii])).encode('utf-8')\n else:\n tmp_playername = ('%s'%(list_player_name[ii])).encode('utf-8')\n print(tmp_metadata+tmp_playername)\n except:\n pass\n\n # Check for player name search\n print('\\n')\n list_search_names = raw_input('To filter the chat...\\nPlease type in the names or player numbers (separated by space) then press enter.\\nLeave blank for unfiltered.\\n').split()\n print('\\n')\n if len(list_search_names) > 0:\n enable_filter = True\n search_ID = []\n for x in list_search_names:\n if x.isdigit():\n if int(x) >= 1 and int(x) <= 12:\n search_ID.append(int(x)-1)\n print('Adding %s (#%d) to filter list'%(list_player_name[int(x)-1],int(x)))\n else:\n print('Player Number #%d incorrect. 
Please enter a number between 1 and 12' % int(x))\n else:\n player_found = False\n for yy in range(len(list_player_name)):\n if x.lower() in list_player_name[yy].lower():\n player_found = True\n search_ID.append(yy)\n print('Adding %s (#%d) to filter list'%(list_player_name[yy],yy+1))\n if not player_found:\n print('Could not find player: %s'%x)\n if len(search_ID) == 0:\n print('No names to filter')\n return\n else:\n enable_filter = False\n\n # Get player death times\n output_msg = []\n list_player_death_times = [np.inf]*12\n for entity_key in replay.entity.keys():\n list_marine_scv_died_at = [unit.died_at for unit in replay.entity[entity_key].units if unit.name in ['SCV', 'Marine']]\n\n # skip if player did not die\n if len(list_marine_scv_died_at) == 0 or np.any([died_at == None for died_at in list_marine_scv_died_at]):\n continue\n\n id_dst = replay.entity[entity_key].sid\n name_dst = list_player_name[id_dst] + ' (#%02d)' % (1 + id_dst)\n time_gameloop = max(list_marine_scv_died_at)\n time_min = np.floor(time_gameloop / 1000. * 62.5 / 60).astype('int')\n time_sec = np.floor(time_gameloop / 1000. * 62.5 % 60)\n list_player_death_times[id_dst] = time_gameloop\n if (not enable_filter) or (enable_filter and id_dst in search_ID):\n output_msg.append([time_gameloop, '[%02d:%02d] [---DEATH] [%4s] [%3s] %s has died' % (time_min, time_sec, list_player_color_txt[id_dst], list_player_role[id_dst], name_dst)])\n\n # Extract messages and categorize into Observer, Infested, or All chat\n # Currently unable to differentiate between Alien and Human form while they are talking in All-Chat\n print('\\n\\nNow extracting chat log...\\n')\n str_cmd_json = r'python \"' + os.path.join(os.path.dirname(os.path.realpath(__file__)),'s2protocol-master\\s2protocol\\s2_cli.py') + '\" --all --ndjson \"' + file_sc2replay\n data_json = [json.loads(line) for line in subprocess.check_output(str_cmd_json).split('\\n')[:-1]]\n list_player_alienchat_mode = [0]*12\n list_player_godspeak_mode = [0]*12\n list_player_observer_mode = [0]*12\n for datum in data_json:\n if '_event' in datum.keys() and datum['_event'] == 'NNet.Game.STriggerDialogControlEvent' and 'm_eventData' in datum.keys() and 'MouseButton' in datum['m_eventData'].keys() and datum['m_eventData']['MouseButton'] == 1:\n id_dst = datum['_userid']['m_userId']\n if list_player_handles[id_dst] in list_godspeak_handles:\n if datum['m_controlId'] == 23:\n list_player_godspeak_mode[id_dst] = not list_player_godspeak_mode[id_dst]\n elif datum['m_controlId'] == 25:\n list_player_observer_mode[id_dst] = not list_player_observer_mode[id_dst]\n elif datum['m_controlId'] == 27:\n list_player_alienchat_mode[id_dst] = not list_player_alienchat_mode[id_dst]\n else:\n list_player_alienchat_mode[id_dst] = not list_player_alienchat_mode[id_dst]\n\n\n # Check who is Alien Host/Spawn\n if 'm_upgradeTypeName' in datum.keys() and datum['m_upgradeTypeName'] == 'CanUseGeneModAlien':\n id_dst = datum['m_playerId'] - 1\n name_dst = list_player_name[id_dst] + ' (#%02d)'%(1+id_dst)\n time_min = np.floor(datum['_gameloop'] / 1000. * 62.5 / 60).astype('int')\n time_sec = np.floor(datum['_gameloop'] / 1000. 
* 62.5 % 60)\n time_gameloop = datum['_gameloop']\n if time_gameloop <= 50:\n if (not enable_filter) or (enable_filter and id_dst in search_ID):\n output_msg.append([time_gameloop, '[%02d:%02d] [NEWSPAWN] [%4s] [%3s] %s is Alien Host' % (time_min, time_sec, list_player_color_txt[id_dst], list_player_role[id_dst], name_dst)])\n else:\n if (not enable_filter) or (enable_filter and id_dst in search_ID):\n output_msg.append([time_gameloop, '[%02d:%02d] [NEWSPAWN] [%4s] [%3s] %s is now an Alien Spawn' % (time_min, time_sec, list_player_color_txt[id_dst], list_player_role[id_dst], name_dst)])\n\n # This is an older implementation of GodSpeak. Latest implementation uses a button click similar to alien chat\n # if '_event' in datum.keys() and datum['_event'] == 'NNet.Game.STriggerKeyPressedEvent' and 'm_key' in datum.keys() and datum['m_key'] in [18,31]:\n # id_dst = datum['_userid']['m_userId']\n # if list_player_handles[id_dst] in list_godspeak_handles:\n # list_player_godspeak_mode[id_dst] = not list_player_godspeak_mode[id_dst]\n\n if '_event' in datum.keys() and datum['_event'] == 'NNet.Game.STriggerChatMessageEvent':\n id_dst = datum['_userid']['m_userId']\n name_dst = list_player_name[id_dst] + ' (#%02d)'%(1+id_dst)\n time_min = np.floor(datum['_gameloop'] / 1000. * 62.5 / 60).astype('int')\n time_sec = np.floor(datum['_gameloop'] / 1000. * 62.5 % 60)\n time_gameloop = datum['_gameloop']\n msg = datum['m_chatMessage']\n msg = msg.encode('ISO-8859-1').decode('utf-8')\n\n if list_player_godspeak_mode[id_dst]:\n chat_mode = 'GodSpeak'\n elif time_gameloop >= list_player_death_times[id_dst] or list_player_observer_mode[id_dst]:\n chat_mode = 'Observer'\n elif list_player_alienchat_mode[id_dst]:\n chat_mode = 'Infested'\n else:\n chat_mode = 'All'\n\n if (not enable_filter) or (enable_filter and id_dst in search_ID):\n output_msg.append([time_gameloop, '[%02d:%02d] [%8s] [%4s] [%3s] %s: %s' % (time_min, time_sec, chat_mode, list_player_color_txt[id_dst], list_player_role[id_dst], name_dst, msg)])\n\n output_msg = [output_msg[idx][1] for idx in np.argsort(np.array([out[0] for out in output_msg]))]\n print('\\nEvents:')\n for ii in range(len(output_msg)):\n if ii>0 and ii%3 == 0:\n print('')\n print(output_msg[ii].encode('utf-8'))\n\nif __name__ == '__main__':\n main()","sub_path":"extract_chat.py","file_name":"extract_chat.py","file_ext":"py","file_size_in_byte":10692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528515775","text":"import tensorflow as tf\nimport time\nimport numpy as np\n\nimport helpers.helper_funcs as helpers\n#import helpers.cifar_models as models\n\ndef main():\n print('Loading data...')\n x_train, y_train, x_test, y_test = helpers.get_cifar10_data()\n y_test = tf.squeeze(y_test)\n\n print(\"Loading models...\")\n l1_model = tf.keras.models.load_model('models/cifar/l1_model')\n l2_model = tf.keras.models.load_model('models/cifar/l2_model')\n l3_model = tf.keras.models.load_model('models/cifar/l3_model')\n l4_model = tf.keras.models.load_model('models/cifar/l4_model')\n l5_model = tf.keras.models.load_model('models/cifar/l5_model')\n l6_model = tf.keras.models.load_model('models/cifar/l6_model')\n l7_model = tf.keras.models.load_model('models/cifar/l7_model')\n l8_model = tf.keras.models.load_model('models/cifar/l8_model')\n l9_model = tf.keras.models.load_model('models/cifar/l9_model')\n l10_model = tf.keras.models.load_model('models/cifar/l10_model')\n\n models = [l1_model, l2_model, l3_model, l4_model, l5_model, 
l6_model, l7_model, l8_model, l9_model, l10_model]\n num_classes = 10\n accuracies = compute_class_matrix_A(models, num_classes, x_test, y_test)\n\n best_models = np.argmax(accuracies, axis=0)\n\n # Classify the test data into buckets based off of their true class\n buckets = [[] for i in range(num_classes)]\n for i in range(x_test.shape[0]):\n class_peek = y_test[i]\n buckets[class_peek].append(x_test[i])\n\n # Loop through each of the class buckets and run the corresponding model, before adding up the correct predictions\n total_correct = 0 \n for i in range(num_classes):\n bucket_inputs = np.array(buckets[i])\n print(best_models[i])\n model = models[best_models[i]]\n probs = model.predict(bucket_inputs)\n preds = np.argmax(probs, axis=1)\n total_correct += np.sum(preds == i)\n\n print(\"Final accuracy: \", total_correct / x_test.shape[0])\n\n\ndef compute_class_matrix_A(models, num_classes, x_test, y_test):\n # Get dictionary of counts of each class in y_test\n y_test_np = y_test.numpy()\n unique, counts = np.unique(y_test_np, return_counts=True)\n count_dict = dict(zip(unique, counts))\n\n # Set up accuracy grid\n num_models = len(models)\n accuracies = np.zeros((num_models, num_classes))\n\n # Iterate over all models and get their predicted outputs\n for i in range(num_models):\n model = models[i]\n\n model_probs = model.predict(x_test)\n model_preds = np.argmax(model_probs, axis=1)\n\n # Iterate over all 10 classes\n for j in range(num_classes):\n # Compute the number of times where the prediction matches the test output for that class\n class_count = len(np.where((model_preds == j) & (y_test_np == j))[0])\n accuracies[i][j] = class_count / count_dict[j]\n\n print(accuracies)\n return accuracies\n\n\ndef compute_class_matrix_B(models, num_classes, x_test, y_test):\n # Get dictionary of counts of each class in y_test\n y_test_np = y_test.numpy()\n count_dicts = []\n\n # Set up accuracy grid\n num_models = len(models)\n accuracies = np.zeros((num_models, num_classes))\n\n # Iterate over all models and get their predicted outputs\n for i in range(num_models):\n model = models[i]\n\n model_probs = model.predict(x_test)\n model_preds = np.argmax(model_probs, axis=1)\n\n unique, counts = np.unique(model_preds, return_counts=True)\n count_dicts.append(dict(zip(unique, counts)))\n\n # Iterate over all 10 classes\n for j in range(num_classes):\n # Compute the number of times where the prediction matches the test output for that class\n class_count = len(np.where((model_preds == j) & (y_test_np == j))[0])\n accuracies[i][j] = class_count / count_dicts[i][j]\n\n print(accuracies)\n return accuracies\n\n\nif __name__ == '__main__':\n main()","sub_path":"DiffNumModelCombinations/per_class_optimization_limit.py","file_name":"per_class_optimization_limit.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349632635","text":"import glob\nimport json\nfrom base64 import b64encode\nfrom collections import defaultdict\n\nfrom app.objects.c_planner import Planner\nfrom app.service.base_service import BaseService\nfrom app.utility.rule import RuleAction\n\n\nclass DataService(BaseService):\n\n def __init__(self, dao):\n self.dao = dao\n self.log = self.add_service('data_svc', self)\n self.ram = dict(agents=[], planners=[])\n\n async def load_data(self, directory=None, schema='conf/core.sql'):\n \"\"\"\n Read all the data sources to populate the SQL database\n :param directory:\n :param schema:\n :return: 
None\n \"\"\"\n with open(schema) as schema:\n await self.dao.build(schema.read())\n if directory:\n self.log.debug('Loading data from %s' % directory)\n await self._load_abilities(directory='%s/abilities' % directory)\n await self._load_adversaries(directory='%s/adversaries' % directory)\n await self._load_facts(directory='%s/facts' % directory)\n await self._load_planners(directory='%s/planners' % directory)\n\n async def save(self, object_name, object_dict):\n \"\"\"\n Save a dict() for any object\n :param object_name:\n :param object_dict:\n :return:\n \"\"\"\n try:\n if object_name == 'operation':\n return await self._create_operation(**object_dict)\n elif object_name == 'link':\n return await self._create_link(object_dict)\n elif object_name == 'adversary':\n return await self._create_adversary(**object_dict)\n elif object_name == 'ability':\n return await self._create_ability(**object_dict)\n elif object_name == 'relationship':\n return await self.dao.create('core_relationship', object_dict)\n elif object_name == 'fact':\n return await self.dao.create('core_fact', object_dict)\n elif object_name == 'result':\n return await self.dao.create('core_result', object_dict)\n self.log.warning('[!] SAVE on non-core type: %s' % object_name)\n return await self.dao.create(object_name, object_dict)\n except Exception as e:\n self.log.error('[!] SAVE %s: %s' % (object_name, e))\n\n async def delete(self, object_name, criteria):\n \"\"\"\n Delete any object in the database by table name and ID\n :param object_name: the name of the table\n :param criteria: a dict of key/value pairs to match on\n \"\"\"\n self.log.debug('Deleting %s from %s' % (criteria, object_name))\n await self.dao.delete('core_%s' % object_name, data=criteria)\n\n async def update(self, object_name, key, value, data):\n \"\"\"\n Update any field in any table in the database\n :param object_name:\n :param key:\n :param value:\n :param data:\n :return: None\n \"\"\"\n await self.dao.update('core_%s' % object_name, key, value, data)\n\n async def get(self, object_name, criteria):\n \"\"\"\n Get the contents of any object\n :param object_name:\n :param criteria:\n :return: a list of dictionary results\n \"\"\"\n try:\n if object_name == 'operation':\n return await self.dao.get('core_operation', criteria)\n elif object_name == 'chain':\n return await self.dao.get('core_chain', criteria)\n elif object_name == 'ability':\n return await self.dao.get('core_ability', criteria)\n elif object_name == 'payload':\n return await self.dao.get('core_payload', criteria)\n elif object_name == 'used':\n return await self.dao.get('core_used', criteria)\n elif object_name == 'fact':\n return await self.dao.get('core_fact', criteria)\n self.log.warning('[!] GET on non-core type: %s' % object_name)\n return await self.dao.get(object_name, criteria)\n except Exception as e:\n self.log.error('[!] 
GET %s: %s' % (object_name, e))\n\n async def explode(self, object_name, criteria=None):\n \"\"\"\n Get an exploded version of any object\n :param object_name:\n :param criteria:\n :return:\n \"\"\"\n try:\n if object_name == 'operation':\n return await self._explode_operation(criteria)\n elif object_name == 'chain':\n return await self._explode_chain(criteria)\n elif object_name == 'adversary':\n return await self._explode_adversaries(criteria)\n elif object_name == 'ability':\n return await self._explode_abilities(criteria)\n elif object_name == 'parser':\n return await self._explode_parser(criteria)\n elif object_name == 'source':\n return await self._explode_sources(criteria)\n elif object_name == 'result':\n return await self._explode_results(criteria)\n elif object_name == 'used':\n return await self._explode_used(criteria)\n self.log.error('[!] EXPLODE on unknown type: %s' % object_name)\n except Exception as e:\n self.log.error('[!] EXPLODE %s: %s' % (object_name, e))\n\n async def store(self, c_object):\n \"\"\"\n Accept any c_object type and store it (create/update) in RAM\n :param c_object:\n :return: a single c_object\n \"\"\"\n try:\n return c_object.store(self.ram)\n except Exception as e:\n self.log.error('[!] STORE: %s' % e)\n\n async def locate(self, object_name, match=None):\n \"\"\"\n Find all c_objects which match a search. Return all c_objects if no match.\n :param object_name:\n :param match: dict()\n :return: a list of c_object types\n \"\"\"\n try:\n return [obj for obj in self.ram[object_name] if obj.match(match)]\n except Exception as e:\n self.log.error('[!] LOCATE: %s' % e)\n\n async def remove(self, object_name, match):\n \"\"\"\n Remove any c_objects which match a search\n :param object_name:\n :param match: dict()\n :return:\n \"\"\"\n try:\n self.ram[object_name][:] = [obj for obj in self.ram[object_name] if not obj.match(match)]\n except Exception as e:\n self.log.error('[!] 
REMOVE: %s' % e)\n\n \"\"\" PRIVATE \"\"\"\n\n async def _explode_abilities(self, criteria=None):\n abilities = await self.dao.get('core_ability', criteria=criteria)\n for ab in abilities:\n ab['cleanup'] = '' if ab['cleanup'] is None else ab['cleanup']\n ab['parsers'] = await self.dao.get('core_parser', dict(ability=ab['id']))\n ab['payload'] = await self.dao.get('core_payload', dict(ability=ab['id']))\n ab['requirements'] = await self.dao.get('core_requirement', dict(ability=ab['id']))\n for r in ab['requirements']:\n r['enforcements'] = (await self.dao.get('core_requirement_map', dict(requirement_id=r['id'])))[0]\n return abilities\n\n async def _explode_adversaries(self, criteria=None):\n adversaries = await self.dao.get('core_adversary', criteria)\n for adv in adversaries:\n phases = defaultdict(list)\n for t in await self.dao.get('core_adversary_map', dict(adversary_id=adv['adversary_id'])):\n for ability in await self._explode_abilities(dict(ability_id=t['ability_id'])):\n ability['adversary_map_id'] = t['id']\n phases[t['phase']].append(ability)\n adv['phases'] = dict(phases)\n return adversaries\n\n async def _explode_operation(self, criteria=None):\n operations = await self.dao.get('core_operation', criteria)\n for op in operations:\n op['chain'] = sorted(await self._explode_chain(criteria=dict(op_id=op['id'])), key=lambda k: k['id'])\n adversaries = await self._explode_adversaries(dict(id=op['adversary_id']))\n op['adversary'] = adversaries[0]\n op['host_group'] = await self.locate('agents', match=dict(group=op['host_group']))\n sources = await self.dao.get('core_source_map', dict(op_id=op['id']))\n source_list = [s['source_id'] for s in sources]\n op['facts'] = await self.dao.get_in('core_fact', 'source_id', source_list)\n for fact in op['facts']:\n fact['relationships'] = await self._add_fact_relationships(dict(source=fact['id']))\n op['rules'] = await self._sort_rules_by_fact(await self.dao.get_in('core_rule', 'source_id', source_list))\n return operations\n\n async def _explode_results(self, criteria=None):\n results = await self.dao.get('core_result', criteria=criteria)\n for r in results:\n link = await self.dao.get('core_chain', dict(id=r['link_id']))\n link[0]['facts'] = await self.dao.get('core_fact', dict(link_id=link[0]['id']))\n r['link'] = link[0]\n return results\n\n async def _explode_chain(self, criteria=None):\n chain = []\n for link in await self.dao.get('core_chain', criteria=criteria):\n a = await self.dao.get('core_ability', criteria=dict(id=link['ability']))\n chain.append(dict(abilityName=a[0]['name'], abilityDescription=a[0]['description'], **link))\n return chain\n\n async def _explode_sources(self, criteria=None):\n sources = await self.dao.get('core_source', criteria=criteria)\n for s in sources:\n s['facts'] = await self.dao.get('core_fact', dict(source_id=s['id']))\n return sources\n\n async def _explode_parser(self, criteria=None):\n parsers = await self.dao.get('core_parser', criteria)\n for parser in parsers:\n parser['mappers'] = await self.dao.get('core_parser_map', dict(parser_id=parser['id']))\n return parsers\n\n async def _explode_used(self, criteria=None):\n used_facts = await self.dao.get('core_used', criteria=criteria)\n for uf in used_facts:\n fact = (await self.dao.get('core_fact', dict(id=uf['fact_id'])))[0]\n uf['property'] = fact['property']\n uf['value'] = fact['value']\n return used_facts\n\n async def _create_link(self, link):\n used = link.pop('used', [])\n link_id = await self.dao.create('core_chain', link)\n for uf in used:\n 
await self.dao.create('core_used', dict(link_id=link_id, fact_id=uf))\n\n async def _create_adversary(self, i, name, description, phases):\n identifier = await self.dao.create('core_adversary',\n dict(adversary_id=i, name=name, description=description))\n\n await self.dao.delete('core_adversary_map', data=dict(adversary_id=i))\n for ability in phases:\n a = dict(adversary_id=i, phase=ability['phase'], ability_id=ability['id'])\n await self.dao.create('core_adversary_map', a)\n return identifier\n\n async def _write_ability(self, filename):\n for entries in self.strip_yml(filename):\n for ab in entries:\n for pl, executors in ab['platforms'].items():\n for name, info in executors.items():\n for e in name.split(','):\n encoded_test = b64encode(info['command'].strip().encode('utf-8'))\n await self._create_ability(ability_id=ab.get('id'), tactic=ab['tactic'].lower(),\n technique_name=ab['technique']['name'],\n technique_id=ab['technique']['attack_id'],\n test=encoded_test.decode(),\n description=ab.get('description') or '',\n executor=e, name=ab['name'], platform=pl,\n cleanup=b64encode(\n info['cleanup'].strip().encode(\n 'utf-8')).decode() if info.get(\n 'cleanup') else None,\n payload=info.get('payload'), parsers=info.get('parsers', []),\n requirements=ab.get('requirements', []))\n await self._delete_stale_abilities(ab)\n\n async def _load_abilities(self, directory):\n for filename in glob.iglob('%s/**/*.yml' % directory, recursive=True):\n await self._write_ability(filename)\n\n async def _load_adversaries(self, directory):\n for filename in glob.iglob('%s/*.yml' % directory, recursive=True):\n for adv in self.strip_yml(filename):\n phases = [dict(phase=k, id=i) for k, v in adv.get('phases', dict()).items() for i in v]\n for pack in [await self._add_adversary_packs(p) for p in adv.get('packs', [])]:\n phases += pack\n if adv.get('visible', True):\n await self._create_adversary(adv['id'], adv['name'], adv['description'], phases)\n\n async def _load_facts(self, directory):\n for filename in glob.iglob('%s/*.yml' % directory, recursive=False):\n for source in self.strip_yml(filename):\n source_id = await self.dao.create('core_source', dict(name=source['name']))\n for fact in source.get('facts', []):\n fact['source_id'] = source_id\n fact['score'] = fact.get('score', 1)\n await self.save('fact', fact)\n\n for rule in source.get('rules', []):\n rule['source_id'] = source_id\n await self._create_rule(**rule)\n\n async def _load_planners(self, directory):\n for filename in glob.iglob('%s/*.yml' % directory, recursive=False):\n for planner in self.strip_yml(filename):\n await self.store(\n Planner(name=planner.get('name'), module=planner.get('module'),\n params=json.dumps(planner.get('params')))\n )\n self.log.debug('Loaded %s planners' % len(self.ram['planners']))\n\n async def _create_rule(self, fact, source_id, action='DENY', match='.*'):\n try:\n action = RuleAction[action.upper()].value\n await self.dao.create('core_rule', dict(fact=fact, source_id=source_id, action=action, match=match))\n except KeyError:\n self.log.error(\n 'Rule action must be in [%s] not %s' % (', '.join(RuleAction.__members__.keys()), action.upper()))\n\n async def _create_ability(self, ability_id, tactic, technique_name, technique_id, name, test, description, executor,\n platform, cleanup=None, payload=None, parsers=None, requirements=None):\n ability = dict(ability_id=ability_id, name=name, test=test, tactic=tactic,\n technique_id=technique_id, technique_name=technique_name,\n executor=executor, platform=platform, 
description=description,\n cleanup=cleanup)\n # update\n unique_criteria = dict(ability_id=ability_id, platform=platform, executor=executor)\n for entry in await self.dao.get('core_ability', unique_criteria):\n await self.update('core_ability', 'id', entry['id'], ability)\n for parser in await self.dao.get('core_parser', dict(ability=entry['id'])):\n await self.dao.delete('core_parser_map', dict(parser_id=parser['id']))\n for requirement in await self.dao.get('core_requirement', dict(ability=entry['id'])):\n await self.dao.delete('core_requirement_map', dict(requirement_id=requirement['id']))\n await self.dao.delete('core_parser', dict(ability=entry['id']))\n await self.dao.delete('core_payload', dict(ability=entry['id']))\n return await self._save_ability_extras(entry['id'], payload, parsers, requirements)\n\n # new\n identifier = await self.dao.create('core_ability', ability)\n return await self._save_ability_extras(identifier, payload, parsers, requirements)\n\n @staticmethod\n async def _sort_rules_by_fact(rules):\n organized_rules = defaultdict(list)\n for rule in rules:\n fact = rule.pop('fact')\n organized_rules[fact].append(rule)\n return organized_rules\n\n async def _save_ability_extras(self, identifier, payload, parsers, requirements):\n if payload:\n await self.dao.create('core_payload', dict(ability=identifier, payload=payload))\n await self._save_ability_relationships(identifier, table='core_parser', id_type='parser_id',\n relationships=parsers)\n await self._save_ability_relationships(identifier, table='core_requirement', id_type='requirement_id',\n relationships=requirements)\n return identifier\n\n async def _save_ability_relationships(self, identifier, table, id_type, relationships):\n for module in relationships:\n _id = await self.dao.create(table, dict(ability=identifier, module=module))\n for r in relationships.get(module):\n relationship = {id_type: _id, 'source': r.get('source'), 'edge': r.get('edge'),\n 'target': r.get('target')}\n await self.dao.create('%s_map' % table, relationship)\n\n async def _delete_stale_abilities(self, ability):\n for saved in await self.dao.get('core_ability', dict(ability_id=ability.get('id'))):\n for platform, executors in ability['platforms'].items():\n if platform == saved['platform'] and not saved['executor'] in str(executors.keys()):\n await self.dao.delete('core_ability', dict(id=saved['id']))\n if saved['platform'] not in ability['platforms']:\n await self.dao.delete('core_ability', dict(id=saved['id']))\n\n async def _add_adversary_packs(self, pack):\n _, filename = await self.get_service('file_svc').find_file_path('%s.yml' % pack, location='data')\n for adv in self.strip_yml(filename):\n return [dict(phase=k, id=i) for k, v in adv.get('phases').items() for i in v]\n\n async def _add_fact_relationships(self, criteria=None):\n relationships = await self.dao.get('core_relationship', criteria)\n return [dict(edge=r.get('edge'), target=(await self.dao.get('core_fact', dict(id=r.get('target'))))[0])\n for r in relationships if r.get('target')]\n\n async def _create_operation(self, name, group, adversary_id, jitter='2/8', sources=[],\n planner=None, state=None, allow_untrusted=False, autonomous=True):\n op_id = await self.dao.create('core_operation', dict(\n name=name, host_group=group, adversary_id=adversary_id, finish=None, phase=0, jitter=jitter,\n start=self.get_current_timestamp(), planner=planner, state=state,\n allow_untrusted=allow_untrusted, autonomous=autonomous))\n source_id = await self.dao.create('core_source', 
dict(name=name))\n await self.dao.create('core_source_map', dict(op_id=op_id, source_id=source_id))\n for s_id in [s for s in sources if s]:\n await self.dao.create('core_source_map', dict(op_id=op_id, source_id=s_id))\n return op_id\n","sub_path":"caldera/app/service/data_svc.py","file_name":"data_svc.py","file_ext":"py","file_size_in_byte":19839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157808236","text":"\"\"\"\nThis module provides a simplified interface to the\nxml.dom.minidom objects. It supports a limited subset\nof its features and is meant for very simple xml\nfiles only.\n\nCopyright (c) 2009, Arkadiusz Wahlig \n\nDistributed under the new BSD License, see the\naccompanying LICENSE file for more information.\n\"\"\"\n\nfrom xml.dom import minidom\n\n\nclass Element(object):\n \"\"\"Encapsulates a single minidom element. Provides a list/dict-like\n index and get methods. If the element contains text/cdata only, it\n has no subelements and the text is available as the \"text\" attribute.\n Element attributes are available in attrs dictionary.\n \"\"\"\n\n def __init__(self, element):\n self._element = element\n self.name = str(element.tagName)\n self.text = None\n nodes = element.childNodes\n if len(nodes) == 1 and nodes[0].nodeType == element.TEXT_NODE:\n self.text = nodes[0].data.strip()\n self._nodes = []\n else:\n self._nodes = [node for node in nodes if node.nodeType != element.TEXT_NODE]\n if len(self._nodes) == 1 and self._nodes[0].nodeType == \\\n element.CDATA_SECTION_NODE:\n self.text = self._nodes[0].data.strip()\n self._nodes = []\n self.attrs = {}\n for name, value in element.attributes.items():\n self.attrs[name] = value\n\n def index(self, name):\n \"\"\"Returns an index of the first subelement with the given name.\n Raises an IndexError if not found.\n \"\"\"\n for i, c in enumerate(self._nodes):\n if str(c.tagName) == name:\n return i\n raise IndexError('%s is not in the sub-tags' % name)\n\n def get(self, name):\n \"\"\"Returns the first subelement with the given name.\n Raises a KeyError if not found.\n \"\"\"\n try:\n return Element(self._nodes[self.index(name)])\n except IndexError:\n raise KeyError(name)\n\n def has_key(self, name):\n \"\"\"Returns True if at least one subelement with the\n given name exists. 
Otherwise returns False.\n \"\"\"\n try:\n self.index(name)\n return True\n except IndexError:\n return False\n\n def toxml(self):\n \"\"\"Returns the element and all subelements as xml.\n \"\"\"\n return self._element.toxml()\n\n def __getitem__(self, i):\n \"\"\"Returns the subelement at given index.\n Raises an IndexError if index out of range.\n \"\"\"\n return Element(self._nodes[i])\n\n def __len__(self):\n \"\"\"Returns the number of subelements.\n \"\"\"\n return len(self._nodes)\n \n def __contains__(self, name):\n \"\"\"Tests if given element can be found in the subelements.\n \"\"\"\n try:\n self.index(name)\n except IndexError:\n return False\n return True\n\n def __repr__(self):\n text = ''\n if self.text is not None:\n text = ', text=%s' % repr(self.text)\n return '<%s.%s %s at 0x%x%s>' % (self.__class__.__module__,\n self.__class__.__name__, repr(self.name), id(self), text)\n\n\ndef parse(xml):\n \"\"\"Parses an xml and returns the Element object of the root element.\n xml may be either a string or a file-alike object.\n \"\"\"\n if isinstance(xml, str):\n dom = minidom.parseString(xml)\n else:\n dom = minidom.parse(xml)\n return Element(dom.documentElement)\n","sub_path":"xmlparse.py","file_name":"xmlparse.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"263949945","text":"\n\nimport os\nufile = input('파일 이름을 입력하세요.: ')\n\nscorelist = []\n\n# 입력받은 파일이름이 없을 때 예외처리하고 다시 이름 받기\nwhile True:\n try:\n f = open(\"%s.csv\" % ufile, 'r', encoding='CP949')\n except FileNotFoundError:\n print('파일을 찾지 못했습니다!')\n ufile = input('파일 이름을 입력하세요.: ')\n else:\n print('파일을 찾았습니다!')\n lines = f.readlines()\n for line in lines[1:]:\n line = line[:-1]\n s = line.split(',')\n try:\n s.append(int(s[1])+int(s[2])+int(s[3]))\n s.append(int(s[1])+int(s[2])+int(s[3]))\n s.append(int(s[4])/3)\n except ValueError:\n print('데이터를 수정하세요.')\n s.append(0)\n s.append(0)\n s.append(0)\n scorelist.append(s)\n f.close()\n break\n\n\n# 평균 리스트만 뽑기\naverlist = []\nfor i in range(10):\n averlist.append(int(scorelist[i][-1]))\naverlist.sort()\naverlist.reverse()\n\n# 평균을 가지고 같은 순서대로 집어넣으면 정렬이 됨.\nranklist = []\nfor i in averlist:\n for j in range(10):\n if i == scorelist[j][-1]:\n ranklist.append(scorelist[j])\n\n# 파일 덮어씌울지 말지 정해서 파일 생성하기\nif os.path.exists(\"/Users/harampark/Desktop/visualprogramming/_report.txt\") == True:\n print('동일한 이름의 파일이 있습니다.\\n 덮어씌우시겠습니까?')\n answer = input('예, 아니오로만 입력하세요.: ')\n if answer == '예':\n f = open(\"_report.txt\", 'w', encoding='CP949')\n else:\n newName = input('새로운 이름을 입력하세요.: ')\n f = open(\"%s.txt\" % newName, 'w', encoding='CP949')\nelse:\n f = open(\"_report.txt\", 'w', encoding='CP949')\nindex = ['ranking', 'number', 'korean', 'English', 'Math', 'Sum', 'Average']\nf.write(\"%7s %6s %6s %7s %4s %3s %7s\\n\" %\n (index[0], index[1], index[2], index[3], index[4], index[5], index[6]))\nfor i in range(7):\n f.write(\"%7d %6s %6s %7s %4s %3d %7.1f\\n\" % (\n (i+1), ranklist[i][0], ranklist[i][1], ranklist[i][2], ranklist[i][3], ranklist[i][4], ranklist[i][5]))\nf.close()\n","sub_path":"2019-2_visualprogramming/assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"138869411","text":"# future\nfrom __future__ import annotations\n\n# stdlib\nfrom collections import deque\nfrom typing import Any\nfrom typing import Callable\nfrom typing import 
Deque\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\n\n# from dataclasses import replace\nfrom typing import TYPE_CHECKING\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nimport flax\nimport jax\nfrom jax import numpy as jnp\nimport numpy as np\n\n# from numpy.random import randint\nfrom numpy.typing import ArrayLike\n\n# from scipy import optimize\nfrom scipy.optimize import shgo\n\n# relative\nfrom .... import lib\nfrom ....ast.klass import pointerize_args_and_kwargs\n\n# from ....core.adp.data_subject import DataSubject\nfrom ....core.node.common.action.get_or_set_property_action import (\n GetOrSetPropertyAction,\n)\nfrom ....core.node.common.action.get_or_set_property_action import PropertyActions\nfrom ....lib.python.util import upcast\nfrom ....util import inherit_tags\nfrom ...adp.data_subject_ledger import DataSubjectLedger\n\n# from ...adp.data_subject_list import DataSubjectList\n# from ...adp.data_subject_list import DataSubjectArray\n# from ...adp.data_subject_list import dslarraytonumpyutf8\n# from ...adp.data_subject_list import numpyutf8todslarray\nfrom ...adp.vectorized_publish import publish\nfrom ...common.serde.serializable import serializable\nfrom ...common.uid import UID\nfrom ...node.abstract.node import AbstractNodeClient\nfrom ...node.common.action.run_class_method_action import RunClassMethodAction\nfrom ...node.enums import PointerStatus\nfrom ...pointer.pointer import Pointer\nfrom ..config import DEFAULT_INT_NUMPY_TYPE\nfrom ..fixed_precision_tensor import FixedPrecisionTensor\nfrom ..passthrough import PassthroughTensor # type: ignore\nfrom ..smpc import utils\nfrom ..smpc.mpc_tensor import MPCTensor\nfrom ..smpc.utils import TYPE_TO_RING_SIZE\nfrom ..util import implements\nfrom .gamma_tensor_ops import GAMMA_TENSOR_OP\nfrom .gamma_tensor_ops import GAMMA_TENSOR_OP_FUNC\nfrom .jax_ops import SyftJaxInfixOp\nfrom .jax_ops import SyftJaxOp\nfrom .jax_ops import SyftJaxUnaryOp\n\nif TYPE_CHECKING:\n # stdlib\n from dataclasses import dataclass\nelse:\n # third party\n from flax.struct import dataclass\n\n\nINPLACE_OPS = {\"resize\", \"sort\"}\n\n\ndef debox_other(other: Any, attr: str) -> Any:\n if not isinstance(other, GammaTensor):\n return other\n return getattr(other, attr)\n\n\ndef debox_child(other: Any) -> Any:\n return debox_other(other, \"child\")\n\n\ndef debox_linear(other: Any) -> Any:\n if not isinstance(other, GammaTensor):\n return True\n return debox_other(other, \"is_linear\")\n\n\ndef debox_phi(other: Any) -> Any:\n # relative\n from .phi_tensor import PhiTensor\n\n if not isinstance(other, PhiTensor):\n return other\n return other.gamma\n\n\ndef update_state(state: Dict, other: Any) -> Dict:\n if isinstance(other, GammaTensor):\n state.update(other.sources)\n return state\n\n\nSingleOrTupleInt = Union[int, Tuple[int, ...]]\nOptionalAxisArg = Optional[SingleOrTupleInt]\n\n\n@serializable(recursive_serde=True)\nclass TensorWrappedGammaTensorPointer(Pointer):\n __name__ = \"TensorWrappedGammaTensorPointer\"\n __module__ = \"syft.core.tensor.autodp.gamma_tensor\"\n __attr_allowlist__ = [\n # default pointer attrs\n \"client\",\n \"id_at_location\",\n \"object_type\",\n \"tags\",\n \"description\",\n # phi_tensor attrs\n \"public_dtype\",\n \"public_shape\",\n ]\n\n __serde_overrides__: Dict[str, Sequence[Callable]] = {\n \"client\": [lambda x: x.address, lambda y: y],\n \"public_shape\": [lambda x: x, lambda y: upcast(y)],\n # \"data_subjects\": 
[dslarraytonumpyutf8, numpyutf8todslarray],\n \"public_dtype\": [lambda x: str(x), lambda y: np.dtype(y)],\n }\n _exhausted = False\n is_enum = False\n PUBLISH_POINTER_TYPE = \"numpy.ndarray\"\n __array_ufunc__ = None\n\n def __init__(\n self,\n client: Any,\n id_at_location: Optional[UID] = None,\n object_type: str = \"\",\n tags: Optional[List[str]] = None,\n description: str = \"\",\n public_shape: Optional[Tuple[int, ...]] = None,\n public_dtype: Optional[np.dtype] = None,\n ):\n super().__init__(\n client=client,\n id_at_location=id_at_location,\n object_type=object_type,\n tags=tags,\n description=description,\n )\n self.public_shape = public_shape\n self.public_dtype = public_dtype\n\n # TODO: Modify for large arrays\n @property\n def synthetic(self) -> np.ndarray:\n public_dtype_func = getattr(\n self.public_dtype, \"upcast\", lambda: self.public_dtype\n )\n return (\n np.random.rand(*list(self.public_shape)) # type: ignore\n * (self.max_vals.to_numpy() - self.min_vals.to_numpy())\n + self.min_vals.to_numpy()\n ).astype(public_dtype_func())\n\n def __repr__(self) -> str:\n repr_string = f\"PointerId: {self.id_at_location.no_dash}\"\n if hasattr(self.client, \"obj_exists\"):\n _ptr_status = (\n PointerStatus.READY.value\n if self.exists\n else PointerStatus.PROCESSING.value\n )\n repr_string += f\"\\nStatus: {_ptr_status}\"\n repr_string += f\"\\nRepresentation: {self.synthetic.__repr__()}\"\n repr_string += \"\\n\\n(The data printed above is synthetic - it's an imitation of the real data.)\"\n return repr_string\n\n def share(self, *parties: Tuple[AbstractNodeClient, ...]) -> MPCTensor:\n all_parties = list(parties) + [self.client]\n ring_size = TYPE_TO_RING_SIZE.get(self.public_dtype, None)\n self_mpc = MPCTensor(\n secret=self,\n shape=self.public_shape,\n ring_size=ring_size,\n parties=all_parties,\n )\n return self_mpc\n\n @property\n def shape(self) -> Optional[Tuple[int, ...]]:\n if hasattr(self, \"public_shape\"):\n return self.public_shape\n else:\n return None\n\n def _apply_tensor_op(self, other: Any, op_str: str) -> Any:\n # we want to get the return type which matches the attr_path_and_name\n # so we ask lib_ast for the return type name that matches out\n # attr_path_and_name and then use that to get the actual pointer klass\n # then set the result to that pointer klass\n # We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data\n attr_path_and_name = f\"syft.core.tensor.tensor.Tensor.{op_str}\"\n\n result = TensorWrappedGammaTensorPointer(\n client=self.client,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=[other], kwargs={})\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=self.client.node_uid,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=[other],\n kwargs={},\n )\n\n result_public_shape = None\n\n if isinstance(other, 
TensorWrappedGammaTensorPointer):\n other_shape = other.public_shape\n other_dtype = other.public_dtype\n elif isinstance(other, (int, float)):\n other_shape = (1,)\n other_dtype = DEFAULT_INT_NUMPY_TYPE\n elif isinstance(other, bool):\n other_shape = (1,)\n other_dtype = np.dtype(\"bool\")\n elif isinstance(other, np.ndarray):\n other_shape = other.shape\n other_dtype = other.dtype\n else:\n raise ValueError(\n f\"Invalid Type for TensorWrappedGammaTensorPointer:{type(other)}\"\n )\n\n if self.public_shape is not None and other_shape is not None:\n result_public_shape = utils.get_shape(\n op_str, self.public_shape, other_shape\n )\n\n if self.public_dtype is None or other_dtype is None:\n if self.public_dtype != other_dtype:\n raise ValueError(\n f\"Dtype for self: {self.public_dtype} and other :{other_dtype} should not be None\"\n )\n\n # calculate the dtype of the result based on the op_str\n result_public_dtype = utils.get_dtype(\n op_str, self.public_shape, other_shape, self.public_dtype, other_dtype\n )\n\n result.public_shape = result_public_shape\n result.public_dtype = result_public_dtype\n\n result.client.processing_pointers[result.id_at_location] = True\n\n return result\n\n @staticmethod\n def _apply_op(\n self: TensorWrappedGammaTensorPointer,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n op_str: str,\n ) -> Union[MPCTensor, TensorWrappedGammaTensorPointer]:\n \"\"\"Performs the operation based on op_str\n\n Args:\n other (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]): second operand.\n\n Returns:\n Tuple[MPCTensor,Union[MPCTensor,int,float,np.ndarray]] : Result of the operation\n \"\"\"\n # relative\n from ..autodp.phi_tensor import TensorWrappedPhiTensorPointer\n\n if isinstance(other, TensorWrappedPhiTensorPointer):\n other = other.gamma\n\n if (\n isinstance(other, TensorWrappedGammaTensorPointer)\n and self.client != other.client\n ):\n parties = [self.client, other.client]\n\n self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)\n other_mpc = MPCTensor(\n secret=other, shape=other.public_shape, parties=parties\n )\n\n return getattr(self_mpc, op_str)(other_mpc)\n\n elif isinstance(other, MPCTensor):\n return getattr(other, op_str)(self)\n\n return self._apply_tensor_op(other=other, op_str=op_str)\n\n def _apply_self_tensor_op(self, op_str: str, *args: Any, **kwargs: Any) -> Any:\n # we want to get the return type which matches the attr_path_and_name\n # so we ask lib_ast for the return type name that matches out\n # attr_path_and_name and then use that to get the actual pointer klass\n # then set the result to that pointer klass\n\n # We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data\n attr_path_and_name = f\"syft.core.tensor.tensor.Tensor.{op_str}\"\n result = TensorWrappedGammaTensorPointer(\n client=self.client,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=self,\n 
args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=self.client.node_uid,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=args,\n kwargs=kwargs,\n )\n\n # relative\n from ..autodp.phi_tensor import TensorWrappedPhiTensorPointer\n\n if op_str == \"choose\":\n dummy_res = np.ones(self.public_shape, dtype=np.int64)\n if isinstance(\n args[0],\n (TensorWrappedPhiTensorPointer, TensorWrappedGammaTensorPointer),\n ):\n temp_args = (np.ones(args[0].shape, dtype=np.int64), *args[1:])\n dummy_res = getattr(dummy_res, op_str)(*temp_args, **kwargs)\n else:\n dummy_res = getattr(dummy_res, op_str)(*args, **kwargs)\n else:\n dummy_res = np.empty(self.public_shape)\n if hasattr(dummy_res, op_str):\n if op_str in INPLACE_OPS:\n getattr(dummy_res, op_str)(*args, **kwargs)\n else:\n dummy_res = getattr(dummy_res, op_str)(*args, **kwargs)\n elif hasattr(np, op_str):\n dummy_res = getattr(np, op_str)(dummy_res, *args, *kwargs)\n else:\n raise ValueError(f\"Invalid Numpy Operation: {op_str} for Pointer\")\n\n result.public_shape = dummy_res.shape\n result.public_dtype = dummy_res.dtype\n\n return result\n\n def copy(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n return self._apply_self_tensor_op(\"copy\", *args, **kwargs)\n\n def __add__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"add\" operation between \"self\" and \"other\"\n\n Args:\n (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__add__\")\n\n def __radd__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"radd\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__radd__\")\n\n def __sub__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"sub\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__sub__\")\n\n def __rsub__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"rsub\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rsub__\")\n\n def __mul__(\n self,\n other: Union[\n 
TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"mul\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__mul__\")\n\n def __rmul__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"rmul\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rmul__\")\n\n def __matmul__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"matmul\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__matmul__\")\n\n def __rmatmul__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"rmatmul\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rmatmul__\")\n\n def __lt__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"lt\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__lt__\")\n\n def __gt__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"gt\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__gt__\")\n\n def __ge__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"ge\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n 
Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__ge__\")\n\n def __le__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"le\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__le__\")\n\n def __eq__( # type: ignore\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"eq\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__eq__\")\n\n def __ne__( # type: ignore\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"ne\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__ne__\")\n\n def concatenate(\n self,\n other: TensorWrappedGammaTensorPointer,\n *args: Any,\n **kwargs: Any,\n ) -> MPCTensor:\n \"\"\"Apply the \"concatenate\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n if not isinstance(other, TensorWrappedGammaTensorPointer):\n raise ValueError(\n f\"Concatenate works only for TensorWrappedGammaTensorPointer got type: {type(other)}\"\n )\n\n if self.client != other.client:\n parties = [self.client, other.client]\n\n self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)\n other_mpc = MPCTensor(\n secret=other, shape=other.public_shape, parties=parties\n )\n\n return self_mpc.concatenate(other_mpc, *args, **kwargs)\n\n else:\n raise ValueError(\n \"Concatenate method currently works only between two different clients.\"\n )\n\n def __truediv__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"truediv\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__truediv__\")\n\n def __rtruediv__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the 
\"rtruediv\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rtruediv__\")\n\n def __mod__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__mod__\")\n\n def __and__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"and\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__and__\")\n\n def __or__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"or\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__or__\")\n\n def __floordiv__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"floordiv\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__floordiv__\")\n\n def __rfloordiv__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"rfloordiv\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rfloordiv__\")\n\n def __divmod__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Tuple[\n Union[TensorWrappedGammaTensorPointer, MPCTensor],\n Union[TensorWrappedGammaTensorPointer, MPCTensor],\n ]:\n \"\"\"Apply the \"divmod\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self.divmod(other)\n\n def divmod(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Tuple[\n Union[TensorWrappedGammaTensorPointer, MPCTensor],\n 
Union[TensorWrappedGammaTensorPointer, MPCTensor],\n ]:\n \"\"\"Apply the \"divmod\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return TensorWrappedGammaTensorPointer._apply_op(\n self, other, \"__floordiv__\"\n ), TensorWrappedGammaTensorPointer._apply_op(self, other, \"__mod__\")\n\n def sum(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Sum of array elements over a given axis.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which a sum is performed.\n The default, axis=None, will sum all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a\n single axis or all the axes as before.\n keepdims: bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the input array.\n If the default value is passed, then keepdims will not be passed through to the sum method of\n sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not\n implement keepdims any exceptions will be raised.\n initial: scalar, optional\n Starting value for the sum. See reduce for details.\n where: array_like of bool, optional\n Elements to include in the sum. See reduce for details.\n \"\"\"\n return self._apply_self_tensor_op(\"sum\", *args, **kwargs)\n\n def ptp(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"ptp\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"ptp\", *args, **kwargs)\n\n def __lshift__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"lshift\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__lshift__\")\n\n def argmax(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"argmax\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return self._apply_self_tensor_op(\"argmax\", *args, **kwargs)\n\n def __rshift__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"rshift\" operation between \"self\" and \"other\"\n\n Args:\n y 
(Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__rshift__\")\n\n def argmin(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"argmin\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return self._apply_self_tensor_op(\"argmin\", *args, **kwargs)\n\n def __abs__(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"abs\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"__abs__\", *args, **kwargs)\n\n def all(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"all\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return self._apply_self_tensor_op(\"all\", *args, **kwargs)\n\n def any(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"any\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return self._apply_self_tensor_op(\"any\", *args, **kwargs)\n\n def round(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n return self._apply_self_tensor_op(\"round\", *args, **kwargs)\n\n def __round__(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n return self.round(*args, **kwargs)\n\n def __pos__(self) -> TensorWrappedGammaTensorPointer:\n \"\"\"Apply the pos (+) operator on self.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(op_str=\"__pos__\")\n\n def var(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Compute the variance along the specified axis of the array elements, a measure of the spread of a distribution.\n The variance is computed for the flattened array by default, otherwise over the specified axis.\n\n Parameters\n\n axis: None or int or tuple of ints, optional\n Axis or axes along which the variance is computed.\n The default is to compute the variance of the flattened array.\n If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all\n the axes as before.\n\n ddof: int, optional\n “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof, where N represents the\n number of elements. 
By default ddof is zero.\n\n keepdims: bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the input array.\n If the default value is passed, then keepdims will not be passed through to the var method of\n sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not\n implement keepdims any exceptions will be raised.\n\n where: array_like of bool, optional\n Elements to include in the variance. See reduce for details.\n \"\"\"\n return self._apply_self_tensor_op(\"var\", *args, **kwargs)\n\n def cumsum(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\" \"\n Return the cumulative sum of the elements along a given axis.\n\n Parameters\n axis: int, optional\n Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the\n flattened array.\n Returns\n cumsum_along_axis: PhiTensor\n A new array holding the result is returned. The result has the same size as input, and the same shape as\n a if axis is not None or a is 1-d.\n \"\"\"\n return self._apply_self_tensor_op(\"cumsum\", *args, **kwargs)\n\n def cumprod(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return the cumulative product of the elements along a given axis.\n\n Parameters\n axis: int, optional\n Axis along which the cumulative product is computed. The default (None) is to compute the cumprod over\n the flattened array.\n Returns\n cumprod_along_axis: PhiTensor\n A new array holding the result is returned. The result has the same size as input, and the same shape as\n a if axis is not None or a is 1-d.\n \"\"\"\n return self._apply_self_tensor_op(\"cumprod\", *args, **kwargs)\n\n def prod(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return the product of array elements over a given axis.\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which a product is performed.\n The default, axis=None, will calculate the product of all the elements in the input array.\n If axis is negative it counts from the last to the first axis.\n If axis is a tuple of ints, a product is performed on all of the axes specified in the tuple instead of\n a single axis or all the axes as before.\n keepdims: bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the input array.\n If the default value is passed, then keepdims will not be passed through to the prod method of\n sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not\n implement keepdims any exceptions will be raised.\n initial: scalar, optional\n The starting value for this product. See reduce for details.\n where: array_like of bool, optional\n Elements to include in the product. 
See reduce for details.\n \"\"\"\n return self._apply_self_tensor_op(\"prod\", *args, **kwargs)\n\n def __xor__(\n self,\n other: Union[\n TensorWrappedGammaTensorPointer, MPCTensor, int, float, np.ndarray\n ],\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"xor\" operation between \"self\" and \"other\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n raise NotImplementedError\n # return TensorWrappedGammaTensorPointer._apply_op(self, other, \"__xor__\")\n\n def __pow__(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Raise each base in x1 to the positionally-corresponding power in x2.\n x1 and x2 must be broadcastable to the same shape.\n An integer type raised to a negative integer power will raise a ValueError.\n Negative values raised to a non-integral value will return nan.\n\n Parameters\n x2: array_like\n\n The exponents. If self.shape != x2.shape, they must be broadcastable to a common shape.\n\n where: array_like, optional\n\n This condition is broadcast over the input. At locations where the condition is True, the out array will\n be set to the ufunc result.\n Elsewhere, the out array will retain its original value.\n\n **kwargs\n For other keyword-only arguments, see the ufunc docs.\n\n Returns\n y: PhiTensorPointer\n The bases in the tensor raised to the exponents in x2. This is a scalar if both self and x2 are scalars.\n \"\"\"\n return self._apply_self_tensor_op(\"__pow__\", *args, **kwargs)\n\n def mean(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n \"\"\"\n Compute the arithmetic mean along the specified axis.\n\n Returns the average of the array elements. The average is taken over the flattened array by default, otherwise\n over the specified axis.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened\n array.\n \"\"\"\n return self._apply_self_tensor_op(\"mean\", *args, **kwargs)\n\n def std(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution, of the array elements.\n The standard deviation is computed for the flattened array by default, otherwise over the specified axis.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed.\n The default is to compute the standard deviation of the flattened array.\n If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single\n axis or all the axes as before.\n\n out: ndarray, optional\n Alternative output array in which to place the result. It must have the same shape as the expected\n output but the type (of the calculated values) will be cast if necessary.\n\n ddof: int, optional\n ddof = Delta Degrees of Freedom. 
By default ddof is zero.\n The divisor used in calculations is N - ddof, where N represents the number of elements.\n\n keepdims: bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the input array.\n\n If the default value is passed, then keepdims will not be passed through to the std method of\n sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not\n implement keepdims any exceptions will be raised.\n\n where: array_like of bool, optional\n Elements to include in the standard deviation. See reduce for details.\n\n Returns\n\n standard_deviation: PhiTensor\n \"\"\"\n attr_path_and_name = \"syft.core.tensor.tensor.Tensor.std\"\n result = TensorWrappedGammaTensorPointer(\n client=self.client,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=self.client.node_uid,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=[],\n kwargs={},\n )\n result.public_shape = self.client.shape # data_subjects.shape\n result.public_dtype = self.public_dtype\n\n return result\n\n def trace(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return the sum along diagonals of the array.\n\n If a is 2-D, the sum along its diagonal with the given offset is returned, i.e., the sum of elements\n a[i,i+offset] for all i.\n\n If a has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D\n sub-arrays whose traces are returned. The shape of the resulting array is the same as that of a with axis1 and\n axis2 removed.\n\n Parameters\n\n offset: int, optional\n Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to 0.\n\n axis1, axis2: int, optional\n Axes to be used as the first and second axis of the 2-D sub-arrays from which the diagonals should be\n taken. Defaults are the first two axes of a.\n\n Returns\n\n Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.\n If a is 2-D, the sum along the diagonal is returned.\n If a has larger dimensions, then an array of sums along diagonals is returned.\n\n \"\"\"\n return self._apply_self_tensor_op(\"trace\", *args, **kwargs)\n\n def sort(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n \"\"\"\n Return a sorted copy of an array.\n\n Parameters\n\n a: array_like\n Array to be sorted.\n\n axis: int or None, optional\n Axis along which to sort. If None, the array is flattened before sorting.\n The default is -1, which sorts along the last axis.\n\n kind{‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’}, optional\n Sorting algorithm. 
The default is ‘quicksort’.\n Note that both ‘stable’ and ‘mergesort’ use timsort or radix sort under the covers and, in general,\n the actual implementation will vary with data type. The ‘mergesort’ option is retained for backwards\n compatibility.\n\n Changed in version 1.15.0.: The ‘stable’ option was added.\n\n order: str or list of str, optional\n When a is an array with fields defined, this argument specifies which fields to compare first, second,\n etc. A single field can be specified as a string, and not all fields need be specified, but unspecified\n fields will still be used, in the order in which they come up in the dtype, to break ties.\n\n Please see docs here: https://numpy.org/doc/stable/reference/generated/numpy.sort.html\n \"\"\"\n return self._apply_self_tensor_op(\"sort\", *args, **kwargs)\n\n def argsort(self, *args: Any, **kwargs: Any) -> TensorWrappedGammaTensorPointer:\n \"\"\"\n Returns the indices that would sort an array.\n\n Perform an indirect sort along the given axis using the algorithm specified by the kind keyword.\n It returns an array of indices of the same shape as a that index data along the given axis in sorted order.\n\n Parameters\n axis: int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used.\n kind: {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’}, optional\n Sorting algorithm. The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort\n under the covers and, in general, the actual implementation will vary with data type. The ‘mergesort’\n option is retained for backwards compatibility.\n order: str or list of str, optional\n When a is an array with fields defined, this argument specifies which fields to compare 1st, 2nd, etc.\n A single field can be specified as a string, and not all fields need be specified, but unspecified\n fields will still be used, in the order in which they come up in the dtype, to break ties.\n\n Returns\n index_array: ndarray, int\n Array of indices that sort a along the specified axis. If a is one-dimensional, a[index_array] yields a\n sorted a. More generally, np.take_along_axis(a, index_array, axis=axis) always yields the sorted a,\n irrespective of dimensionality.\n \"\"\"\n raise NotImplementedError\n # return self._apply_self_tensor_op(\"argsort\", *args, **kwargs)\n\n def min(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return the minimum of an array or minimum along an axis.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which to operate. By default, flattened input is used.\n If this is a tuple of ints, the minimum is selected over multiple axes,\n instead of a single axis or all the axes as before.\n\n Returns\n a_min: PhiTensor\n Minimum of a.\n If axis is None, the result is a scalar value.\n If axis is given, the result is an array of dimension a.ndim - 1.\n \"\"\"\n return self._apply_self_tensor_op(\"min\", *args, **kwargs)\n\n def max(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return the maximum of an array or along an axis.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Axis or axes along which to operate. 
By default, flattened input is used.\n If this is a tuple of ints, the minimum is selected over multiple axes,\n instead of a single axis or all the axes as before.\n\n Returns\n a_max: PhiTensor\n Maximum of a.\n If axis is None, the result is a scalar value.\n If axis is given, the result is an array of dimension a.ndim - 1.\n \"\"\"\n return self._apply_self_tensor_op(\"max\", *args, **kwargs)\n\n def compress(\n self, *args: Any, **kwargs: Any\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return selected slices of an array along given axis.\n\n When working along a given axis, a slice along that axis is returned in output for each index\n where condition evaluates to True. When working on a 1-D array, compress is equivalent to extract.\n\n Parameters\n condition: 1-D array of bools\n Array that selects which entries to return. If len(condition) is less than the size of\n a along the given axis,then output is truncated to the length of the condition array.\n\n axis: int, optional\n Axis along which to take slices. If None (default), work on the flattened array.\n\n Returns:\n compressed_array: PhiTensor\n A copy of a without the slices along axis for which condition is false.\n \"\"\"\n return self._apply_self_tensor_op(\"compress\", *args, **kwargs)\n\n def squeeze(\n self, *args: Any, **kwargs: Any\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Remove axes of length one from a.\n\n Parameters\n axis: None or int or tuple of ints, optional\n Selects a subset of the entries of length one in the shape.\n If an axis is selected with shape entry greater than one, an error is raised.\n\n Returns:\n squeezed: PhiTensor\n The input array, but with all or a subset of the dimensions of length 1 removed.\n This is always a itself or a view into a.\n Note that if all axes are squeezed, the result is a 0d array and not a scalar.\n \"\"\"\n return self._apply_self_tensor_op(\"squeeze\", *args, **kwargs)\n\n def __getitem__(\n self, key: Union[int, bool, slice]\n ) -> TensorWrappedGammaTensorPointer:\n \"\"\"Return self[key].\n Args:\n y (Union[int,bool,slice]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"__getitem__\", key)\n\n def zeros_like(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"zeros_like\" operation on \"self\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"zeros_like\", *args, **kwargs)\n\n def ones_like(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the \"ones_like\" operation on \"self\"\n\n Args:\n y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"ones_like\", *args, **kwargs)\n\n def transpose(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Reverse or permute the axes of an array; returns the modified array.\n\n Returns\n p: ndarray\n array with its axes permuted. 
A view is returned whenever possible.\n \"\"\"\n\n return self._apply_self_tensor_op(\"transpose\", *args, **kwargs)\n\n def resize(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return a new array with the specified shape.\n\n Parameters\n new_shape: int or tuple of int\n Shape of resized array.\n\n Returns\n reshaped_array: ndarray\n The new array is formed from the data in the old array,\n repeated if necessary to fill out the required number of elements.\n The data are repeated iterating over the array in C-order.\n\n \"\"\"\n return self._apply_self_tensor_op(\"resize\", *args, **kwargs)\n\n def reshape(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Gives a new shape to an array without changing its data.\n\n Parameters\n new_shape: int or tuple of int\n The new shape should be compatible with the original shape. If an integer, then the result will\n be a 1-D array of that length. One shape dimension can be -1. In this case,\n the value is inferred from the length of the array and remaining dimensions.\n\n Returns\n reshaped_array: ndarray\n This will be a new view object if possible; otherwise, it will be a copy.\n Note there is no guarantee of the memory layout (C- or Fortran- contiguous) of the returned array.\n \"\"\"\n return self._apply_self_tensor_op(\"reshape\", *args, **kwargs)\n\n def repeat(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"Apply the repeat\" operation\n\n Args:\n y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.\n\n Returns:\n Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"repeat\", *args, **kwargs)\n\n def diagonal(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return specified diagonals.\n If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements\n of the form a[i, i+offset].\n\n If a has more than two dimensions, then the axes specified by axis1 and axis are used to determine\n the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by\n removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals.\n\n Parameters\n\n offset: int, optional\n Offset of the diagonal from the main diagonal. 
Can be positive or negative.\n Defaults to main diagonal (0).\n axis1, axis2: int, optional\n Axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken.\n Defaults are the first two axes of a.\n\n Returns\n array_of_diagonals : Union[TensorWrappedPhiTensorPointer,MPCTensor]\n If a is 2-D, then a 1-D array containing the diagonal and of the same type as a is returned unless\n a is a matrix, in which case\n a 1-D array rather than a (2-D) matrix is returned in order to maintain backward compatibility.\n\n If a.ndim > 2, then the dimensions specified by axis1 and axis2 are removed, and a new axis\n inserted at the end corresponding to the diagonal.\n \"\"\"\n return self._apply_self_tensor_op(\"diagonal\", *args, **kwargs)\n\n def flatten(\n self, *args: Any, **kwargs: Any\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return a copy of the array collapsed into one dimension.\n\n Parameters\n order: {‘C’, ‘F’, ‘A’, ‘K’}, optional\n ‘C’ means to flatten in row-major (C-style) order.\n ‘F’ means to flatten in column-major (Fortran- style) order.\n ‘A’ means to flatten in column-major order if a is Fortran contiguous in memory, row-major order otherwise.\n ‘K’ means to flatten a in the order the elements occur in memory. The default is ‘C’.\n\n Returns\n y: PhiTensor\n A copy of the input array, flattened to one dimension.\n \"\"\"\n return self._apply_self_tensor_op(\"flatten\", *args, **kwargs)\n\n def ravel(\n self, *args: Any, **kwargs: Any\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Return a contiguous flattened array.\n\n A 1-D array, containing the elements of the input, is returned. A copy is made only if needed.\n\n As of NumPy 1.10, the returned array will have the same type as the input array.\n (for example, a masked array will be returned for a masked array input)\n Parameters\n order: {‘C’,’F’, ‘A’, ‘K’}, optional\n The elements of a are read using this index order.\n ‘C’ means to index the elements in row-major,\n C-style order, with the last axis index changing fastest, back to the first axis index changing slowest.\n ‘F’ means to index the elements in column-major, Fortran-style order, with the first index changing fastest,\n and the last index changing slowest.\n Note that the ‘C’ and ‘F’ options take no account of the memory layout of the underlying array,\n and only refer to the order of axis indexing.\n ‘A’ means to read the elements in Fortran-like index order if a is Fortran contiguous in memory,\n C-like order otherwise.\n ‘K’ means to read the elements in the order they occur in memory, except for reversing the data\n when strides are negative.\n By default, ‘C’ index order is used.\n\n Returns:\n y: PhiTensor\n y is an array of the same subtype as a, with shape (a.size,).\n Note that matrices are special cased for backward compatibility,\n if a is a matrix, then y is a 1-D ndarray.\n \"\"\"\n return self._apply_self_tensor_op(\"ravel\", *args, **kwargs)\n\n def take(\n self, *args: Any, **kwargs: Any\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as “fancy” indexing (indexing arrays using arrays);\n however, it can be easier to use if you need elements along a given axis.\n A call such as np.take(arr, indices, axis=3) is equivalent to arr[:,:,:,indices,...].\n\n Explained without fancy indexing, this is equivalent to the following use of ndindex, \\\n which sets 
each of ii, jj, and kk to a tuple of indices:\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n indices: array_like (Nj…)\n The indices of the values to extract.\n\n axis: int, optional\n The axis over which to select values. By default, the flattened input array is used.\n\n mode: {‘raise’, ‘wrap’, ‘clip’}, optional\n Specifies how out-of-bounds indices will behave.\n\n * ‘raise’ – raise an error (default)\n\n * ‘wrap’ – wrap around\n\n * ‘clip’ – clip to the range\n\n ‘clip’ mode means that all indices that are too large are replaced by the index\n that addresses the last element along that axis.\n Note that this disables indexing with negative numbers.\n\n Returns\n out: PhiTensor\n The returned array has the same type as a.\n \"\"\"\n return self._apply_self_tensor_op(\"take\", *args, **kwargs)\n\n def clip(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Clip (limit) the values in an array.\n\n Parameters\n a : array_like\n Array containing elements to clip.\n a_min, a_max : array_like or None\n Minimum and maximum value. If None, clipping is not performed on\n the corresponding edge. Only one of a_min and a_max may be\n None. Both are broadcast against a.\n Returns:\n Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.\n \"\"\"\n return self._apply_self_tensor_op(\"clip\", *args, **kwargs)\n\n def choose(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]:\n \"\"\"\n Construct an array from an index array and a list of arrays to choose from.\n\n First of all, if confused or uncertain, definitely look at the Examples - in its full generality,\n this function is less simple than it might seem from the following code description\n (below ndi = numpy.lib.index_tricks):\n\n np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).\n\n But this omits some subtleties. Here is a fully general summary:\n\n Given an “index” array (a) of integers and a sequence of n arrays (choices), a and each choice array are first\n broadcast, as necessary, to arrays of a common shape; calling these Ba and Bchoices[i], i = 0,…,n-1 we have that\n necessarily, Ba.shape == Bchoices[i].shape for each i. Then, a new array with shape Ba.shape is created\n as follows:\n\n if mode='raise' (the default), then, first of all, each element of a (and thus Ba) must be in the range\n [0, n-1]; now, suppose that i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -\n then the value at the same position in the new array is the value in Bchoices[i] at that same position;\n\n if mode='wrap', values in a (and thus Ba) may be any (signed) integer; modular arithmetic is used to map\n integers outside the range [0, n-1] back into that range; and then the new array is constructed as above;\n\n if mode='clip', values in a (and thus Ba) may be any (signed) integer; negative integers are mapped to 0;\n values greater than n-1 are mapped to n-1; and then the new array is constructed as above.\n\n Parameters\n\n choices: sequence of arrays\n\n Choice arrays. a and all of the choices must be broadcastable to the same shape. 
If choices is itself an\n array (not recommended), then its outermost dimension (i.e., the one corresponding to choices.shape[0])\n is taken as defining the “sequence”.\n\n out: array, optional\n\n If provided, the result will be inserted into this array. It should be of the appropriate shape and\n dtype. Note that out is always buffered if mode='raise'; use other modes for better performance.\n\n mode{‘raise’ (default), ‘wrap’, ‘clip’}, optional\n\n Specifies how indices outside [0, n-1] will be treated:\n\n ‘raise’ : an exception is raised\n\n ‘wrap’ : value becomes value mod n\n\n ‘clip’ : values < 0 are mapped to 0, values > n-1 are mapped to n-1\n\n Returns\n merged_array: PhiTensor\n The merged result.\n\n Raises\n ValueError: shape mismatch\n If a and each choice array are not all broadcastable to the same shape.\n\n \"\"\"\n return self._apply_self_tensor_op(\"choose\", *args, **kwargs)\n\n @property\n def T(self) -> TensorWrappedGammaTensorPointer:\n # We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data\n attr_path_and_name = \"syft.core.tensor.tensor.Tensor.T\"\n\n result = TensorWrappedGammaTensorPointer(\n client=self.client,\n )\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args,\n kwargs=downcast_kwargs,\n client=self.client,\n gc_enabled=False,\n )\n\n cmd = GetOrSetPropertyAction(\n path=attr_path_and_name,\n id_at_location=result_id_at_location,\n address=self.client.node_uid,\n _self=self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n action=PropertyActions.GET,\n map_to_dyn=False,\n )\n self.client.send_immediate_msg_without_reply(msg=cmd)\n\n inherit_tags(\n attr_path_and_name=attr_path_and_name,\n result=result,\n self_obj=self,\n args=[],\n kwargs={},\n )\n\n result_public_shape = np.empty(self.public_shape).T.shape\n\n result.public_shape = result_public_shape\n result.public_dtype = self.public_dtype\n\n return result\n\n def to_local_object_without_private_data_child(self) -> GammaTensor:\n \"\"\"Convert this pointer into a partial version of the GammaTensor but without\n any of the private data therein.\"\"\"\n # relative\n from ..tensor import Tensor\n\n public_shape = getattr(self, \"public_shape\", None)\n public_dtype = getattr(self, \"public_dtype\", None)\n return Tensor(\n child=GammaTensor(\n child=FixedPrecisionTensor(value=None), # TODO 0.7 fix this\n sources={},\n jax_op=None,\n is_linear=False,\n ),\n public_shape=public_shape,\n public_dtype=public_dtype,\n )\n\n\n@implements(TensorWrappedGammaTensorPointer, np.zeros_like)\ndef zeros_like(\n tensor: TensorWrappedGammaTensorPointer,\n *args: Any,\n **kwargs: Any,\n) -> TensorWrappedGammaTensorPointer:\n return tensor.zeros_like(*args, **kwargs)\n\n\n@implements(TensorWrappedGammaTensorPointer, np.ones_like)\ndef ones_like(\n tensor: TensorWrappedGammaTensorPointer,\n *args: Any,\n **kwargs: Any,\n) -> TensorWrappedGammaTensorPointer:\n return tensor.ones_like(*args, **kwargs)\n\n\ndef create_lookup_tables(dictionary: dict) -> Tuple[List[str], dict, List[dict]]:\n index2key: List = [str(x) for x in dictionary.keys()]\n key2index: dict = {key: i for i, key in 
enumerate(index2key)}\n # Note this maps to GammaTensor, not to GammaTensor.child as name may imply\n index2values: List = [dictionary[i] for i in index2key]\n\n return index2key, key2index, index2values\n\n\ndef create_new_lookup_tables(\n dictionary: dict,\n) -> Tuple[Deque[str], dict, Deque[dict], Deque[int]]:\n index2key: Deque = deque()\n key2index: dict = {}\n index2values: Deque = (\n deque()\n ) # Note this maps to GammaTensor, not to GammaTensor.child as name may imply\n index2size: Deque = deque()\n for index, key in enumerate(dictionary.keys()):\n key = str(key)\n index2key.append(key)\n key2index[key] = index\n index2values.append(dictionary[key])\n index2size.append(len(dictionary[key]))\n\n return index2key, key2index, index2values, index2size\n\n\ndef jax2numpy(value: jnp.array, dtype: np.dtype) -> np.array:\n # are we incurring copying here?\n return np.asarray(value, dtype=dtype)\n\n\ndef numpy2jax(value: np.array, dtype: np.dtype) -> jnp.array:\n return jnp.asarray(value, dtype=dtype)\n\n\n# ATTENTION: Shouldn't this be a subclass of some kind of base tensor so all the numpy\n# methods and properties don't need to be re-implemented on it?\n@dataclass\n@serializable(recursive_serde=True)\nclass GammaTensor:\n child: jnp.array\n jax_op: SyftJaxOp = flax.struct.field(pytree_node=False)\n sources: dict = flax.struct.field(pytree_node=False)\n is_linear: bool = False\n id: str = flax.struct.field(pytree_node=False, default_factory=lambda: UID())\n\n __attr_allowlist__ = (\n \"child\",\n \"jax_op\",\n \"sources\",\n \"is_linear\",\n \"id\",\n )\n\n @classmethod\n def serde_constructor(cls, kwargs: Dict[str, Any]) -> GammaTensor:\n return GammaTensor(**kwargs)\n\n \"\"\"\n A differential privacy tensor that contains data belonging to atleast 2 or more unique data subjects.\n\n Attributes:\n child: jnp.array\n The private data itself.\n min_vals: lazyrepeatarray\n (DP Metadata) A custom class that keeps track of (data-independent) minimum values for this tensor.\n max_vals: lazyrepeatarray\n (DP Metadata) A custom class that keeps track of (data-independent) maximum values for this tensor.\n jax_op: SyftJaxOp\n is_linear: bool\n Whether the \"func_str\" for this tensor is a linear query or not. This impacts the epsilon calculations\n when publishing.\n sources: dict\n A dictionary containing all the Tensors, integers, etc that were used to create this tensor.\n It maps an integer to each input object.\n id: int\n A 32-bit integer that is used when this GammaTensor needs to be added to the \"sources\" dictionary.\n\n Methods:\n All efforts were made to make this tensor's API as similar to the NumPy API as possible.\n Special, unique methods are listed below:\n\n reconstruct(sources: Optional[dict]):\n rebuilds the tensor from the sources dictionary provided, or from the current self.sources.\n This is exclusively used when adding DP Noise, if the data scientist doesn't have enough privacy budget to\n use one of the input tensors, thus requiring that tensor's data to be removed from the computation.\n\n swap_state(sources: Optional[Dict]):\n calls reconstruct() and populates the rest of the GammaTensor's attributes based on the current tensor.\n Used exclusively when adding DP Noise.\n\n\n\n decode():\n occasionally the use of a FixedPrecisionTensor (FPT) is needed during SMPC[1]. 
This helps convert back from\n FPT to regular numpy/jax arrays.\n\n (https://en.wikipedia.org/wiki/Secure_multi-party_computation)\n\n\n\n\n \"\"\"\n\n PointerClassOverride = TensorWrappedGammaTensorPointer\n __array_ufunc__ = None\n\n child: jnp.array\n jax_op: SyftJaxOp = flax.struct.field(pytree_node=False)\n sources: dict = flax.struct.field(pytree_node=False)\n is_linear: bool = False\n id: str = flax.struct.field(pytree_node=False, default_factory=lambda: UID())\n\n def decode(self) -> np.ndarray:\n if isinstance(self.child, FixedPrecisionTensor):\n return self.child.decode()\n else:\n return self.child\n\n @property\n def proxy_public_kwargs(self) -> Dict[str, Any]:\n return {\n \"min_vals\": self.min_vals,\n \"max_vals\": self.max_vals,\n \"data_subjects\": self.data_subjects,\n } # TODO 0.7: maybe this is obsolete now?\n\n def reconstruct(self, state: Dict) -> GammaTensor:\n return self.func(state)\n\n def swap_state(self, state: dict) -> GammaTensor:\n return GammaTensor(\n child=self.reconstruct(state),\n sources=state,\n jax_op=self.jax_op,\n is_linear=self.is_linear,\n )\n\n def astype(self, new_type: str) -> GammaTensor:\n return GammaTensor(\n child=self.child.astype(new_type),\n jax_op=self.jax_op,\n sources=self.sources,\n is_linear=self.is_linear,\n id=self.id,\n )\n\n @property\n def size(self) -> int:\n if (\n isinstance(self.child, float)\n or isinstance(self.child, int)\n or isinstance(self.child, bool)\n ):\n return 1\n\n if hasattr(self.child, \"size\"):\n return self.child.size\n elif hasattr(self.child, \"shape\"):\n return np.prod(self.child.shape)\n\n raise Exception(f\"{type(self)} has no attribute size.\")\n\n def func(self, state: Dict) -> GammaTensor:\n return self.jax_op.func(state)\n\n # infix operations\n\n @staticmethod\n def _infix_func(\n left: Any, right: Any, gamma_op: GAMMA_TENSOR_OP, is_linear_op: bool\n ) -> GammaTensor:\n left = debox_phi(left)\n right = debox_phi(right)\n state = left.sources.copy() if hasattr(left, \"sources\") else {}\n output_state = update_state(state, right)\n child = GAMMA_TENSOR_OP_FUNC[gamma_op](debox_child(left), debox_child(right))\n is_linear = debox_linear(left) and debox_linear(right) and is_linear_op\n jax_op = SyftJaxInfixOp(jax_op=gamma_op, left=left, right=right)\n\n return GammaTensor(\n child=child, jax_op=jax_op, sources=output_state, is_linear=is_linear\n )\n\n def _rinfix(\n self, other: Any, gamma_op: GAMMA_TENSOR_OP, is_linear_op: bool\n ) -> GammaTensor:\n return self._infix_func(\n left=other, right=self, gamma_op=gamma_op, is_linear_op=is_linear_op\n )\n\n def _infix(\n self, other: Any, gamma_op: GAMMA_TENSOR_OP, is_linear_op: bool\n ) -> GammaTensor:\n return self._infix_func(\n left=self, right=other, gamma_op=gamma_op, is_linear_op=is_linear_op\n )\n\n def _unary_op(\n self,\n gamma_op: GAMMA_TENSOR_OP,\n is_linear: bool = False,\n args: Optional[Any] = None,\n kwargs: Optional[Any] = None,\n ) -> GammaTensor:\n args = (\n args if args is not None else []\n ) # can't use collections in default params\n kwargs = (\n kwargs if kwargs is not None else {}\n ) # can't use collections in default params\n output_state = self.sources.copy()\n child = GAMMA_TENSOR_OP_FUNC[gamma_op](self.child, *args, **kwargs)\n jax_op = SyftJaxUnaryOp(jax_op=gamma_op, operand=self, args=args, kwargs=kwargs)\n return GammaTensor(\n child=child,\n jax_op=jax_op,\n sources=output_state,\n is_linear=is_linear and self.is_linear,\n )\n\n def __add__(self, other: Any) -> GammaTensor:\n return self._infix(other, 
gamma_op=GAMMA_TENSOR_OP.ADD, is_linear_op=True)\n\n def __sub__(self, other: Any) -> GammaTensor:\n return self._infix(other, gamma_op=GAMMA_TENSOR_OP.SUBTRACT, is_linear_op=True)\n\n def __mod__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.MOD, is_linear_op=False)\n\n def __mul__(self, other: Any) -> GammaTensor:\n return self._infix(other, gamma_op=GAMMA_TENSOR_OP.MULTIPLY, is_linear_op=True)\n\n def __truediv__(self, other: Any) -> GammaTensor:\n return self._infix(\n other, gamma_op=GAMMA_TENSOR_OP.TRUE_DIVIDE, is_linear_op=True\n )\n\n def __floordiv__(self, other: Any) -> GammaTensor:\n return self._infix(\n other, gamma_op=GAMMA_TENSOR_OP.FLOOR_DIVIDE, is_linear_op=True\n )\n\n def __matmul__(self, other: Any) -> GammaTensor:\n return self._infix(other, gamma_op=GAMMA_TENSOR_OP.MATMUL, is_linear_op=False)\n\n def __gt__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.GREATER, is_linear_op=False)\n\n def __ge__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.GREATER_EQUAL, is_linear_op=False\n # )\n\n def __lt__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.LESS, is_linear_op=False)\n\n def __le__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.LESS_EQUAL, is_linear_op=False\n # )\n\n def __eq__(self, other: Any) -> GammaTensor: # type: ignore\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.EQUAL, is_linear_op=False)\n\n def __ne__(self, other: Any) -> GammaTensor: # type: ignore\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.NOT_EQUAL, is_linear_op=False\n # )\n\n def __and__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.BITWISE_AND, is_linear_op=False\n # )\n\n def __or__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.BITWISE_OR, is_linear_op=False\n # )\n\n def __lshift__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.LSHIFT, is_linear_op=False)\n\n def __rshift__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(other, gamma_op=GAMMA_TENSOR_OP.RSHIFT, is_linear_op=False)\n\n def __xor__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._infix(\n # other, gamma_op=GAMMA_TENSOR_OP.BITWISE_XOR, is_linear_op=False\n # )\n\n def dot(self, other: Union[np.ndarray, GammaTensor]) -> GammaTensor:\n # QUESTION: is there a reason other can't be a non gamma tensor numpy array?\n return self._infix(other, gamma_op=GAMMA_TENSOR_OP.DOT, is_linear_op=False)\n\n # __r*__ infix operations\n\n def __radd__(self, other: Any) -> GammaTensor:\n # return self.__add__(other)\n return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.ADD, is_linear_op=True)\n\n def __rsub__(self, other: Any) -> GammaTensor:\n # return (self.__sub__(other)) * -1\n return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.SUBTRACT, is_linear_op=True)\n\n def __rmod__(self, other: Any) -> GammaTensor:\n raise NotImplementedError\n # return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.MOD, is_linear_op=False)\n\n def 
__rmul__(self, other: Any) -> GammaTensor:\n return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.MULTIPLY, is_linear_op=True)\n\n def __rtruediv__(self, other: Any) -> GammaTensor:\n return self._rinfix(\n other, gamma_op=GAMMA_TENSOR_OP.TRUE_DIVIDE, is_linear_op=True\n )\n\n def __rfloordiv__(self, other: Any) -> GammaTensor:\n return self._rinfix(\n other, gamma_op=GAMMA_TENSOR_OP.FLOOR_DIVIDE, is_linear_op=True\n )\n\n def __rmatmul__(self, other: Any) -> GammaTensor:\n return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.MATMUL, is_linear_op=False)\n\n def __divmod__(self, other: Any) -> GammaTensor:\n return self._rinfix(other, gamma_op=GAMMA_TENSOR_OP.DIVMOD, is_linear_op=False)\n\n def divmod(self, other: Any) -> GammaTensor:\n return self.__divmod__(other)\n\n # unary operations\n\n def __abs__(self) -> GammaTensor:\n return self._unary_op(gamma_op=GAMMA_TENSOR_OP.ABS, is_linear=False)\n\n def argmax(self, axis: Optional[int] = None) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(\n # gamma_op=GAMMA_TENSOR_OP.ARGMAX, is_linear=False, args=[axis]\n # )\n\n def argmin(self, axis: Optional[int] = None) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(\n # gamma_op=GAMMA_TENSOR_OP.ARGMIN, is_linear=False, args=[axis]\n # )\n\n def log(self) -> GammaTensor: # TODO 0.7: this needs a test\n return self._unary_op(gamma_op=GAMMA_TENSOR_OP.LOG, is_linear=False)\n\n def flatten(self, order: str = \"C\") -> GammaTensor: # TODO 0.7: this needs a test\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.FLATTEN, is_linear=True, args=order\n )\n\n def transpose(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.TRANSPOSE, is_linear=True, args=args, kwargs=kwargs\n )\n\n @property\n def T(self) -> GammaTensor:\n return self.transpose()\n\n def sum(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.SUM, is_linear=True, args=args, kwargs=kwargs\n )\n\n def __pow__(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.POWER, is_linear=False, args=args, kwargs=kwargs\n )\n\n def ones_like(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.ONES_LIKE, is_linear=True, args=args, kwargs=kwargs\n )\n\n def zeros_like(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.ZEROS_LIKE,\n is_linear=True,\n args=args,\n kwargs=kwargs,\n )\n\n def filtered(self, *args: Any, **kwargs: Any) -> GammaTensor: # TODO\n raise NotImplementedError\n # return GammaTensor(\n # child=jnp.zeros_like(self.child), jax_op=, sources=self.sources.copy()\n # )\n\n # def filtered(self) -> GammaTensor:\n # # This is only used during publish to filter out data in GammaTensors with no_op. 
It serves no other purpose.\n # def _filtered(state: Dict) -> GammaTensor:\n # return self.reconstruct(state)\n\n # func = _filtered\n\n # return GammaTensor(\n # child=jnp.zeros_like(self.child),\n # func=func,\n # sources=self.sources.copy(),\n # )\n\n def __round__(self, n: int = 0) -> GammaTensor:\n return self._unary_op(gamma_op=GAMMA_TENSOR_OP.ROUND, is_linear=False, args=[n])\n\n def round(self, n: int = 0) -> GammaTensor:\n return self.__round__(n)\n\n def squeeze(self, axis: OptionalAxisArg = None) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.SQUEEZE, is_linear=True, args=[axis]\n )\n\n def mean(self, axis: OptionalAxisArg = None, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.MEAN, is_linear=True, args=[axis], kwargs=kwargs\n )\n\n def ravel(self, order: Optional[str] = \"C\") -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.RAVEL, is_linear=True, args=[order]\n )\n\n def resize(self, new_shape: SingleOrTupleInt) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.RESIZE, is_linear=True, args=[new_shape]\n )\n\n def compress(\n self, condition: List[bool], axis: Optional[int] = None\n ) -> GammaTensor:\n output_state = self.sources.copy()\n child = jnp.compress(condition, self.child, axis=axis)\n jax_op = SyftJaxUnaryOp(\n jax_op=GAMMA_TENSOR_OP.COMPRESS,\n operand=self,\n args=[condition],\n kwargs={\"axis\": axis},\n operand_before=False,\n )\n return GammaTensor(\n child=child,\n jax_op=jax_op,\n sources=output_state,\n is_linear=self.is_linear,\n )\n\n def any(\n self,\n axis: OptionalAxisArg = None,\n keepdims: Optional[bool] = None,\n ) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(\n # gamma_op=GAMMA_TENSOR_OP.ANY,\n # is_linear=False,\n # kwargs={\"axis\": axis, \"keepdims\": keepdims},\n # )\n\n def all(\n self,\n axis: OptionalAxisArg = None,\n keepdims: Optional[bool] = None,\n ) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(\n # gamma_op=GAMMA_TENSOR_OP.ALL,\n # is_linear=False,\n # kwargs={\"axis\": axis, \"keepdims\": keepdims},\n # )\n\n def __pos__(self) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.POSITIVE,\n is_linear=True,\n )\n\n def __neg__(self) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.NEGATIVE,\n is_linear=True,\n )\n\n def reshape(\n self, newshape: SingleOrTupleInt, order: Optional[str] = \"C\"\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.RESHAPE,\n is_linear=True,\n args=[newshape],\n kwargs={\"order\": order},\n )\n\n def std(\n self,\n axis: OptionalAxisArg = None,\n dtype: Optional[np.dtype] = None,\n ddof: Optional[int] = 0,\n keepdims: Optional[bool] = None,\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.STD,\n is_linear=True,\n kwargs={\n \"axis\": axis,\n \"dtype\": dtype,\n \"ddof\": ddof,\n \"keepdims\": keepdims,\n },\n )\n\n def var(\n self,\n axis: OptionalAxisArg = None,\n dtype: Optional[np.dtype] = None,\n ddof: Optional[int] = 0,\n keepdims: Optional[bool] = None,\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.VAR,\n is_linear=True,\n kwargs={\n \"axis\": axis,\n \"dtype\": dtype,\n \"ddof\": ddof,\n \"keepdims\": keepdims,\n },\n )\n\n def sqrt(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.SQRT,\n is_linear=False,\n args=args,\n kwargs=kwargs,\n )\n\n def abs(self, *args: Any, **kwargs: Any) -> GammaTensor:\n return 
self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.ABS,\n is_linear=False,\n args=args,\n kwargs=kwargs,\n )\n\n def clip(\n self,\n a_min: Optional[ArrayLike] = None,\n a_max: Optional[ArrayLike] = None,\n **kwargs: Any,\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.CLIP,\n is_linear=False,\n args=[a_min, a_max],\n kwargs=kwargs,\n )\n\n def nonzero(self) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(gamma_op=GAMMA_TENSOR_OP.NONZERO, is_linear=False)\n\n def swapaxes(self, axis1: int, axis2: int) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.SWAPAXES, is_linear=True, args=[axis1, axis2]\n )\n\n def __len__(self) -> int:\n if not hasattr(self.child, \"__len__\"):\n if self.child is None:\n return 0\n return 1\n try:\n return len(self.child)\n except Exception: # nosec\n return self.child.size\n\n def __getitem__(self, key: Union[int, slice, ArrayLike]) -> GammaTensor:\n # TODO: I think we can move the mapping of the final getattr(jnp, \"op\") to be more general\n # to also accommodate this kind of pattern where its a lambda or whever\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.PY_GETITEM, is_linear=True, args=[key]\n )\n\n def __setitem__(\n self, key: Union[int, slice, ArrayLike], value: Union[GammaTensor, ArrayLike]\n ) -> None:\n raise NotImplementedError\n # QUESTION: Is mutation allowed and if so how do we trace that in the graph?\n # self._unary_op(gamma_op=GAMMA_TENSOR_OP.PY_GETITEM, is_linear=True, args=[key])\n # # relative\n # from .phi_tensor import PhiTensor\n\n # # TODO: fix this\n # if isinstance(value, (PhiTensor, GammaTensor)):\n # self.child[key] = value.child\n # elif isinstance(value, np.ndarray):\n # self.child[key] = value\n # else:\n # raise NotImplementedError\n\n def copy(self, order: str = \"C\") -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.COPY, is_linear=True, args=[order]\n )\n\n def ptp(self, axis: OptionalAxisArg = None) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.PTP, is_linear=False, args=[axis]\n )\n\n def take(\n self,\n indices: ArrayLike,\n axis: Optional[int] = None,\n mode: str = \"clip\",\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.TAKE,\n is_linear=True,\n args=[indices],\n kwargs={\"axis\": axis, \"mode\": mode},\n )\n\n def put(\n self,\n ind: ArrayLike,\n v: ArrayLike,\n mode: str = \"raise\",\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.PUT,\n is_linear=True,\n args=[ind, v],\n kwargs={\"mode\": mode},\n )\n\n def repeat(\n self, repeats: SingleOrTupleInt, axis: Optional[int] = None\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.REPEAT,\n is_linear=True,\n args=[repeats],\n kwargs={\"axis\": axis},\n )\n\n def cumsum(\n self, axis: Optional[int] = None, dtype: Optional[np.dtype] = None\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.CUMSUM,\n is_linear=False,\n kwargs={\"axis\": axis, \"dtype\": dtype},\n )\n\n def cumprod(\n self, axis: Optional[int] = None, dtype: Optional[np.dtype] = None\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.CUMPROD,\n is_linear=False,\n kwargs={\"axis\": axis, \"dtype\": dtype},\n )\n\n @property\n def lipschitz_bound(self) -> float:\n if self.is_linear:\n return 1.0\n\n def convert_array_to_dict_state(array_state: Dict, input_sizes: Dict) -> Dict:\n start_id = 0\n state = {}\n\n for id, shape in input_sizes.items():\n total_size = 1\n for size in shape:\n total_size *= 
size\n state[id] = np.reshape(\n array_state[start_id : start_id + total_size], shape # noqa: E203\n )\n start_id += total_size\n\n return state\n\n def convert_state_to_bounds(input_sizes: Dict, input_states: Dict) -> List:\n bounds = []\n for id in input_sizes:\n bounds.extend(\n list(\n zip(\n input_states[id].min_vals.to_numpy().flatten(),\n input_states[id].max_vals.to_numpy().flatten(),\n )\n )\n )\n return bounds\n\n grad_fn = jax.grad(jax.jit(lambda state: jnp.sum(self.func(state))))\n\n input_sizes = {tensor.id: tensor.shape for tensor in self.sources.values()}\n bounds = convert_state_to_bounds(input_sizes, self.sources)\n\n def search(array_state: Dict) -> jnp.DeviceArray:\n dict_state = convert_array_to_dict_state(array_state, input_sizes)\n grads = grad_fn(dict_state)\n return -jnp.max(jnp.array(list(grads.values())))\n\n return -shgo(search, bounds=bounds, sampling_method=\"simplicial\").fun\n\n def prod(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.PROD, is_linear=False, kwargs={\"axis\": axis}\n )\n\n def trace(self, offset: int = 0, axis1: int = 0, axis2: int = 1) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.TRACE,\n is_linear=True,\n kwargs={\"offset\": offset, \"axis1\": axis1, \"axis2\": axis2},\n )\n\n def diagonal(self, offset: int = 0, axis1: int = 0, axis2: int = 1) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.DIAGONAL,\n is_linear=True,\n kwargs={\"offset\": offset, \"axis1\": axis1, \"axis2\": axis2},\n )\n\n def min(\n self,\n axis: Optional[int] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[float] = None,\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.MIN,\n is_linear=False,\n kwargs={\"axis\": axis, \"keepdims\": keepdims, \"initial\": initial},\n )\n\n def max(\n self,\n axis: Optional[int] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[float] = None,\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.MAX,\n is_linear=False,\n kwargs={\"axis\": axis, \"keepdims\": keepdims, \"initial\": initial},\n )\n\n def sort(self, axis: int = -1, kind: Optional[str] = None) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.SORT,\n is_linear=False,\n kwargs={\"axis\": axis, \"kind\": kind},\n )\n\n def argsort(self, axis: int = -1, kind: Optional[str] = None) -> GammaTensor:\n raise NotImplementedError\n # return self._unary_op(\n # gamma_op=GAMMA_TENSOR_OP.ARGSORT,\n # is_linear=False,\n # kwargs={\"axis\": axis, \"kind\": kind},\n # )\n\n def choose(\n self,\n choices: Union[Sequence, np.ndarray, PassthroughTensor],\n mode: Optional[str] = \"raise\",\n ) -> GammaTensor:\n return self._unary_op(\n gamma_op=GAMMA_TENSOR_OP.CHOOSE,\n is_linear=True,\n kwargs={\"choices\": choices, \"mode\": mode},\n )\n\n # @staticmethod\n # def convert_dsl(\n # state: dict, new_state: Optional[dict] = None\n # ) -> Dict: # TODO 0.7: maybe this is not required?\n # if new_state is None:\n # new_state = dict()\n # if state:\n # for tensor in list(state.values()):\n # if isinstance(tensor.data_subjects, np.ndarray):\n # new_tensor = GammaTensor(\n # child=tensor.child,\n # func=tensor.func,\n # sources=GammaTensor.convert_dsl(tensor.sources, {}),\n # )\n # else:\n\n # new_tensor = tensor\n # new_state[new_tensor.id] = new_tensor\n # return new_state\n # else:\n # return {}\n\n def publish(\n self,\n get_budget_for_user: Callable,\n deduct_epsilon_for_user: Callable,\n 
ledger: DataSubjectLedger,\n sigma: float,\n private: bool,\n ) -> np.ndarray:\n return publish(\n tensor=self,\n ledger=ledger,\n get_budget_for_user=get_budget_for_user,\n deduct_epsilon_for_user=deduct_epsilon_for_user,\n sigma=sigma,\n is_linear=self.is_linear,\n private=private,\n )\n\n @property\n def shape(self) -> Tuple[int, ...]:\n return self.child.shape\n\n @property\n def dtype(self) -> np.dtype:\n return self.child.dtype\n\n # def _object2bytes(self) -> bytes:\n # # TODO Tudor: fix this\n # schema = get_capnp_schema(schema_file=\"gamma_tensor.capnp\")\n\n # gamma_tensor_struct: CapnpModule = schema.GammaTensor # type: ignore\n # gamma_msg = gamma_tensor_struct.new_message()\n # # this is how we dispatch correct deserialization of bytes\n # gamma_msg.magicHeader = serde_magic_header(type(self))\n\n # # do we need to serde func? if so how?\n # # what about the state dict?\n\n # if isinstance(self.child, np.ndarray) or np.isscalar(self.child):\n # chunk_bytes(capnp_serialize(np.array(self.child), to_bytes=True), \"child\", gamma_msg) # type: ignore\n # gamma_msg.isNumpy = True\n # elif isinstance(self.child, jnp.ndarray):\n # chunk_bytes(\n # capnp_serialize(jax2numpy(self.child, self.child.dtype), to_bytes=True),\n # \"child\",\n # gamma_msg,\n # )\n # gamma_msg.isNumpy = True\n # else:\n # chunk_bytes(serialize(self.child, to_bytes=True), \"child\", gamma_msg) # type: ignore\n # gamma_msg.isNumpy = False\n\n # gamma_msg.sources = serialize(self.sources, to_bytes=True)\n # gamma_msg.isLinear = self.is_linear\n # gamma_msg.id = self.id.to_string()\n # gamma_msg.jaxOp = serialize(self.jax_op, to_bytes=True)\n\n # # return gamma_msg.to_bytes_packed()\n # return gamma_msg.to_bytes()\n\n # @staticmethod\n # def _bytes2object(buf: bytes) -> GammaTensor:\n # # TODO Tudor: fix this\n # schema = get_capnp_schema(schema_file=\"gamma_tensor.capnp\")\n # gamma_struct: CapnpModule = schema.GammaTensor # type: ignore\n # # https://stackoverflow.com/questions/48458839/capnproto-maximum-filesize\n # MAX_TRAVERSAL_LIMIT = 2**64 - 1\n # # capnp from_bytes is now a context\n # with gamma_struct.from_bytes(\n # buf, traversal_limit_in_words=MAX_TRAVERSAL_LIMIT\n # ) as gamma_msg:\n\n # if gamma_msg.isNumpy:\n # child = capnp_deserialize(\n # combine_bytes(gamma_msg.child), from_bytes=True\n # )\n # else:\n # child = deserialize(combine_bytes(gamma_msg.child), from_bytes=True)\n\n # state = deserialize(gamma_msg.sources, from_bytes=True)\n # is_linear = gamma_msg.isLinear\n # id_str = UID.from_string(gamma_msg.id)\n # jax_op = deserialize(gamma_msg.jaxOp, from_bytes=True)\n\n # return GammaTensor(\n # child=child,\n # is_linear=is_linear,\n # sources=state,\n # id=id_str,\n # jax_op=jax_op,\n # )\n","sub_path":"packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py","file_name":"gamma_tensor.py","file_ext":"py","file_size_in_byte":103273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"541156055","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api\nimport requests\n\nclass APIFB(models.Model):\n _name = 'api.facebook'\n\n api_key= fields.Char(readonly=True)\n is_default = fields.Boolean(default=False, readonly=True)\n fields_list = fields.Char(string='Fields list')\n cmt = fields.Char(string='cmt')\n active = fields.Boolean(default=True)\n\nclass MySetting(models.TransientModel):\n _name = 'api.facebook.config.settings'\n _inherit = 'res.config.settings'\n\n # default_name = fields.Char(default_model='api')\n api_key = 
fields.Char()\n\n @api.model\n def get_default_tuythich(self, fields):\n data = self.env['api.facebook'].search([],limit=1)\n return {\n 'api_key': data.api_key,\n }\n @api.one\n def set_tuythich_abc(self):\n data = self.env['api.facebook'].search([],limit=1)\n data[0].api_key = self.api_key\n\nclass GETFB(models.Model):\n _inherit = 'res.partner'\n\n name_fb = fields.Char(string='Facebook name')\n link_fb = fields.Char(string='Link FB')\n\n def get_fb(self):\n key = self.env['api.facebook'].search([],limit=1).api_key\n user_id = 'me'\n data = self.env['api.facebook'].search([('active','=',True)])\n field = []\n field_get = ''\n for i in data:\n field.append(i.fields_list)\n for k in field:\n field_get +=k+','\n field_get = field_get.rstrip(',')\n\n a = requests.get(\"https://graph.facebook.com/v2.12/\" + user_id + '?fields=' + field_get,\n params={'access_token': key}).json()\n\n c = a['feed']['data']\n for k in c:\n for p in k['likes']['data']:\n # print p['name']\n # print p['link']\n self.create({'name_fb':p['name'],\n 'link_fb':p['link'],\n 'name': p['name']\n\n })\n","sub_path":"api_facebook/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"100279915","text":"from torch.utils.data import Dataset, DataLoader, TensorDataset\nfrom torch.utils.data.dataset import random_split\nimport torch\nimport torchvision.transforms as transforms\nfrom customtransforms import RandomHorizontalFlipTensor, RandomVerticalFlipTensor\nimport random\nimport numpy as np\nfrom PIL import Image\nimport torchvision \nfrom tqdm import tqdm\n\n\ndef get_all_data(dataset, num_workers=30, shuffle=False):\n dataset_size = len(dataset)\n data_loader = DataLoader(dataset, batch_size=dataset_size,\n num_workers=num_workers, shuffle=shuffle)\n all_data = {}\n for i_batch, sample_batched in tqdm(enumerate(data_loader)):\n all_data = sample_batched\n return all_data\n\ndef flip_label(y, pattern, ratio, one_hot=True):\n #Origin: https://github.com/chenpf1025/noisy_label_understanding_utilizing/blob/master/data.py\n #y: true label, one hot\n #pattern: 'pair' or 'sym'\n #p: float, noisy ratio\n \n #convert one hot label to int\n if one_hot:\n y = np.argmax(y,axis=1)#[np.where(r==1)[0][0] for r in y]\n n_class = max(y)+1\n \n #filp label\n for i in range(len(y)):\n if pattern=='sym':\n p1 = ratio/(n_class-1)*np.ones(n_class)\n p1[y[i]] = 1-ratio\n y[i] = np.random.choice(n_class,p=p1)\n elif pattern=='asym':\n y[i] = np.random.choice([y[i],(y[i]+1)%n_class],p=[1-ratio,ratio]) \n \n #convert back to one hot\n if one_hot:\n y = np.eye(n_class)[y]\n return y\n\ndef get_class_subset(imgs, lbls, n_classes):\n selectedIdx = lbls < n_classes\n return imgs[selectedIdx], lbls[selectedIdx]\n\n\nclass CustomCIFAR(Dataset):\n def __init__(self, subset, transform=None, target_transform=None):\n self.subset = subset\n self.transform = transform\n self.target_transform = target_transform\n \n def __getitem__(self, index):\n img, target = self.subset[index]\n\n if self.transform:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n \n def __len__(self):\n return len(self.subset)\n\nclass CustomTensorDataset(Dataset):\n \"\"\"TensorDataset with support of transforms.\n \"\"\"\n def __init__(self, tensors, transform=None, target_transform=None):\n assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)\n 
self.tensors = tensors\n self.transform = transform\n self.target_transform = target_transform\n\n def __getitem__(self, index):\n x = self.tensors[0][index]\n y = self.tensors[1][index]\n\n if self.transform:\n x = self.transform(x)\n\n if self.target_transform is not None:\n y = self.target_transform(y)\n\n return x, y\n\n def __len__(self):\n return self.tensors[0].size(0)\n \n \nclass DataLoadersCreator(): \n def __init__(self, batch_size, num_workers, shuffle, cifar_root=r'/media/HDD2TB/rupali/Work104/Dataset/CIFAR10', \n noise_pattern='sym', noise_ratio=0.5, n_classes=None):\n # def __init__(self, batch_size, num_workers, shuffle, cifar_root=r'F:\\CIFAR10' /media/HDD_3TB2/rupali/Dataset/CIFAR10, noise_pattern='sym', noise_ratio=0.5):\n #set noise_pattern to None if no noise is intended to be added in the trainset\n #set n_classes to None if you want all the classes\n\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.shuffle = shuffle\n self.cifar_root = cifar_root\n self.noise_pattern = noise_pattern\n self.noise_ratio = noise_ratio\n self.n_classes = n_classes\n \n def run(self):\n self.transform_augments = transforms.Compose([\n #transforms.RandomSizedCrop(224),\n RandomHorizontalFlipTensor(),\n ]) # meanstd transformation\n\n self.transform_noaugment = transforms.Compose([\n #transforms.Resize(256),\n #transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),\n ]) \n\n trainval_dataset = torchvision.datasets.CIFAR10(self.cifar_root, train=True, transform=self.transform_noaugment, target_transform=None, download=True)\n test_dataset = torchvision.datasets.CIFAR10(self.cifar_root, train=False, transform=self.transform_noaugment, target_transform=None, download=True)\n \n\n lengths = [int(len(trainval_dataset)*0.8), int(len(trainval_dataset)*0.2)]\n train_dataset, val_dataset = random_split(trainval_dataset, lengths)\n \n train_imgs, train_targets = get_all_data(train_dataset, num_workers=self.num_workers)\n\n if self.n_classes is not None:\n train_imgs, train_targets = get_class_subset(train_imgs, train_targets, self.n_classes)\n \n if self.noise_pattern is not None:\n original_train_targets_np = train_targets.numpy()\n train_targets_np = flip_label(original_train_targets_np.copy(), pattern=self.noise_pattern, ratio=self.noise_ratio, one_hot=False)\n n_noisy_labels = original_train_targets_np.size - (original_train_targets_np==train_targets_np).sum()\n print('no of noisy labels : '+str(n_noisy_labels))\n train_targets = torch.from_numpy(train_targets_np)\n\n train_dataset = CustomTensorDataset([train_imgs, train_targets], transform=self.transform_augments)\n\n val_imgs, val_targets = get_all_data(val_dataset, num_workers=self.num_workers)\n if self.n_classes is not None:\n val_imgs, val_targets = get_class_subset(val_imgs, val_targets, self.n_classes)\n val_dataset = TensorDataset(val_imgs, val_targets)\n\n if self.n_classes is not None:\n test_imgs, test_targets = get_all_data(test_dataset, num_workers=self.num_workers)\n test_imgs, test_targets = get_class_subset(test_imgs, test_targets, self.n_classes)\n test_dataset = TensorDataset(test_imgs, test_targets)\n \n train_loader = DataLoader(\n dataset=train_dataset, \n batch_size=self.batch_size,\n shuffle=self.shuffle,\n num_workers=self.num_workers) \n test_loader = DataLoader(\n dataset=test_dataset, \n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers) \n val_loader = DataLoader(\n dataset=val_dataset, \n 
batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers) \n return train_loader, val_loader, test_loader\n\n\nif __name__ == \"__main__\":\n x=DataLoadersCreator(batch_size=10, num_workers=0, shuffle=True)\n tr, te, v = x.run()\n for i_batch, sample_batched in tqdm(enumerate(tr)):\n print('d')\n print('shit')","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"19354502","text":"import requests\nfrom pymongo import MongoClient\n\n\nicorating_api_url = 'https://icorating.com/ico/all/load/?page=1'\nCLIENT = MongoClient('mongodb://127.0.0.1:27017')\nICORATING_DB = CLIENT.icorating\nICORATING_COLLECTION = ICORATING_DB.icos\n\n\nclass Icorating:\n icos = []\n next = None\n previous = None\n\n def __init__(self, url):\n\n while True:\n if self.next:\n data = self.get_next_data(self.next)\n else:\n data = self.get_next_data(url)\n\n for item in data.get('results'):\n self.icos.append(item)\n # ICORATING_COLLECTION.insert_one(item)\n\n for key, value in data.items():\n setattr(self, key, value)\n\n if not data['next']:\n break\n\n ICORATING_COLLECTION.insert_many(self.icos)\n\n def get_next_data(self, url):\n return requests.get(url).json()\n\n\nif __name__ == '__main__':\n collection = Icorating(icorating_api_url)\n print('***')\n","sub_path":"icorating_parser_mongodb.py","file_name":"icorating_parser_mongodb.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"1209460","text":"import vtk\nimport numpy as np\nimport time\n\n\nclass VisualPoint():\n def __init__(self):\n self.point_cloud = VtkPointCloud()\n self.current_point = []\n \n def add_view(self, point_cloud):\n #for i in range(len(point_cloud)):\n #self.point_cloud.addPoint(point_cloud[i],0)\n self.current_point = point_cloud\n\n def render(self):\n # Renderer\n renderer = vtk.vtkRenderer()\n # Render Window\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n\n # Interactor\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n renderer.AddActor(self.point_cloud.point_vtkActor)\n renderer.SetBackground(0.0, 0.0, 0.0)\n renderWindow.Render()\n renderWindowInteractor.Initialize()\n\n #callback \n #cb = vtkTimerCallback(duration=250, VtkPointCloud=self.point_cloud)\n renderWindowInteractor.AddObserver('TimerEvent', self.callback(renderWindowInteractor))\n renderWindowInteractor.CreateTimer(0)\n renderWindowInteractor.Start()\n\n def callback(self, obj):\n iren = obj\n while True:\n try:\n for p in self.current_point:\n self.point_cloud.addPoint(p,1)\n iren.CreateTimer(1)\n iren.GetRenderWindow().Render()\n except KeyboardInterrupt:\n iren.DestroyTimer()\n\nclass VtkPointCloud:\n def __init__(self, zMin=-1.0, zMax=1.0, maxNumPoints=1e6):\n self.init_planes()\n self.init_points(zMin, zMax, maxNumPoints)\n\n def addPoint(self, point, color_num):\n if self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints:\n pointId = self.vtkPoints.InsertNextPoint(point[:3])\n self.vtkDepth.InsertNextValue(color_num)\n self.vtkCells.InsertNextCell(1)\n self.vtkCells.InsertCellPoint(pointId)\n else:\n r = np.random.randint(0, self.maxNumPoints)\n self.vtkPoints.SetPoint(r, point[:3])\n self.vtkCells.Modified()\n self.vtkPoints.Modified()\n self.vtkDepth.Modified()\n\n def addPlane(self, plane_center, normal, x_axis, 
y_axis):\n self.vtkPlanes.SetCenter(plane_center)\n self.vtkPlanes.SetNormal(normal)\n self.vtkPlanes.SetPoint1(x_axis)\n self.vtkPlanes.SetPoint2(y_axis)\n\n def init_points(self, zMin=-1.0, zMax=1.0, maxNumPoints=1e6):\n self.maxNumPoints = maxNumPoints\n self.vtkPolyData = vtk.vtkPolyData()\n self.vtkPoints = vtk.vtkPoints()\n self.vtkCells = vtk.vtkCellArray()\n self.vtkDepth = vtk.vtkDoubleArray()\n\n self.vtkDepth.SetName('DepthArray')\n self.vtkPolyData.SetPoints(self.vtkPoints)\n self.vtkPolyData.SetVerts(self.vtkCells)\n self.vtkPolyData.GetCellData().SetScalars(self.vtkDepth)\n self.vtkPolyData.GetCellData().SetActiveScalars('DepthArray')\n point_mapper = vtk.vtkPolyDataMapper()\n point_mapper.SetInputData(self.vtkPolyData)\n point_mapper.SetColorModeToDefault()\n point_mapper.SetScalarRange(zMin, zMax)\n self.point_vtkActor = vtk.vtkActor()\n self.point_vtkActor.SetMapper(point_mapper)\n\n def init_planes(self):\n self.vtkPlanes = vtk.vtkPlaneSource()\n plane_mapper = vtk.vtkPolyDataMapper()\n plane_mapper.SetInputData(self.vtkPlanes.GetOutput())\n self.plane_vtkActor = vtk.vtkActor()\n self.plane_vtkActor.SetMapper(plane_mapper)\n","sub_path":"ICP_realtime/VisualPoint.py","file_name":"VisualPoint.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"113130077","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nfrom torch import nn, optim\nfrom datetime import datetime\nimport os\n\nimport tensorboardX\nfrom data_loader import *\nfrom tensorboardX import SummaryWriter\nfrom torchvision import utils\nfrom vae_cat import *\n\ndef test(data_loader, model, out_dir):\n model.eval()\n with torch.no_grad():\n for idx, img in enumerate(data_loader):\n out, _ = model(img)\n out, mu, logvar = model(img)\n z = [torch.randn((512, 32, 32))]\n z = torch.stack(z)\n z = z.to('cuda')\n generated = model.module.decode(z)\n utils.save_image(\n torch.cat([out,img],0),\n os.path.join(args.out_dir, 'out', str(idx)+'.png' ),\n normalize=True,\n range=(-1, 1)\n )\n utils.save_image(\n generated,\n os.path.join(args.out_dir, 'out', str(idx)+'_random.png' ),\n normalize=True,\n range=(-1, 1)\n )\n\n\ndef main(args):\n model = VAE(zsize=512).to(args.device)\n transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]\n )\n test_loader = get_test_loader(args.batch_size, args.dataset_path, args.device, transform=transform)\n\n with open(args.model_path, 'rb') as f:\n state_dict = torch.load(f, map_location='cpu')\n model.load_state_dict(state_dict)\n model = nn.DataParallel(model)\n model.eval()\n test(test_loader, model, args.out_dir)\n\n\nif __name__ == '__main__':\n\n import argparse\n import os\n\n parser = argparse.ArgumentParser(description='parser for vae test')\n\n parser.add_argument('--dataset_path', type=str, help='path to the dataset')\n parser.add_argument('--model_path', type=str, help='path to the model checkpoint', default=None)\n\n\n # miscellaneous\n parser.add_argument('--out_dir', type=str, help='output dir', default='./')\n parser.add_argument('--device', type=str, help='set the device', default='cpu')\n parser.add_argument('--batch_size', type=int, help='batch size', default=1)\n\n args = parser.parse_args()\n\n if not os.path.exists(os.path.join(args.out_dir, 'out')):\n os.makedirs(os.path.join(args.out_dir, 'out'))\n args.device = torch.device(args.device)\n\n 
main(args)\n","sub_path":"basic_vae/test_vae_cat.py","file_name":"test_vae_cat.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440445903","text":"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import count\nfrom mpl_toolkits.mplot3d import Axes3D\n\nx_track = np.zeros((1,3))\nx_track_s = np.array([.0,.0,.0])\ntheta = 0\ndef gen_path():\n global x_track_s,x_track,theta\n theta += 2*np.pi/90\n x = 300*np.tan(theta)\n y = 0.1*np.tan(theta)\n x_track_s += [x,y,0.1]\n x_track = np.append(x_track,[x_track_s],axis=0)\n return x_track\n\nax = plt.axes(projection = '3d')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_title('3d_mobile_obs')\n\nplt.grid(True)\nplt.ion()\n\nfor t in count():\n if t == 2500:\n break\n\n ax.plot3D(x_track[:,0],x_track[:,1],x_track[:,2],'blue')\n x_track = gen_path()\n plt.pause(0.01)\n","sub_path":"qqnn.py","file_name":"qqnn.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"124370529","text":"import sys\n#sys.path.append('ex48/')\nfrom lexicon import scan\nclass ParserError(Exception):\n\tdef __init__(self,ss):\n\t\tself.errorinfo=ss\n\tdef __str__(self):\n\t\treturn self.errorinfo\nclass Sentence(object):\n\tdef __init__(self,subject,verb,obj):\n\t\tself.subject=subject[1]\n\t\tself.verb=verb[1]\n\t\tself.obj=obj[1]\ndef peek(word_list):\n\tif word_list:\n\t\tword=word_list[0]\n\t\treturn word[0]\n\telse:\n\t\treturn None\ndef match(word_list,excepting):\n\tif word_list:\n\t\tword=word_list.pop(0)\n\t\tif word[0]==excepting:\n\t\t\treturn word\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\treturn None\ndef skip(word_list,word_type):\n\twhile peek(word_list)==word_type:\n\t\tmatch(word_list,word_type)\n'''def parse_verb(word_list):\n\tskip(word_list,'prep')\n\tnext_word=peek(word_list)\n\tif next_word=='noun':\n\t\treturn match(word_list,'noun')\n\telif next_word=='direct':\n\t\treturn match(word_list,'direct')\n\telse:\n\t\traise ParserError('expectd a noun or direct next.')\n'''\ndef parse_verb(word_list):\n\tskip(word_list, 'prep') \n\n\tif peek(word_list) == 'verb':\n\t\treturn match(word_list, 'verb')\n\telse:\n\t\t\n\t\traise ParserError(\"Expected a verb next.\")\n\t\t\ndef parse_subject(word_list):\n\tskip(word_list, 'prep')\n\tnext_word = peek(word_list)\n\n\tif next_word == 'noun':\n\t\treturn match(word_list, 'noun')\n\telif next_word == 'verb':\n\t\treturn ('noun', 'player')\n\telse:\n\t\traise ParserError(\"Expected a verb next.\")\ndef parse_object(word_list):\n\tskip(word_list, 'prep')\n\tnext_word = peek(word_list)\n\n\tif next_word == 'noun':\n\t\treturn match(word_list, 'noun')\n\telif next_word == 'direct':\n\t\treturn match(word_list, 'direct')\n\telse:\n\t\t\t\n\n\t\traise ParserError(\"Expected a noun or direction next.\")\n\t\t\ndef parse_sentence(word_list):\n\tsubj = parse_subject(word_list)\n\tverb = parse_verb(word_list)\n\tobj = parse_object(word_list) \n\n\treturn Sentence(subj, verb, obj)\n#hh=[('verb', 'go'), ('prep', 'the'), ('verb', 'go')]\n#ss=parse_sentence(hh)\n#print(ss.subject,ss.verb,ss.obj)\n'''x = parse_sentence([('noun', 'bear'), ('verb', 'eat'),('prep','the'),('noun','honey')])\nprint(x.subject,x.verb,x.obj)\naa=ParserError(\"Expected a noun or direction next.\")\n\t\nprint(aa,'==============')\nprint(type(aa),'======================')\nif str(aa)==\"Expected a noun or direction 
next.\":\n\tprint('ok','是正确的')\nelse :\n\tprint('不相等')\n'''\n# 测试输入转换代码是否成功\n# chin=input('请输入内容')\n# chan=scan(chin)\n# result=parse_sentence(chan)\n# print(result.subject,result.verb,result.obj)\ndef convert_word(words):\n\tchan=scan(words)\n\tresult=parse_sentence(chan)\n\thh=result.subject+' '+result.verb+' '+result.obj\n\treturn hh\n\n\t\t\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"184564749","text":"''''\nSimple method to add to data store for use in project intended to help\nlearn how to implement GTK+ 3.0 ListStores and add, delete, and edit their\ncontents in a Gtk.TreeView. This version relies upon glade to construct the\nAddRecordDialog.\n'''\nfrom gi.repository import Gtk #@UnresolvedImport pylint: disable-msg = E0611\n \ndef AddRecordDialog(recordsstore, fields = None):\n '''\n The UI for the dialog the user completes for each new record is defined in a glade\n file. Function to get values for a new record. Args point to the current data store\n and provide a way to communicate previous failed attempts at entering the record.\n '''\n record_dialog = Gtk.Builder()\n record_dialog.add_from_file(\"Add_Record.glade\")\n# This window is a dialog, so no close button.\n record_dialog.window = record_dialog.get_object(\"add_record_dialog\")\n record_dialog.project_entry = record_dialog.get_object(\"project_entry\")\n '''\n Set field values to those entered on previous unsuccessful attempts to create\n a record or, if there is no previous valid entry, make the entry blank.\n '''\n record_dialog.project_entry.set_text(fields['project'])\n record_dialog.status_entry = record_dialog.get_object(\"status_entry\")\n record_dialog.status_entry.set_text(fields['status'])\n record_dialog.priority_spinbutton = record_dialog.get_object(\"priority_spinbutton\")\n record_dialog.priority_adjustment = record_dialog.get_object(\"priority_adjustment\")\n record_dialog.priority_adjustment.set_value(fields['priority'])\n record_dialog.completed_toggle = record_dialog.get_object(\"toggle\")\n record_dialog.ok_button = record_dialog.get_object(\"ok_button\")\n record_dialog.cancel_button = record_dialog.get_object(\"cancel_button\")\n record_dialog.connect_signals(record_dialog)\n '''\n If some of the data fields already have values when this function gets\n called, it means that the user tried at least once before and entered\n illegal values. 
This code detects where the user first went wrong and\n puts the cursor and focus on the offending field while maintaining any\n legal values that were supplied to other fields.\n '''\n if fields['focus'] == \"project\":\n record_dialog.project_entry.grab_focus()\n elif fields['focus'] == \"status\":\n record_dialog.status_entry.grab_focus()\n elif fields['focus'] == \"priority\":\n record_dialog.priority_spinbutton.grab_focus()\n '''\n Now we've got the dialog all set up - show the results and wait\n for the user to finish data input.\n '''\n result = record_dialog.window.run()\n if result == Gtk.ResponseType.OK:\n fields['project'] = record_dialog.project_entry.get_text()\n fields['status'] = record_dialog.status_entry.get_text()\n '''\n The priority_adjustment gives a float value, but we need an int.\n '''\n fields['priority'] = int(record_dialog.priority_adjustment.get_value())\n fields['completed'] = record_dialog.completed_toggle.get_active()\n '''\n After submitting data to the caller, this dialog's work is done.\n '''\n record_dialog.window.destroy()\n return fields\n elif result == Gtk.ResponseType.CANCEL:\n '''\n If the user decides to cancel, just close the dialog and go\n back to the main program loop without changing any recorded\n data.\n '''\n record_dialog.window.destroy()\n return None","sub_path":"src/Data_Entry/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"176169967","text":"\n# Write a program called check.py that takes the names of one or more inflammation data files as arguments and checks that all the files have the same number of rows and columns. What is the best way to test your program?\n\nimport sys\nfrom glob import glob\nimport numpy as np\n\ndef main():\n\tscript = sys.argv[0]\n\tfilenames = sys.argv[1:]\n\tassert len(filenames) >= 1, 'Need to specify the files'\n\n\n\tif len(filenames) == 1:\n\t\tprint('Only one file is checked.')\n\t\tdata = np.loadtxt(filenames[0], delimiter=',')\n\t\tprint('Data shape is ', data.shape)\n\telse:\n\t\tprint(len(filenames), 'files are checked:')\n\t\tdata_0 = np.loadtxt(filenames[0], delimiter=',')\n\t\tprint(filenames[0])\n\t\tfor i in filenames[1:]:\n\t\t\tprint(i)\n\t\t\tdata = np.loadtxt(i, delimiter=',')\n\t\t\tdt_shape = data.shape\n\t\t\tassert dt_shape != data_0, 'data structure is different'\n\t\tprint('All files have', dt_shape[0],'rows and',dt_shape[1],'columns.')\n\n\t# filename = glob('../data/*.csv')\n\t\t\n\t#assert len()\n\t\n\t\n\t\n\nif __name__ == '__main__':\n\tmain()","sub_path":"precourse_python/swc-python/code_mine/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"471612085","text":"import argparse\nimport os\nimport sys\n\nsys.path.append(\"..\")\n\nimport yaml\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\n\nfrom datasets.googlecc import PreProcessing\nfrom datasets.common import get_dataset_metadata_cfg\nfrom preprocessing import utils\n\nimport tensorflow_hub as hub\nimport tensorflow as tf\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger\nfrom tensorflow.keras.layers import Dense, Input, Add, Dropout\nfrom utils.performance import PerformanceMetrics\n\nBERT_MODEL_HUB = 
\"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\nMAX_LEN = 40\nvocab_size = 30522\n\n\ndef bert_model():\n image_input = Input(shape=(2048,))\n im1 = Dropout(0.5)(image_input)\n im2 = Dense(256, activation='relu')(im1)\n\n text_input = Input(shape=(MAX_LEN,), dtype=tf.int32)\n input_mask = Input(shape=(MAX_LEN,), dtype=tf.int32)\n segment_ids = Input(shape=(MAX_LEN,), dtype=tf.int32)\n\n bert_module = hub.Module(\n BERT_MODEL_HUB,\n trainable=True\n )\n\n bert_inputs = dict(\n input_ids=text_input,\n input_mask=input_mask,\n segment_ids=segment_ids\n )\n\n pooled_output = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True\n )[\"pooled_output\"]\n\n sent = Dense(256, activation='relu')(pooled_output)\n\n decoder1 = Add()([im2, sent])\n decoder2 = tf.keras.layers.Dense(256, activation='relu')(decoder1)\n pred = tf.keras.layers.Dense(vocab_size, activation='softmax')(decoder2)\n\n model = tf.keras.models.Model(inputs=[image_input, text_input, input_mask, segment_ids], outputs=pred)\n\n return model\n\n\nif __name__ == \"__main__\":\n clear_session()\n parser = argparse.ArgumentParser(description=\"config\")\n parser.add_argument(\n \"--config\",\n nargs=\"?\",\n type=str,\n default=\"../configs/inception_bert.yaml\",\n help=\"Configuration file to use\",\n )\n\n args = parser.parse_args()\n\n with open(args.config) as fp:\n cfg = yaml.load(fp)\n\n dataset_cfg = get_dataset_metadata_cfg()\n model_workspace_dir = os.path.join(cfg[\"workspace\"][\"directory\"], cfg[\"dataset\"][\"name\"], cfg[\"model\"][\"arch\"])\n utils.make_directories(model_workspace_dir)\n\n img_model = InceptionV3(weights='imagenet')\n\n dataset_preprocessor = PreProcessing(cfg, \"inception\", False, True)\n dataset_preprocessor.run_one_time_encoding(img_model)\n\n # Load train, validation sets from the pre-processor\n training_generator, validation_generator, test_generator = dataset_preprocessor.get_keras_generators(\"inception\")\n\n model = bert_model()\n\n model.compile(\n loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n\n model.summary()\n\n callbacks = [\n EarlyStopping(patience=10, verbose=1),\n ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),\n ModelCheckpoint(\n os.path.join(os.path.join(model_workspace_dir, 'weights_best.hdf5')),\n verbose=1,\n save_best_only=False,\n save_weights_only=True\n ),\n CSVLogger(os.path.join(model_workspace_dir, 'training.csv')),\n PerformanceMetrics(os.path.join(model_workspace_dir, 'performance.csv')),\n ]\n\n model.fit_generator(\n generator=training_generator,\n validation_data=validation_generator,\n epochs=100,\n callbacks=callbacks\n )\n","sub_path":"image-captioning-approaches/models/Inception_BERT_merge.py","file_name":"Inception_BERT_merge.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"641727196","text":"#Auther nmap\nimport pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n 'localhost')) #创建一个链接实例\nchannel = connection.channel() #声明一个通道\n\n#声明queue\nchannel.queue_declare(queue='hello',durable=True)\n\n#n RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='Hello World!',\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n )\n )\nprint(\" [x] Sent 'Hello 
World!'\")\nconnection.close()","sub_path":"day11/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"422838524","text":"'''\nCreated on May 2, 2011\n\n@author: oabalbin\n'''\n\n\n\ndef build_concesus_table(pm, cm, dumped_files,ofile, mmapfile, \n complement, act_snps=None, dbsnp=None, hapmap=None, validated=None):\n '''\n '''\n fields, format, names=fields_table()\n ncol = len(fields)\n nrow = len(pm)\n # Functional Annotation: Majority Vote and Present or not in HapMap\n \n print >> sys.stderr, \"Building concensus table ....\"\n \n if act_snps is not None and dbsnp is not None and hapmap is not None and validated is not None:\n annot = ['Majority','dbSNP','HapMap', 'Actionable', 'Validated']\n mcol, dcol, hcol, acol, vcol = -5, -4,-3,-2,-1\n \n elif act_snps is not None and dbsnp is not None and hapmap is not None:\n annot = ['Majority','dbSNP','HapMap', 'Actionable']\n mcol, dcol, hcol, acol = -4,-3,-2,-1\n \n elif act_snps is not None and validated is not None and hapmap is not None:\n annot = ['Majority','HapMap', 'Actionable', 'Validated']\n mcol, hcol, acol, vcol = -4,-3,-2,-1\n \n elif dbsnp is not None and hapmap is not None:\n annot = ['Majority','dbSNP','HapMap']\n mcol, dcol, hcol = -3,-2,-1\n \n elif dbsnp is not None and act_snps is not None:\n annot = ['Majority','dbSNP','Actionable']\n mcol, dcol, acol = -3,-2,-1\n\n elif hapmap is not None and act_snps is not None:\n annot = ['Majority','HapMap','Actionable']\n mcol, hcol, acol = -3,-2,-1\n \n elif act_snps is not None and validated is not None:\n annot = ['Majority','Actionable', 'Validated']\n mcol, acol, vcol = -3,-2,-1\n\n else:\n annot = ['Majority']\n mcol=-1\n\n \n ############## Function\n #newpm=defaultdict()\n imat = np.zeros( (nrow,len(dumped_files)+len(annot) ) ) \n #act_gen_names = np.empty( nrow, dtype='str')\n act_gen_names=defaultdict()\n #[act_gen_names.append('nan') for i in range(nrow)]\n \n # file1, file2, file3, file n. 
\n \n \n for k, df in enumerate(dumped_files):\n ovcf=VCF()\n ovcf.readFromPickle(df)\n # create a matrix for this file\n cmat = np.empty( (nrow,ncol) )\n \n for coord, i in pm.iteritems():\n snv_list = ovcf[[coord]] # list of 1 snv \n if not snv_list:\n cmat[i,:] = np.nan\n #print >> sys.stderr, \"Not snps for this coordinate %s\"%(coord)\n else:\n # Intersection matrix\n imat[i,k]=1\n\n for snv in snv_list:\n # Filling annotation matrix\n #print snv\n loc = snv['COORD']\n #newpm[loc]=i\n if len(snv_list) > 1: \n print >> sys.stderr, \"There is more than one snp with the same location\"\n \n if act_snps is not None:\n if loc in act_snps.keys():\n \n imat[i,acol] = 1\n #act_gen_names[i]=act_snps[loc]\n #act_gen_names.insert(i,act_snps[loc])\n act_gen_names[i]=act_snps[loc]\n \n else:\n imat[i,acol] = 0\n if 'NON_SYNONYMOUS_CODING(SPLICE_SITE)' in snv.keys():\n gen_name=snv['NON_SYNONYMOUS_CODING(SPLICE_SITE)']\n elif 'NON_SYNONYMOUS_CODING' in snv.keys():\n gen_name=snv['NON_SYNONYMOUS_CODING']\n else:\n gen_name='NAN'\n \n act_gen_names[i]='NotAct'+'_'+gen_name\n \n \n if dbsnp is not None:\n imat[i,dcol] = 1 if loc in dbsnp else 0\n if hapmap is not None:\n imat[i,hcol] = 1 if loc in hapmap else 0\n if validated is not None:\n imat[i,vcol] = 1 if loc in validated else 0\n \n # Properties of the snp\n for prop,j in fields.iteritems():\n \n try:\n cmat[i,j] = snv[prop]\n except KeyError:\n print >> sys.stderr, \"%s, Error with this snv %s, it is missing field %s\"%(df,coord,prop)\n if prop==\"SB\":\n # This number means the field was not present in the vcf\n cmat[i,j]=999\n else:\n cmat[i,j] = np.nan\n \n \n # concatenate matrix for this file to gral matrix\n if k==0:\n Cmat=np.copy(cmat)\n else:\n Cmat = np.hstack( (Cmat,cmat) )\n \n # Get the majority vote for the snv calls\n fformat=[]\n fnames=[]\n fmethods=[]\n for i in range(len(dumped_files)):\n tn = ['M%d_%s'%(i,n) for n in names]\n fformat+=format\n fnames+=tn\n fmethods.append('M%d'%(i)) \n \n fmethods=fmethods+annot\n fmethods.reverse()\n [fformat.insert(0,'%d') for i in range(imat.shape[1])]\n [fnames.insert(0,f) for f in fmethods]\n \n imat[:,mcol]=np.sum(imat[:,:mcol], axis=1)\n Cmat = np.hstack( (imat,Cmat) )\n write_table(Cmat,ofile,\",\".join(fformat),mmapfile, pm, fnames, act_gen_names)\n \n return fnames\n","sub_path":"exome/trunk/exome/variantEval/consensus_table.py","file_name":"consensus_table.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"10441764","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef load_applog(start, periods):\r\n # Load applog data\r\n monthly_df = []\r\n filename = pd.period_range(start=start, periods=periods, freq='M').strftime('%Y%m')\r\n for fn in filename:\r\n monthly_df.append(pd.read_csv('./data/applog/applog_' + fn + '.csv'))\r\n df = pd.concat(monthly_df)\r\n return df\r\n\r\ndef basic_preprocess_applog(df):\r\n # 칼럼명 변경\r\n df.columns = ['PartyId', '방문일시', '페이지코드', '체류시간', '세션ID', '로그인여부', '신규방문여부', '통신회사코드', '이탈여부', '연월일']\r\n\r\n # null 제거\r\n print('Orig. data len:', len(df))\r\n df = df.dropna()\r\n print('Aft. 
drop-nan:', len(df), '\\n')\r\n\r\n # party_id int형으로 변경\r\n df['PartyId'] = df['PartyId'].astype('Int64')\r\n\r\n # 방문일시 datetime으로 변환\r\n vst_dtm = df['방문일시'].astype('str')\r\n f = lambda x: x[:-3]\r\n vst_dtm = vst_dtm.apply(f)\r\n\r\n vst_dtm = pd.to_datetime(vst_dtm, format='%Y%m%d%H%M%S')\r\n df['방문일시'] = vst_dtm\r\n\r\n # 연월일 칼럼 제거\r\n df = df.drop(columns='연월일')\r\n\r\n # 신규방문여부 Y->1, N->0\r\n df['신규방문여부'] = df['신규방문여부'].replace({'Y': 1, 'N': 0})\r\n\r\n # 1970년 데이터 제외\r\n df = df[df['방문일시'].dt.year != 1970]\r\n df = df.reset_index(drop=True)\r\n return df\r\n\r\ndef load_member(date):\r\n # Load members data\r\n members_df = pd.read_csv('data/member/mbr_' + date + '.csv',\r\n usecols=['party_id', 'mbr_scrb_dt', 'fee_yn', 'fcip_yn', 'ip_insr_cd', 'lst_vst_dt'])\r\n members_df.columns = ['PartyId', '가입일자', '멤버십비납부여부', '유료멤버십보험가입여부', '11/4이전유료멤버십', '마지막로그인일자']\r\n\r\n # null 제거\r\n print('Orig. data len:', len(members_df))\r\n members_df = members_df.dropna()\r\n print('Aft. drop-nan:', len(members_df), '\\n')\r\n\r\n # party_id int형으로 변경\r\n members_df['PartyId'] = members_df['PartyId'].astype('Int64')\r\n\r\n # 가입일자 out of bound 제거\r\n members_df = members_df[members_df['가입일자'] != 99991231]\r\n\r\n # 가입일자 datetime으로 변환\r\n join_dtm = members_df['가입일자'].astype('str')\r\n join_dtm = pd.to_datetime(join_dtm, format='%Y%m%d')\r\n members_df['가입일자'] = join_dtm\r\n\r\n last_dtm = members_df['마지막로그인일자'].astype('str')\r\n last_dtm = pd.to_datetime(last_dtm, format='%Y%m%d')\r\n members_df['마지막로그인일자'] = last_dtm\r\n\r\n # PartyId index 설정\r\n members_df = members_df.set_index('PartyId')\r\n\r\n # PartyId 중복 제거\r\n members_df = members_df[~members_df.index.duplicated(keep='last')]\r\n return members_df","sub_path":"dooinee/eda_lib/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247841195","text":"import random\nimport string\nimport json\n\nfrom django import template\n\n\nregister = template.Library()\n\n# implementa range no template\n@register.filter()\ndef get_range(value):\n return range(int(value))\n\n# usado para montar tabela, dado um número retorna o campo\n#do queryset\n@register.filter()\ndef get_campo(queryset, order):\n try:\n retorno = queryset[order].texto\n except:\n retorno = ''\n return retorno\n\n@register.filter()\ndef get_campo_mark(queryset, order):\n try:\n retorno = queryset[order].texto_mark\n except:\n retorno = ''\n return retorno\n\n# soma dois números\n@register.filter()\ndef add(a,b):\n return a + b\n\n#multiplica dois números\n@register.filter()\ndef multiple(a,b):\n return a * b\n\n@register.filter()\ndef get_letter(value):\n lista = ['a','b','c','d','e','f','g','h','i','j','l','m','n']\n return lista[value]\n\n@register.filter()\ndef filtra_frase(frase):\n inicio = frase.find('{')\n while inicio != -1:\n fim = frase.find('}', inicio+1) + 1\n lista = frase[inicio+1:fim-1]\n space = len(max(lista.split(\";\"))) * ' ' * 2\n r = '' + space + ''\n frase = frase[:inicio] + r + frase[fim:]\n inicio = frase.find('{')\n return frase\n\n@register.filter()\ndef filtra_frase_exemplo(frase):\n inicio = frase.find('{')\n while inicio != -1:\n fim = frase.find('}', inicio+1) + 1\n lista = frase[inicio+1:fim-1]\n lista2 = lista.split(\";\")\n lista3 = list(filter(lambda x: \"*\" in x, lista2))\n if len(lista3) > 0:\n space = ' ' + lista3[0][1:] + ' '\n r = '' + space + ''\n else:\n space = len(max(lista2)) * ' ' * 2\n r = '' + space + ''\n 
frase = frase[:inicio] + r + frase[fim:]\n inicio = frase.find('{')\n return frase\n\n@register.filter()\ndef lista_palavra(frase):\n inicio = inicio = frase.find('{')\n lista = ''\n while inicio != -1:\n fim = frase.find('}', inicio + 1) + 1\n letras = frase[inicio + 1:fim - 1].split(\";\")\n random.shuffle(letras)\n random.shuffle(letras)\n letras = ' / '.join(letras)\n lista = lista + '' + letras + ''\n frase = frase[fim:]\n inicio = frase.find('{')\n return lista\n\n@register.filter()\ndef letterfy(number):\n lista = string.ascii_lowercase\n return lista[number-1]\n\n@register.filter()\ndef toList(palavra):\n if len(palavra) > 0:\n lista = json.decoder.JSONDecoder().decode(palavra)\n else:\n lista = ''\n return lista\n\n@register.filter()\ndef getComando(tipo, inverte=False):\n if inverte==False:\n url = 'img/comandos/observe.png'\n if tipo == 1:\n url = 'img/comandos/escreva.jpg'\n if tipo == 2:\n url = 'img/comandos/assinale.jpg'\n if tipo == 3:\n url = 'img/comandos/assinale.jpg'\n if tipo == 4:\n url = 'img/comandos/escreva.jpg'\n if tipo == 5:\n url = 'img/comandos/ligue.jpg'\n if tipo == 6:\n url = 'img/comandos/desenhe.png'\n else:\n url = 'img/comandos/xobserve.png'\n if tipo == 1:\n url = 'img/comandos/xescreva.jpg'\n if tipo == 2:\n url = 'img/comandos/xassinale.jpg'\n if tipo == 3:\n url = 'img/comandos/xassinale.jpg'\n if tipo == 4:\n url = 'img/comandos/xescreva.jpg'\n if tipo == 5:\n url = 'img/comandos/xligue.jpg'\n if tipo == 6:\n url = 'img/comandos/xdesenhe.png'\n return url\n\n@register.filter()\ndef getComandoDigital(tipo, inverte=False):\n if inverte==False:\n url = 'img/comandos/observe.png'\n if tipo == 1:\n url = 'img/comandos/escreva.jpg'\n if tipo == 2:\n url = 'img/comandos/assinale.jpg'\n if tipo == 3:\n url = 'img/comandos/assinale.jpg'\n if tipo == 4:\n url = 'img/comandos/escreva.jpg'\n if tipo == 5:\n url = 'img/comandos/arrastar.png'\n if tipo == 6:\n url = 'img/comandos/desenhe.png'\n else:\n url = 'img/comandos/xobserve.png'\n if tipo == 1:\n url = 'img/comandos/xescreva.jpg'\n if tipo == 2:\n url = 'img/comandos/xassinale.jpg'\n if tipo == 3:\n url = 'img/comandos/xassinale.jpg'\n if tipo == 4:\n url = 'img/comandos/xescreva.jpg'\n if tipo == 5:\n url = 'img/comandos/xarrastar.png'\n if tipo == 6:\n url = 'img/comandos/xdesenhe.png'\n return url","sub_path":"wizard/templatetags/wizard_extra.py","file_name":"wizard_extra.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"548073364","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom typing import Union\nfrom torch.distributions import Categorical\n\nfrom pathlib import Path\nimport sys\nbase_dir = Path(__file__).resolve().parent.parent\nsys.path.append(str(base_dir))\nfrom agent.greedy.greedy_agent import greedy_snake\nfrom types import SimpleNamespace as SN\nimport yaml\nimport math\nimport os\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\ndef hard_update(source, target):\n target.load_state_dict(source.state_dict())\n\n\ndef soft_update(source, target, tau):\n for src_param, tgt_param in zip(source.parameters(), target.parameters()):\n tgt_param.data.copy_(tgt_param.data * (1.0 - tau) + src_param.data * tau)\n\n\nActivation = Union[str, nn.Module]\n\n_str_to_activation = {\n 'relu': nn.ReLU(),\n 'tanh': nn.Tanh(),\n 'identity': nn.Identity(),\n 'softmax': nn.Softmax(dim=-1),\n}\n\n\ndef mlp(sizes,\n activation: Activation = 'relu',\n 
output_activation: Activation = 'identity'):\n if isinstance(activation, str):\n activation = _str_to_activation[activation]\n if isinstance(output_activation, str):\n output_activation = _str_to_activation[output_activation]\n\n layers = []\n for i in range(len(sizes) - 1):\n act = activation if i < len(sizes) - 2 else output_activation\n layers += [nn.Linear(sizes[i], sizes[i + 1]), act]\n return nn.Sequential(*layers)\n\ndef diji(state, X, Y, width, height):\n mp=np.zeros((height,width))\n for i in range(height):\n for j in range(width):\n mp[i][j]=math.inf\n mp[X][Y]=0\n vis=np.zeros((height,width))\n from queue import PriorityQueue as PQ\n pq=PQ()\n pq.put((0,(X,Y)))\n dx = [-1,1,0,0]\n dy = [0,0,-1,1]\n while (not pq.empty()):\n (d, (x,y)) =pq.get()\n if (vis[x][y]==1): continue\n vis[x][y] = 1\n for i in range(4):\n x1=x+dx[i]\n y1=y+dy[i]\n x1 += height\n x1 %= height\n y1 += width\n y1 %= width\n if (state[x1][y1]==2 or state[x1][y1]==3): continue\n if (mp[x1][y1]>mp[x][y]+1):\n mp[x1][y1]=mp[x][y]+1\n pq.put((mp[x1][y1],(x1,y1)))\n return mp\n\ndef get_min_bean(x, y, beans_position, width, height, snakes, state):\n min_distance = math.inf\n min_x = beans_position[0][1]\n min_y = beans_position[0][0]\n index = 0\n Ux = snakes[0][0][1]\n Uy = snakes[0][0][0]\n id = 1\n if (Ux== x and Uy==y):\n Ux = snakes[1][0][1]\n Uy = snakes[1][0][0]\n id = 0\n mat = diji(state,y,x,width, height)\n matU= diji(state,Uy, Ux, width,height)\n for i, (bean_y, bean_x) in enumerate(beans_position):\n # distance = math.sqrt((x - bean_x) ** 2 + (y - bean_y) ** 2)\n distance_my = mat[bean_y][bean_x]\n distance_U = matU[bean_y][bean_x]\n if (len(snakes[id])+1<=len(snakes[id^1])):\n distance = distance_my\n else:\n if (distance_U == math.inf and distance_my == math.inf):\n distance = math.inf\n elif (distance_my == math.inf):\n distance = math.inf\n elif (distance_U == math.inf):\n distance = distance_my *0.6\n elif (distance_U == distance_my == 1):\n distance = math.inf\n else:\n distance = 0.9*distance_my-0.1*distance_U\n # snake_id = get_id(y, x, width)\n # beans_id = get_id(bean_y, bean_x, width)\n # distance = mat[snake_id][beans_id]\n if distance < min_distance:\n min_x = bean_x\n min_y = bean_y\n min_distance = distance\n index = i\n return min_x, min_y, index\n\n\n# Self position: 0:head_x; 1:head_y\n# Head surroundings: 2:head_up; 3:head_down; 4:head_left; 5:head_right\n# Beans positions: (6, 7) (8, 9) (10, 11) (12, 13) (14, 15)\n# Other snake positions: (16, 17) -- (other_x - self_x, other_y - self_y)\n\ndef get_observations(state, info, agents_index, obs_dim, height, width, step):\n state = np.array(state)\n state = np.squeeze(state, axis=2)\n observations = np.zeros((len(agents_index), obs_dim))\n snakes_position = np.array(info['snakes_position'], dtype=object)\n # beans_position = np.array(info['beans_position']).flatten()\n beans_position = np.array(info['beans_position'])\n for i in agents_index:\n # self head position\n observations[i][:2] = snakes_position[i][0][:]\n\n # head surroundings\n head_x = snakes_position[i][0][1]\n head_y = snakes_position[i][0][0]\n head_surrounding = get_surrounding_3(state, width, height, head_x, head_y)\n observations[i][2:14] = head_surrounding[:]\n # observations[i][14:16] = [head_x, head_y]\n observations[i][14:16] = [snakes_position[i][1][1], snakes_position[i][1][0]]\n observations[i][16:18] = [snakes_position[i][-1][1], snakes_position[i][-1][0]]\n\n head_x_U = snakes_position[i ^ 1][0][1]\n head_y_U = snakes_position[i ^ 1][0][0]\n head_surrounding = 
get_surrounding_3(state, width, height, head_x_U, head_y_U)\n observations[i][18:30] = head_surrounding[:]\n # observations[i][32:34] = [head_x_U, head_y_U]\n observations[i][30:32] = [snakes_position[i^1][1][1], snakes_position[i^1][1][0]]\n observations[i][32:34] = [snakes_position[i^1][-1][1], snakes_position[i^1][-1][0]]\n # other snake positions\n snake_heads = [snake[0] for snake in snakes_position]\n snake_heads = np.array(snake_heads[1:])\n snake_heads -= snakes_position[i][0]\n observations[i][34:36] = snake_heads.flatten()[:]\n observations[i][36:38] = [len(snake) for snake in snakes_position]\n\n observations[i][38] = step\n # beans positions\n beans = beans_position.flatten()\n beans_len = len(beans)\n observations[i][39 : 39 + beans_len] = beans[:]\n if (beans_len < 10) : observations[i][39 + beans_len:] = 0\n for j, (beans_y, beans_x) in enumerate(beans_position):\n dis_my = min(abs(head_y - beans_y), abs(head_y + beans_y + 2 - height)) + min(abs(head_x - beans_x), abs(head_x + beans_x + 2 - width))\n dis_U = min(abs(head_y_U - beans_y), abs(head_y_U + beans_y + 2 - height)) + min(abs(head_x_U - beans_x), abs(head_x_U + beans_x + 2 - width))\n observations[i][49 + 2 * j - 2] = dis_my\n observations[i][49 + 2 * j - 1] = dis_U\n if (beans_len < 10) : observations[i][49 + beans_len:] = 0\n observations[i][59:62] = get_min_bean(head_x, head_y, beans_position, width, height, snakes_position, state)\n observations[i][62:65] = get_min_bean(head_x_U, head_y_U, beans_position, width, height, snakes_position, state)\n return observations\n\n\ndef get_reward(state, info, snake_index, reward, snake_my_delta, snake_your_delta, height, width, final_result):\n state = np.squeeze(np.array(state), axis=2)\n step_reward = np.zeros(len(snake_index))\n for i in snake_index:\n if final_result == 1: # done and won\n step_reward[i] += 200\n elif final_result == 2: # done and lose\n step_reward[i] -= 100\n elif final_result == 3: # done and draw\n step_reward[i] += 20\n else: # not done\n #if reward[i] > 0: # eat a bean\n # step_reward[i] += 30\n # else: # just move\n snakes_position = np.array(info['snakes_position'], dtype=object)\n beans_position = np.array(info['beans_position'], dtype=object)\n snakes_len = len(snakes_position)\n snake_heads = [snake[0] for snake in snakes_position]\n self_head = np.array(snake_heads[i])\n #dists = [min(abs(self_head[0] - other_head[0]), abs(self_head[0] + other_head[0] + 2 - height)) + \n # min(abs(self_head[1] - other_head[1]), abs(self_head[1] + other_head[1] + 2 - width))\n # for other_head in beans_position]\n # [np.sqrt(np.sum(np.square(other_head - self_head))) for other_head in beans_position]\n min_x, min_y, index = get_min_bean(self_head[1], self_head[0], beans_position, width, height, snakes_position, state)\n step_reward[i] -= (min_x + min_y) * 2\n if (snake_my_delta > 0) : step_reward[i] += 30\n else : step_reward[i] += snake_my_delta * 10\n if (snake_your_delta < 0): step_reward[i] += snake_your_delta * (-10)\n # step_reward[i] += min(dists)\n # step_reward[i] += (snake_my_delta - snake_your_delta) * 10\n # step_reward[i] += snake_your_delta * (-10)\n # if reward[i] < 0:\n # step_reward[i] -= 10\n return step_reward\n\n\ndef logits_random(act_dim, logits):\n logits = torch.Tensor(logits).to(device)\n acs = [Categorical(out).sample().item() for out in logits]\n num_agents = len(logits)\n actions = np.random.randint(act_dim, size=num_agents << 1)\n actions[:num_agents] = acs[:]\n return actions\n\ndef append_random(act_dim, action):\n action = 
torch.Tensor([action]).to(device)\n acs = [out for out in action]\n num_agents = len(action)\n actions = np.random.randint(act_dim, size=num_agents << 1)\n actions[:num_agents] = acs[:]\n return actions\n\ndef logits_greedy(state, info, logits, height, width):\n state = np.squeeze(np.array(state), axis=2)\n beans = info['beans_position']\n snakes = info['snakes_position']\n\n logits = torch.Tensor(logits).to(device)\n logits_action = np.array([Categorical(out).sample().item() for out in logits])\n greedy_action = greedy_snake(state, beans, snakes, width, height, [1])\n\n action_list = np.zeros(2)\n action_list[0] = logits_action[0]\n action_list[1] = greedy_action[0]\n\n return action_list\n\ndef append_greedy(act_dim, state, info, action, height, width, step):\n state = np.squeeze(np.array(state), axis=2)\n beans = info['beans_position']\n snakes = info['snakes_position']\n\n action = torch.Tensor([action]).to(device)\n logits_action = np.array([out for out in action])\n greedy_action = greedy_snake(state, beans, snakes, width, height, [1], step)\n \n action_list = np.zeros(2)\n action_list[0] = logits_action[0]\n action_list[1] = greedy_action[0]\n\n return action_list\n\ndef get_surrounding(state, width, height, x, y):\n surrounding = [state[(y - 1) % height][x], # up\n state[(y + 1) % height][x], # down\n state[y][(x - 1) % width], # left\n state[y][(x + 1) % width]] # right\n\n return surrounding\n\ndef get_surrounding_3(state, width, height, x, y):\n surrounding = [state[(y - 1) % height][(x - 1) % width], \n state[(y - 1) % height][x],\n state[(y - 1) % height][(x + 1) % width], \n state[(y + 1) % height][(x - 1) % width], \n state[(y + 1) % height][x],\n state[(y + 1) % height][(x + 1) % width], \n state[y][(x - 1) % width], \n state[y][(x + 1) % width],\n state[y][(x - 2) % width],\n state[y][(x + 2) % width],\n state[(y - 2) % height][x],\n state[(y + 2) % height][x]] \n\n return surrounding\n\n\ndef save_config(args, save_path):\n file = open(os.path.join(str(save_path), 'config.yaml'), mode='w', encoding='utf-8')\n yaml.dump(vars(args), file)\n file.close()\n\n\ndef load_config(args, log_path):\n file = open(os.path.join(str(log_path), 'config.yaml'), \"r\")\n config_dict = yaml.load(file, Loader=yaml.FullLoader)\n print(\"@\", config_dict)\n args = SN(**config_dict)\n print(\"@@\", args)\n return args\n\n\n# def set_algos():\n# with open(os.path.join(os.path.dirname(__file__), \"config\", \"default.yaml\"), \"r\") as f:\n# try:\n# config_dict = yaml.load(f, Loader=yaml.FullLoader)\n# except yaml.YAMLError as exc:\n# assert False, \"default.yaml error: {}\".format(exc)\n#\n# args = SN(**config_dict)\n# return args\n\n\n","sub_path":"rl_trainer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"570724348","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom listen_joystick.msg import joystick\n\ndef callback(data):\n rospy.loginfo(data)\n \ndef listener():\n\n rospy.init_node('listen_joystick_py', anonymous=True)\n \n rospy.Subscriber(\"joystick\", joystick, callback)\n\n rospy.spin()\n \nif __name__ == '__main__':\n listener()","sub_path":"src/listen_joystick/src/listen_joystick_py.py","file_name":"listen_joystick_py.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"90935978","text":"'''\n合并统计每个月的新增开发者\n格式:年份.csv\n1 2 3 4 5 
6 7 8 9 10 11 12\n新增代码贡献者数目\n\n当月代码贡献者数量\n'''\n\nimport json\nimport os\nimport csv\n\ndef merge(folder):\n cDir = \"public\"\n allUsers = {}\n strMonths = []\n\n for i in range(1,10):\n strMonths.append(\"0\" + str(i))\n for i in range(10,13):\n strMonths.append(str(i))\n\n result = {}\n\n for year in range(2008,2019):\n newUsers = [0 for x in range(0,12)]\n everyMonthUsers = [0 for x in range(0,12)]\n result[str(year)] = {}\n\n for month in range(0,12):\n file = cDir + \"/\" + folder + \"/\" + str(year) + '-' + strMonths[month] + '-commitsUser.json'\n with open(file,'r') as f:\n data = json.loads(f.read())\n print(data)\n for item in data:\n if( item not in allUsers):\n allUsers[item] = data[item]\n newUsers[month] += 1\n everyMonthUsers[month] += 1\n\n result[str(year)][\"newUsers\"] = newUsers\n result[str(year)][\"everyUsers\"] = everyMonthUsers\n\n with open( cDir + \"/\" + folder + '/analysisUsers.json','w',newline=\"\") as f:\n json.dump(result,f)\n\nmerge('javascript')\n","sub_path":"public/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242235710","text":"#!/usr/bin/env python\n\nimport pygame, sys, time, random\nfrom pygame.locals import *\n\npygame.init()\nfpsClock = pygame.time.Clock()\n\nplaySurface = pygame.display.set_mode((640, 480))\npygame.dispay.set_caption(\"Raspberry Snake\")\n\nredColour = pygame.Color(255, 0, 0)\nblackColour = pygame.Color(0, 0, 0)\nwhiteColour = pygame.Color(255, 255, 255)\ngreyColour = pygame.Color(150, 150, 0)\n\nsnakePosition = [100, 100]\nsnakeSegments = [[100, 100], [80, 100], [60, 100]]\nraspberrySpawned = 1\ndirection = \"right\"\nchangeDirection = direction\n","sub_path":"python_projects/raspberry_snake.py","file_name":"raspberry_snake.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"120360793","text":"\"\"\"\nKPCA using 3D dataset based off Sebastian Raschka swiss roll tutorial\n\"\"\"\n\nimport pandas as pd\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy import exp\nfrom scipy.linalg import eigh\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets.samples_generator import make_swiss_roll\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef stepwise_kpca(X, gamma, n_components):\n '''\n Implementation of a RBF kernel PCA.\n\n Arguments:\n X: An M*N dataset as NumPy array where the samples are stored as rows (M),\n and the attributes defined as columns (N).\n gamma: A free parameter (coefficient) for the RBF kernel.\n n_components: The number of components to be returned.\n\n '''\n # Calculating the squared Euclidean distances for every pair of points\n # in the MxN dimensional dataset.\n sq_dists = pdist(X, 'sqeuclidean')\n\n # Converting the pairwise distances into a symmetric MxM matrix.\n mat_sq_dists = squareform(sq_dists)\n\n # Computing the MxM kernel matrix.\n K = exp(-gamma * mat_sq_dists)\n\n # Centering the symmetric NxN kernel matrix.\n N = K.shape[0]\n one_n = np.ones((N,N)) / N\n K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)\n\n # Obtaining eigenvalues in descending order with corresponding\n # eigenvectors from the symmetric matrix.\n eigvals, eigvecs = eigh(K)\n\n # Obtaining the i eigenvectors that corresponds to the i highest eigenvalues.\n X_pc = 
np.column_stack((eigvecs[:,-i] for i in range(1,n_components+1)))\n\n return X_pc\n\n#use a random 100 subset file\ninp_csv = pd.read_csv('../../data/sample/rand100subset1.csv', delimiter=',', header=0)\n\n#chosen dimensions - hard coded for now, easy to soft code later.\n# blood glucose\nglucose = inp_csv['glucose']\n# sex (given as 1 or 2)\nsex = inp_csv['sex']\n# BMI\nbmi = inp_csv['bmi']\n# weight\nweight = inp_csv['weight']\n\ncol_by_sex = []\n\nfor i in range(len(sex)):\n if sex[i] == 1:\n col_by_sex.append('red')\n if sex[i] == 2:\n col_by_sex.append('blue')\n\n#print(col_by_sex)\n\n#create vector\ndata = []\n\nfor i in range(len(bmi)):\n\n #create sample vector\n xi = []\n xi.append(glucose[i])\n xi.append(weight[i])\n xi.append(bmi[i])\n\n #Append sample vector to array\n data.append(xi)\n\n#simple way to convert to numpy array\nX = np.array(data)\n\nprint(X[:, 0] )\nprint(X[:, 1])\nprint(X[:, 2])\n#plot initial data\nfig = plt.figure(figsize=(7,7))\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=col_by_sex)\n\n#ax.legend()\n\nplt.title('Glucose vs weight vs bmi by sex')\nax.set_xlabel('Blood glucose concentration')\nax.set_ylabel('weight ')\nax.set_zlabel('BMI')\nplt.show()\n\n#2-component linear PCA\nscikit_pca = PCA(n_components=2)\nX_spca = scikit_pca.fit_transform(X)\n\nplt.figure(figsize=(8,6))\nplt.scatter(X_spca[:, 0], X_spca[:, 1], c=col_by_sex)\n\nplt.title('First 2 principal components after Linear PCA')\nplt.xlabel('PC1')\nplt.ylabel('PC2')\nplt.show()\n","sub_path":"scripts/test/3dkpcatest1_1.py","file_name":"3dkpcatest1_1.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237447846","text":"def printPascal(n):\r\n for line in range(0,n):\r\n for i in range(0,line+1):\r\n\r\n print(binomialCoef(line,i),\" \",end = \"\")\r\n print()\r\ndef binomialCoef(n,k):\r\n res=1\r\n if(k>n-k):\r\n k=n-k\r\n for i in range(0,k):\r\n res = res *(n-i)\r\n res=res//(i+1)\r\n return res\r\nn=7\r\nprintPascal(n)","sub_path":"PascalTriangle.py","file_name":"PascalTriangle.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"44184593","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import division\r\n\r\nfrom PIL import Image\r\n\r\nimport os\r\n\r\n\r\ndef get_img_file(dir_name):\r\n all_img = os.listdir(dir_name)\r\n for img in all_img:\r\n yield os.path.join(dir_name, img)\r\n\r\n\r\ndef resize_img(image_file, width=640, height=1136):\r\n img = Image.open(image_file)\r\n w, h = img.size\r\n scale = max(w / width, h / height)\r\n\r\n if scale > 1:\r\n new_img = img.resize((int(w / scale), int(h / scale)), Image.LANCZOS)\r\n new_img.save('new-' + image_file)\r\n new_img.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n for img in get_img_file('image'):\r\n resize_img(img)\r\n","sub_path":"python/0022/resize_img.py","file_name":"resize_img.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577918938","text":"from __future__ import absolute_import\n\nfrom tornado import web\n\nfrom ..views import BaseHandler\nfrom ..models import WorkersModel, WorkerModel\n\n\nclass WorkersView(BaseHandler):\n @web.authenticated\n def get(self):\n app = self.application\n workers = WorkersModel.get_latest(app).workers\n broker = app.celery_app.connection().as_uri()\n\n 
self.render(\"workers.html\", workers=workers, broker=broker)\n\n\nclass WorkerView(BaseHandler):\n @web.authenticated\n def get(self, workername):\n app = self.application\n worker = WorkerModel.get_worker(app, workername)\n if worker is None:\n raise web.HTTPError(404, \"Unknown worker '%s'\" % workername)\n\n self.render(\"worker.html\", worker=worker)\n","sub_path":"root/lib/flower/views/workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"642864611","text":"import pandas as pd\n\ndef avg_caps_word(\n df: pd.DataFrame,\n col: str\n ) -> pd.DataFrame:\n out_df = df.copy()\n new_col_name = 'avg_caps_word'\n out_df[new_col_name] = out_df[col].apply(_avg_caps)\n return out_df\n\ndef _avg_caps(x):\n num_caps=0\n for char in x:\n if char.isupper():\n num_caps += 1\n words = x.split()\n return num_caps / len(words)","sub_path":"functions/avg_caps_word.py","file_name":"avg_caps_word.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"71776392","text":"#!/usr/bin/env python3\nimport sys\nimport getopt\nimport json\nimport scipy.ndimage as nd\nfrom skimage import io as skio\nimport numpy as np\n\n# usage\ndef apply_affine_tiff_usage():\n print(\"\"\"\nUsage : apply_affine_tiff.py -i \\\\\n -o \\\\\n -a \\\\\n -r \\\\\n -f [fliph/flipv/noflip, default noflip] \\\\\n -O [draw overlaped graph, no arg, default no set]\n\nExample :\n apply_affine_tiff.py -i input.tif \\\\\n -o prefix \\\\\n -r ref.tif \\\\\n -a '[[0.023890163936836614, 1.1533635423882767, 3673.0911462805616], [-1.1509242533824746, 0.022522101202567, 22343.59128420033], [0.0, 0.0, 1.0]]' \\\\\n -f h\n\"\"\",flush=True)\n\n\ndef apply_affine_tiff_main(argv:[]) :\n inputf = ''\n prefix = ''\n affine=np.eye(3)\n flip = 'noflip'\n ref = ''\n overlap = False\n try:\n opts, args = getopt.getopt(argv,\"hi:o:a:f:r:O\",[\"help=\" ,\n \"input=\",\n \"output=\",\n \"affine=\",\n \"flip=\",\n \"ref=\",\n \"overlap\"])\n except getopt.GetoptError:\n apply_affine_tiff_usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-i\", \"--inputf\"):\n inputf = arg\n elif opt in ('-h','--help'):\n handle_trackEM_matrix_usage()\n sys.exit(0)\n elif opt in (\"-o\", \"--output\"):\n prefix = arg\n elif opt in ('-a' , '--affine'):\n affine = np.matrix(np.array(json.loads(arg)))\n elif opt in ('-f' , '--flip'):\n flip = arg\n elif opt in ('-r' , '--ref'):\n ref = arg\n #elif opt in ('-O' , '--overlap'):\n\n\n if inputf == '' or prefix == '' or not flip in ('fliph','flipv','noflip') or ref == '':\n apply_affine_tiff_usage()\n sys.exit(2)\n refd = skio.imread(ref)\n w,h = refd.shape\n print(f'ref w={w}, h={h}')\n\n ind = skio.imread(inputf)\n if flip == 'fliph':\n ind = np.fliplr(ind)\n elif flip == 'flipv':\n ind = np.flipud(ind)\n outd = nd.affine_transform(ind.T,affine,output_shape=(h,w),order=0)\n outd = outd.T\n skio.imsave(f'{prefix}.affined.tiff',outd)\n if overlap:\n draw=np.zeros((refd.shape[0], refd.shape[1],3),dtype='uint8')\n draw[:,0]=refd\n draw[:,1]=outd\n skio.imsave(f'{prefix}.overlap.tiff',draw)\n\nif __name__ == \"__main__\":\n apply_affine_tiff_main(sys.argv[1:])\n","sub_path":"tools/apply_affine_tiff.py","file_name":"apply_affine_tiff.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"72282138","text":"#!/usr/bin/env 
python\nimport ROOT\n\n\nimport subprocess\nimport optparse\nfrom Stat.Limits.settings import *\n\nfrom Stat.Limits.combineUtils import runSinglePoint\n\n \nusage = 'usage: %prog [--cat N]'\nparser = optparse.OptionParser(usage)\n\n\nparser.add_option(\"\", \"--mDark\", dest='mDark',type=\"string\", help='signal point mDark')\nparser.add_option(\"\", \"--mZprime\", dest='mZprime',type=\"string\", help='signal point mZprime')\nparser.add_option(\"\", '--rinv', dest='rinv',type=\"string\", help='signal point rinv')\nparser.add_option(\"\", '--alpha', dest='alpha', type=\"string\",help='signal point alpha')\nparser.add_option(\"-c\", \"--channel\",dest=\"ch\",type=\"string\",default=\"all\",help=\"Indicate channels of interest. Default is all\")\nparser.add_option(\"-y\", \"--years\",dest=\"years\",type=\"string\",default=\"all\",help=\"Indicate years of interest. Default is 2016\")\nparser.add_option('-m', '--method', dest='method', type='string', default = 'hist', help='Run a single method ( hist, template)')\nparser.add_option('-d', '--dir', dest='dir', type='string', default = 'outdir', help='datacards direcotry')\n\nparser.add_option('',\"--runSingleCat\",dest=\"runSingleCat\",action='store_true', default=False)\n\n(opt, args) = parser.parse_args()\n#interaction = opt.interaction\n\n\npath_ = \"/t3home/decosa/SVJ/CMSSW_8_1_0/src/Stat/Limits/test/\"\npath_ += opt.dir\n\ncategories = channels\nyears = [\"2016\", \"2017\", \"2018\"]\nif opt.years != \"all\":\n y_clean = opt.years.replace(\" \", \"\")\n years = y_clean.split(\",\")\n\n\n\nif opt.ch != \"all\":\n ch_clean = opt.ch.replace(\" \", \"\")\n categories = ch_clean.split(\",\")\n\nif opt.method != \"all\":\n meth_clean = opt.method.replace(\" \", \"\")\n methods = meth_clean.split(\",\")\n\n\ncats = []\nfor y in years:\n\n cats_ = [c + \"_\" + y for c in categories]\n cats = cats + cats_\n\ncategories = cats\n\nsingleYear=\"\"\nif (len(years)==1 and opt.years!=\"all\"):singleYear=years[0]\nrunSinglePoint(path_, opt.mZprime, opt.mDark, opt.rinv, opt.alpha, categories, opt.method, opt.runSingleCat, singleYear)\n","sub_path":"Limits/test/runCombineSinglePoint.py","file_name":"runCombineSinglePoint.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386009964","text":"def remove(substr,theStr):\n string_remove = \"I\"\n sWithoutsubstr = \"\"\n\n for i in theStr:\n if i != string_remove:\n sWithoutsubstr = sWithoutsubstr + i\n return sWithoutsubstr\n\nprint(remove('I', 'I am number 1. 
I am number 1.'))\n","sub_path":"Chp_9/9.11.py","file_name":"9.11.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447541132","text":"import time \nimport os\nimport subprocess \nimport xmlrpc.client\nimport traceback\n\nimport log\nimport web\nimport update\n\n\n@log.add_task \nclass aria2(log.Task):\n def __init__(self):\n self.p = None\n try:\n aria2 = xmlrpc.client.ServerProxy(\"http://127.0.0.1:6800/rpc\")\n aria2.aria2.getGlobalStat() \n log.error_log.error('[loop_init] aria2 found')\n except:\n try:\n if log.os_name == 'Windows':\n self.p = subprocess.Popen(\n log.relative_path('aria2/aria.bat'), \n cwd=log.relative_path('aria2/'), \n stdout=None, \n stderr=None, \n ) \n log.error_log.error('[loop_init] aria2 start on windows')\n elif log.os_name == 'Linux':\n self.p = subprocess.Popen(\n log.relative_path('aria2/aria.sh'), \n cwd=log.relative_path('aria2/'), \n stdout=None, \n stderr=None,\n ) \n log.error_log.error('[loop_init] aria2 start on linux')\n except:\n log.error_log.error('[loop_init] aria2 not found and start failed')\n\n def loop_exit(self):\n if self.p is not None:\n self.p.kill()\n log.error_log.error('[loop_exit] aria2 killed')\n\n\nif __name__ == '__main__':\n try:\n counter = 0\n while 1: \n time.sleep(0.2) \n counter += 1\n if log.idle: \n log.status = '' \n counter = 0 \n elif counter == 1: \n try:\n log.status = 'loop_head' \n for task in log.tasks: task.loop_head() \n log.status = 'loop_body'\n for task in log.tasks: task.loop_body()\n log.status = 'loop_tail'\n for task in log.tasks: task.loop_tail() \n except:\n log.error_log.info(traceback.format_exc()) \n log.error_log.error()\n elif 1 < counter < 900*5:\n log.status = f'loop_waiting {counter*0.2:.1f} s' \n else: \n counter = 0\n except (KeyboardInterrupt, SystemExit):\n for task in log.tasks:\n task.loop_exit() \n time.sleep(0.2)\n os._exit(0)\n \n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315665233","text":"#!/usr/bin/env python3\n\"\"\" Reuben Thorpe (2016) Example of an intermediate search using kat.\"\"\"\n\nfrom kickass import api\nfrom kickass import CATEGORY, FIELD, ORDER\n\n\n# Create a new kat() instance with a valid kick ass torrent domain.\nkat = api(\"kat.al\")\n\n\n# Generate a new search instance for \"ubuntu\".\nubuntu_search = kat.search(\"ubuntu\")\n\n\n# Set the category to search in.\nubuntu_search.category(CATEGORY.APPLICATIONS)\n\n\n# Set the field to sort the result by.\nubuntu_search.field(FIELD.SIZE)\n\n\n# Set the order in which to display results (assending/desending).\nubuntu_search.order(ORDER.DESC)\n\n\n# Display the title of torrents from page one of your query.\nfor torrent in ubuntu_search.page(1):\n print(torrent[\"title\"])\n\n\n# Display the title of torrents from page 2 through to 4 of your query.\nfor torrent in ubuntu_search.multipage(2, 4):\n print(torrent[\"title\"])\n\n","sub_path":"examples/intermediate_search.py","file_name":"intermediate_search.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122112277","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import *\n\n\nclass Widget(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n 
self.setLayout(QVBoxLayout())\n self.btn = QPushButton(\"Add\", self)\n sc = QScrollArea(self)\n self.area = QWidget()\n self.layout().addWidget(self.btn)\n self.layout().addWidget(sc)\n sc.setWidget(self.area)\n sc.setWidgetResizable(True)\n self.area.setLayout(QVBoxLayout())\n self.btn.clicked.connect(self.onClicked)\n\n def onClicked(self):\n path = QStandardPaths.standardLocations(QStandardPaths.PicturesLocation)[0]\n pictures, _ = QFileDialog.getOpenFileNames(self, \"Select Images\", path, \"Image Files (*.png *.jpg *.bmp)\")\n for picture in pictures:\n label = QLabel(self)\n label.setPixmap(QPixmap(picture))\n self.area.layout().addWidget(label)\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n w = Widget()\n w.show()\n sys.exit(app.exec_())","sub_path":"temp/46819818.py","file_name":"46819818.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70333238","text":"# Section02-3\n# lxml기초 스크래핑\n# css selector : https://www.w3schools.com/cssref/trysel.asp\n# pip instal lxml, requests, cssselect\n\n# 문서를 가져와서 parsing(구조에 맞게 내가 가져오는 것, 예. 한글만 가져오기, 특수문자 제거 등).\n# 그 중에 많이 사용하는 것이 lxml\n\nimport requests\nimport lxml.html\n\ndef main():\n '''\n 네이버 메인 뉴스 스탠드 스크래핑 메인함수\n '''\n\n #스크래핑 대상 URL(앞에서 나온 URLOPEN으로해도 되는데 더 편해서 쓰는 것)\n response = requests.get('https://www.naver.com/') # Get, POST\n\n # 신문사 링크 리스트 획득\n urls = scrape_news_list_page(response)\n\n\ndef scrape_news_list_page(response):\n # URL LIST 선언\n urls = []\n # 태그 정보 문자열 저장\n # print(response.content)\n root = lxml.html.fromstring(response.content) #\n\n for a in root.cssselect('div._NM_NEWSSTAND_THUMB div.popup_wrap a:nth-child(3)'):\n url = a.get('href')\n print(url)\n urls.append(urls)\n\n return urls\n\n\n# 스크래핑 시작\nif __name__=='__main__':\n main()\n","sub_path":"crawling/BS4/section02-03.py","file_name":"section02-03.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"632780945","text":"## Calculate a person's state income tax.\r\nincome = float(input(\"Enter your taxable income: \"))\r\nif income <= 20000:\r\n tax = .02 * income\r\nelse:\r\n if income <= 50000:\r\n tax = 400 + .025 * (income - 20000)\r\n else:\r\n tax = 1150 + .035 * (income - 50000)\r\nprint(\"Your tax is ${0:,.0f}.\".format(tax))\r\n","sub_path":"Answer keys Ch3/3-2-E43.py","file_name":"3-2-E43.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250493664","text":"#!flask/bin/python\n\n# Author: Ngo Duy Khanh\n# Email: ngokhanhit@gmail.com\n# Git repository: https://github.com/ngoduykhanh/flask-file-uploader\n# This work based on jQuery-File-Upload which can be found at https://github.com/blueimp/jQuery-File-Upload/\nimport PIL\nimport simplejson\nimport traceback\nimport redis\nfrom threading import Thread\nimport skimage.io as sio\nfrom PIL import Image\nimport numpy as np\nimport base64\nimport flask\nimport redis\nimport uuid\nimport time\nimport json\nimport sys\nimport io\nimport os\n\nfrom flask import Flask, request, render_template, redirect, url_for, send_from_directory\nfrom flask_bootstrap import Bootstrap\nfrom werkzeug import secure_filename\n\nfrom lib import tool\nfrom lib.upload_file import uploadfile\nfrom lib import store as S3_lib\nfrom Classifier import Classifier\nfrom Parser import Parser\nimport settings\n\napp = 
Flask(__name__)\napp.config['SECRET_KEY'] = settings.SECRET_KEY\napp.config['UPLOAD_FOLDER'] = settings.TEMP_FOLDER \napp.config['THUMBNAIL_FOLDER'] = settings.TEMP_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = settings.MAX_CONTENT_LENGTH\nbootstrap = Bootstrap(app)\ns3 = S3_lib.s3_client()\n@app.route(\"/upload\", methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n files = request.files['file']\n\n if files:\n filename = secure_filename(files.filename)\n filename = tool.gen_file_name(\n filename, app.config['UPLOAD_FOLDER'])\n mime_type = files.content_type\n\n if not tool.allowed_file(files.filename, settings.ALLOWED_EXTENSIONS):\n result = uploadfile(\n name=filename, type=mime_type, size=0, not_allowed_msg=\"File type not allowed\")\n\n else:\n # save file to disk\n uploaded_file_path = os.path.join(\n app.config['UPLOAD_FOLDER'], filename)\n files.save(uploaded_file_path)\n\n # classify the image\n im = Image.open(uploaded_file_path)\n image = Classifier.prepare_image(im , (settings.C_IMAGE_WIDTH, settings.C_IMAGE_HEIGHT))\n # predict the score\n image = image.copy(order=\"C\")\n\n if settings.USE_CLASSIFIER:\n k = str(uuid.uuid4())\n d = {\"id\": k, \"image\": tool.base64_encode_image(image)}\n settings.db.rpush(settings.C_IMAGE_QUEUE, json.dumps(d))\n print('push image in classifier queue: ', k)\n\n while True:\n output = settings.db.get(k)\n if output is not None:\n output = output.decode(\"utf-8\")\n score = json.loads(output)\n print('get result :', k)\n settings.db.delete(k)\n break\n time.sleep(settings.CLIENT_SLEEP)\n\n S3_lib.s3_down_file(s3 , settings.OTHER_BUCKET , 'score.json' , app.config['UPLOAD_FOLDER'] + 'score.json')\n f = open(app.config['UPLOAD_FOLDER'] + 'score.json', 'r')\n model = json.load(f)\n f.close()\n model[filename] = score\n f = open(app.config['UPLOAD_FOLDER'] + 'score.json' , 'w')\n f.write(json.dumps(model))\n f.close()\n S3_lib.s3_upload_file(s3 , settings.OTHER_BUCKET , 'score.json' , app.config['UPLOAD_FOLDER'] + 'score.json')\n \n if settings.USE_PARSER:\n image = Parser.prepare_image(im , (settings.P_IMAGE_HEIGHT, settings.P_IMAGE_WIDTH))\n k = str(uuid.uuid4())\n d = {\"id\": k, \"image\": tool.base64_encode_image(image)}\n settings.db.rpush(settings.P_IMAGE_QUEUE, json.dumps(d))\n print('push image in parser queue: ', k)\n\n while True:\n output = settings.db.get(k)\n if output is not None:\n output = output.decode(\"utf-8\")\n result = json.loads(output)\n print('get result :', k)\n settings.db.delete(k)\n break\n time.sleep(settings.CLIENT_SLEEP)\n \n parsed_image_string = result['image']\n parsed_image = tool.base64_decode_image(\n parsed_image_string , \n dtype = np.uint8 ,\n shape = (settings.P_IMAGE_HEIGHT, settings.P_IMAGE_WIDTH, settings.P_IMAGE_CHANS))\n sio.imsave(os.path.join(app.config['UPLOAD_FOLDER'], 'parsed_'+filename) , parsed_image)\n S3_lib.s3_upload_file(s3 , settings.RESULT_BUCKET , 'parsed_'+filename , app.config['UPLOAD_FOLDER'] + 'parsed_'+ filename)\n\n S3_lib.s3_upload_file(s3 , settings.IMAGE_BUCKET , filename , app.config['UPLOAD_FOLDER'] + filename)\n # create thumbnail after saving\n if mime_type.startswith('image'):\n tool.create_thumbnail(\n filename, app.config['UPLOAD_FOLDER'], app.config['THUMBNAIL_FOLDER']\n )\n S3_lib.s3_upload_file(s3 , settings.THUMBNAIL_BUCKET , 'tumb_'+filename , app.config['THUMBNAIL_FOLDER'] + 'tumb_'+filename)\n\n # get file size after saving\n size = os.path.getsize(uploaded_file_path)\n\n # return json for js call back\n result = uploadfile(name=filename, 
type=mime_type, size=size)\n\n return simplejson.dumps({\"files\": [result.get_file()]})\n\n if request.method == 'GET':\n # files = [f for f in os.listdir(app.config['UPLOAD_FOLDER']) if os.path.isfile(\n # os.path.join(app.config['UPLOAD_FOLDER'], f)) and f not in settings.IGNORED_FILES]\n files = [f for f in S3_lib.get_listfiles(s3 ,settings.IMAGE_BUCKET) if f[0] not in settings.IGNORED_FILES]\n\n file_display = []\n for name , size in files:\n file_saved = uploadfile(name=name, size=size)\n file_display.append(file_saved.get_file())\n\n return simplejson.dumps({\"files\": file_display})\n\n return redirect(url_for('index'))\n\n\n@app.route(\"/delete/\", methods=['DELETE'])\ndef delete(filename):\n #file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n #file_thumb_path = os.path.join(app.config['THUMBNAIL_FOLDER'], filename)\n file_s3_path = S3_lib.get_url(settings.IMAGE_BUCKET , filename)\n file_s3_tumb_path = S3_lib.get_url(settings.THUMBNAIL_BUCKET , 'tumb_'+filename)\n\n #try:\n S3_lib.s3_delete_file(s3 , settings.IMAGE_BUCKET , filename)\n print(f'delete {file_s3_path}')\n S3_lib.s3_delete_file(s3 , settings.THUMBNAIL_BUCKET , 'tumb_'+filename)\n S3_lib.s3_delete_file(s3 , settings.RESULT_BUCKET , 'parsed_'+filename) \n return simplejson.dumps({filename: 'True'})\n #except:\n # return simplejson.dumps({filename: 'False'})\n\n\n# serve static files\n@app.route(\"/thumbnail/\", methods=['GET'])\ndef get_thumbnail(filename):\n return send_from_directory(app.config['THUMBNAIL_FOLDER'], filename=filename)\n\n\n@app.route(\"/data/\", methods=['GET'])\ndef get_file(filename):\n return send_from_directory(os.path.join(app.config['UPLOAD_FOLDER']), filename=filename)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n@app.route('/gallary' , methods=['GET' , 'POST'])\ndef gallary():\n #try:\n S3_lib.s3_down_file(s3 , settings.OTHER_BUCKET , 'score.json' , app.config['UPLOAD_FOLDER'] + 'score.json') \n f = open(app.config['UPLOAD_FOLDER'] + 'score.json' , 'r')\n scores = json.load(f)\n f.close()\n # files = [f for f in os.listdir(app.config['UPLOAD_FOLDER']) if os.path.isfile(\n # os.path.join(app.config['UPLOAD_FOLDER'], f)) and f not in settings.IGNORED_FILES]\n files = [f for f in S3_lib.get_listfiles(s3 ,settings.IMAGE_BUCKET) if f[0] not in settings.IGNORED_FILES]\n parsed_files = [f[0] for f in S3_lib.get_listfiles(s3 ,settings.RESULT_BUCKET) if f[0] not in settings.IGNORED_FILES]\n file_display = []\n file_score = {}\n for name , size in files:\n file_saved = uploadfile(name=name, size=size)\n if 'parsed_' + name not in parsed_files:\n file_saved.parsed_url = \"\"\n print(file_saved.get_file())\n file_display.append(file_saved.get_file())\n score_string = \"\"\n if name in scores:\n for i, score in enumerate(scores[name]):\n score_string += \"{}.{} : {:.4f}%
\".format(i+1 , score['label'] , score['probability'] * 100)\n file_score[name] = score_string\n return render_template('gallary.html' , images = file_display , scores = file_score)\n #except Exception as e:\n # print(e)\n@app.route('/video' , methods=['GET' , 'POST'])\ndef video():\n return render_template('video.html')\n\n@app.route(\"/classify\", methods=[\"POST\"])\ndef classifier_predict():\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n data = {\"success\": False}\n # read the image in PIL format and prepare it for\n # classification\n image = flask.request.files[\"image\"].read()\n # initialize the data dictionary that will be returned from the view\n image = Image.open(io.BytesIO(image))\n image = Classifier.prepare_image(\n image, (settings.C_IMAGE_WIDTH, settings.C_IMAGE_HEIGHT))\n\n # ensure our NumPy array is C-contiguous as well,\n # otherwise we won't be able to serialize it\n image = image.copy(order=\"C\")\n\n # generate an ID for the classification then add the\n # classification ID + image to the queue\n k = str(uuid.uuid4())\n d = {\"id\": k, \"image\": tool.base64_encode_image(image)}\n settings.db.rpush(settings.C_IMAGE_QUEUE, json.dumps(d))\n print('push image : ', k)\n\n # keep looping until our model server returns the output\n # predictions\n while True:\n # attempt to grab the output predictions\n output = settings.db.get(k)\n\n # check to see if our model has classified the input\n # image\n if output is not None:\n # add the output predictions to our data\n # dictionary so we can return it to the client\n output = output.decode(\"utf-8\")\n data[\"predictions\"] = json.loads(output)\n print('get result :', k)\n # delete the result from the database and break\n # from the polling loop\n settings.db.delete(k)\n break\n\n # sleep for a small amount to give the model a chance\n # to classify the input image\n time.sleep(settings.CLIENT_SLEEP)\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)\n\n\n@app.route(\"/parsing\", methods=[\"POST\"])\ndef parser_predict():\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n data = {\"success\": False}\n # read the image in PIL format and prepare it for\n # classification\n image = flask.request.files[\"image\"].read()\n # initialize the data dictionary that will be returned from the view\n image = Image.open(io.BytesIO(image))\n image = Parser.prepare_image(\n image, (settings.P_IMAGE_WIDTH, settings.P_IMAGE_HEIGHT))\n\n # ensure our NumPy array is C-contiguous as well,\n # otherwise we won't be able to serialize it\n image = image.copy(order=\"C\")\n\n # generate an ID for the classification then add the\n # classification ID + image to the queue\n k = str(uuid.uuid4())\n d = {\"id\": k, \"image\": tool.base64_encode_image(image)}\n settings.db.rpush(settings.P_IMAGE_QUEUE, json.dumps(d))\n print('push image : ', k)\n\n # keep looping until our model server returns the output\n # predictions\n while True:\n # attempt to grab the output predictions\n output = settings.db.get(k)\n\n # check to see if our model has classified the input\n # image\n if output is not None:\n # add the output predictions to our data\n # dictionary so we can return it to the client\n data[\"predictions\"] = json.loads(output.decode(\"utf-8\"))\n print('get result :', 
k)\n # delete the result from the database and break\n # from the polling loop\n settings.db.delete(k)\n break\n\n # sleep for a small amount to give the model a chance\n # to classify the input image\n time.sleep(settings.CLIENT_SLEEP)\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)\n\n\nif __name__ == '__main__':\n # ignore the warning\n import warnings\n warnings.simplefilter(\"ignore\")\n # load the function used to classify input images in a *separate*\n # thread than the one used for main classification\n '''\n t = Thread(target=classfier.classify_process, args=())\n t.daemon = True\n t.start()\n '''\n # start the web server\n print(\"* Starting web service...\")\n app.run(debug=True, port=9191, threaded=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591173069","text":"import os\nimport json\nimport sys\nimport random\n\ntopic_name = \"NBA\"\n\nraw_dir = 'data/'+topic_name+'/tagged_before/'\nsample_dir = 'data/' + topic_name + '/tagged/'\nrate = 0.05\n\ndate_list = ['10-05','10-06','10-13']\nfor date in date_list:\n\n\tfilename = date+'tagged.txt'\n\traw_file = raw_dir + filename\n\tsample_file = sample_dir + date+'.txt'\n\n\traw_data = []\n\traw_f = open(raw_file,'r')\n\tfor line in raw_f.readlines():\n\t\tif line.strip()=='':\n\t\t\tcontinue\n\t\traw_data.append(line)\n\traw_f.close()\n\tsample_data = []\n\n\traw_sum = len(raw_data)\n\trandomList=random.sample(range(0,raw_sum),int(raw_sum*rate))\n\n\tfor index in randomList:\n\t\tlabel,sentence = raw_data[index].split('\\t')\n\t\tnew_line = label+'\\t'+str(index)+'\\t'+sentence\n\t\tsample_data.append(new_line)\n\n\t#sample_data = list(str(index)+'\\t'+raw_data[index] for index in randomList)\n\n\twith open(sample_file,'w',encoding='utf-8') as f:\n\t\tfor data in sample_data:\n\t\t\tf.write(data)\n\t\t\tf.write('\\n')\n\tf.close()\n","sub_path":"real-world/pick&tag_everyday/pick_tagged.py","file_name":"pick_tagged.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386370402","text":"from django.contrib.auth.decorators import login_required\nfrom . models import *\nfrom django.views.generic import ListView, CreateView, UpdateView, DetailView\nfrom . 
forms import *\nfrom django.urls import reverse_lazy\nimport csv\nimport xlwt\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django import template\nfrom .models import Agent\n\nfrom .filters import ProjectsFilter, AgentFilter, InvoiceFilter\n# Create your views here.\ndef export_invoices_xls(request):\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"approved-invoices.xls\"'\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('Invoices')\n\n # Sheet header, first row\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n columns = ['Agent', 'ISP Name', 'Monthly Subscription', 'Date Due', 'Status', 'Date Submitted',]\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n\n rows = Invoice.objects.filter(approved=True).values_list('agent', 'isp_name', 'monthly_subscription', 'due_date', 'status', 'date_submitted')\n for row in rows:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n\n wb.save(response)\n return response\n\ndef export_users_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"users.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['Username', 'First name', 'Last name', 'Email address'])\n\n users = User.objects.all().values_list('username', 'first_name', 'last_name', 'email')\n for user in users:\n writer.writerow(user)\n\n return response\n\ndef export_payments_due_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"payments-due.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['Agent', 'ISP Name', 'Monthly Subscription', 'Date Due'])\n\n invoices = Invoice.objects.filter(approved=True).values_list('agent', 'isp_name', 'monthly_subscription', 'due_date')\n for invoice in invoices:\n writer.writerow(invoice)\n\n return response\n\ndef export_filtered_invoices(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\n\tresponse = HttpResponse(content_type='text/csv')\n\tresponse['Content-Disposition'] = 'attachment; filename=\"filtered-invoices-due.csv\"'\n\n\twriter = csv.writer(response)\n\twriter.writerow(['Agent', 'ISP Name', 'Monthly Subscription', 'Date Due'])\n\n\tinvoices = Invoice.objects.filter(Q(agent__ssdc_number__icontains=q) |Q(project__title__icontains=q) | Q(status__icontains=q) | Q(due_date__icontains=q)).values_list('agent', 'isp_name', 'monthly_subscription', 'due_date')\n\tfor invoice in invoices:\n\t writer.writerow(invoice)\n\n\treturn response\n\n@login_required(login_url=\"/login/\")\ndef home(request):\n\treturn render(request, \"index.html\")\n\n@login_required(login_url=\"/login/\")\ndef TeamLeaders(request):\n\tteam_leaders = TeamLeader.objects.all()\n\ttemplate_name = \"app/team-leaders.html\"\n\treturn render(request, template_name, {\"team_leaders\": team_leaders})\n\nclass NewTeamLeader(CreateView):\n\tmodel = TeamLeader\n\tform_class = AddTeamLeaderForm\n\ttemplate_name = \"app/new-team-leader.html\"\n\n@login_required(login_url=\"/login/\")\ndef Agents(request):\n\tq = 
request.GET.get('q') if request.GET.get('q') != None else ''\n\tfiltered_agents = Agent.objects.filter(Q(name__icontains=q) | Q(ssdc_number__icontains=q) | Q(project__title__icontains=q))\n\t\n\tcontext = {\n\t\t\"filtered_agents\": filtered_agents\n\t}\n\n\ttemplate_name = \"app/agents.html\"\n\treturn render(request, template_name, context)\n\nclass NewAgent(CreateView):\n\tmodel = Agent\n\tform_class = AddAgentForm\n\ttemplate_name = \"app/new-agent.html\"\n\ndef all_projects(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\tfiltered_projects = Project.objects.filter(Q(title__icontains=q))\n\tcontext = {\n\t\t\"filtered_projects\": filtered_projects\n\t}\n\t\"\"\"\n\tfiltered_projects = ProjectsFilter(\n\t\trequest.GET,\n\t\tqueryset=Project.objects.all()\n\t)\n\tcontext['filtered_projects'] = filtered_projects\n\t\"\"\"\n\treturn render(request, \"app/projects.html\", context)\n\nclass NewProject(CreateView):\n\tmodel = Project\n\tfields = \"__all__\"\n\ttemplate_name = \"app/new-project.html\"\n\n@login_required(login_url=\"/login/\")\ndef Invoices(request):\n\tinvoices = Invoice.objects.all()\n\ttemplate_name = \"app/invoices.html\"\n\treturn render(request, template_name, {\"invoices\": invoices})\n\nclass NewInvoice(CreateView):\n\tmodel = Invoice\n\tform_class = AddInvoiceForm\n\ttemplate_name = \"app/new-invoice.html\"\n\n\tdef form_valid(self, form):\n\t\tform.instance.project = self.request.user.agent.project\n\t\treturn super().form_valid(form)\n\nclass InvoiceDetails(DetailView):\n\tmodel = Invoice\n\ttemplate_name = \"app/invoice.html\"\n\ndef agent_profile(request):\n\tagent = Agent.objects.filter(user=request.user)\n\tprint(\"Agent is: \",agent)\n\tcontext = {\n\t\t\"agent\": agent\n\t}\n\treturn render(request, \"agent-profile.html\", context)\n\ndef team_leader_profile(request):\n\tteamleader = TeamLeader.objects.filter(user=request.user)\n\tprint(\"Agent is: \",teamleader)\n\tcontext = {\n\t\t\"teamleader\": teamleader\n\t}\n\treturn render(request, \"team-leader-profile.html\", context)\n\ndef agent_invoices(request):\n\tagent = Agent.objects.filter(user=request.user)\n\tagent_invoices = Invoice.objects.filter(agent=request.user.agent)\n\n\tcontext = {\n\t\t\"agent\": agent,\n\t\t\"agent_invoices\": agent_invoices\n\t}\n\treturn render(request, \"app/agent-invoices.html\", context)\n\nclass ApproveInvoice(UpdateView):\n\tmodel = Invoice\n\tfields = [\"approved\", \"reason_declined\"]\n\ttemplate_name = \"app/approve-invoice.html\"\n\tsuccess_url = reverse_lazy(\"tl-invoices\")\n\n@login_required(login_url=\"/login/\")\ndef index(request):\n agents = Agent.objects.all()\n return render(request, 'index.html', {'agents':agents})\n\ndef my_agents(request):\n\tcontext = {}\n\tfiltered_agents = AgentFilter(\n\t\trequest.GET,\n\t\tqueryset=Agent.objects.all()\n\t)\n\tcontext['filtered_agents'] = filtered_agents\n\n\treturn render(request, \"app/my-agents.html\", context=context)\n\nclass AgentDetails(DetailView):\n\tmodel = Agent\n\ttemplate_name = \"app/agent-details.html\"\n\ndef tl_invoices(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\tfiltered_invoices = Invoice.objects.filter(Q(agent__ssdc_number__icontains=q) |Q(project__title__icontains=q) | Q(status__icontains=q) | Q(due_date__icontains=q))\n\tcontext = {\n\t\t\"filtered_invoices\": filtered_invoices\n\t}\n\treturn render(request, \"app/tl-invoices.html\", context)\n\nclass ChangeInvoiceStatus(UpdateView):\n\tmodel = Invoice\n\tfields = 
[\"status\"]\n\ttemplate_name = \"app/change-invoice-status.html\"\n\tsuccess_url = reverse_lazy(\"admin-invoices\")\n\nclass UpdateProject(UpdateView):\n\tmodel = Project\n\tfields = \"__all__\"\n\ttemplate_name = \"app/update-project.html\"\n\ndef ApprovedInvoices(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\t\"\"\"\n\tfiltered_invoices = InvoiceFilter(\n\t\trequest.GET,\n\t\tqueryset=Invoice.objects.filter(approved=True)\n\t)\n\tcontext['filtered_invoices'] = filtered_invoices\n\t\"\"\"\n\tfiltered_invoices = Invoice.objects.filter(Q(agent__ssdc_number__icontains=q) | Q(project__title__icontains=q) | Q(status__icontains=q) | Q(due_date__icontains=q), approved=True)\n\tcontext = {\n\t\t\"filtered_invoices\": filtered_invoices\n\t}\n\treturn render(request, \"app/admin-invoices.html\", context=context)\n\ndef DeclinedInvoices(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\t\"\"\"\n\tfiltered_invoices = InvoiceFilter(\n\t\trequest.GET,\n\t\tqueryset=Invoice.objects.filter(approved=True)\n\t)\n\tcontext['filtered_invoices'] = filtered_invoices\n\t\"\"\"\n\tfiltered_invoices = Invoice.objects.filter(Q(agent__ssdc_number__icontains=q) | Q(project__title__icontains=q) | Q(status__icontains=q) | Q(due_date__icontains=q), approved=False)\n\tcontext = {\n\t\t\"filtered_invoices\": filtered_invoices\n\t}\n\treturn render(request, \"app/admin-invoices.html\", context=context)\n\ndef PaidInvoices(request):\n\tq = request.GET.get('q') if request.GET.get('q') != None else ''\n\t\"\"\"\n\tfiltered_invoices = InvoiceFilter(\n\t\trequest.GET,\n\t\tqueryset=Invoice.objects.filter(approved=True)\n\t)\n\tcontext['filtered_invoices'] = filtered_invoices\n\t\"\"\"\n\tfiltered_invoices = Invoice.objects.filter(Q(agent__ssdc_number__icontains=q) | Q(project__title__icontains=q) | Q(status__icontains=q) | Q(due_date__icontains=q), approved=True, status=\"Paid\")\n\tcontext = {\n\t\t\"filtered_invoices\": filtered_invoices\n\t}\n\treturn render(request, \"app/admin-invoices.html\", context=context)\n\nclass UpdateTL(UpdateView):\n\tmodel = TeamLeader\n\tform_class = UpdateTLForm\n\ttemplate_name = \"app/update-tl-profile.html\"\n\nclass UpdateAgent(UpdateView):\n\tmodel = Agent\n\tform_class = UpdateAgentForm\n\ttemplate_name = \"app/update-agent-profile.html\"\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"438544636","text":"#Matthew Blum 2/3/2020; create a script with two variables, decide which is larger, or display the relation. 
Only use if once.\r\n\r\n#assign variables\r\na = 5\r\nb = 7\r\n\r\n#if statement: if a is larger than b, print\r\nif a > b:\r\n\tprint(\"a is greater than b\")\r\n#elif statement: if not, also print\r\nelif a < b:\r\n\tprint(\"a is smaller than b\")\r\n\r\n#else statement: if not, also print\r\nelse:\r\n\tprint(\"a is equal to b\")","sub_path":"02_problem3.py","file_name":"02_problem3.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"558753514","text":"import utils as ut\nimport numpy as np\nimport math\n\n\ndef diff_check(directory, ff_1, ff_2):\n \n \n u1 = np.zeros((3, 48, 35, 48))\n u2 = u1\n \n \n # ORIGINAL\n f = ut.openFile(directory + '/' + ff_1)\n for i, line in enumerate(f):\n values = line.split()\n nx = int(values[0])\n ny = int(values[1])\n nz = int(values[2])\n nd = int(values[3])\n vel = float(values[4])\n \n u1[nd, nx, ny, nz] = vel\n f.close()\n\n\n # AFTER SOLVER\n f = ut.openFile(directory + '/' + ff_2)\n for i, line in enumerate(f):\n values = line.split()\n nx = int(values[0])\n ny = int(values[1])\n nz = int(values[2])\n nd = int(values[3])\n vel = float(values[4])\n \n u2[nd, nx, ny, nz] = vel\n f.close()\n \n threshold = 1e-10\n \n Nd = u1.shape[0]\n Nx = u1.shape[1]\n Ny = u1.shape[2]\n Nz = u1.shape[3]\n \n differences = {}\n \n for nd in range(0, Nd):\n for nx in range(0, Nx):\n for ny in range(0, Ny):\n for nz in range(0, Nz):\n diff = np.abs( u1[nd, nx, ny, nz] - u2[nd, nx, ny, nz] )\n \n if diff >= threshold:\n label=str(nd)+'.'+str(ny)+'.'+str(ny)+'.'+str(nz)\n differences[label] = diff\n \n if not differences:\n print('Done checking all differences, there were...')\n print('No Differences :)')\n \n else:\n print('Done checking all differences, there were...')\n print('Some differences...')\n print('Check the dictionary in debug mode')\n \n \n return 0\n \n\ndirectory='/home/arslan/Documents/work/channelflow-related/database_solns/W03/equilibria/EQ5/temp'\nff_1='eq5.asc'\nff_2='eq5-from-ASCII.asc'\ndiff_check(directory, ff_1, ff_2)","sub_path":"flowField_difference.py","file_name":"flowField_difference.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"354033413","text":"from pyee._base import BaseEventEmitter\nfrom warnings import warn\n\ntry:\n from asyncio import iscoroutine, ensure_future\nexcept ImportError:\n iscoroutine = None\n ensure_future = None\n\n\nclass CompatEventEmitter(BaseEventEmitter):\n \"\"\"An EventEmitter exposed for compatibility with prior versions of\n pyee. This functionality is deprecated; you should instead use either\n ``AsyncIOEventEmitter``, ``TwistedEventEmitter``, ``ExecutorEventEmitter``,\n ``TrioEventEmitter`` or ``BaseEventEmitter``.\n\n This class is similar to the ``AsyncIOEventEmitter`` class, but also allows\n for overriding the scheduler function (``ensure_future`` by default as in\n ``ASyncIOEventEmitter``) and does duck typing checks to handle Deferreds.\n In other words, by setting ``scheduler`` to\n ``twisted.internet.defer.ensureDeferred`` this will support twisted use\n cases for coroutines.\n\n When calling synchronous handlers, raised exceptions are ignored - as with\n the BaseEventEmitter, you must capture and handle your own exceptions.\n However, for coroutine functions, exceptions are handled by emitting them\n on the ``error`` event. 
Note that when using with twisted, the ``error``\n event will emit Failures, not Exceptions.\n\n This class will also successfully import in python 2, but without coroutine\n support.\n \"\"\"\n\n def __init__(self, scheduler=ensure_future, loop=None):\n warn(\n DeprecationWarning(\n \"pyee.EventEmitter is deprecated and will be removed in a future \"\n \"major version; you should instead use either \"\n \"pyee.AsyncIOEventEmitter, pyee.TwistedEventEmitter, \"\n \"pyee.ExecutorEventEmitter, pyee.TrioEventEmitter, \"\n \"or pyee.BaseEventEmitter.\"\n )\n )\n\n super(CompatEventEmitter, self).__init__()\n\n self._schedule = scheduler\n self._loop = loop\n\n def _emit_run(self, f, args, kwargs):\n coro = f(*args, **kwargs)\n\n if iscoroutine and iscoroutine(coro):\n if self._loop:\n d = self._schedule(coro, loop=self._loop)\n else:\n d = self._schedule(coro)\n\n # scheduler gave us an asyncio Future\n if hasattr(d, \"add_done_callback\"):\n\n @d.add_done_callback\n def _callback(f):\n exc = f.exception()\n if exc:\n self.emit(\"error\", exc)\n\n # scheduler gave us a twisted Deferred\n elif hasattr(d, \"addErrback\"):\n\n @d.addErrback\n def _callback(exc):\n self.emit(\"error\", exc)\n","sub_path":"venv/lib/python3.9/site-packages/pyee/_compat.py","file_name":"_compat.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"463582413","text":"# pylint: disable=redefined-outer-name\n\nimport json\n\nimport keras\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nimport bentoml\nfrom tests.integration.utils import (\n build_api_server_docker_image,\n export_service_bundle,\n run_api_server_docker_container,\n)\n\nTF2 = tf.__version__.startswith('2')\n\nif TF2:\n from tests.bento_service_examples.keras_classifier import KerasClassifier\nelse:\n from tests.bento_service_examples.keras_with_tf1_classifier import KerasClassifier\n\ntest_data = [1, 2, 3, 4, 5]\n\n\n@pytest.fixture(params=[tf.keras, keras], scope=\"session\")\ndef keras_model(request):\n ke = request.param\n net = ke.Sequential(\n (\n ke.layers.Dense(\n units=1,\n input_shape=(5,),\n use_bias=False,\n kernel_initializer=ke.initializers.Ones(),\n ),\n )\n )\n net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return net\n\n\n@pytest.fixture(scope=\"session\")\ndef svc(keras_model):\n \"\"\"Return a TensorFlow2 BentoService.\"\"\"\n # When the ExampleBentoService got saved and loaded again in the test, the\n # two class attribute below got set to the loaded BentoService class.\n # Resetting it here so it does not effect other tests\n\n KerasClassifier._bento_service_bundle_path = None\n KerasClassifier._bento_service_bundle_version = None\n\n svc = KerasClassifier()\n keras_model.predict(np.array([test_data]))\n svc.pack('model', keras_model)\n svc.pack('model2', keras_model)\n return svc\n\n\n@pytest.fixture(scope=\"session\")\ndef image(svc, clean_context):\n with export_service_bundle(svc) as saved_path:\n yield clean_context.enter_context(build_api_server_docker_image(saved_path))\n\n\n@pytest.fixture(scope=\"module\")\ndef host(image, enable_microbatch):\n with run_api_server_docker_container(\n image, enable_microbatch=enable_microbatch, timeout=500\n ) as host:\n yield host\n\n\ndef test_keras_artifact(svc):\n assert svc.predict([test_data]) == [\n 15.0\n ], 'Inference on unsaved Keras artifact does not match expected'\n assert svc.predict2([test_data]) == [\n 15.0\n ], 'Inference on unsaved 
Keras artifact does not match expected'\n\n\ndef test_keras_artifact_loaded(svc):\n with export_service_bundle(svc) as saved_path:\n loaded = bentoml.load(saved_path)\n assert (\n loaded.predict([test_data]) == 15.0\n ), 'Inference on saved and loaded Keras artifact does not match expected'\n assert (\n loaded.predict2([test_data]) == 15.0\n ), 'Inference on saved and loaded Keras artifact does not match expected'\n\n\n@pytest.mark.asyncio\nasync def test_keras_artifact_with_docker(host):\n await pytest.assert_request(\n \"POST\",\n f\"http://{host}/predict\",\n headers=((\"Content-Type\", \"application/json\"),),\n data=json.dumps(test_data),\n assert_status=200,\n assert_data=b'[15.0]',\n )\n await pytest.assert_request(\n \"POST\",\n f\"http://{host}/predict2\",\n headers=((\"Content-Type\", \"application/json\"),),\n data=json.dumps(test_data),\n assert_status=200,\n assert_data=b'[15.0]',\n )\n","sub_path":"tests/integration/test_keras_artifact.py","file_name":"test_keras_artifact.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"266602014","text":"import unittest\nfrom test_code.name_function import get_formatted_name\nfrom test_code.city_functions import get_city_name\n\n\nclass NameTestCase(unittest.TestCase):\n \"\"\"测试name_funciton.py\"\"\"\n\n def test_first_last_name(self):\n # 函数中self为形参,类似于赋值中的变量\n \"\"\"能够处理像Jains Joplin这样的姓名吗\"\"\"\n formatted_name = get_formatted_name('jains', 'joplin')\n\n # 下面的\"assertEqual\"为测试方法,验证名字的格式是否一直\n self.assertEqual(formatted_name, 'Jains Joplin')\n\n def test_city_name(self):\n city_name = get_city_name('shanghai', 'china', '90000')\n self.assertEqual(city_name, 'Shanghai China -Population 90000')\n # city_name = get_city_name('shanghai', 'china')\n # self.assertEqual(city_name, 'Shanghai China')\n\n\n# __name__ 是特殊变量,这个变量是在程序执行时设置的(这句话的意思是运行变量的过程中,系统会默认设置一个值给这个变量)\n# 如果这个文件作为主程序执行,变量__name__将被设置为'__main__'\n# 当该程序被引用的是 __name__ 变量就会发生改变\nif __name__ == '__main__':\n unittest.main()\n\n# # __name__ 内置函数,在不同模块下的含义\n# print(f\"\\n\\n内置函数的名称为:{__name__}\")\n\n'''引用 name_function 中的get_name函数'''\n# import name_function\n\n# 修改后的代码\n# from test_code.name_function import get_name\n#\n# get_name()\n\na = 1\na += 1\nretu\n\n'''\nunittest模块中使用了6种断言方法\nassertEqual 核实a == b\nassertNotEqual 核实 a != b\nassertTrue(x) 核实x为True\nassertFalse(x) 核实x为False\nassertIn(item, list) 核实item在list队列中\nassertNotIn(item, list) 核实item(项目)不在list队列中\n'''\n\n\n\n\n\n","sub_path":"Python/test_code/test_name_function.py","file_name":"test_name_function.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"121360565","text":"from MyBotan import Botan\nfrom PythonApi.RPApi.Base import Api as RPApi\nimport settings\nimport logging\nimport time\nimport PythonApi.jotihunt.Retrievers as jotihuntApi\nimport pickle\nimport os\nfrom telegram.parsemode import ParseMode\nimport re\nimport imaplib\nfrom PythonApi.scraperApi.Jotihuntscraper import get_hunts\nfrom PythonApi.scraperApi.webscraper import to_dict\n\nUPDATER_FILE = 'updater.jhu'\n\nALPHA, BRAVO, CHARLIE, DELTA, ECHO, FOXTROT, XRAY, PHOTOS, OPDRACHTEN, \\\n NIEUWS, ERROR, HINTS = range(12)\nmy_updates_instance = None\n__all__ = ['get_updates', 'ALPHA', 'BRAVO', 'CHARLIE', 'DELTA', 'ECHO',\n 'FOXTROT', 'XRAY', 'PHOTOS', 'OPDRACHTEN', 'NIEUWS', 'ERROR',\n 'HINTS']\n\nstatus_plaatjes = {\n 'a': {\n 'groen': {\n 'type': 'sticker',\n 
'file_id': 'BQADBAADOAADxPsqAXmyBBClXTd4Ag'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADNAADxPsqAWy_jDGSfM8VAg'\n },\n 'oranje': {''\n 'type': 'sticker',\n 'file_id': 'BQADBAADNgADxPsqAW5L5FGEVeZsAg'\n }\n },\n 'c': {\n 'groen': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADTAADxPsqAYLV3juZLpBdAg'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADSgADxPsqAT-u5My8rm3gAg'\n },\n 'oranje': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADRgADxPsqAQV4dBO6m83XAg'\n }\n },\n 'b': {\n 'groen': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADQAADxPsqAe0nAoB-ZMyOAg'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADQgADxPsqAYIFsuIiE6hzAg'\n },\n 'oranje': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADRAADxPsqAWxDH1LIGSXKAg'\n }\n },\n 'e': {\n 'groen': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADWgADxPsqAUL07wYDRvidAg'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADVAADxPsqAQsjZhRr4lEnAg'\n },\n 'oranje': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADWAADxPsqATm-pA-vdphAAg'\n }\n },\n 'd': {\n 'groen': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADTgADxPsqAZx6xRcZie8dAg'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADUgADxPsqAb2HyQa_q_n8Ag'\n },\n 'oranje': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADUAADxPsqAQmw5iS__C7yAg'\n }\n },\n 'f': {\n 'groen': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADXgADxPsqATT7K_u22oL7Ag'\n },\n 'rood': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADXAADxPsqAYLGQPHFp1xLAg'\n },\n 'oranje': {\n 'type': 'sticker',\n 'file_id': 'BQADBAADVgADxPsqAffXkv_Pldg-Ag'\n }\n }\n}\n\n\ndef get_updates():\n global my_updates_instance\n if my_updates_instance is None:\n try:\n if os.path.isfile(UPDATER_FILE):\n with open(UPDATER_FILE, 'rb') as file:\n my_updates_instance = MyUpdates()\n d = pickle.load(file)\n my_updates_instance.from_dict(d)\n if my_updates_instance is None:\n raise Exception('huh')\n else:\n my_updates_instance = MyUpdates()\n except Exception as e:\n my_updates_instance = MyUpdates()\n my_updates_instance.error(e, 'startup error')\n return my_updates_instance\n\n\ndef void_no_crash():\n def decorate(func):\n def call(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except Exception as e:\n logging.error(str(e), func.__name__)\n print(str(e))\n updates = get_updates()\n updates.error(e, func.__name__)\n\n return call\n\n return decorate\n\n\nclass MyUpdates:\n def __init__(self):\n\n self.seenHunts = dict()\n self.mail = imaplib.IMAP4_SSL('imap.gmail.com')\n self.mail.login(settings.Settings().rpmail_username,\n settings.Settings().rpmail_pass)\n self.mail.select('INBOX')\n self.bot = None\n self.botan = Botan(settings.Settings().botan_key)\n\n # sets met chat_ids die updates willen ontvangen.\n self._A = set() # naam niet veranderen\n self._B = set() # naam niet veranderen\n self._C = set() # naam niet veranderen\n self._D = set() # naam niet veranderen\n self._E = set() # naam niet veranderen\n self._F = set() # naam niet veranderen\n self._X = set() # naam niet veranderen\n self._photos = set() # naam niet veranderen\n self._opdrachten = set() # naam niet veranderen\n self._nieuws = set() # naam niet veranderen\n self._error = set() # naam niet veranderen\n self._hints = set() # naam niet veranderen\n self._punten = {'opdrachten': 0,\n 'hints': 0,\n 'hunts': 0,\n 'fotos': 0,\n 'totaal': 0}\n\n self._last_update = 0\n self.lastA = None\n self.lastB = None\n self.lastC = None\n self.lastD = None\n self.lastE = None\n self.lastF = None\n self.lastX = None\n self.lastPhoto = None\n self.lastOpdracht = None\n 
self.lastNieuws = None\n self.lastStatus = None\n self.seenMail = set()\n self.lastHint = None\n self.rp_api = RPApi.get_instance(settings.Settings().rp_username,\n settings.Settings().rp_pass)\n\n def to_dict(self):\n return {'A': self._A,\n 'B': self._B,\n 'C': self._C,\n 'D': self._D,\n 'E': self._E,\n 'F': self._F,\n 'X': self._X,\n 'photos': self._photos,\n 'opdrachten': self._opdrachten,\n 'nieuws': self._nieuws,\n 'error': self._error,\n 'hints': self._hints,\n 'punten': self._punten\n }\n\n @void_no_crash()\n def from_dict(self, d):\n for k in d:\n setattr(self, '_' + k, d[k])\n\n @void_no_crash()\n def update(self):\n if self.has_bot() and \\\n (self._last_update is None or\n abs(time.time() - self._last_update) > 60):\n self.update_vos_last()\n self.update_vos_status()\n self.update_nieuws()\n self.update_opdrachten()\n self.update_hint()\n self.update_foto_opdracht()\n self.update_mail()\n self.update_hunts()\n self._last_update = time.time()\n else:\n return\n\n def save(self):\n d = self.to_dict()\n with open(UPDATER_FILE, 'wb') as file:\n pickle.dump(d, file)\n\n @void_no_crash()\n def add_bot(self, bot):\n self.bot = bot\n\n def has_bot(self):\n return self.bot is not None\n\n def check_updates(self, chat_id):\n if chat_id in self._A:\n yield 'Alpha'\n if chat_id in self._B:\n yield 'Bravo'\n if chat_id in self._C:\n yield 'Charlie'\n if chat_id in self._D:\n yield 'Delta'\n if chat_id in self._E:\n yield 'Echo'\n if chat_id in self._F:\n yield 'Foxtrot'\n if chat_id in self._X:\n yield 'X-Ray'\n if chat_id in self._error:\n yield 'Errors'\n if chat_id in self._nieuws:\n yield 'Nieuws'\n if chat_id in self._opdrachten:\n yield 'Opdrachten'\n if chat_id in self._photos:\n yield 'photos'\n if chat_id in self._hints:\n yield 'hints'\n\n @void_no_crash()\n def add_chat(self, chat_id, update_type):\n if update_type == ALPHA:\n self._A.add(chat_id)\n elif update_type == BRAVO:\n self._B.add(chat_id)\n elif update_type == CHARLIE:\n self._C.add(chat_id)\n elif update_type == DELTA:\n self._D.add(chat_id)\n elif update_type == ECHO:\n self._E.add(chat_id)\n elif update_type == FOXTROT:\n self._F.add(chat_id)\n elif update_type == XRAY:\n self._X.add(chat_id)\n elif update_type == PHOTOS:\n self._photos.add(chat_id)\n elif update_type == OPDRACHTEN:\n self._opdrachten.add(chat_id)\n elif update_type == NIEUWS:\n self._nieuws.add(chat_id)\n elif update_type == ERROR:\n self._error.add(chat_id)\n elif update_type == HINTS:\n self._hints.add(chat_id)\n\n @void_no_crash()\n def remove_chat(self, chat_id, update_type):\n if update_type == ALPHA:\n self._A.remove(chat_id)\n elif update_type == BRAVO:\n self._B.remove(chat_id)\n elif update_type == CHARLIE:\n self._C.remove(chat_id)\n elif update_type == DELTA:\n self._D.remove(chat_id)\n elif update_type == ECHO:\n self._E.remove(chat_id)\n elif update_type == FOXTROT:\n self._F.remove(chat_id)\n elif update_type == XRAY:\n self._X.remove(chat_id)\n elif update_type == PHOTOS:\n self._photos.remove(chat_id)\n elif update_type == OPDRACHTEN:\n self._opdrachten.remove(chat_id)\n elif update_type == NIEUWS:\n self._nieuws.remove(chat_id)\n elif update_type == ERROR:\n self._error.remove(chat_id)\n elif update_type == HINTS:\n self._hints.remove(chat_id)\n\n @void_no_crash()\n def set_updates(self, chat_id, dg, status):\n if status:\n self.add_chat(chat_id, dg)\n else:\n self.remove_chat(chat_id, dg)\n\n @void_no_crash()\n def update_vos_last(self):\n vos_a = self.rp_api.vos('a')\n vos_b = self.rp_api.vos('b')\n vos_c = self.rp_api.vos('c')\n 
vos_d = self.rp_api.vos('d')\n vos_e = self.rp_api.vos('e')\n vos_f = self.rp_api.vos('f')\n vos_x = self.rp_api.vos('x')\n if self.lastA != vos_a and self.has_bot():\n self.lastA = vos_a\n for chat_id in self._A:\n self.new_vos(chat_id, 'Alpha', vos_a)\n if self.lastB != vos_b and self.has_bot():\n self.lastB = vos_b\n for chat_id in self._B:\n self.new_vos(chat_id, 'Bravo', vos_b)\n if self.lastC != vos_c and self.has_bot():\n self.lastC = vos_c\n for chat_id in self._C:\n self.new_vos(chat_id, 'Charlie', vos_c)\n if self.lastD != vos_d and self.has_bot():\n self.lastD = vos_d\n for chat_id in self._D:\n self.new_vos(chat_id, 'Delta', vos_d)\n if self.lastE != vos_e and self.has_bot():\n self.lastE = vos_e\n for chat_id in self._E:\n self.new_vos(chat_id, 'Echo', vos_e)\n if self.lastF != vos_f and self.has_bot():\n self.lastF = vos_f\n for chat_id in self._F:\n self.new_vos(chat_id, 'Foxtrot', vos_f)\n if self.lastX != vos_x and self.has_bot():\n self.lastX = vos_x\n for chat_id in self._X:\n self.new_vos(chat_id, 'X-Ray', vos_x)\n\n @void_no_crash()\n def new_vos(self, chat_id, deelgebied, vos):\n if vos['icon'] == '3':\n m = self.bot.sendMessage(chat_id, deelgebied + \" Is gespot.\\n \" +\n \"extra info: \" + vos['extra'] + '\\n' +\n 'opmerking/adres: ' + vos['opmerking'])\n elif vos['icon'] == '4':\n m = self.bot.sendMessage(chat_id, deelgebied + \" is geshunt.\\n\" +\n \"extra info: \" + vos['extra'] + '\\n' +\n 'opmerking/adres: ' + vos['opmerking'])\n else:\n m = self.bot.sendMessage(chat_id,\n \"Er is een Hint ingevoerd voor \" + str(\n deelgebied) + '\\n' +\n 'extra info: ' + str(\n vos['extra']) + '\\n' +\n 'opmerking/adres: ' + str(\n vos['opmerking']))\n self.bot.sendLocation(chat_id, latitude=vos['latitude'],\n longitude=vos['longitude'])\n self.botan.track(m, 'newLoc_' + deelgebied + '_' + vos['icon'])\n\n @void_no_crash()\n def update_vos_status(self):\n response = jotihuntApi.get_vossen()\n curr_status = response.data\n\n def send_update(chat_id, vos, new_status):\n if new_status is None:\n return\n m = self.bot.sendSticker(chat_id, status_plaatjes[vos][new_status][\n 'file_id'])\n self.botan.track(m, 'vos_status_' + vos + '_' + new_status)\n send_cloudmessage(vos, new_status)\n\n def extract_status(vos):\n return curr_status[vos[0].lower()].status\n\n def send_a():\n for chat_id in self._A:\n vos = 'a'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_b():\n for chat_id in self._B:\n vos = 'b'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_c():\n for chat_id in self._C:\n vos = 'c'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_d():\n for chat_id in self._D:\n vos = 'd'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_e():\n for chat_id in self._E:\n vos = 'e'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_f():\n for chat_id in self._F:\n vos = 'f'\n send_update(chat_id, vos, extract_status(vos))\n\n def send_x():\n for chat_id in self._X:\n vos = 'x'\n send_update(chat_id, vos, extract_status(vos))\n\n if self.lastStatus is None:\n send_a()\n send_b()\n send_c()\n send_d()\n send_e()\n send_f()\n send_x()\n self.lastStatus = curr_status\n else:\n for k, item in enumerate(curr_status):\n if item.team == 'Alpha' and item['status'] != \\\n extract_status('a')['status']:\n send_a()\n if item.team == 'Bravo' and item['status'] != \\\n extract_status('b')['status']:\n send_b()\n if item.team == 'Charlie' and item['status'] != \\\n extract_status('c')['status']:\n send_c()\n if item.team == 'Delta' and 
item['status'] != \\\n extract_status('d')['status']:\n send_d()\n if item.team == 'Echo' and item['status'] != \\\n extract_status('e')['status']:\n send_e()\n if item.team == 'Foxtrot' and item['status'] != \\\n extract_status('f')['status']:\n send_f()\n self.lastStatus = curr_status\n\n @void_no_crash()\n def update_nieuws(self):\n nieuws = jotihuntApi.get_nieuws_lijst().data\n if nieuws and nieuws[0] != self.lastNieuws:\n item = nieuws[0].data\n message = 'Er is nieuws met de titel [{title}]({url})'.format(\n title=item.titel,\n url=settings.Settings().base_nieuws_url + item.ID)\n for chat_id in self._nieuws:\n self.bot.sendMessage(chat_id, message,\n parse_mode=ParseMode.MARKDOWN)\n self.lastNieuws = nieuws[0]\n\n @void_no_crash()\n def update_opdrachten(self):\n opdrachten = jotihuntApi.get_opdrachten().data\n if opdrachten and opdrachten[0] != self.lastOpdracht:\n opdracht = opdrachten[0].data\n message = 'Er is nieuws met de titel [{title}]({url})'.format(\n title=opdracht.titel,\n url=settings.Settings().base_opdracht_url + opdracht.ID)\n for chat_id in self._opdrachten:\n self.bot.sendMessage(chat_id, message,\n parse_mode=ParseMode.MARKDOWN)\n self.lastOpdracht = opdrachten[0]\n\n @void_no_crash()\n def update_hint(self):\n hints = jotihuntApi.get_hints().data\n if hints and hints[0] != self.lastHint:\n hint = hints[0].data\n message = 'Er is een hint met de titel [{title}]({url})'\n message = message.format(title=hint.titel,\n url=settings.Settings().base_hint_url +\n hint.ID)\n for chat_id in self._hints:\n self.bot.sendMessage(chat_id, message,\n parse_mode=ParseMode.MARKDOWN)\n self.lastHint = hints[0]\n\n @void_no_crash()\n def update_foto_opdracht(self):\n pass\n\n @void_no_crash()\n def update_mail(self):\n i = 1\n found = []\n self.mail.search(None, 'ALL')\n while True:\n j = bytes(str(i), 'utf8')\n try:\n status, mail = self.mail.fetch(j, '(RFC822)')\n except Exception as e:\n self.error(e, 'update_mail')\n break\n if mail[0] is None:\n break\n raw_text = mail[0][1].decode('utf8')\n result = re.search('de opdracht(.)*?deze opdracht', raw_text, re.S)\n if result is not None and result.group(0) not in self.seenMail:\n found.append(result.group(0))\n self.seenMail.add(result)\n result = re.search(\n 'Jullie tegenhunt(.)*?mag uiteraard wel',\n raw_text, re.S)\n if result is not None and result.group(0) not in self.seenMail:\n found.append(result.group(0))\n self.seenMail.add(result.group(0))\n i += 1\n for update in found:\n for chat_id in self._nieuws:\n self.bot.sendMessage(chat_id,\n 'Er is een mail van de organisatie:\\n'\n + str(update))\n\n @void_no_crash()\n def update_hunts(self):\n h = get_hunts()\n hd = to_dict(*h)\n for k, v in enumerate(hd):\n if k not in self.seenHunts:\n if str(k).lower().startswith('a'):\n for chat_id in self._A:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n elif str(k).lower().startswith('b'):\n for chat_id in self._B:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n elif str(k).lower().startswith('c'):\n for chat_id in self._C:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n elif str(k).lower().startswith('d'):\n for chat_id in self._D:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n elif str(k).lower().startswith('e'):\n for chat_id in self._E:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n elif str(k).lower().startswith('f'):\n for chat_id 
in self._F:\n self.bot.sendMessage(chat_id, 'code: ' + str(\n k) + ' is ingevoerd op de website')\n self.seenHunts[k] = v\n else:\n if v['status'] != self.seenHunts[k]['status']:\n message = 'de status van code: {code} is aangepast op de website. Van {old_status} naar {new_status}. Het aantal punten voor deze hunt is nu: {punten}'\n message = message.format(code=str(k),\n old_status=str(\n self.seenHunts[k][\n 'status']),\n new_status=str(v['status']\n ),\n puntten=str(v['punten']))\n if str(k).lower().startswith('a'):\n for chat_id in self._A:\n self.bot.sendMessage(chat_id, message)\n elif str(k).lower().startswith('b'):\n for chat_id in self._B:\n self.bot.sendMessage(chat_id, message)\n elif str(k).lower().startswith('c'):\n for chat_id in self._C:\n self.bot.sendMessage(chat_id, message)\n elif str(k).lower().startswith('d'):\n for chat_id in self._D:\n self.bot.sendMessage(chat_id, message)\n elif str(k).lower().startswith('e'):\n for chat_id in self._E:\n self.bot.sendMessage(chat_id, message)\n elif str(k).lower().startswith('f'):\n for chat_id in self._F:\n self.bot.sendMessage(chat_id, message)\n else:\n for chat_id in self._nieuws:\n self.bot.sendMessage(chat_id, message)\n self.seenHunts[k] = v\n\n @void_no_crash()\n def error(self, e, func_name):\n logging.info('updates error send to user:' + str(e) + ' ' + func_name)\n for chat_id in self._error:\n if self.has_bot():\n self.bot.sendMessage(chat_id,\n \"er is een error opgetreden:\\n\" + str(\n func_name) + '\\n' + str(e))\n\n\ndef send_cloudmessage(vos, status):\n key = settings.Settings().firebase_key\n data = {'vos': vos, 'status': status}\n","sub_path":"Updates.py","file_name":"Updates.py","file_ext":"py","file_size_in_byte":22768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"493145792","text":"from gcb_web_auth.backends.oauth import OAuth2Backend\n\nBESPIN_USER_GROUP = 'duke:group-manager:roles:bespin-users'\n\n\nclass BespinOAuth2Backend(OAuth2Backend):\n \"\"\"\n Adds check to make sure users belong to the bespin-user group manager group.\n This group is managed via https://groups.oit.duke.edu/groupmanager/.\n \"\"\"\n def check_user_details(self, details):\n duke_unique_id = details['dukeUniqueID']\n self.verify_user_belongs_to_group(duke_unique_id, BESPIN_USER_GROUP)\n","sub_path":"bespin/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319011990","text":"from math import cos, sin, radians\nfrom mars.msg import Robot as RobotMsg\nimport rosparam\nfrom geometry_utils.geometry import *\nfrom vision.Marker import Marker\nfrom vision.SmoothFilter import Smoother\nfrom time import time\n\n\nclass RosserialClient(object):\n def __init__(self, id, timeout=3.5):\n self.id = id\n self.last_time = time()\n self.timeout = timeout\n\n def update_time(self):\n self.last_time = time()\n\n def is_disconnect(self):\n if time() - self.last_time >= self.timeout:\n return True\n else:\n return False\n\n\nclass PointSmoother(object):\n def __init__(self, N=15):\n self.smoothers_list = [Smoother(N), Smoother(N)]\n\n def smooth_point(self, point):\n x, y = point.x, point.y\n x_smoother, y_smoother = self.smoothers_list\n smoothed_x = x_smoother.get_smoothed_value(x)\n smoothed_y = y_smoother.get_smoothed_value(y)\n return Point(smoothed_x, smoothed_y)\n\nclass CornersSmoother(object):\n def __init__(self, N=15):\n self.N = N\n self.smoothers_list = 
self.create_smoothers_list()\n self.iterations = 0\n self.max_iterations = 100\n\n def create_smoothers_list(self):\n smoothers = [PointSmoother(self.N),\n PointSmoother(self.N),\n PointSmoother(self.N),\n PointSmoother(self.N)]\n return smoothers\n\n def smooth_corners(self, corners):\n smoothed = []\n for pt, smoother in zip(corners, self.smoothers_list):\n smoothed.append(smoother.smooth_point(pt))\n self.iterations += 1\n if self.iterations > self.max_iterations:\n self.smoothers_list = self.create_smoothers_list()\n self.iterations = 0\n return smoothed\n\n\nclass Robot(Marker):\n def __init__(self, id, corners, x_pos=None, y_pos=None):\n Marker.__init__(self, id, corners, x_pos, y_pos)\n self.center_smoother = PointSmoother()\n self.corners_smoother = CornersSmoother()\n self.front_side = None\n self.back_side = None\n self.left_side = None\n self.right_side = None\n self.marker_size = rosparam.get_param(\"marker_size\") * 10\n self.dist_between_markers_pair_centers = rosparam.get_param(\"dist_between_markers_pair_centers\") * 10\n self.corners_list = []\n\n def set_robot_geometry(self, from_center_to_front_side,\n from_center_to_back_side,\n from_center_to_left_side,\n from_center_to_right_side):\n\n self.from_center_to_front_side = from_center_to_front_side\n self.from_center_to_back_side = from_center_to_back_side\n self.from_center_to_left_side = from_center_to_left_side\n self.from_center_to_right_side = from_center_to_right_side\n\n def prepare_msg(self):\n robot_msg = RobotMsg()\n robot_msg.id = self.id\n robot_msg.center = self.center\n robot_msg.direction = self.direction\n robot_msg.corners = self.corners\n robot_msg.front_side = self.front_side if self.front_side is not None else Point(-1, -1)\n robot_msg.back_side = self.back_side if self.back_side is not None else Point(-1, -1)\n robot_msg.left_side = self.left_side if self.left_side is not None else Point(-1, -1)\n robot_msg.right_side = self.right_side if self.right_side is not None else Point(-1, -1)\n return robot_msg\n\n def set_actual_point(self, point):\n self.actual_point = point\n\n def get_corner_lst(self, corner_id):\n return [corners[corner_id] for corners in self.corners_list]\n\n def get_distances(self, corners):\n pass\n\n def update(self, corners):\n smoothed_corners = self.corners_smoother.smooth_corners(corners)\n self.corners = smoothed_corners\n self.center = self.get_center(smoothed_corners)\n # self.corners = corners\n # self.center = self.get_center(corners)\n self.direction = self.get_direction_point()\n self.update_k_points()\n\n def filtered_to_corners(self, filtered):\n c0, c1, c2, c3 = [], [], [], []\n final_corners = []\n for l in filtered:\n if len(l) >= 4:\n c0.append(l[0])\n c1.append(l[1])\n c2.append(l[2])\n c3.append(l[3])\n full_c = [c0, c1, c2, c3]\n for c in full_c:\n\n x_lst = [p.x for p in c]\n y_lst = [p.y for p in c]\n if x_lst and y_lst:\n final_corners.append(Point(np.mean(x_lst), np.mean(y_lst)))\n return final_corners\n\n def set_k_points(self, front_side, back_side, left_side, right_side):\n self.front_side = front_side\n self.back_side = back_side\n self.left_side = left_side\n self.right_side = right_side\n\n def update_k_points(self):\n x, y = self.center.to_tuple()\n\n h_pix = ([self.corners[1], self.corners[2]], [self.corners[3], self.corners[0]])\n w_pix = ([self.corners[0], self.corners[1]], [self.corners[2], self.corners[3]])\n h_mm_in_pix = get_mmeters_in_pix(self.marker_size, h_pix)\n w_mm_in_pix = get_mmeters_in_pix(self.dist_between_markers_pair_centers, 
w_pix)\n\n mm_in_pix = np.mean([h_mm_in_pix, w_mm_in_pix])\n\n self.from_center_to_front_side_pix = self.from_center_to_front_side * 10 / mm_in_pix * 0.9\n self.from_center_to_back_side_pix = self.from_center_to_back_side * 10 / mm_in_pix * 0.9\n\n self.from_center_to_left_side_pix = self.from_center_to_left_side * 10 / mm_in_pix * 0.9\n self.from_center_to_right_side_pix = self.from_center_to_right_side * 10 / mm_in_pix * 0.9\n\n front_dx, front_dy = self.find_side_point_dx_dy(0, 1, self.from_center_to_front_side_pix)\n if self.center.x <= self.direction.x:\n if self.center.y <= self.direction.y:\n self.front_side = Point(x + front_dx, y + front_dy)\n else:\n self.front_side = Point(x + front_dx, y - front_dy)\n else:\n if self.center.y <= self.direction.y:\n self.front_side = Point(x - front_dx, y + front_dy)\n else:\n self.front_side = Point(x - front_dx, y - front_dy)\n\n back_dx, back_dy = self.find_side_point_dx_dy(2, 3, self.from_center_to_back_side_pix)\n if self.center.x <= self.direction.x:\n if self.center.y <= self.direction.y:\n self.back_side = Point(x - back_dx, y - back_dy)\n else:\n self.back_side = Point(x - back_dx, y + back_dy)\n else:\n if self.center.y <= self.direction.y:\n self.back_side = Point(x + back_dx, y - back_dy)\n else:\n self.back_side = Point(x + back_dx, y + back_dy)\n\n left_dx, left_dy = self.find_side_point_dx_dy(0, 3, self.from_center_to_left_side_pix)\n if self.center.x <= self.direction.x:\n if self.center.y <= self.direction.y:\n self.left_side = Point(x + left_dx, y - left_dy)\n else:\n self.left_side = Point(x - left_dx, y - left_dy)\n else:\n if self.center.y <= self.direction.y:\n self.left_side = Point(x + left_dx, y + left_dy)\n else:\n self.left_side = Point(x - left_dx, y + left_dy)\n\n right_dx, right_dy = self.find_side_point_dx_dy(1, 2, self.from_center_to_right_side_pix)\n if self.center.x <= self.direction.x:\n if self.center.y <= self.direction.y:\n self.right_side = Point(x - right_dx, y + right_dy)\n else:\n self.right_side = Point(x + right_dx, y + right_dy)\n else:\n if self.center.y <= self.direction.y:\n self.right_side = Point(x - right_dx, y - right_dy)\n else:\n self.right_side = Point(x + right_dx, y - right_dy)\n\n def find_side_point_dx_dy(self, corner1_num, corner2_num, distance):\n axis_point = get_line_cntr(self.corners[corner1_num], self.corners[corner2_num])\n if axis_point.x >= self.center.x:\n delta = 10\n else:\n delta = -10\n angle = get_angle_by_3_points(axis_point, Point(self.center.x + delta, self.center.y), self.center)\n dx = cos(angle) * distance\n dy = sin(angle) * distance\n return dx, dy","sub_path":"mars/src/fields_objects/Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"10834173","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 8 12:59:32 2018\n\n@author: ich\n\"\"\"\n\nimport os\nimport numpy as np\nimport Leap\nimport struct\nimport ctypes\n\nroot = os.getcwd()\n\norig_data_path = root + \"/RecordedData\"\nproc_data_path = root + \"/ProcessedData\"\n\ngestures = os.listdir(orig_data_path)\npos = []\n\ndef main():\n controller = Leap.Controller()\n for i in range(len(gestures)):\n orig_gest_path = os.path.join(orig_data_path, gestures[i])\n proc_gest_path = os.path.join(proc_data_path, gestures[i])\n \n if not os.path.exists(proc_gest_path):\n os.mkdir(proc_gest_path)\n \n users = os.listdir(orig_gest_path)\n repetition = 0\n \n for j in range(len(users)):\n 
orig_user_path = os.path.join(orig_gest_path, users[j])\n \n # read pressed.txt\n pressed_file = open(os.path.join(orig_user_path,\"pressed.txt\"), \"r\") \n pressed = pressed_file.readlines()\n pressed_file.close()\n \n # find changes from 0 to 1\n changes = np.where(np.roll(pressed,1)!=pressed)[0]\n index = np.arange(0, len(changes), 2)\n \n # read trajectories\n traj_file = os.path.join(orig_user_path,\"savedframes.data\")\n with open(traj_file, \"rb\") as data_file:\n next_block_size = data_file.read(4)\n while next_block_size:\n size = struct.unpack('i', next_block_size)[0]\n data = data_file.read(size)\n leap_byte_array = Leap.byte_array(size)\n address = leap_byte_array.cast().__long__()\n ctypes.memmove(address, data, size)\n \n frame = Leap.Frame()\n frame.deserialize((leap_byte_array, size))\n next_block_size = data_file.read(4)\n \n for hand in frame.hands:\n for finger in hand.fingers:\n if finger.type == 1:\n pos.append(finger.tip_position)\n \n # copy frames with pressed 1 on each change to a different directory\n for l in index:\n \n if l).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nfrom openerp import models, fields, SUPERUSER_ID as SI\n\nclass HarvestFix(models.TransientModel):\n\t_name = 'harvest.fix'\n\n\taction = fields.Char('Action', size=64)\n\t\n\tdef execute_action(self, cr, uid, ids, context=None):\n\t\tres = False\n\t\tobj = self.browse(cr, uid, ids[0], context)\n\t\tif hasattr(self, obj.action):\n\t\t\tres = getattr(self, obj.action)(cr, uid, context)\n\t\treturn res\n\t\n\tdef all(self, cr, uid, context=None):\n\t\tself.fix_harvest_field_relations(cr, uid, context=context)\n\t\tself.fix_harvest_laborers_relations(cr, uid, context=context)\n\t\tself.fix_harvest_costs_relations(cr, uid, context=context)\n\n \n\tdef fix_harvest_field_relations(self, cr, uid, context=None):\n\t\tfield_ids = self.pool.get('project.project').search(cr, uid, [], context=context)\n\t\tfor obj in self.pool.get('project.project').browse(cr, uid, field_ids):\n\t\t\tif not obj.budget_id.id:\n\t\t\t\tbudget_id = self.pool.get('crossovered.budget').create(cr, uid, {'code':obj.name,'name':obj.name, 'date_from':datetime.now().date(), 'date_to':datetime.now().date()})\n\t\t\t\tbudget = self.pool.get('crossovered.budget').browse(cr, uid, [budget_id])[0]\n\t\t\t\tbudget.update({'state': 'confirm'})\n\t\t\t\tobj.update({'budget_id':budget_id})\n\t\t\t\t\n\t\t\tif not obj.stock_location_id.id:\n\t\t\t\tstock_location = self.pool.get('stock.location').create(cr, uid, {'name':obj.name, 'usage':'production', 'partner_id':uid})\n\t\t\t\tobj.update({'stock_location_id':stock_location})\n\t\t\t\t\n\tdef fix_harvest_laborers_relations(self, cr, uid, context=None):\n\t\tlaborer_ids = self.pool.get('harvest.laborer').search(cr, uid, [], context=context)\n\t\tfor obj in self.pool.get('harvest.laborer').browse(cr, uid, laborer_ids):\n\t\t\tif 
not obj.uom_id:\n\t\t\t\tuom_id = self.pool.get('harvest.uom').search(cr, uid, [('name', '=', 'Hour')])[0]\n\t\t\t\tobj.update({'uom_id':uom_id})\n\t\t\n\tdef fix_harvest_costs_relations(self, cr, uid, context=None):\n\t\tlaborer_cost_ids = self.pool.get('labor.cost').search(cr, uid, [], context=context)\n\t\tfor obj in self.pool.get('labor.cost').browse(cr, uid, laborer_cost_ids):\n\t\t\tif not obj.laborer:\n\t\t\t\tcr.execute('SELECT name FROM labor_cost WHERE id=%d;'%obj.cost_id)\n\t\t\t\tname = cr.fetchone()\n\t\t\t\tif isinstance(name, int):\n\t\t\t\t\tobj.update({'laborer':name})\n\t\t\t\telif isinstance(name, str):\n\t\t\t\t\tlaborer = self.pool.get('harvest.laborer').search(cr, uid, [('name', '=', name)])[0]\n\t\t\t\t\tobj.update({'laborer':laborer})\n\t\t\t\telif isinstance(name, tuple):\n\t\t\t\t\tobj.update({'laborer':name[0]})\n\t\t\t\t\n\t\t\t\t\n","sub_path":"harvest/fix.py","file_name":"fix.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"419757204","text":"\n# coding: utf-8\n\n# In[1]:\n\n#Imports\n\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n#remove for .py file!\nsns.set_style('whitegrid')\n#remove for .py file!\nget_ipython().magic('matplotlib inline')\n\n#requests module, great API for grabbing data from the web\nimport requests\n\nfrom io import StringIO\n\n\n# In[2]:\n\n#Questions\n\n#1. Who was being polled and what was their party affiliation\n#2. Did the poll results favor Romney or Obama\n#3. How do undecided voters effect the poll?\n#4. Can we account for the undecided voters\n#5. How did voter sentiment change over time?\n#6. Can we see an effect in the polls from the debates?\n\n\n# In[3]:\n\n#url link for poll data in csv\nurl = 'https://elections.huffingtonpost.com/'\nurl2 = 'pollster/2012-general-election-romney-vs-obama.csv'\n#use requests method .get() and then the .text() method on url\nsource = requests.get(url+url2).text\n\n#\npoll_data = StringIO(source)\n\n\n# In[4]:\n\n#now that we have data, turn it into a DataFrame!\n\npoll_df = pd.read_csv(poll_data)\n\n\n# In[5]:\n\npoll_df.info()\n\n\n# In[6]:\n\n\n#let's get an idea of the affiliation of these\n#various polls\nsns.factorplot('Affiliation', data=poll_df, kind='count')\n\n\n# In[7]:\n\nsns.factorplot('Affiliation', data=poll_df, hue='Population',\n kind='count')\n\n\n# In[8]:\n\n# made an average object\n# take averae of all values in poll_df\navg = pd.DataFrame(poll_df.mean())\n\n# Drop the number of observations column\navg.drop('Number of Observations', axis=0, inplace=True)\n\n\n# In[9]:\n\navg.head()\n\n\n# In[10]:\n\n# made an std object that takes standard deviation\n# of all the values in poll_df\nstd = pd.DataFrame(poll_df.std())\n\n# Drops that silly number of observations row\nstd.drop('Number of Observations', axis=0, inplace=True)\n\n\n# In[11]:\n\n#now we can plot\navg.plot(yerr=std, kind='bar', legend =False)\n\n\n# In[12]:\n\n#uh oh let's drop those other funky columns\n\n\n# In[13]:\n\n\navg.drop('Question Text', axis=0, inplace=True)\navg.drop('Question Iteration', axis=0, inplace=True)\n\n\n# In[14]:\n\n#now we can plot\navg.plot(yerr=std, kind='bar', legend =False)\n\n\n# In[15]:\n\n# looks like undecided (and other) are pretty significant\n# let's take a look at how we can determine the effect of\n# the undecided vote\npoll_avg = pd.concat([avg,std], axis=1)\npoll_avg.columns = 
['Average', 'STD']\npoll_avg\n\n\n# In[ ]:\n\n# Let's calculate the difference between Obama and Romney\n# How about favorage over time?\n\n\n# In[16]:\n\npoll_df.head()\n\n\n# In[18]:\n\n#we can use pandas plot method and use the end date as the x axis\n\npoll_df.plot(x='End Date',y=['Obama', 'Romney', 'Undecided'], linestyle='', marker='o')\n\n\n# In[19]:\n\n# As we get closer to election day, there are fewer undecided voters and Obama and Romney seem to converge\n\n\n# In[20]:\n\nfrom datetime import datetime\n\n\n# In[21]:\n\npoll_df['Difference'] = (poll_df['Obama']-poll_df['Romney'])/100\n\npoll_df.head()\n\n\n# In[22]:\n\n#Positive Difference? Leaning towards Obama\n#Negative Difference? Leaning towards Romney\n\n\n\n# In[24]:\n\npoll_df = poll_df.groupby(['Start Date'], \n #as index=False allows us to keep the index as 0,1,2,3, etc. instead of the start dates\n as_index = False).mean()\n #the mean() method simply gives us the mean for each of the numeric column entries for each start date\n\n\n# In[25]:\n\npoll_df.plot('Start Date', 'Difference', figsize=(12,4), marker='o', linestyle='-', color='purple')\n\n\n# In[26]:\n\n#Why were Romney polls so strong in September of 2011 and in November of 2011???\n\n\n# In[31]:\n\n#Ok, let's look at the effect of debates\nrow_in = 0\nxlimit = []\n#for rows in poll_df.iterrows():\n# start = poll_df.get_value(row_in, 'Start Date')\n# start = datetime(int(start))\n# if start.year == 2012:\n# if start.month == 10:\n# xlimit.append([row_in])\n# row_in+=1\n \nfor date in poll_df['Start Date']:\n if date[0:7] == '2012-10':\n xlimit.append(row_in)\n row_in+=1\n else:\n row_in+=1\nprint(min(xlimit))\nprint(max(xlimit))\n \n\n\n# In[34]:\n\npoll_df.plot('Start Date', 'Difference', figsize=(12,4), marker='o', linestyle='-', color='purple', \n #now we put our xlimits as the index values we found above for the month of October!\n xlim=(329,356))\n\n#Now let's add some v-lines for the debate days\n\n#Oct 3rd\nplt.axvline(x=329+2,linewidth=4, color='grey')\n#Oct 11th\nplt.axvline(x=329+10,linewidth=4, color='grey')\n#Oct 22nd\nplt.axvline(x=329+21,linewidth=4, color='grey')\n\n\n# In[35]:\n\n# Great, now we added markers for the debate days!\n\n\n# In[ ]:\n\n\n\n","sub_path":"Election_Analysis_2.20.17.py","file_name":"Election_Analysis_2.20.17.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"331040044","text":"class Pessoa:\n def _setNome(self,nome):\n print(\"Registrando nome.\")\n self._nome = nome\n def _getNome(self):\n print(\"Recuperando nome.\")\n return self._nome\n def _delNome(self):\n print(\"Apagando o nome\")\n del self._nome\n nome = property(_getNome,_setNome,_delNome,\"Propriedade Nome \") \np = Pessoa()\np.nome = \"Raimundo dos Santos Pereira\"\nprint(\"Pessoa = \",p.nome)\ndel p.nome","sub_path":"all-gists/8858e3dc32914d548474e0e85ccee7f5/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"174430540","text":"import selenium\r\nfrom selenium import webdriver\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport random\r\nfrom settings import *\r\nfrom Currency_database import Currency_database\r\n\r\n\r\nclass Currency():\r\n def __init__(self, settings, source_url):\r\n \r\n self.db_host = settings[\"db_host\"]\r\n self.db_user = settings[\"db_user\"]\r\n self.db_password = 
settings[\"db_password\"]\r\n self.db_port = settings[\"db_port\"]\r\n self.db_use_unicode = settings[\"db_use_unicode\"]\r\n self.charset = settings[\"charset\"]\r\n self.db_name = settings[\"db_name\"]\r\n self.source_url = source_url\r\n self.currency_instance = Currency_database(self.db_user,self.db_password,self.db_name, self.db_host,self.db_port,self.db_use_unicode,self.charset)\r\n self.opener = urllib.request.build_opener()\r\n self.opener.addheaders = [('User-Agent', 'Mozilla/5.0')] \r\n self.Updated_time = self.get_updated_time(self.source_url)\r\n self.Collection_time = self.get_collection_time()\r\n\r\n def convert_to_number(self, enumString):\r\n empty = ''\r\n if 'e-' in enumString:\r\n return 0\r\n else:\r\n for elem in enumString:\r\n try:\r\n int(elem)\r\n empty += elem\r\n except:\r\n if elem in ['.', '-']:\r\n empty += elem\r\n if empty == '':\r\n return None\r\n else:\r\n if '.' in empty:\r\n return float(empty)\r\n else:\r\n return int(empty)\r\n\r\n def return_content(self, rawStringSet):\r\n newStringSet = [rawString.get_text().strip() for rawString in rawStringSet]\r\n return newStringSet\r\n\r\n def get_updated_time(self, webpage):\r\n response = self.opener.open(webpage)\r\n ac_page = BeautifulSoup(response, \"html.parser\")\r\n rows = ac_page.findAll(\"div\", {\"class\":\"col-xs-12\"})\r\n for row in rows:\r\n if row.find(\"p\", {\"class\":\"small\"}):\r\n x = row.p.get_text()\r\n updated_time = x.replace(\"Last updated: \", \"\").replace(\",\", \"\").replace(\" UTC\", '')\r\n time_struct = time.strptime(updated_time, '%b %d %Y %I:%M %p')\r\n time_datetime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_struct)\r\n return time_datetime\r\n\r\n def get_collection_time(self):\r\n now = time.localtime(time.time())\r\n collection_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", now)\r\n return collection_time\r\n\r\n #store all currencies\r\n def store_all_currencies(self):\r\n response = self.opener.open(self.source_url)\r\n ac_page = BeautifulSoup(response, \"html.parser\")\r\n rows = ac_page.find(\"table\", id=\"currencies-all\").tbody.findAll(\"tr\")\r\n for row in rows:\r\n tds = row.findAll('td')\r\n tdsNew = self.return_content(tds)\r\n Ranking = int(tdsNew[0])\r\n Name = tdsNew[1]\r\n Symbol = tdsNew[2]\r\n Market_cap = self.convert_to_number(tdsNew[3])\r\n Price = self.convert_to_number(tdsNew[4])\r\n Circulating_supply = self.convert_to_number(tdsNew[5])\r\n Volume_24h = self.convert_to_number(tdsNew[6])\r\n Percent_1h = self.convert_to_number(tdsNew[7])\r\n Percent_24h = self.convert_to_number(tdsNew[8])\r\n Percent_7d = self.convert_to_number(tdsNew[9])\r\n \r\n currency_info = (Ranking, Name, Symbol, Market_cap, Price, Circulating_supply, Volume_24h, Percent_1h, Percent_24h, Percent_7d, self.Updated_time, self.Collection_time)\r\n try:\r\n self.currency_instance.insert_data_into_All_Currencies(currency_info)\r\n print(\"Currency information stored successfully.\")\r\n except:\r\n print(\"Unable to store currency information.\")\r\n \r\n\r\n #get all volume headers\r\n def get_all_volume_headers(self):\r\n response = self.opener.open(self.source_url)\r\n ac_page = BeautifulSoup(response, \"html.parser\")\r\n\r\n urls = []\r\n volume_headers = ac_page.findAll(\"h3\", {\"class\":\"volume-header\"})\r\n for volume_header in volume_headers:\r\n url = volume_header.a.attrs['href']\r\n urls += ['https://coinmarketcap.com' + url]\r\n return urls\r\n\r\n #click \"Market\"\r\n def click_market(self, webpage):\r\n self.dr = 
webdriver.PhantomJS(executable_path='C:\\\\Users\\\\win10\\\\Downloads\\\\phantomjs-2.1.1-windows\\\\phantomjs-2.1.1-windows\\\\bin\\\\phantomjs.exe')\r\n self.dr.get(webpage)\r\n self.btn = self.dr.find_element_by_css_selector('a[href=\"#markets\"]')\r\n self.btn.click()\r\n return self.dr.page_source\r\n\r\n\r\n #store 24HVR by exchange\r\n def store_24HVR_by_exchange(self):\r\n\r\n volume_headers_urls_market = self.get_all_volume_headers()\r\n\r\n for url in volume_headers_urls_market:\r\n\r\n response = self.opener.open(url)\r\n bx_page = BeautifulSoup(response, \"html.parser\")\r\n currencies = bx_page.find(\"table\", {\"class\":\"table no-border table-condensed\"}).findAll(\"tr\")\r\n currencies = currencies[1:len(currencies)]\r\n for currency in currencies:\r\n tds = currency.findAll('td')\r\n tdsNew = self.return_content(tds)\r\n Market = bx_page.find(\"div\", {\"class\":\"col-xs-4\"}).find(\"h1\", {\"class\":\"text-large\"}).get_text().strip()\r\n Market_ranking = volume_headers_urls_market.index(url) + 1\r\n Ranking = int(tdsNew[0])\r\n Currency = tdsNew[1]\r\n Pair = tdsNew[2]\r\n Volume_24h = self.convert_to_number(tdsNew[3])\r\n Price = self.convert_to_number(tdsNew[4])\r\n Volume_percent = self.convert_to_number(tdsNew[5])\r\n \r\n exchange_info = (Market, Market_ranking, Ranking, Currency, Pair, Volume_24h, Price, Volume_percent, self.Updated_time, self.Collection_time)\r\n \r\n try:\r\n self.currency_instance.insert_data_into_24HVR_by_Exchange(exchange_info)\r\n print(\"Currency (by exchange) stored successfully.\")\r\n except:\r\n print(\"Unable to store currency (by exchange).\") \r\n\r\n\r\n #store 24HVR by currency \r\n def store_24HVR_by_currency(self):\r\n \r\n volume_headers_urls_currency = self.get_all_volume_headers()\r\n \r\n for url in volume_headers_urls_currency:\r\n\r\n marketPageSource = self.click_market(url)\r\n bc_page = BeautifulSoup(marketPageSource, \"html.parser\")\r\n markets = bc_page.find(\"table\", id=\"markets-table\").findAll(\"tr\")\r\n \r\n for market in markets[1:len(markets)]: \r\n\r\n tds = market.findAll('td')\r\n tdsNew = self.return_content(tds)\r\n CurrencyR = bc_page.find(\"div\", {\"class\":\"col-xs-6 col-sm-4 col-md-4\"}).get_text()\r\n Currency_symbol = bc_page.find(\"div\", {\"class\":\"col-xs-6 col-sm-4 col-md-4\"}).small.get_text()\r\n \r\n Currency = CurrencyR.replace(Currency_symbol, \"\").strip()\r\n Currency_ranking = volume_headers_urls_currency.index(url) + 1\r\n Ranking = int(tdsNew[0])\r\n Source = tdsNew[1]\r\n Pair = tdsNew[2]\r\n Volume_24h = self.convert_to_number(tdsNew[3])\r\n Price = self.convert_to_number(tdsNew[4])\r\n Volume_percent = self.convert_to_number(tdsNew[5])\r\n \r\n currency_info = (Currency, Currency_ranking, Ranking, Source, Pair, Volume_24h, Price, Volume_percent, self.Updated_time, self.Collection_time)\r\n \r\n try:\r\n self.currency_instance.insert_data_into_24HVR_by_Currency(currency_info)\r\n print(\"Market (by currency) stored successfully.\")\r\n except:\r\n print(\"Unable to store Market (by currency).\")\r\n\r\n\r\nac_url = \"https://coinmarketcap.com/all/views/all/\"\r\nbx_url = \"https://coinmarketcap.com/exchanges/volume/24-hour/all/\"\r\nbc_url = \"https://coinmarketcap.com/currencies/volume/24-hour/\"\r\n\r\n\r\ndef execute_for_time_period(seconds):\r\n while True:\r\n ac = Currency(settings, ac_url)\r\n ac.store_all_currencies()\r\n\r\n time.sleep(20)\r\n\r\n bx = Currency(settings, bx_url)\r\n bx.store_24HVR_by_exchange()\r\n\r\n time.sleep(20)\r\n\r\n bc = Currency(settings, 
bc_url)\r\n bc.store_24HVR_by_currency()\r\n\r\n time.sleep(seconds)\r\n\r\n#execute_for_time_period()\r\n\r\n","sub_path":"Coin_Market_Cap_web_scraping/Currency.py","file_name":"Currency.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"241469828","text":"from django.shortcuts import render, redirect\nfrom django.template import loader, Context\nfrom django.contrib.auth.models import User,auth\nfrom django.contrib.sessions.models import Session\nfrom django.template import RequestContext\nfrom django.urls import reverse\nimport sys\nimport json\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.hashers import make_password\nfrom file.models import PackageOne, Packaget, Packageth, Packagef, schoolatlincharges, teachernominatedforatl, aes, \\\n student_login_detail, AI_Questions, table, venreg1, moc, Atl_login_detail,MyLogin\n\ndef mylogout(request):\n request.session['myuser']=''\n print(request.session['myuser'])\n return redirect('/choose_school')\n\ndef mylogin(request):\n myuser='none'\n if request.method == 'POST':\n userid=request.POST.get('userid')\n password=request.POST.get('password')\n try:\n if MyLogin.objects.filter(username=userid).exists():\n db=MyLogin.objects.get(username=userid)\n if db.username == userid and db.password == password:\n myuser=db.firstname\n request.session['myuser']=myuser\n print(type(myuser))\n return render(request,'schoolreg.html',{'myuser':myuser})\n return HttpResponse(\"Invalid Credentials\")\n except Exception as e:\n print(e)\n HttpResponse(\"No value Found\") \n return render(request,'MyLogin.html')\n \n\n# labregistration page\ndef Package1(request):\n myuser=request.session['myuser']\n if request.method == 'POST':\n sel = request.POST.get('sel')\n other = request.POST.get('other')\n scope = request.POST.get('scope')\n days = request.POST.get('days')\n who = request.POST.get('who')\n if sel is not None or sel != \"\" or other is not None or other != \"\" or scope is not None or scope != \"\" or days is not None or days != \"\" or who is not None or who != \"\":\n pc1 = PackageOne(Electroniclist=sel, otherofferingsPleasespecify=other, Scopeoftraining=scope,\n Noofdaysoftraining=days, Whoattendedtraining=who)\n pc1.save()\n # print(other,scope,days,who)\n return HttpResponseRedirect(\"/Package1\")\n return HttpResponse('invalid')\n return render(request, 'regi.html',{'myuser':myuser})\n\n\ndef Package2(request):\n myuser=request.session['myuser']\n if request.method == 'POST':\n cel = request.POST.get('cel')\n other = request.POST.get('other')\n scope = request.POST.get('scope')\n days = request.POST.get('days')\n who = request.POST.get('who')\n if cel is not None or cel != \"\" or other is not None or other != \"\" or scope is not None or scope != \"\" or days is not None or days != \"\" or who is not None or who != \"\":\n pc2 = Packaget(Printer=cel, OtherOfferingsPleasespecify=other, Scopeoftraining=scope,\n Noofdaysoftraining=days, Whoattendedtraining=who)\n pc2.save()\n return HttpResponseRedirect('/Package2')\n return HttpResponse('invalid')\n return render(request, 'package2.html',{'myuser':myuser})\n\n\ndef Package3(request):\n myuser=request.session['myuser']\n if request.method == 'POST':\n selv = request.POST.get('selv')\n other = request.POST.get('other')\n scope = 
request.POST.get('scope')\n days = request.POST.get('days')\n who = request.POST.get('who')\n if selv is not None or selv != \"\" or other is not None or other != \"\" or scope is not None or scope != \"\" or days is not None or days != \"\" or who is not None or who != \"\":\n pc3 = Packageth(Field=selv, Otherofspecify=other, Scopeoftraining=scope, Noofdaysoftraining=days,\n Whoattendedtraining=who)\n pc3.save()\n return HttpResponseRedirect('/Package3')\n return HttpResponse('invalid')\n return render(request, 'package3.html',{'myuser':myuser})\n\n\ndef Package4(request):\n myuser=request.session['myuser']\n if request.method == 'POST':\n self = request.POST.get('self')\n other = request.POST.get('other')\n scope = request.POST.get('scope')\n days = request.POST.get('days')\n who = request.POST.get('who')\n print(self, other, scope, days, who)\n if self is not None or self != \"\" or other is not None or other != \"\" or scope is not None or scope != \"\" or days is not None or days != \"\" or who is not None or who != \"\":\n pc4 = Packagef(Safety=self, OtherOfferingsPleasespecify=other, Scopeoftraining=scope,\n Noofdaysoftraining=days, Whoattendedtraining=who)\n pc4.save()\n return HttpResponseRedirect('/Package4')\n\n return HttpResponse('invalid')\n return render(request, 'package4.html',{'myuser':myuser})\n\n\ndef userlogin(request):\n\n if request.method == \"POST\":\n ValMobilenumber = request.POST.get(\"Mobilenumber\")\n # studentid = request.POST.get(\"studentid\")\n # username = request.POST.get(\"username\")\n Valpassword = request.POST.get(\"password\")\n # username=getattr(object,'username')\n list = student_login_detail.objects.get(Mobilenumber=ValMobilenumber)\n\n username = list.username\n request.session['username'] = username\n print(username)\n # username = request.session['username']\n print(ValMobilenumber, Valpassword, username)\n try:\n\n if student_login_detail.objects.filter(Mobilenumber=ValMobilenumber).exists():\n db = student_login_detail.objects.get(Mobilenumber=ValMobilenumber)\n if db.Mobilenumber == ValMobilenumber:\n if db.Mobilenumber == ValMobilenumber and db.password == Valpassword:\n print(\"Hii\")\n return HttpResponseRedirect(\"/schoolreg\", {\"username\": username})\n return HttpResponse(\"Invalid Credentials\")\n elif student_login_detail.objects.filter(email=ValMobilenumber).exists():\n db = student_login_detail.objects.get(email=ValMobilenumber)\n if db.email == ValMobilenumber and db.password == Valpassword:\n return HttpResponseRedirect(\"/userlogin\", {\"username\": username})\n return HttpResponse(\"Invalid Credentials\")\n except Exception as e:\n print(e)\n return HttpResponse(\"No Value Found\")\n return render(request, \"index1.html\")\n\ndef atllogin(request):\n \n if request.method == \"GET\":\n ValMobilenumber = request.GET.get(\"Mobilenumber\")\n Valpassword = request.GET.get(\"password\")\n \n try:\n\n if Atl_login_detail.objects.filter(Mobilenumber=ValMobilenumber).exists():\n \n db = Atl_login_detail.objects.get(Mobilenumber=ValMobilenumber)\n global loguser\n loguser=db.Mobilenumber\n request.session[loguser]=loguser\n if db.Mobilenumber == ValMobilenumber and db.password == Valpassword:\n print(\"Hello\")\n print(loguser)\n return HttpResponseRedirect(\"/schoolreg\", {\"loguser\": loguser})\n return HttpResponse(\"Invalid Credentials\")\n # elif Atl_login_detail.objects.filter(email=ValMobilenumber).exists():\n # print(\"Yes Email\")\n # db = Atl_login_detail.objects.get(email=ValMobilenumber)\n # if db.email == ValMobilenumber 
and db.password == Valpassword:\n # print(\"Success\")\n # return HttpResponseRedirect(\"/schoolreg\")\n #return HttpResponse(\"Invalid\")\n except Exception as e:\n print(e)\n return HttpResponse(\"No Value Found\")\n return render(request,'schoolreg.html')\n\ndef indexpage(request):\n return render(request, \"index.html\" )\n\n'''def userlogin(request):\n print(\"in the userlogin\")\n return render(request, \"userlogin.html\" )'''\n\ndef choose_school(request):\n return render(request,\"chooseschoolreg.html\")\n\ndef schoolreg(request):\n request.session[myuser]=myuser\n return render(request, \"schoolreg.html\",{'myuser':myuser})\n\n #def schoolreg(request):\n # return render(request, \"index2.html\")\n\n #def schoolreg(request):\n #return render(request, \"index3.html\")\n\n\ndef atl_login(request):\n return render(request, \"schoolreg.html\")\n\n\ndef vendorreg(request):\n myuser=request.session['myuser']\n return render(request, \"vendor.html\",{'myuser':myuser})\n\n\ndef mentorofchange(request):\n myuser=request.session['myuser']\n return render(request, \"mentorofchange.html\",{'myuser':myuser})\n\n\ndef school_details_atl_incharge(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n school_name = request.POST.get(\"school_name\")\n school_reg_id = request.POST.get(\"school_reg_id\")\n country = request.POST.get(\"country\")\n state = request.POST.get(\"state\")\n district = request.POST.get(\"district\")\n pincode = request.POST.get(\"pincode\")\n phone = request.POST.get(\"phone\")\n Email = request.POST.get(\"Email\")\n affilation_body = request.POST.get(\"affilation_body\")\n print(school_name, school_reg_id, country, state, district, pincode, phone, Email, affilation_body)\n try:\n\n ram = school_details_atl_incharge(school_name=school_name, school_reg_id=school_reg_id, country=country,\n state=state, district=district, pincode=pincode, phone=phone, Email=Email,\n affiliation_body=affilation_body)\n print(school_name, school_reg_id, country, state, district, pincode, phone, Email, affilation_body)\n ram.save()\n return HttpResponseRedirect(\"/school_details_atl_incharge/\")\n except Exception as e:\n print(e)\n\n return HttpResponse(\"Error\")\n return render(request, \"index1.html\",{'myuser':myuser})\n\n\ndef schoolatlinchargeweb(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n valpersonname = request.POST.get(\"personname\")\n valpersonID = request.POST.get(\"personID\")\n valmobile = request.POST.get(\"mobile\")\n valemail = request.POST.get(\"email\")\n print(valpersonname, valpersonID, valmobile, valemail)\n try:\n print(\"Try Sction\")\n val = schoolatlincharges(personname=valpersonname, personID=valpersonID, mobile=valmobile, email=valemail)\n val.save()\n return HttpResponseRedirect(\"/schoolatlinchargeweb/\")\n except Exception as e:\n print(e)\n return HttpResponse(\"invaliduser\")\n return render(request, \"index2.html\",{'myuser':myuser})\n\n\ndef AESWeb(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n option1 = request.POST.get(\"option1\")\n date1 = request.POST.get(\"date1\")\n name = request.POST.get(\"name\")\n number = request.POST.get(\"number\")\n email = request.POST.get(\"email\")\n option2 = request.POST.get(\"option2\")\n date2 = request.POST.get(\"date2\")\n option3 = request.POST.get(\"option3\")\n date3 = request.POST.get(\"date3\")\n print(option1, date1, name, number, email, option2, date2, option3, date3)\n try:\n val = aes(option1=option1, date1=date1, 
name=name, number=number, email=email, option2=option2, date2=date2,\n option3=option3, date3=date3)\n val.save()\n return HttpResponseRedirect(\"/AESWeb/\")\n except Exception as e:\n return HttpResponse(\"Invalid......\")\n return render(request, \"index3.html\",{'myuser':myuser})\n\n\ndef TNFA(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n vname = request.POST.get(\"name\")\n vphone = request.POST.get(\"number\")\n vemailid = request.POST.get(\"email\")\n try:\n val = teachernominatedforatl(name=vname, phone=vphone, emailid=vemailid)\n val.save()\n return HttpResponseRedirect(\"/TNFA/\")\n except Exception as e:\n return HttpResponse(\"invaliduserpass\")\n return render(request, \"index4.html\",{'myuser':myuser})\n\n\ndef question(request):\n ques1 = AI_Questions.objects.get(id=1)\n ques2 = AI_Questions.objects.get(id=2)\n ques3 = AI_Questions.objects.get(id=3)\n ques4 = AI_Questions.objects.get(id=4)\n ques5 = AI_Questions.objects.get(id=5)\n context = {\n 'obj1': ques1,\n 'obj2': ques2,\n 'obj3': ques3,\n 'obj4': ques4,\n 'obj5': ques5,\n }\n return render(request, 'quiz.html', {'context': context})\n\n\ndef data(request):\n record = table.objects.get(id=2)\n content = {\n 'object': record\n }\n return render(request, 'Dashboard.html', {'content': content})\n\n\ndef venregweb(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n GEM = request.POST.get(\"order\")\n GEMordernumber = request.POST.get(\"gemorderno\")\n PurchaseOrderNo = request.POST.get(\"oredrno\")\n Enrollmentforwhichpackage = request.POST.get(\"package\")\n CompanyName = request.POST.get(\"comname\")\n CompanyIncorporationID = request.POST.get(\"incorporationID\")\n Address = request.POST.get(\"address\")\n ContactNumber = request.POST.get(\"contact\")\n EmailID = request.POST.get(\"email\")\n Name = request.POST.get(\"name\")\n ContactNumber1 = request.POST.get(\"contactno\")\n EmailID1 = request.POST.get(\"email1\")\n Doyouwanttoaddmoreteacheardetails = request.POST.get(\"more\")\n print(GEM,GEMordernumber,PurchaseOrderNo,Enrollmentforwhichpackage,CompanyName,CompanyIncorporationID,Address,ContactNumber,EmailID,Name,ContactNumber1,EmailID1,Doyouwanttoaddmoreteacheardetails)\n try:\n val = venreg1(GEM=GEM, GEMordernumber=GEMordernumber, PurchaseOrderNo=PurchaseOrderNo,\n Enrollmentforwhichpackage=Enrollmentforwhichpackage, CompanyName=CompanyName,\n CompanyIncorporationID=CompanyIncorporationID, Address=Address, ContactNumber=ContactNumber,\n EmailID=EmailID, Name=Name, ContactNumber1=ContactNumber1, EmailID1=EmailID1,\n Doyouwanttoaddmoreteacheardetails=Doyouwanttoaddmoreteacheardetails)\n val.save()\n\n return HttpResponseRedirect(\"/schoolreg/\")\n except Exception as e:\n return HttpResponse(\"Invalidreg\")\n return render(request, \"vendor.html\",{'myuser':myuser})\n\n\ndef mentorofchangeweb(request):\n myuser=request.session['myuser']\n if request.method == \"POST\":\n NameofMentor = request.POST.get(\"name\")\n MentorID = request.POST.get(\"mentorid\")\n WhoAppointed = request.POST.get(\"who\")\n Whichdatehegotappointed = request.POST.get(\"which\")\n EducationBackground = request.POST.get(\"educational\")\n ProfessionalBackground = request.POST.get(\"professional\")\n ContactDetails = request.POST.get(\"contact\")\n Address = request.POST.get(\"address\")\n Whichcompanycurrentlysheheisworking = request.POST.get(\"company\")\n Whichprofilesheheisworking = request.POST.get(\"profile\")\n Howmanydaysdoesshehevisitsschools = request.POST.get(\"days\")\n 
Specializationandskills = request.POST.get(\"skills\")\n print(NameofMentor, MentorID, WhoAppointed, Whichdatehegotappointed)\n val = moc(NameofMentor=NameofMentor, MentorID=MentorID, WhoAppointed=WhoAppointed,\n Whichdatehegotappointed=Whichdatehegotappointed, EducationBackground=EducationBackground,\n ProfessionalBackground=ProfessionalBackground, ContactDetails=ContactDetails, Address=Address,\n Whichcompanycurrentlysheheisworking=Whichcompanycurrentlysheheisworking,\n Whichprofilesheheisworking=Whichprofilesheheisworking,\n Howmanydaysdoesshehevisitsschools=Howmanydaysdoesshehevisitsschools,\n Specializationandskills=Specializationandskills)\n val.save()\n return render(request, \"mentorofchange.html\",{'myuser':myuser})\n","sub_path":"file/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"188740679","text":"import copy, os, sys\nfrom RootTools.core.Sample import Sample\nimport ROOT\n\ndef get_parser():\n import argparse\n argParser = argparse.ArgumentParser(description = \"Argument parser for samples file\")\n argParser.add_argument('--overwrite', action='store_true', help=\"Overwrite current entry in db?\")\n argParser.add_argument('--update', action='store_true', help=\"Update current entry in db?\")\n return argParser\n\nif __name__==\"__main__\":\n # Logging\n import nanoMET.tools.logger as logger\n logger = logger.get_logger(\"INFO\", logFile = None )\n import RootTools.core.logger as logger_rt\n logger_rt = logger_rt.get_logger(\"INFO\", logFile = None )\n options = get_parser().parse_args()\n ov = options.overwrite\n if options.update:\n ov = 'update'\nelse:\n import logging\n logger = logging.getLogger(__name__)\n ov = False\n\n# Redirector\nfrom nanoMET.tools.user import redirector_global as redirector\n\n# DB\nfrom nanoMET.tools.user import dbDir\ndbFile = dbDir+\"/samples/DB_Fall17_UL17_central_legacy.sql\"\n\nlogger.info(\"Using db file: %s\", dbFile)\n\n## DY\nDYJetsToLL_M50_LO = Sample.nanoAODfromDAS(\"DYJetsToLL_M50_LO\", \"/DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=2075.14*3)\n\nDY = [\n DYJetsToLL_M50_LO,\n]\n\n## ttbar\nTTLep_pow = Sample.nanoAODfromDAS(\"TTLep_pow\", \"/TTTo2L2Nu_TuneCP5_13TeV-powheg-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=831.762*((3*0.108)**2))\n\n## single top\nTToLeptons_sch_amcatnlo = Sample.nanoAODfromDAS(\"TToLeptons_sch_amcatnlo\", \"/ST_s-channel_4f_leptonDecays_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=(7.20+4.16)*0.108*3)\nT_tch_pow = Sample.nanoAODfromDAS(\"T_tch_pow\", \"/ST_t-channel_top_5f_InclusiveDecays_TuneCP5_13TeV-powheg-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=136.02) # inclusive sample #LOR COMM BEFORE WITHOUT InclusiveDecays\nTBar_tch_pow = Sample.nanoAODfromDAS(\"TBar_tch_pow\", \"/ST_t-channel_antitop_5f_InclusiveDecays_TuneCP5_13TeV-powheg-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, 
instance=\"global\", overwrite=ov, xSection=80.95) # inclusive sample #LOR COMM BEFORE WITHOUT InclusiveDecays\nT_tWch_ext = Sample.nanoAODfromDAS(\"T_tWch_ext\", \"/ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=35.85*(1-(1-3*0.108)**2))\nTBar_tWch_ext = Sample.nanoAODfromDAS(\"TBar_tWch_ext\", \"/ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIISummer19UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=35.85*(1-(1-3*0.108)**2))\n\ntop = [\n TTLep_pow,\n TToLeptons_sch_amcatnlo,\n T_tch_pow,\n TBar_tch_pow,\n T_tWch_ext,\n TBar_tWch_ext,\n]\n\n## di/multiboson\nWWToLNuQQ = Sample.nanoAODfromDAS(\"WWToLNuQQ\", \"/WWToLNuQQ_NNPDF31_TuneCP5_PSweights_13TeV-powheg-pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14_ext1-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=49.997)#LOR COMM! IT IS NOT AVAILABLE!!!!!!!\nWWTo1L1Nu2Q = Sample.nanoAODfromDAS(\"WWTo1L1Nu2Q\", \"/WWTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=49.997)#LOR COMM! IT IS NOT AVAILABLE!!!!!!!\n\nZZTo2L2Q = Sample.nanoAODfromDAS(\"ZZTo2L2Q\", \"/ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=3.28)#LOR COMM! IT IS NOT AVAILABLE!!!!!!!\n\nWZTo1L3Nu = Sample.nanoAODfromDAS(\"WZTo1L3Nu\", \"/WZTo1L3Nu_13TeV_amcatnloFXFX_madspin_pythia8_v2/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=(47.13)*(3*0.108)*(0.2) ) #LOR COMM! IT IS NOT AVAILABLE!!!!!!!\nWZTo1L1Nu2Q = Sample.nanoAODfromDAS(\"WZTo1L1Nu2Q\", \"/WZTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=10.71) #LOR COMM! IT IS NOT AVAILABLE!!!!!!!\nWZTo2L2Q = Sample.nanoAODfromDAS(\"WZTo2L2Q\", \"/WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=5.60) #LOR COMM! IT IS NOT AVAILABLE!!!!!!!\nWZTo3LNu = Sample.nanoAODfromDAS(\"WZTo3LNu\", \"/WZTo3LNu_13TeV-powheg-pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2_legacy_nano_v4-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=4.42965) #LOR COMM! 
IT IS NOT AVAILABLE!!!!!!!\n\nVVTo2L2Nu = Sample.nanoAODfromDAS(\"VVTo2L2Nu\", \"/VVTo2L2Nu_13TeV_amcatnloFXFX_madspin_pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=14.00) #LOR COMM! IT IS NOT AVAILABLE!!!!!!!\n\nWWW_4F = Sample.nanoAODfromDAS(\"WWW_4F\", \"/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer20UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=0.2086)#LOR COMM! ONLY RunIISummer20 IS AVAILABLE!!!!\nWWZ_4F = Sample.nanoAODfromDAS(\"WWZ_4F\", \"/WWZ_4F_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer20UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=0.1651)#LOR COMM! ONLY RunIISummer20 IS AVAILABLE!!!! \nWZZ = Sample.nanoAODfromDAS(\"WZZ\", \"/WZZ_TuneCP5_13TeV-amcatnlo-pythia8/schoef-crab_RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1_legacy_nano_v3-f82d502d908e8d321edd6873d261cf31/USER\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=0.05565)#LOR COMM! IT IS NOT AVAILABLE!!!!!!! \nZZZ = Sample.nanoAODfromDAS(\"ZZZ\", \"/ZZZ_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer20UL17NanoAODv2-106X_mc2017_realistic_v8-v1/NANOAODSIM\", dbFile=dbFile, redirector=redirector, instance=\"global\", overwrite=ov, xSection=0.01398) #LOR COMM! ONLY RunIISummer20 IS AVAILABLE!!!! \n\n\n\n\nboson = [\n WWToLNuQQ,\n WWTo1L1Nu2Q,\n ZZTo2L2Q,\n WZTo1L3Nu,\n WZTo1L1Nu2Q,\n WZTo2L2Q,\n WZTo3LNu,\n VVTo2L2Nu,\n WWW_4F,\n WWZ_4F,\n WZZ,\n ZZZ,\n]\n\nallSamples = DY + top + boson\n\nfor s in allSamples:\n s.isData = False\n\nfrom nanoMET.tools.AutoClass import AutoClass\nsamples = AutoClass( allSamples )\n","sub_path":"nanoAOD/python/Fall17_UL17_central_legacy_v1.py","file_name":"Fall17_UL17_central_legacy_v1.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"187220033","text":"from __future__ import print_function, unicode_literals, division\n\nimport numpy as np\n\nx_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]\ny_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]\nX, Y = np.meshgrid(x_coords, y_coords)\nprint(\"x's shape:\", X.shape)\nprint(X)\n\nprint(\"y's shape:\", Y.shape)\nprint(Y)\n\ndata = np.sin(X * Y / 40.5)\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfig = plt.figure(1, figsize=(7, 6))\n\nplt.imshow(data, cmap=cm.hot, interpolation=\"bicubic\")\nplt.show()\n","sub_path":"demo/numpy/14_vectorization_demo.py","file_name":"14_vectorization_demo.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"195944481","text":"#!/bin/python3.6\n# -*- coding: utf-8 -*-\n#\n# MIT License\n#\n# Copyright (c) 2018 Robert Gustafsson\n# Copyright (c) 2018 Andreas Lindhé\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the 
following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport os\nimport pathlib\nimport configparser\nimport re\nfrom pathlib import Path\n\ndef main(rel_path):\n res_path = os.getcwd() + '/' + rel_path + '/'\n res_file = res_path + \"summary.txt\"\n\n results = configparser.ConfigParser()\n files = os.listdir(res_path)\n for i in ['reader', 'writer']:\n res_file = res_path + i + \"_summary.txt\"\n regex = re.compile('.*' + i + '.*log')\n filtered_files = [x for x in files if regex.match(x)]\n avg = []\n avg_no_outliers = []\n if filtered_files:\n for f in filtered_files:\n results.read(res_path + f)\n avg.append(float(results['Average']['average']))\n avg_no_outliers.append(float(results['Average']['average_no_outliers']))\n test = results['Meta']['test']\n rounds = results['Meta']['rounds']\n outliers = results['Meta']['outliers']\n else:\n print(\"No results file found!\", file=sys.stderr)\n sys.exit()\n summary = sum(avg)/len(avg)\n summary_no_outliers = sum(avg_no_outliers)/len(avg_no_outliers)\n result = f\"Average from {len(avg)} {test} clients during {rounds} rounds:\\nAll: {summary}\\nRemoved {outliers} outliers: {summary_no_outliers}\"\n with open(res_file, 'a') as f:\n f.write(result)\n print(f\"Created summary file {res_file}\")\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n path = sys.argv[1]\n else:\n raise Exception(\"Missing argument: relative path\")\n main(path)\n","sub_path":"self-stabilizing-coded-atomic-storage/code/evaluation/planetlab/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"194143119","text":"from test_scores import *\n\ndef get_frame(scoring, frame):\n return filter( lambda x: x[0] == frame, scoring )\n\ndef next_two_roles(scoring, frame_no):\n next_frame = get_frame(scoring, frame_no + 1)\n if len(next_frame) < 2:\n return next_frame + get_frame(scoring, frame_no + 2)[:1]\n return next_frame\n\ndef is_strike(frame):\n return (len(frame) == 1) and (frame[0][2] == 10)\n\ndef is_spare(frame):\n return (len(frame)>1) and ((frame[0][2] + frame[1][2]) == 10)\n\ndef score(scoring, frame_no=1 ):\n frame = get_frame(scoring, frame_no)\n if len(frame) == 0 or frame_no > 10:\n return 0\n\n if is_strike(frame):\n bonus_rolls = map(lambda x: x[2], next_two_roles(scoring, frame_no))\n bonus = sum(bonus_rolls)\n frame_score = bonus + 10\n return score(scoring, frame_no+1 ) + frame_score\n\n elif is_spare(frame):\n next_frame = get_frame(scoring, frame_no+1)\n frame_score = 10 + next_frame[0][2]\n return score(scoring, frame_no+1 ) + frame_score\n\n else:\n frame_score = 0\n if len(frame) < 2:\n frame_score = frame[0][2]\n else:\n roll_1, roll_2 = frame\n frame_score = roll_1[2] + roll_2[2]\n return score(scoring, frame_no+1 ) + frame_score\n\n\nif __name__ == '__main__':\n assert(score(perfect_game) == 300)\n assert(score(score_a) == 
133)\n assert(score(score_b) == 28)\n assert(score(score_c) == 20)\n","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"403569495","text":"def main():\n list_x = [38, 27, 43, 3, 9, 82, 10]\n merge_sort(list_x, True)\n print('--------')\n print(list_x)\n\ndef merge_list(ls_a, ls_b):\n len_a = len(ls_a)\n len_b = len(ls_b)\n\n i, j = 0, 0\n ls_c = []\n \n # compare\n while i < len_a and j < len_b:\n # a less than b so get front \n if ls_a[i] < ls_b[j]:\n ls_c.append(ls_a[i])\n i += 1\n else:\n ls_c.append(ls_b[j])\n j += 1\n \n # enough value\n if i < len_a:\n ls_c.extend(ls_a[i:])\n if j < len_b:\n ls_c.extend(ls_b[j:])\n \n return ls_c\n\n\ndef merge_sort(list_x, show_step=False):\n\n # [[],[],[],[],[],......]\n for i in range (len(list_x)):\n list_x[i] = [list_x[i]]\n \n if show_step == True:\n print(list_x)\n \n ls_copy = [i for i in list_x]\n while len(list_x) != 1:\n list_x.clear()\n while len(ls_copy) != 0 :\n if len(ls_copy) == 1:\n list_x.append(ls_copy[0])\n ls_copy.clear()\n else:\n ls_1 = ls_copy.pop(0)\n ls_2 = ls_copy.pop(0)\n list_x.append(merge_list(ls_1, ls_2))\n \n ls_copy = [i for i in list_x]\n \n if show_step == True:\n print(list_x)\n \n # remove bracket out\n if len(list_x) == 1:\n list_x.extend(list_x[0])\n list_x.pop(0)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"204113/Lab06/merge_sort2.py","file_name":"merge_sort2.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56224843","text":"def outputFeatures(likelihoodRegionList, likelihoodSeqDiffList, outputFile):\n\t# Output the features to the output file\n\tif len(likelihoodRegionList) == 0:\n\t\t# There are no motifs with and without the sequence difference\n\t\toutputFile.write(\"0\\t0\\t0\\t0\\n\")\n\t\treturn\n\tif (0 in likelihoodRegionList) or (0 in likelihoodSeqDiffList):\n\t\t# There is a TF whose motif is added or removed\n\t\toutputFile.write(\"1\\t\")\n\telse:\n\t\toutputFile.write(\"0\\t\")\n\n\tmaxTuple = (max(likelihoodRegionList), max(likelihoodSeqDiffList))\n\tlikelihoodDiffGen = abs(maxTuple[0] - maxTuple[1])\n\tmaxMotifScore = max(maxTuple)\n\tmaxMotifTupleIndex = maxTuple.index(maxMotifScore)\n\toneWritten = False\n\n\tif maxMotifTupleIndex == 0:\n\t\t# The motif with the highest score is from the region without the sequence difference (or there is a tie)\n\t\tmaxMotifIndex = likelihoodRegionList.index(maxMotifScore)\n\t\tcurrentIndex = maxMotifIndex + 1\n\t\tlikelihoodDiffSpecific = abs(maxTuple[0] - likelihoodSeqDiffList[maxMotifIndex])\n\t\tif likelihoodSeqDiffList[maxMotifIndex] == 0:\n\t\t\t# A TF with the maximum motif score has no motif in the file with the sequence difference\n\t\t\toutputFile.write(\"1\\t\")\n\t\t\toneWritten = True\n\t\tif maxMotifScore in likelihoodRegionList[maxMotifIndex + 1:]:\n\t\t\t# There is a tie for the motif with the maximum score\n\t\t\tfor motifScore in likelihoodRegionList[maxMotifIndex + 1:]:\n\t\t\t\t# Iterate through the other TFs determine whether their motifs have the same score\n\t\t\t\tif motifScore == maxMotifScore:\n\t\t\t\t\t# The tied motif has been found\n\t\t\t\t\tif (likelihoodSeqDiffList[currentIndex] == 0) and (oneWritten == False):\n\t\t\t\t\t\t# A TF with the maximum motif score has no motif in the file with the sequence difference\n\t\t\t\t\t\toutputFile.write(\"1\\t\")\n\t\t\t\t\t\toneWritten 
= True\n\t\t\t\t\tif abs(motifScore - likelihoodSeqDiffList[currentIndex]) > likelihoodDiffSpecific:\n\t\t\t\t\t\t# A larger strongest motif difference has been found, so replace the current strongest motif difference\n\t\t\t\t\t\tlikelihoodDiffSpecific = abs(motifScore - likelihoodSeqDiffList[currentIndex])\n\t\t\t\tcurrentIndex = currentIndex + 1\n\t\tif maxMotifScore in likelihoodSeqDiffList:\n\t\t\t# There is a tie for the motif with the maximum likelihood score, where the tie occurs in the region with the sequence difference\n\t\t\tcurrentIndex = 0\n\t\t\tfor motifScore in likelihoodSeqDiffList:\n\t\t\t\t# Iterate through the TFs with motifs in the region with the sequence difference and determine whether their motifs have the same score\n\t\t\t\tif motifScore == maxMotifScore:\n\t\t\t\t\t# The tied motif has been found\n\t\t\t\t\tif (likelihoodRegionList[currentIndex] == 0) and (oneWritten == False):\n\t\t\t\t\t\t# A TF with the maximum motif score has no motif in the file with the sequence difference\n\t\t\t\t\t\toutputFile.write(\"1\\t\")\n\t\t\t\t\t\toneWritten = True\n\t\t\t\t\tif abs(motifScore - likelihoodRegionList[currentIndex]) > likelihoodDiffSpecific:\n\t\t\t\t\t\t# A larger strongest motif difference has been found, so replace the current strongest motif difference\n\t\t\t\t\t\tlikelihoodDiffSpecific = abs(motifScore - likelihoodRegionList[currentIndex])\n\t\t\t\tcurrentIndex = currentIndex + 1\n\t\tif oneWritten == False:\n\t\t\t# The motif with the largest score is present without the sequence difference\n\t\t\toutputFile.write(\"0\\t\")\n\t\toutputFile.write(str(likelihoodDiffGen) + \"\\t\" + str(likelihoodDiffSpecific) + \"\\n\")\n\n\telse:\n\t\t# The motif with the highest score is from the region with the sequence difference\n\t\tmaxMotifIndex = likelihoodSeqDiffList.index(maxMotifScore)\n\t\tlikelihoodDiffSpecific = abs(maxTuple[1] - likelihoodRegionList[maxMotifIndex])\n\t\tif likelihoodRegionList[maxMotifIndex] == 0:\n\t\t\t# A TF with the maximum motif score has no motif in the file with the sequence difference\n\t\t\toutputFile.write(\"1\\t\")\n\t\t\toneWritten = True\n\t\tif maxMotifScore in likelihoodSeqDiffList[maxMotifIndex + 1:]:\n\t\t\t# There is a tie for the motif with the maximum score\n\t\t\tcurrentIndex = maxMotifIndex + 1\n\t\t\tfor motifScore in likelihoodSeqDiffList[maxMotifIndex + 1:]:\n\t\t\t\t# Iterate through the other TFs determine whether their motifs have the same score\n\t\t\t\tif motifScore == maxMotifScore:\n\t\t\t\t\t# The tied motif has been found\n\t\t\t\t\tif (likelihoodRegionList[currentIndex] == 0) and (oneWritten == False):\n\t\t\t\t\t\t# A TF with the maximum motif score has no motif in the file with the sequence difference\n\t\t\t\t\t\toutputFile.write(\"1\\t\")\n\t\t\t\t\t\toneWritten = True\n\t\t\t\t\tif abs(motifScore - likelihoodRegionList[currentIndex]) > likelihoodDiffSpecific:\n\t\t\t\t\t\t# A larger strongest motif difference has been found, so replace the current strongest motif difference\n\t\t\t\t\t\tlikelihoodDiffSpecific = abs(motifScore - likelihoodRegionList[currentIndex])\n\t\tif oneWritten == False:\n\t\t\t# The motif with the largest score is present without the sequence difference\n\t\t\toutputFile.write(\"0\\t\")\n\t\toutputFile.write(str(likelihoodDiffGen) + \"\\t\" + str(likelihoodDiffSpecific) + \"\\n\")\n\t\n\ndef getPWMFeatures(FIMOComparisonFileName, sequenceDifferenceFileName, outputFileName):\n\t# Get the following PWM-related features:\n\t# Column 1: Is there a TF whose motif that is added/removed? 
(Binary)\n\t# Column 2: Is the TF with the overall strongest motif's motif added/removed? (Binary) [If there is a tie for the overall strongest motif, choose 1 if any of the strongest motifs are added/removed]\n\t# Column 3: |log-likelihood(strongest motif without sequence difference) - log-likelihood(strongest motif with sequence difference)|\n\t# Column 4: |log-likelihood(overall strongest motif) - log-likelihood(overall strongest motif in other file)| [If there is a tie for the overall strongest motif, choose the largest difference]\n\t# ASSUMES THAT NO 2 REGIONS OVERLAP\n\t# ASSUMES THAT THE FIMO COMPARISON FILE IS SORTED BY REGION CHROM, REGION START, REGION END, SEQUENCE DIFFERENCE CHROM, SEQUENCE DIFFERENCE START, SEQUENCE DIFFERENCE END\n\t# ASSUMES THAT THE SEQUENCE DIFFERENCE FILE IS SORTED BY CHROM, START, END\n\t# ASSUMES THAT REGIONS IN THE FIMO COMPARISON FILE WITH MOTIFS THAT ARE NOT IN THE SEQUENCE DIFFERENCE REGIONS ARE LISTED FIRST\n\tFIMOComparisonFile = open(FIMOComparisonFileName)\n\tsequenceDifferenceFile = open(sequenceDifferenceFileName)\n\toutputFile = open(outputFileName, 'w+')\n\n\tsequenceDifferenceLine = sequenceDifferenceFile.readline()\n\tsequenceDifferenceLineElements = sequenceDifferenceLine.split(\"\\t\")\n\tsequenceDifferenceLocation = (sequenceDifferenceLineElements[0], int(sequenceDifferenceLineElements[1]), int(sequenceDifferenceLineElements[2]))\n\t\n\tlikelihoodRegionList = []\n\tlikelihoodSeqDiffList = []\n\tlastRegionLocation = (\"\", -1, -1)\n\tlastSequenceDifferenceLocation = (\"-1\", -1, -1)\n\tlastTFRegionIndex = 0\n\tTFList = []\n\t\n\tallDifferencesSeen = False\n\tfor line in FIMOComparisonFile:\n\t\t# Iterate through the lines of the FIMO comparison file and use them to compute the PWM features\n\t\tlineElements = line.split(\"\\t\")\n\t\tregionLocation = (lineElements[0], int(lineElements[1]), int(lineElements[2]))\n\t\tcurrentSequenceDifferenceLocation = (lineElements[3], int(lineElements[4]), int(lineElements[5]))\n\t\tif currentSequenceDifferenceLocation != lastSequenceDifferenceLocation:\n\t\t\t# In a new sequence difference, so re-initialize if this is not the first sequence difference\n\t\t\tif lastSequenceDifferenceLocation != (\"-1\", -1, -1):\n\t\t\t\t# The previous sequence difference was not empty, so output the appropriate information\n\t\t\t\toutputFeatures(likelihoodRegionList, likelihoodSeqDiffList, outputFile)\n\t\t\t\tsequenceDifferenceLine = sequenceDifferenceFile.readline()\n\t\t\t\tif sequenceDifferenceLine == \"\":\n\t\t\t\t\t# All sequence differences have been seen, so stop\n\t\t\t\t\tallDifferencesSeen = True\n\t\t\t\t\tbreak\n\t\t\t\tsequenceDifferenceLineElements = sequenceDifferenceLine.split(\"\\t\")\n\t\t\t\tsequenceDifferenceLocation = (sequenceDifferenceLineElements[0], int(sequenceDifferenceLineElements[1]), int(sequenceDifferenceLineElements[2]))\n\t\t\t\tlikelihoodRegionList = likelihoodRegionList[0:lastTFRegionIndex]\n\t\t\t\tlikelihoodSeqDiffList = []\n\t\t\t\tfor i in range(len(likelihoodRegionList)):\n\t\t\t\t\t# Initialize the log-likelihoods for the sequence differences to be zeros\n\t\t\t\t\tlikelihoodSeqDiffList.append(0)\n\t\t\tif (lastRegionLocation != (\"\", -1, -1)) and (currentSequenceDifferenceLocation != (\"-1\", -1, -1)):\n\t\t\t\t# Not at beginning, so need to check that no sequence differences are being excluded\n\t\t\t\twhile currentSequenceDifferenceLocation != sequenceDifferenceLocation:\n\t\t\t\t\t# A sequence difference region has no 
motifs\n\t\t\t\t\toutputFeatures(likelihoodRegionList, likelihoodSeqDiffList, outputFile)\n\t\t\t\t\tlikelihoodSeqDiffList = []\n\t\t\t\t\tfor i in range(len(likelihoodRegionList)):\n\t\t\t\t\t\t# Initialize the log-likelihoods for the sequence differences to be zeros\n\t\t\t\t\t\tlikelihoodSeqDiffList.append(0)\n\t\t\t\t\tsequenceDifferenceLine = sequenceDifferenceFile.readline()\n\t\t\t\t\tif sequenceDifferenceLine == \"\":\n\t\t\t\t\t\t# All sequence differences have been seen, so stop\n\t\t\t\t\t\tallDifferencesSeen = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsequenceDifferenceLineElements = sequenceDifferenceLine.split(\"\\t\")\n\t\t\t\t\tsequenceDifferenceLocation = (sequenceDifferenceLineElements[0], int(sequenceDifferenceLineElements[1]), int(sequenceDifferenceLineElements[2]))\n\t\t\tif allDifferencesSeen == True:\n\t\t\t\t# All sequence differences have been seen, so stop\n\t\t\t\tbreak\n\t\t\tlastSequenceDifferenceLocation = currentSequenceDifferenceLocation\n\n\t\tif regionLocation != lastRegionLocation:\n\t\t\t# The region has changed, so re-initialize everything\n\t\t\tlikelihoodRegionList = []\n\t\t\tlikelihoodSeqDiffList = []\n\t\t\tTFList = []\n\t\t\tlastRegionLocation = regionLocation\n\t\tif currentSequenceDifferenceLocation == (\"-1\", -1, -1):\n\t\t\t# Initializing current region\n\t\t\tTFList.append(lineElements[6])\n\t\t\tlikelihoodRegionList.append(float(lineElements[10]))\n\t\t\tlikelihoodSeqDiffList.append(float(0))\n\t\t\tlastTFRegionIndex = lastTFRegionIndex + 1\n\t\telif lineElements[6] not in TFList:\n\t\t\t# The current sequence difference allows for a TF motif that was not previously present\n\t\t\tTFList.append(lineElements[6])\n\t\t\tlikelihoodRegionList.append(float(0))\n\t\t\tlikelihoodSeqDiffList.append(float(lineElements[8]))\n\t\telse:\n\t\t\tTFIndex = TFList.index(lineElements[6])\n\t\t\tlikelihoodSeqDiffList[TFIndex] = float(lineElements[8])\n\n\tif allDifferencesSeen == False:\n\t\t# Some sequence differences still have not been seen\n\t\toutputFeatures(likelihoodRegionList, likelihoodSeqDiffList, outputFile)\n\t\tlikelihoodRegionList = []\n\t\tlikelihoodSeqDiffList = []\n\t\tTFList = []\n\t\tsequenceDifferenceLine = sequenceDifferenceFile.readline()\n\t\twhile sequenceDifferenceLine != \"\":\n\t\t\t# Record sequence difference features to the output file for the remaining sequence differences\n\t\t\tsequenceDifferenceLineElements = sequenceDifferenceLine.split(\"\\t\")\n\t\t\tsequenceDifferenceLocation = (sequenceDifferenceLineElements[0], int(sequenceDifferenceLineElements[1]), int(sequenceDifferenceLineElements[2]))\n\t\t\toutputFeatures(likelihoodRegionList, likelihoodSeqDiffList, outputFile)\n\t\t\tsequenceDifferenceLine = sequenceDifferenceFile.readline()\n\n\tFIMOComparisonFile.close()\t\n\tsequenceDifferenceFile.close()\n\toutputFile.close()\n\n\nif __name__==\"__main__\":\n import sys\n FIMOComparisonFileName = sys.argv[1] # Name of with sequence difference FIMO information\n sequenceDifferenceFileName = sys.argv[2]\n outputFileName = sys.argv[3]\n\n getPWMFeatures(FIMOComparisonFileName, sequenceDifferenceFileName, outputFileName)\n","sub_path":"getPWMFeaturesOld.py","file_name":"getPWMFeaturesOld.py","file_ext":"py","file_size_in_byte":11221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"638003849","text":"# Lara, Emilio.\n# exl7207\n# 2019-05-2\n\n#----------------------------------------------------------------------\n# This code was originally created by Prof. 
Farhad Kamangar.\n# It has been significantly modified and updated by Brian A. Dalio for\n# use in CSE 4303 / CSE 5365 in the 2018 Fall semester.\n\n#----------------------------------------------------------------------\nfrom CohenSutherland import clipLine\nclass cl_world :\n def __init__( self, objects = [], canvases = [] ) :\n self.objects = objects\n self.canvases = canvases\n\n def add_canvas( self, canvas ) :\n self.canvases.append( canvas )\n canvas.world = self\n\n def reset( self ) :\n self.objects = []\n for canvas in self.canvases :\n canvas.delete( 'all' )\n\n def create_graphic_objects( self, canvas, modelData,doClip,perspective,euler,resolution) :\n p = perspective\n e = euler\n patches=[]\n pointList=[]\n \n patches= modelData.getPatches()\n\n height = int(canvas.cget(\"height\"))\n width = int(canvas.cget(\"width\"))\n w = modelData.getWindow()\n v = modelData.getViewport()\n vxMin = v[0] * width\n vxMax = v[2] * width\n vyMin = v[1] * height\n vyMax = v[3] * height\n portal = (vxMin,vyMin,vxMax,vyMax)\n print(f'Portal :({vxMin:,.2f},{vyMin:.2f},{vxMax:.2f},{vyMax:.2f})')\n\n for v1Num,v2Num,v3Num in modelData.getFaces():\n v1 = modelData.getTransformedVertex(v1Num,p,e)\n v2 = modelData.getTransformedVertex(v2Num,p,e)\n v3 = modelData.getTransformedVertex(v3Num,p,e)\n if doClip:\n for (vax,vay,_),(vbx,vby,_) in [(v1,v2),(v2,v3),(v3,v1)]:\n doDraw,vax,vay,vbx,vby = clipLine(vax,vay,vbx,vby,portal)\n if doDraw:\n canvas.create_line(vax,vay,vbx,vby)\n else:\n canvas.create_line(*v1[:-1], *v2[:-1],*v3[:-1],*v1[:-1])\n\n\n\n \n if(len(patches)!=0):\n listPatch=[]\n for patch in patches:\n for vNum in patch:\n Tuple=modelData.getTransformedVertex(vNum,p,e)\n listPatch.append(Tuple);\n pointList=modelData.resolveBézierPatch(listPatch)\n for row in range(0,resolution-1):\n rowStart = row * resolution\n\n for col in range(0,resolution-1):\n here = rowStart + col\n there = here + resolution\n\n \n triangleA =(pointList[here], pointList[there], pointList[there+1])\n (v1,v2,v3)=triangleA\n self.drawTriangle( canvas,v1,v2,v3, portal, doClip)\n \n triangleB =(pointList[there+1],pointList[here+1],pointList[here])\n (v1,v2,v3)=triangleB\n self.drawTriangle( canvas,v1,v2,v3, portal, doClip)\n \n def redisplay( self, canvas, event ) :\n pass\n \n def drawTriangle(self, canvas,v1,v2,v3, portal, doClip):\n if doClip:\n for (vax,vay,_),(vbx,vby,_) in [(v1,v2),(v2,v3),(v3,v1)]:\n doDraw,vax,vay,vbx,vby = clipLine(vax,vay,vbx,vby,portal)\n if doDraw:\n canvas.create_line(vax,vay,vbx,vby)\n else:\n canvas.create_line(*v1[:-1], *v2[:-1],*v3[:-1],*v1[:-1])\n\n\n\n\n\n\n\n\n\n \n\n#----------------------------------------------------------------------\n","sub_path":"hmwk_4b_exl7207/myGraphics.py","file_name":"myGraphics.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"145977212","text":"\nimport requests\nimport urllib3\nimport datetime\nfrom dateutil.parser import parse\n\nurllib3.disable_warnings()\n\nheaders = {\n 'User-Agent': 'OhYee Spider',\n}\n\n\ndef get(url: str):\n retry = 10\n rep = None\n\n while retry > 0:\n try:\n rep = requests.get(\n url, timeout=30, verify=False, headers=headers)\n break\n except Exception as e:\n print(\"Get page error: {}, {} times left...\".format(e, retry))\n retry -= 1\n\n rep = requests.get(url, verify=False, headers=headers)\n rep.encoding = 'utf-8'\n return rep.text\n\n\ndef post(url: str, data: object):\n rep = requests.post(url, data, verify=False, 
headers=headers)\n rep.encoding = 'utf-8'\n return rep.text\n\n\nclass Site:\n def __init__(self):\n pass\n\n def matcher(self, url: str):\n return False\n\n def solver(self, url: str):\n return []\n\n\nclass Post:\n def __init__(self, title: str, link: str, time: int):\n self.title = title.strip()\n self.link = link\n self.time = time\n\n def __repr__(self):\n\n return \"(%s - %s - %s)\" % (\n self.title,\n self.link,\n datetime.datetime.fromtimestamp(\n self.time\n ).strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n\n\ndef parseToUnix(timeStr: str):\n return parse(timeStr).timestamp()\n","sub_path":"spider/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"408239801","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2014 Wuxi. All rights reserved.\n#\n# @author: Lixin Xu \n# Created on Mar 13, 2014\n#\n\nimport logging\nimport datetime\nfrom base import BaseModel, Status, Error\nimport json\n\n\nclass Book(BaseModel):\n def get_category(self, type):\n \"\"\"获取图书分类\"\"\"\n if type == 1:\n the_list = self.db.query('select c.id,c.category,count(c.id) as count from book as b '\n 'inner join book_category as c on b.category_id = c.id '\n 'group by c.id order by c.id')\n else:\n the_list = self.db.query('select * from book_category order by id')\n return the_list or []\n\n def get_list(self, user_id, category_id=None, keyword=None, orderByIndex=0, book_ids=[], page_no=None):\n \"\"\"列表查询\"\"\"\n\n sql = \"select b.id,b.title,b.author,b.rating,b.picture,b.comments_tally,u.nick_name,u.real_name,c.category\\\n from book b join users u on b.user_id=u.id join book_category c on b.category_id=c.id where 1=1 and b.status=1 \"\n\n order_by_list = ['created_at desc', 'rating desc, comments_tally desc'] # 排序规则\n if orderByIndex > len(order_by_list) - 1:\n order_by_list = 0\n order_by = ' order by ' + order_by_list[orderByIndex]\n if book_ids: # 只用于我的收藏,无分类、无分页、无关键字查询\n book_ids = ','.join(book_ids)\n the_list = self.db.query((sql + 'and b.id in (%s)' + order_by) % book_ids) # 总是自动加引号,只能拼接了\n else:\n if keyword: # 关键字查询\n keyword = '%' + keyword + '%'\n\n every_page_count = 12\n offset = 0\n if page_no: # 为空的话不分页\n offset = (page_no - 1) * every_page_count\n\n if category_id > 0: # 单个分类\n if keyword:\n the_list = self.db.query(sql + ' and category_id=%s and (author ilike %s or title ilike %s) '\n + order_by + ' limit %s offset %s ',\n (category_id, keyword, keyword, every_page_count, offset,))\n else:\n the_list = self.db.query(sql + ' and category_id=%s '\n + order_by + ' limit %s offset %s ',\n (category_id, every_page_count, offset,))\n\n elif category_id == 0: # 所有分类\n if keyword:\n the_list = self.db.query(sql + ' and author ilike %s or title ilike %s '\n + order_by + ' limit %s offset %s ',\n (keyword, keyword, every_page_count, offset,))\n else:\n the_list = self.db.query(sql\n + order_by + ' limit %s offset %s ',\n (every_page_count, offset,))\n\n else: # category_id == -1: 本人上传���图书\n if keyword:\n the_list = self.db.query(sql + ' and b.user_id=%s and (author ilike %s or title ilike %s) '\n + order_by + ' limit %s offset %s ',\n (user_id, keyword, keyword, every_page_count, offset,))\n else:\n the_list = self.db.query(sql + ' and b.user_id=%s '\n + order_by + ' limit %s offset %s ',\n (user_id, every_page_count, offset,))\n\n if the_list:\n for row in the_list:\n if row:\n pic = row['picture'].decode(\"GB2312\")\n row['source_path'] = pic or ''\n row['thumb_path'] = 
pic.replace('source', 'thumb') or ''\n\n return the_list or []\n\n def get_one(self, id):\n \"\"\"单条详细查询\"\"\"\n row = self.db.query('select b.*,u.nick_name,u.real_name,c.category\\\n from book b join users u on b.user_id=u.id join book_category c on b.category_id=c.id\\\n where b.id=%s and b.status=1', (id,), fetchone=True)\n if row:\n pic = row['picture'].decode(\"GB2312\")\n row['source_path'] = pic or ''\n row['thumb_path'] = pic.replace('source', 'thumb') or ''\n return row or []\n\n def get_comments(self, id):\n \"\"\"查询评论\"\"\"\n the_list = self.db.query('select c.*,u.nick_name,u.real_name\\\n from book_comments c join users u on c.user_id=u.id join book b on b.id=c.book_id where book_id=%s order by updated_at desc',\n (id,))\n return the_list or []\n\n def get_my_comments(self, id):\n \"\"\"我的评论\"\"\"\n the_list = self.db.query('select c.*,u.nick_name,u.real_name,b.title\\\n from book_comments c join users u on c.user_id=u.id join book b on b.id=c.book_id where c.user_id=%s order by updated_at desc',\n (id,))\n return the_list or []\n\n def submit_book(self, book_id, user_id, category_id, title, author, description, picture):\n \"\"\"提交图书\"\"\"\n\n self.db.begin()\n if book_id > 0:\n if picture:\n res = self.db.execute(\n \"\"\"update book set category_id=%s, title=%s, author=%s, description=%s,\n picture=%s where id=%s and user_id=%s\"\"\",\n (category_id, title, author, description, picture, book_id, user_id,))\n else: # 如果picture为空,则为用户封面没修改\n res = self.db.execute(\n 'update book set category_id=%s, title=%s, author=%s, description=%s where id=%s and user_id=%s',\n (category_id, title, author, description, book_id, user_id,))\n else:\n ret = self.db.query('select * from book where title=%s and author=%s', (title, author), fetchone=True)\n if ret:\n text = '书名为《%s》;作者为\"%s\"的图书已存在,不能重复上传!' 
% (title, author)\n return Error(code=111, message=text)\n\n sql = \"\"\"insert into book (user_id, category_id, title, author, description, picture, status)\n values (%s, %s, %s, %s, %s, %s, 1)\"\"\"\n res = self.db.execute(sql, (user_id, category_id, title, author, description, picture,))\n if res:\n self.db.commit()\n return Status(code=1, message='Submit OK.')\n else:\n self.db.rollback()\n return Error(code=110, message='Submit failed.')\n\n def submit_comment(self, user_id, book_id, rating, comment):\n \"\"\"提交评论\"\"\"\n self.db.begin()\n comment_row = self.db.query('select * from book_comments where book_id=%s and user_id=%s', (book_id, user_id,),\n fetchone=True)\n if comment_row:\n # 以前评论过\n res = self.db.execute('update book_comments set rating=%s, comment=%s, updated_at=now() where id=%s',\n (rating, comment, comment_row['id'],))\n else:\n res = self.db.execute(\n 'insert into book_comments (book_id, user_id, rating, comment) values (%s, %s, %s, %s)',\n (book_id, user_id, rating, comment,))\n\n if res:\n row = self.db.query('select count(*) cnt,avg(rating) rating from book_comments '\n 'where book_id=%s and rating!=0',\n (book_id, ), fetchone=True)\n if row: # 评分为0的评论不计入评论总数,也不参与平均分的计算\n res2 = self.db.execute('update book set comments_tally=%s, rating=%s where id=%s',\n (row['cnt'] or 0, row['rating'] or 0, book_id,))\n if res2:\n self.db.commit()\n return Status(code=1, message='Submit OK.')\n else:\n self.db.rollback()\n return Error(code=110, message='Submit failed.')\n\n # 暂时用不到\n def store_picture(self, user_id, url):\n now = datetime.datetime.now()\n ret = self.db.execute('insert into pictures (user_id, url, created_at) values (%s, %s, %s)',\n (user_id, url, now,))\n if ret:\n ret = self.db.query('select id, url from pictures where user_id=%s and created_at=%s',\n (user_id, now,), fetchone=True)\n return ret or Error(code=100, message='未获取图片信息')\n return Error(code=100, message='存储图片失败')\n","sub_path":"api/model/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":8848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70282777","text":"# coding=utf-8\nimport pickle\nimport random\nimport asyncio\n\nimport discord\n\nemoticon_id = \"106395317271805952\"\nshould_log = False\n\nclient = discord.Client()\n\n\ndef init(cilent):\n global client\n client = cilent\n\n\n# exit function\ndef safe_exit(msg):\n if msg.author.id == emoticon_id:\n # client.logout()\n save_dicts(msg)\n exit()\n\n\n# log functions\ndef log_command(msg):\n global should_log\n if msg.author.id == emoticon_id:\n if msg.content.split(\" \")[1] == \"complete\":\n try:\n logs[msg.channel.id].extend(get_full_log(msg))\n except KeyError:\n logs[msg.channel.id] = []\n logs[msg.channel.id].extend(get_full_log(msg))\n elif msg.content.split(\" \")[1] == \"update\":\n update_log(msg)\n elif msg.content.split(\" \")[1] == \"switch\":\n should_log = not should_log\n\n\ndef get_full_log(msg):\n if msg.author.id == emoticon_id:\n print(\"Getting complete log from channel #\" + msg.channel.name + \" \" + msg.channel.id)\n full_log = []\n last_part = False\n last_msg = list(client.logs_from(msg.channel, 2))[1]\n while not last_part:\n log = list(client.logs_from(msg.channel, 100, last_msg))\n full_log.extend(log)\n last_msg = log[len(log) - 1]\n print(\"Last message: \" + str(log[len(log) - 1]))\n if len(log) < 100:\n last_part = True\n print(\"Done getting complete log from channel #\" + msg.channel.name + \" \" + msg.channel.id)\n 
print(len(full_log))\n return full_log\n\n\ndef update_log(msg):\n if msg.author.id == emoticon_id:\n print(\"Updating log from channel #\" + msg.channel.name + \" \" + msg.channel.id)\n try:\n last_known_msg = logs[msg.channel.id][len(logs[msg.channel.id]) - 1]\n except KeyError:\n print(\"No logs for channel found!\")\n return\n log_update = []\n last_part = False\n last_msg = last_known_msg\n while not last_part:\n log = list(client.logs_from(msg.channel, 100, after=last_msg))\n log_update.extend(log)\n last_msg = log[0]\n print(u\"Last message: \" + str(log[0]))\n if len(log) < 100:\n last_part = True\n print(\"Done updating log from channel #\" + msg.channel.name + \" \" + msg.channel.id)\n print(len(log_update))\n return log_update\n\n\n@asyncio.coroutine\ndef log_search(msg):\n yield from client.send_typing(msg.channel)\n yield from client.send_message(msg.channel, \"Searching log...\")\n print(\"searching log\")\n log = logs[msg.channel.id]\n filtered_log = [line.author.name + \": \" + line.content for line in log if msg.content[12:] in line.content]\n print(u\"\\n\".join(filtered_log))\n print(len(u\"\\n\".join(filtered_log)))\n if len(filtered_log) > 0:\n try:\n yield from client.send_message(msg.channel, u\"```\\n\" + \"\\n\\n\".join(filtered_log).replace(\"`\", \"´\") + \"```\")\n except discord.utils.HTTPException:\n if len(u\"\\n\".join(filtered_log)) > 2000:\n yield from client.send_message(msg.channel, \"Output too long\")\n else:\n yield from client.send_message(msg.channel, \"Error\")\n\n\n# id function\n@asyncio.coroutine\ndef get_id(msg):\n onlines = list(client.get_all_members())\n print(msg.content.split(\" \"))\n for member in onlines:\n if member.name.lower() == msg.content.split(\" \")[1].lower():\n print(member.id)\n yield from client.send_message(msg.channel, \"ID of user \" + member.name + \": \" + member.id)\n\n\n# command edit function\ndef command_edit(msg):\n if msg.author.id == emoticon_id:\n if msg.content.split(\" \")[1] == \"add\":\n if msg.content.split(\" \")[2] == \"str\":\n commands[msg.content.split(\" \")[3]] = msg.content[16 + len(msg.content.split(\" \")[3]):]\n elif msg.content.split(\" \")[2] == \"fun\":\n commands[msg.content.split(\" \")[3]] = globals()[msg.content[16 + len(msg.content.split(\" \")[3]):]]\n elif msg.content.split(\" \")[1] == \"del\":\n commands.pop(msg.content.split(\" \")[2])\n\n\n@asyncio.coroutine\ndef command_list(msg):\n yield from client.send_message(msg.channel, \", \".join(commands.keys()))\n\n\n# dictionary functions\ndef save_dicts(msg):\n if msg.author.id == emoticon_id:\n save_dict(\"commands\", commands)\n save_dict(\"logs\", logs)\n print(\"Saved commands and logs\")\n\n\ndef save_dict(dictname, dic):\n try:\n f = open(dictname + \".txt\", \"wb\")\n pickle.dump(dic, f)\n f.close()\n except FileNotFoundError:\n f = open(dictname + \".txt\", \"ab\")\n pickle.dump(dic, f)\n f.close()\n\n\ndef load_dict(dictname):\n try:\n with open(dictname + \".txt\", \"rb\") as f:\n dic = pickle.load(f)\n f.close()\n return dic\n except FileNotFoundError:\n with open(dictname + \".txt\", \"ab\") as f:\n pickle.dump({}, f)\n f.close()\n return load_dict(dictname)\n\nlogs = load_dict(\"logs\")\ncommands = load_dict(\"commands\")\n\n\n# command function\n@asyncio.coroutine\ndef command_exec(msg):\n content = msg.content\n if content.split(\" \")[0][1:] in commands:\n cmdname = content.split(\" \")[0][1:]\n if type(commands[cmdname]) is str:\n yield from client.send_message(msg.channel, commands[cmdname])\n elif 
callable(commands[cmdname]):\n if asyncio.iscoroutinefunction(commands[cmdname]):\n yield from commands[cmdname](msg)\n else:\n commands[cmdname](msg)\n\n\n@asyncio.coroutine\ndef eight_ball(msg):\n answers = [\n \"Signs point to yes.\",\n \"Yes.\",\n \"Reply hazy, try again.\",\n \"Without a doubt.\",\n \"My sources say no.\",\n \"As I see it, yes.\",\n \"You may rely on it.\",\n \"Concentrate and ask again.\",\n \"Outlook not so good.\",\n \"It is decidedly so.\",\n \"Better not tell you now.\",\n \"Very doubtful.\",\n \"Yes - definitely.\",\n \"It is certain.\",\n \"Cannot predict now.\",\n \"Most likely.\",\n \"Ask again later.\",\n \"My reply is no.\",\n \"Outlook good.\",\n \"Don't count on it.\"\n ]\n yield from client.send_message(msg.channel, random.choice(answers))\n","sub_path":"cfuncs.py","file_name":"cfuncs.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"547345051","text":"import os\nimport json\nfrom webpack_manifest.webpack_manifest import (\n WebpackManifest, \n WebpackManifestEntry, \n WebpackManifestFileError\n)\n\n\nclass CraManifest(WebpackManifest):\n\n def __init__(self):\n path = os.path.join(os.path.dirname(__file__), \"dist\", \"asset-manifest.json\")\n static_root = os.path.dirname(path)\n data = self._read_manifest(path)\n super().__init__(path, data, '/dist/', static_root)\n self.main = WebpackManifestEntry(self._data['entrypoints'], self._static_url, self._static_root)\n\n\n def _read_manifest(self, path):\n if not os.path.isfile(path):\n raise WebpackManifestFileError('Path \"{}\" is not a file or does not exist'.format(path))\n\n if not os.path.isabs(path):\n raise WebpackManifestFileError('Path \"{}\" is not an absolute path to a file'.format(path))\n\n with open(path, 'r') as manifest_file:\n content = manifest_file.read()\n\n return json.loads(content)","sub_path":"app/manifests.py","file_name":"manifests.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"495257540","text":"################################################################################\n# All conv net\n# Copyright (c) 2016 Artsiom Sanakoyeu\n################################################################################\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as tflayers\nimport os\n\n\nclass ExemplarCnnConvNet(object):\n \"\"\"\n Convnet\n WARNING! 
You should feed images in HxWxC BGR format!\n \"\"\"\n\n class RandomInitType:\n GAUSSIAN = 0,\n XAVIER_UNIFORM = 1,\n XAVIER_GAUSSIAN = 2\n\n def __init__(self,\n im_shape=(96, 96, 3),\n device_id='/gpu:0',\n random_init_type=RandomInitType.XAVIER_GAUSSIAN,\n gpu_memory_fraction=None, **params):\n \"\"\"\n Args:\n init_model: dict containing network weights, or a string with path to .np file with the dict,\n if is None then init using random weights and biases\n num_classes: number of output classes\n gpu_memory_fraction: Fraction on the max GPU memory to allocate for process needs.\n Allow auto growth if None (can take up to the totality of the memory).\n :return:\n \"\"\"\n self.input_shape = im_shape\n self.device_id = device_id\n self.random_init_type = random_init_type\n self.batch_norm_decay = 0.99\n\n if len(self.input_shape) == 2:\n self.input_shape += (3,)\n assert len(self.input_shape) == 3\n\n self.global_iter_counter = tf.Variable(0, name='global_iter_counter', trainable=False)\n with tf.variable_scope('input'):\n self.x = tf.placeholder(tf.float32, (None,) + self.input_shape, name='x')\n self.y_gt = tf.placeholder(tf.int32, shape=(None,), name='y_gt')\n self.is_phase_train = tf.placeholder(tf.bool, shape=tuple(), name='is_phase_train')\n self.dropout_keep_prob = tf.placeholder_with_default(1.0, tuple(),\n name='dropout_keep_prob')\n\n\n with tf.device(self.device_id):\n\n self.conv1 = self.conv_relu(self.x, kernel_size=5,\n kernels_num=64, stride=1,\n name='conv1', batch_norm=False)\n self.maxpool1 = tf.nn.max_pool(self.conv1,\n ksize=[1, 48, 48, 1],\n strides=[1, 48, 48, 1],\n padding='SAME',\n name='maxpool1')\n self.pool1 = tf.nn.max_pool(self.conv1,\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='maxpool1')\n\n\n self.conv2 = self.conv_relu(self.pool1, kernel_size=5,\n kernels_num=128, stride=1,\n name='conv2', batch_norm=False)\n self.maxpool2 = tf.nn.max_pool(self.conv2,\n ksize=[1, 24, 24, 1],\n strides=[1, 24, 24, 1],\n padding='SAME',\n name='maxpool2')\n\n self.pool2 = tf.nn.max_pool(self.conv2,\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='maxpool2')\n\n self.conv3 = self.conv_relu(self.pool2, kernel_size=5,\n kernels_num=256, stride=1,\n name='conv3', batch_norm=False)\n\n self.maxpool3 = tf.nn.max_pool(self.conv3,\n ksize=[1, 12, 12, 1],\n strides=[1, 12, 12, 1],\n padding='SAME',\n name='maxpool3')\n\n self.conv4 = self.conv_relu(self.conv3, kernel_size=8,\n kernels_num=512, stride=1,\n name='conv4', batch_norm=False)\n\n self.maxpool4 = tf.nn.max_pool(self.conv4,\n ksize=[1, 9, 9, 1],\n strides=[1, 9, 9, 1],\n padding='VALID',\n name='maxpool4')\n self.graph = tf.get_default_graph()\n config = tf.ConfigProto(log_device_placement=False,\n allow_soft_placement=True)\n # please do not use the totality of the GPU memory.\n if gpu_memory_fraction is None:\n config.gpu_options.allow_growth = True\n else:\n config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n self.sess = tf.Session(config=config)\n\n\n\n def restore_from_snapshot(self, snapshot_path, num_layers, restore_iter_counter=True):\n \"\"\"\n :param snapshot_path: path to the snapshot file\n :param num_layers: number layers to restore from the snapshot\n (conv1 is the #1, fc8 is the #8)\n :param restore_iter_counter: if True restore global_iter_counter from the snapshot\n\n WARNING! 
A call of sess.run(tf.initialize_all_variables()) after restoring from snapshot\n will overwrite all variables and set them to initial state.\n Call restore_from_snapshot() only after sess.run(tf.initialize_all_variables())!\n \"\"\"\n if num_layers > 4:\n raise ValueError('You can restore only 4 layers')\n if num_layers == 0:\n return\n # if not restore_iter_counter:\n # raise ValueError('We can restore only everything including iter_counter')\n\n with self.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(self.sess, snapshot_path)\n\n def conv_relu(self, input_tensor, kernel_size, kernels_num, stride, batch_norm=True,\n group=1, name=None):\n\n with tf.variable_scope(name) as scope:\n assert int(input_tensor.get_shape()[3]) % group == 0\n num_input_channels = int(input_tensor.get_shape()[3]) / group\n w, b = self.get_conv_weights(kernel_size, num_input_channels, kernels_num)\n conv = self.conv(input_tensor, w, b, stride, padding=\"SAME\", group=group)\n if batch_norm:\n conv = tf.cond(self.is_phase_train,\n lambda: tflayers.batch_norm(conv,\n decay=self.batch_norm_decay,\n is_training=True,\n trainable=True,\n reuse=None,\n scope=scope),\n lambda: tflayers.batch_norm(conv,\n decay=self.batch_norm_decay,\n is_training=False,\n trainable=True,\n reuse=True,\n scope=scope))\n conv = tf.nn.relu(conv, name=name)\n return conv\n\n @staticmethod\n def conv(input_tensor, kernel, biases, stride, padding=\"VALID\", group=1):\n\n c_i = input_tensor.get_shape()[-1]\n assert c_i % group == 0\n assert kernel.get_shape()[3] % group == 0\n\n def convolve(inp, w, name=None):\n return tf.nn.conv2d(inp, w, [1, stride, stride, 1], padding=padding, name=name)\n\n if group == 1:\n conv = convolve(input_tensor, kernel)\n else:\n input_groups = tf.split(axis=3, num_or_size_splits=group, value=input_tensor)\n kernel_groups = tf.split(axis=3, num_or_size_splits=group, value=kernel)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]\n conv = tf.concat(axis=3, values=output_groups)\n # TODO: no need to reshape?\n return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:],\n name='conv')\n\n def get_conv_weights(self, kernel_size, num_input_channels, kernels_num,\n weight_std=0.01, bias_init_value=0.1):\n\n w = self.random_weight_variable((kernel_size, kernel_size,\n num_input_channels,\n kernels_num),\n stddev=weight_std)\n b = self.random_bias_variable((kernels_num,), value=bias_init_value)\n return w, b\n\n def random_weight_variable(self, shape, stddev=0.01):\n \"\"\"\n stddev is used only for RandomInitType.GAUSSIAN\n \"\"\"\n if self.random_init_type == self.RandomInitType.GAUSSIAN:\n initial = tf.truncated_normal(shape, stddev=stddev)\n return tf.Variable(initial, name='weight')\n elif self.random_init_type == self.RandomInitType.XAVIER_GAUSSIAN:\n return tf.get_variable(\"weight\", shape=shape,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=False))\n elif self.random_init_type == self.RandomInitType.XAVIER_UNIFORM:\n return tf.get_variable(\"weight\", shape=shape,\n initializer=tf.contrib.layers.xavier_initializer(\n uniform=True))\n else:\n raise ValueError('Unknown random_init_type')\n\n\n @staticmethod\n def random_bias_variable(shape, value=0.1):\n initial = tf.constant(value, shape=shape)\n return tf.Variable(initial, name='bias')\n\n\nif __name__ == \"__main__\":\n\n path_to_snapshot = '/export/home/mbautist/Desktop/workspace/cnn_similarities/tjprj/data/exemplar_cnn/'\n net = ExemplarCnnNet(device_id=0,\n 
gpu_memory_fraction=0.4, random_init_type=ExemplarCnnNet.RandomInitType.XAVIER_GAUSSIAN)\n a = 0\n","sub_path":"tfext/exemplarcnnconvnet.py","file_name":"exemplarcnnconvnet.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"489802745","text":"#! /usr/bin/env python3\n#NoGuiLinux\n#take data from csv file and input it into a sqlite3 db\n\nimport pandas as pd\nimport pandasql\nimport numpy\nimport os,sqlite3\nimport argparse\nimport string\n\nclass container:\n class data:\n master=None\n file=''\n invalidChars=string.punctuation+string.whitespace\n invalidCol=string.digits\n def loadData(self,file):\n data=pd.read_csv(file)\n for char in self.invalidChars:\n data.rename(columns=lambda x: x.replace(char,\"_\").lower(),inplace=True)\n data.rename(columns=lambda x: \"c_\"+x if x[0] in self.invalidCol else x,inplace=True)\n df=pd.DataFrame(data)\n return df\n\n def checkDb(self,dbName):\n if os.path.exists(dbName) and os.path.isfile(dbName):\n os.remove(dbName)\n\n class dbManager:\n master=None\n def mkTableCols(self,df):\n tableCols=[]\n dfCols=df.columns.get_values()\n for col in dfCols:\n if df[col].dtype != 'object':\n tableCols.append(\"{} real\".format(col))\n if df[col].dtype == 'object':\n tableCols.append(\"{} text\".format(col))\n return tableCols\n\n def mkTableName(self,file):\n tableName=os.path.splitext(file)[0].replace(' ',\"_\").lower()\n for char in self.master.data.invalidChars:\n tableName=tableName.replace(char,\"_\").lower()\n return tableName\n\n def mkTableSql(self,tableCols,tableName):\n tableSql=\"create table if not exists {}({},rowid INTEGER PRIMARY KEY AUTOINCREMENT);\".format(tableName,','.join(tableCols))\n return tableSql\n\n def mkDbName(self,file):\n return os.path.splitext(file)[0]+\".db\"\n\n def mkDb(self,dbName):\n db={}\n db['db']=sqlite3.connect(dbName)\n db['cursor']=db['db'].cursor()\n return db\n\n def dbCleanup(self,db):\n db['db'].commit()\n db['db'].close()\n\n def insertEntry(self,sql,db):\n try:\n db['cursor'].execute(sql)\n except OSError as e:\n print(sql)\n print(e)\n exit(1)\n\n def mkEntries(self,tableName,df,db):\n counter=0\n valuesStr=None\n tableRows=len(df)\n dfCols=df.columns.get_values()\n values=[]\n msg=\"Processed Entries: \"\n for i in range(tableRows):\n for x in range(len(dfCols)):\n column=df.loc[i][dfCols[x]]\n if type(column) != str:\n if str(column) == 'nan':\n column='Null'\n values.append(str(column))\n elif type(column) == str:\n if column == '':\n column='Null'\n values.append('\"{}\"'.format(column))\n sql='insert into {} ({}) values({});'.format(tableName,','.join(dfCols),','.join(values))\n values=[]\n print(str(len(str(counter)+msg)*\"\\b\")+msg+str(counter),end='')\n if ( counter % 1000 ) == 0:\n db['db'].commit()\n counter+=1\n self.insertEntry(sql,db)\n\n class cmdline:\n master=None\n def getargs(self):\n parser=argparse.ArgumentParser()\n parser.add_argument(\"-f\",\"--file\",help=\"csv input file\",required=\"yes\")\n options,unknown=parser.parse_known_args()\n return options\n\n class void:\n master=None\n\n class tasks:\n master=None\n def run(self):\n options=self.master.cmd.getargs()\n self.master.data.file=options.file\n\n file=self.master.data.file\n\n dbName=self.master.db.mkDbName(file)\n self.master.data.checkDb(dbName)\n db=self.master.db.mkDb(dbName)\n df=self.master.data.loadData(file)\n \n tableName=self.master.db.mkTableName(file)\n tableCols=self.master.db.mkTableCols(df)\n 
tableSql=self.master.db.mkTableSql(tableCols,tableName)\n #make table\n self.master.db.insertEntry(tableSql,db)\n #add entries to table\n self.master.db.mkEntries(tableName,df,db)\n self.master.db.dbCleanup(db)\n print(\"\\ncompleted! '{}' -> '{}'\".format(file,dbName))\n\n def assembler(self):\n wa=self.void()\n wa.master=wa\n \n wa.cmd=self.cmdline()\n wa.cmd.master=wa\n\n wa.data=self.data()\n wa.data.master=wa\n\n wa.db=self.dbManager()\n wa.db.master=wa\n\n wa.tasks=self.tasks()\n wa.tasks.master=wa\n wa.tasks.run()\n\nrun=container()\nrun.assembler()\n\n#sql='''select registrar,enrolment_agency from data limit 50'''\n#solution=pandasql.sqldf(sql.lower(),locals())\n#print(solution)\n","sub_path":"classing-python3/csv2sqlite3.py","file_name":"csv2sqlite3.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"300433264","text":"'''\nUn documento HTML puo' essere rappresentato sotto forma di albero, come visto a lezione, che si chiama DOM (Document Object Model).\n\nUn qualsiasi nodo di questo albero puo' essere individuato sulla base delle proprie caratteristiche:\n - tag un tag del tipo indicato\n - .classe una delle parole presenti nel valore dell'attributo \"class\"\n - #id il valore dell'attributo \"id\"\n - @[attributo=\"valore\"] valore di un attributo generico\ned in base alla sua relazione con i tag che lo contengono:\n - avo discendente il tag 'avo' contiene un tag 'discendente' a qualsiasi profondita'\n - padre > figlio il tag 'padre' contiene il tag 'figlio' nel livello immediatamente sotto\n\nUn selettore CSS e' una successione di selettori di tag separati da spazio che serve ad individuare uno o piu' nodi all'interno del DOM.\nEsempio:\n div .class1 > #main_window\n seleziona un qualsiasi tag che ha id=\"main_window\"\n e' il figlio di un tag che ha class=\"... class1 ...\"\n e si trova all'interno (a qualsiasi livello di distanza) di un tag div\nEsempio2:\n p table > tr > td > a\n\tseleziona un link ( )\n\tfiglio di una casella ( ... )\n\tfiglia di una riga ( ... )\n\tfiglia di una tabella ( ...
)\n\tcontenuta a qualsiasi livello in un paragrafo ( <p> .... </p>
)\n\nNOTA: questa definizione del CSS e' una versione ridottissima che non segue lo standard completo.\nIn particolare, non e' possibile usare piu' relazioni '>' consecutive o costruire selettori alternativi (in OR) e selettori in AND.\n\nLe modifiche da attuare su un DOM possono essere di 3 tipi:\n - conteggio dei nodi che soddisfano il selettore CSS\n - eliminazione di tutti i tag individuati da un selettore CSS\n - modifica degli attributi di tutti i tag individuati da un selettore CSS\n\nRealizzate le funzioni che esplorano e modificano l'albero:\n conta_nodi( fileIn, selettore)\n elimina_nodi( fileIn, selettore, fileOut)\n cambia_attributo(\tfileIn, selettore, chiave, valore, fileOut)\n\nATTENZIONE: nei test verra' utilizzato un timout globale di 1*N secondi (se il grader fa N test)\n'''\n\nfrom my_html import HTMLNode, fparse\n\ndef print_tree(nodo,level):\n if nodo.istext():\n print(' '*level+'__text__'+repr(nodo.content))\n else:\n print(' '*level+str(nodo))\n for child in nodo.content:\n print_tree(child,level+1)\n\n\n\n\n\ndef create_d(selettore): #per creare il dizionario attributi\n spec_ch=['.','#','@']\n d=dict()\n #lista=list()\n #lista_2=list()\n lista=selettore.split()\n for attr in lista:\n #print(attr)\n for elemento in spec_ch:\n if elemento in attr[0]: #se trovo un spec_ch in attr->lista splitatta da spazio\n #print('attr',attr,type(attr))\n lista_2=attr.split(elemento)\n if elemento=='.':\n d['class']=lista_2[-1]\n if elemento=='#':\n d['id']=lista_2[-1]\n if elemento=='@':\n app=str()\n for c in lista_2[-1]:\n if c not in '][':\n app+=c\n app1=app.split('=')\n appfinal=app1[1].split('\\\"')\n appfinal.remove(appfinal[0])\n appfinal.remove(appfinal[-1])\n #print('appfinal',appfinal)\n d[app1[0]]=appfinal[0]\n break\n return d\n\ndef conta_attr(nodo,d,lista,cont):\n '''Funzione che conta i nodi con dato attributo'''\n if not nodo.istext():\n #print('entro con', nodo.tag)\n state=0\n for k in lista: #lista=d.keys\n #print('dizionario attr:',nodo.attr)\n if nodo.attr!={} and k in nodo.attr:\n d_nodo=nodo.attr\n #print('valori:',d_nodo.get(k))\n if d[k] in d_nodo.get(k):\n state+=1\n #print('attr',nodo.attr,state)\n if state==len(lista):\n #print('uguale')\n cont+=1\n for child in nodo.content:\n if not child.istext():\n cont=conta_attr(child,d,lista,cont)\n\n return cont\n\ndef con_padre_figlio(nodo,selettore,ind,cont): #selettore E una lista\n '''Funzione che conta i nodi con relazione padre figlio'''\n if not nodo.istext():\n #print('nodotag',nodo.tag,'=',selettore[ind])\n if nodo.tag==selettore[ind]:\n #print('si')\n if ind==(len(selettore)-1) :\n #print('incrementato',cont)\n cont+=1\n return cont\n for child in nodo.content:\n if not child.istext():\n cont=con_padre_figlio(child,selettore,ind+1,cont)\n\n\n\n for child in nodo.content:\n if not child.istext():\n cont=con_padre_figlio(child,selettore,0,cont)\n\n return cont\n\ndef con_avo_disc(nodo,selettore,ind,cont): #selettore e una lista\n '''Funzione che CONTA i nodi con relazione avo discendente'''\n if not nodo.istext():\n for child in nodo.content:\n if not child.istext():\n #print('tag',child.tag,'=',selettore[ind])\n if child.tag==selettore[ind]:\n if ind==(len(selettore)-1):\n cont+=1\n print('arrivato',cont)\n #---return cont stesso motivo\n #print('si, call')\n cont=con_avo_disc(child,selettore,ind+1,cont)\n else:\n #print('no')\n cont=con_avo_disc(child,selettore,ind,cont)\n\n return cont\n\n\ndef rem_avo_disc(nodo,selettore,ind,dad): #selettore e una lista\n '''Funzione che RIMUOVE i nodi con relazione 
avo discendente'''\n if not nodo.istext():\n for child in nodo.content:\n if not child.istext():\n #print('tag',child.tag,'=',selettore[ind])\n if child.tag==selettore[ind]:\n if ind==(len(selettore)-1):\n print_tree(dad,0)\n #print('arrivato',child)\n dad_schild=dad.content\n #####\n child_schild=child.content\n c=0\n for x in dad_schild: #elimino figlio a da padre\n #print(x,child)\n if x==child:\n dad_schild.remove(x)\n '''for el_listacont_child in child.content:#inserisco filgio del figlio\n #nel conent del padre, allo stesso posto(indice) in cui era il figlio\n dad_schild.insert(c,el_listacont_child)'''\n\n\n c+=1\n #####\n #---dad_schild+=child.content\n #---dad.content=dad_schild\n #print('nuovi figli',dad,dad.content)\n print('\\nelimo',child,'\\n')\n print_tree(dad,0)\n #print('op ef.')\n #return nodo #levato il return perche senno non continuava a scorrere\n #print('si, call')\n nodo=rem_avo_disc(child,selettore,ind+1,dad)\n else:\n #print('no')\n dad=child\n #print('padre',dad)\n nodo=rem_avo_disc(child,selettore,ind,dad)\n #print('ritorno',nodo)\n\n return nodo\n\n\n\ndef check_selettore(root,selettore,choice,zipp=0):\n '''Funzione che richiama la funzione apposita per ogni relazione'''\n spec_ch=['@','#','.']\n for elemento in spec_ch:\n if elemento in selettore:\n d=create_d(selettore)\n #print(d)\n if choice=='count':\n print('relazione tag - choice->',choice,d)\n cont=conta_attr(root,d,d.keys(),0)\n return cont\n elif choice=='remove':\n print('funz rimuovi')\n elif choice=='edit':\n print('relazione attributo - choice->',choice,d)\n k,v=zipp\n change_attr(root,d,d.keys(),0,k,v)\n return\n\n\n if '>' in selettore:\n lista=selettore.split('>')\n #print('inizio con lista',lista)\n contatore=0\n for elemento in lista:\n #print('elemento',elemento)\n c1=str()\n for c in elemento:\n if c!=' ':\n c1+=c\n lista[contatore]=c1\n contatore+=1\n print('Relazione padre-figlio ->', lista)\n cont=con_padre_figlio(root,lista,0,0)\n return cont\n\n\n else:\n single_tag=True\n for c in selettore:\n if c==' ':\n single_tag=False\n if single_tag==False:\n lista=selettore.split(' ')\n\n if choice=='count':\n print('Relazione avo-discendente ->',choice,lista)\n cont=con_avo_disc(root,lista,0,0)\n return cont\n\n elif choice=='remove':\n print('Relazione avo-discendente ->',choice,lista)\n cont=rem_avo_disc(root,lista,0,0)\n #print_tree(root,0)\n\n\n elif choice=='edit':\n print('Relazione avo-discendente ->',choice,lista)\n k,v=zipp\n a=change_avo_disc(root,lista,0,k,v)\n else:\n lista=list()\n lista.append(selettore)\n print('Relazione tag-',lista)\n cont=con_padre_figlio(root,lista,0,0)\n return cont\n\n\n\ndef change_attr(nodo,d,lista,cont,k1,v):\n '''Funzione che conta i nodi con dato attributo'''\n if not nodo.istext():\n #print('entro con', nodo.tag)\n state=0\n for k in lista: #lista=d.keys\n #print('dizionario attr:',nodo.attr)\n if nodo.attr!={} and k in nodo.attr:\n d_nodo=nodo.attr\n #print('valori:',d_nodo.get(k))\n if d[k] in d_nodo.get(k):\n state+=1\n #print('attr',nodo.attr,state)\n if state==len(lista):\n print('uguale')\n print(nodo.attr)\n diz_nodo=nodo.attr\n diz_nodo[k1]=v\n print(nodo.attr)\n\n cont+=1\n for child in nodo.content:\n if not child.istext():\n change_attr(child,d,lista,cont,k1,v)\n\n return\n\n\n\n\n\ndef change_avo_disc(nodo,selettore,ind,k,v): #selettore e una lista\n '''Funzione che CAMBIA attributi dei nodi con relazione avo discendente'''\n if not nodo.istext():\n for child in nodo.content:\n if not child.istext():\n 
print('tag',child.tag,'=',selettore[ind])\n if child.tag==selettore[ind]:\n if ind==(len(selettore)-1):\n #print('attr di',child,'-',child.attr)\n d_nodo=child.attr\n d_nodo[k]=v\n #print('attr di',child,',modificati -',child.attr)\n #----return\n #print('si, call')\n change_avo_disc(child,selettore,ind+1,k,v)\n else:\n #print('no')\n change_avo_disc(child,selettore,ind,k,v)\n\n return\n\n\n\ndef conta_nodi(fileIn, selettore):\n '''Torna il numero di nodi dell'albero, che soddisfano il selettore CSS.'''\n choice='count'\n root=fparse(fileIn)\n cont=check_selettore(root,selettore,choice)\n print('res',cont)\n return cont\n\n\n\n#conta_nodi('page1-3.html','p a')\n\ndef save_HTML(root,fileOut):\n '''FUNZIONE PER SCRIVERE .HTML SU DISCO'''\n s=root.to_string()\n #print(s)\n f=open(fileOut,'w')\n f.write(s)\n f.close\n\n\ndef elimina_nodi(fileIn, selettore, fileOut):\n '''Elimina dallalbero tutti i nodi che soddisfano il selettore CSS (compreso il loro contenuto)'''\n choice='remove'\n root=fparse(fileIn)\n check_selettore(root,selettore,choice)\n #print_tree(root,0)\n save_HTML(root,fileOut)\n\n#elimina_nodi('page1-3.html','p a','prova.html')\n\n\n\n\ndef cambia_attributo(fileIn, selettore, chiave, valore, fileOut):\n '''Modifica tutti i nodi dell'albero che soddisfano il selettore CSS'''\n choice='edit'\n root=fparse(fileIn)\n #print_tree(root,0)\n zipp=(chiave,valore)\n check_selettore(root,selettore,choice,zipp)\n save_HTML(root,fileOut)\n\n\ncambia_attributo('slashdot.html','#slashdot_deals-title','style','background-color:red','prova.html')\n","sub_path":"students/1755633/homework04/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":12868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56627893","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom blop import db\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime\nfrom sqlalchemy.sql import desc\n\napp = Flask(__name__)\napp.debug = True\napp.config['SQLALCHEMY_DATABASE_URI'] = db.db_url\n\ndb = SQLAlchemy(app)\n\nfrom blop import models\n\nPOSTS_PER_PAGE = 20\n\n@app.route('/submit', methods=['GET', 'POST'])\ndef submit():\n types = db.session.query(models.Type).order_by(models.Type.code).all()\n locations = db.session.query(models.Location).order_by(models.Location.name).all()\n return render_template('test.html', types=types, locations=locations)\n\n\n@app.route('/blotter')\n@app.route('/blotter/')\ndef blotter(page=1):\n incidents = models.Incident.query.order_by('datetime desc').paginate(page, POSTS_PER_PAGE, False)\n return render_template('blotter.html', title='Live Feed', incidents=incidents)\n\n@app.route('/')\n@app.route('/map')\ndef maps():\n today = datetime.datetime.now()\n year = today.year - 1\n startdate = today.replace(year = year)\n incidents = db.session.query(models.Incident).filter(models.Incident.datetime >= startdate).all()\n return render_template('map.html', incidents = incidents)\n\n\n@app.route('/search/')\ndef search():\n types = db.session.query(models.Type).order_by(models.Type.description).all()\n locations = db.session.query(models.Location).order_by(models.Location.name).all()\n return render_template('search.html', types=types, locations=locations)\n\n\n@app.route('/processform', methods=['GET', 'POST'])\ndef processform():\n\n types = []\n codelist = request.form.getlist('incidents', type=int)\n for c in codelist:\n typ = db.session.query(models.Type).filter(models.Type.id == 
c).first()\n types.append(typ)\n\n loc = int(request.form['location'])\n location = db.session.query(models.Location).filter(models.Location.id == loc).first()\n\n month = int(request.form['month dropdown'])\n day = int(request.form['day dropdown'])\n year = int(request.form['year dropdown'])\n hour = int(request.form['hour dropdown'])\n ampm = int(request.form['AM/PM dropdown'])\n if ampm == 1:\n if hour != 12:\n hour = hour + 12\n else:\n if hour == 12:\n hour = 00\n minute = int(request.form['minute dropdown'])\n\n dt = datetime.datetime(year, month, day, hour, minute)\n\n incident_summary = str(request.form['summary field'])\n\n incident = models.Incident(\n datetime=dt,\n summary=incident_summary,\n location_id=location.id,\n types=types\n )\n\n db.session.add(incident)\n db.session.commit()\n\n return redirect(url_for('blotter'))\n\n@app.route('/blottersearch', methods=['GET', 'POST'])\ndef blottersearch():\n \n starthour = int(request.form['start_hour'])\n if request.form['ampm_start']=='am': \n if starthour == 12:\n starthour = 00\n\n if request.form['ampm_start']=='pm' and starthour != 00:\n starthour = starthour + 12 \n\n endhour = int(request.form['end_hour'])\n if request.form['ampm_end'] == 'am':\n if endhour == 12:\n endhour == 00\n\n if request.form['ampm_end'] == 'pm' and endhour != 00:\n endhour = endhour + 12\n\n startyear = int(request.form['start_year'])\n startmonth = int(request.form['start_month'])\n startday = int(request.form['start_day'])\n\n endyear = int(request.form['end_year'])\n endmonth = int(request.form['end_month'])\n endday = int(request.form['end_day']) \n\n today = datetime.datetime.today()\n\n if startyear == 0000:\n startyear = today.year - 1\n if startmonth == 0:\n startmonth = today.month\n if startday == 0:\n startday = today.day\n if endyear == 0000:\n endyear = today.year\n if endmonth == 0:\n endmonth = today.month\n if endday == 0:\n endday = today.day\n\n startdate= datetime.datetime(startyear,\n startmonth,\n startday,\n 0,0)\n\n enddate= datetime.datetime(endyear,\n endmonth,\n endday,\n 23,59)\n\n starttime=datetime.datetime(2000,1,1,starthour,\n int(request.form['start_minute']))\n\n endtime=datetime.datetime(2015,12,28,endhour,\n int(request.form['end_minute']))\n\n datefilter = []\n query = db.session.query(models.Incident).order_by(desc(models.Incident.datetime)).all()\n for q in query:\n if q.datetime.date()>=startdate.date():\n if q.datetime.date() <= enddate.date():\n datefilter.append(q)\n\n timefilter = []\n for q in query:\n if q.datetime.time()>=starttime.time():\n if q.datetime.time() <= endtime.time():\n timefilter.append(q)\n\n result = [val for val in datefilter if val in timefilter]\n\n if request.form['dynamicLocation']!='0':\n locs = request.form.getlist('dynamicLocation',type=int)\n print(locs)\n locs = list(set(locs))\n print(locs)\n locationfilter = []\n for l in locs:\n for q in query:\n if l == q.location_id:\n locationfilter.append(q)\n for i in query:\n groups = i.location.locgroups\n for g in groups:\n if g.name == q.location.name:\n locationfilter.append(i)\n result=list(set(result).intersection(locationfilter))\n\n if request.form['incidents']!='0':\n types = request.form.getlist('incidents',type=int)\n print(types)\n types = list(set(types))\n print(types)\n typefilter=[]\n if request.form['and_or'] == 'or':\n for q in query:\n inctypes = q.types\n for i in inctypes:\n for t in types:\n if i.id == t:\n typefilter.append(q)\n else:\n for q in query:\n ttypes = []\n for r in q.types:\n tid = r.id\n 
ttypes.append(tid)\n if set(types) <= set(ttypes):\n typefilter.append(q)\n ttypes = []\n\n\n result=list(set(result).intersection(typefilter))\n\n if request.form['search textarea']!='':\n slist = request.form['search textarea'].split()\n sstring = \"|\".join(slist)\n summaryfilter = db.session.query(models.Incident).filter(db.func.to_tsvector(models.Incident.summary).match(sstring)).all()\n result=list(set(result).intersection(summaryfilter))\n result = sorted(result, key= lambda incident: incident.datetime, reverse = True)\n return render_template('blottersearch.html', result = result)\n\n\n@app.route('/edit', methods=['GET', 'POST'])\ndef edit():\n return render_template('edit.html')\n\n@app.route('/edit/form', methods=['GET', 'POST'])\ndef editform():\n result = int(request.form['id'])\n incident = db.session.query(models.Incident).filter(models.Incident.id == result).one()\n dt = incident.datetime\n month = dt.strftime(\"%B\")\n hour = dt.strftime(\"%H\")\n minute = dt.strftime(\"%M\")\n types = db.session.query(models.Type).all()\n locations = db.session.query(models.Location).all()\n return render_template('editform.html', incident = incident, locations = locations, types = types, month = month, minute = minute, hour = hour)\n\n@app.route('/edit/submit', methods=['GET','POST'])\ndef editsubmit():\n incid = int(request.form['id'])\n inc = db.session.query(models.Incident).filter(models.Incident.id == incid).one()\n location_id = int(request.form['location'])\n location = db.session.query(models.Location).filter(models.Location.id == location_id).one()\n type_ids = request.form.getlist('incidents', type=int)\n types = []\n for t in type_ids:\n typ = db.session.query(models.Type).filter(models.Type.id == t).one()\n types.append(typ)\n summary = request.form['summary field']\n year = int(request.form['year dropdown'])\n month = int(request.form['month dropdown'])\n day = int(request.form['day dropdown'])\n hour = int(request.form['hour dropdown'])\n if request.form['AM/PM dropdown'] == '1':\n if hour != 12:\n hour = hour + 12\n else:\n if hour == 12:\n hour = 00\n minute = int(request.form['minute dropdown'])\n dt = datetime.datetime(year, month, day, hour, minute)\n setattr(inc, 'location', location)\n setattr(inc, 'datetime', dt)\n setattr(inc, 'summary', summary)\n setattr(inc, 'types', types)\n print(inc)\n db.session.commit()\n return redirect(url_for('blotter'))\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"blop/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563213414","text":"from urllib.request import urlopen\nimport json\nimport requests\n\nsearch_term = \"ES-2620\"\nkey = 'MikeJohn-Search-PRD-578731904-721e4441'\nurl = ('http://svcs.ebay.com/services/search/FindingService/v1\\\n?OPERATION-NAME=findItemsByKeywords\\\n&SERVICE-VERSION=1.0.0\\\n&SECURITY-APPNAME=' + key +\n'&RESPONSE-DATA-FORMAT=JSON\\\n&REST-PAYLOAD\\\n&itemFilter(0).name=Condition\\\n&itemFilter(0).value=New\\\n&itemFilter(1).name=MaxPrice\\\n&itemFilter(1).value=500.0\\\n&itemFilter(1).paramName=Currency\\\n&itemFilter(1).paramValue=USD\\\n&keywords=' + search_term)\n\nprint(url)\n\napiResult = requests.get(url)\nparsedoc = apiResult.json()\nprint(parsedoc)\nfor item in (parsedoc[\"findItemsByKeywordsResponse\"][0][\"searchResult\"][0][\"item\"]):\n title = item[\"title\"][0]\n condition = item[\"condition\"][0][\"conditionDisplayName\"][0]\n price = 
item[\"sellingStatus\"][0][\"convertedCurrentPrice\"][0][\"__value__\"]\n print(title + \", \" + price + \", \" + condition)\n","sub_path":"ebayTest1.py","file_name":"ebayTest1.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"422209494","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# suma _liczb.py\n\n\n\ndef main(args):\n \n a = 0\n p = int(input(\"Podaj początek sumowanego zakresu\"))\n k = int(input(\"Podaj koniec sumowanego zakresu\"))\n\n for i in range(p , k + 1):\n print(i)\n a = a + i\n print (\"Suma = \", a)\n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"python/suma _liczb.py","file_name":"suma _liczb.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"401284557","text":"from functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .module.rnn import LSTM\nfrom .module.embedding import Embedding\nfrom evaluate import perplexity\n\n\nclass LSTMLanguageModel(nn.Module):\n def __init__(self, ntoken, nwe, nhid_rnn, nlayers_rnn, dropoute, dropouti, dropoutl, dropouth, dropouto,\n tied_weights, padding_idx, nwords):\n super().__init__()\n # attributes\n self.ntoken = ntoken\n self.tied_weights = tied_weights\n self.nwords = nwords\n self.padding_idx = padding_idx\n # language model modules\n self.word_embedding = Embedding(ntoken, nwe, dropout=dropouti, dropoute=dropoute, padding_idx=self.padding_idx)\n self.lstm = LSTM(nwe, nhid_rnn, nhid_rnn, nlayers_rnn, 0., dropoutl, dropouth, dropouto)\n self.decoder = nn.Linear(nhid_rnn, ntoken)\n if self.tied_weights:\n self.decoder.weight = self.word_embedding.weight\n # init\n self._init()\n\n def _init(self):\n self.word_embedding.weight.data.uniform_(-0.1, 0.1)\n self.word_embedding.weight.data[self.padding_idx] = 0\n self.decoder.bias.data.zero_()\n\n def forward(self, text, hidden=None):\n emb = self.word_embedding(text)\n output, hidden = self.lstm(emb, hidden)\n return self.decoder(output), hidden\n\n def closure(self, text, target, timestep, optimizer, config):\n optimizer.zero_grad()\n # language model\n output, _ = self.forward(text)\n # nll\n nll = F.cross_entropy(output.view(-1, self.ntoken), target.view(-1), ignore_index=self.padding_idx)\n # rescaled elbo\n loss = nll\n # backward\n loss.backward()\n # step\n optimizer.step()\n # logs\n logs = {}\n logs['loss'] = loss.item()\n logs['ppl'] = perplexity(nll.item())\n return logs\n\n def evaluate(self, text, *args, **kwargs):\n output, _ = self.forward(text)\n return output\n\n def get_optimizer(self, lr, wd):\n return torch.optim.Adam(self.parameters(), lr=lr, weight_decay=wd)\n","sub_path":"model/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"490214254","text":"# BJ 2121 : 넷이 놀기 / SILVER III / 1272ms\n\nimport sys\n\nn = int(sys.stdin.readline().strip())\n\na, b = map(int, sys.stdin.readline().strip().split(' '))\n\npoints = list()\nfor _ in range(n):\n point = tuple(map(int, sys.stdin.readline().strip().split(' ')))\n points.append(point)\n\npoints_set = set(points)\n\ncnt = 0\n\nfor i in range(n):\n x1, y1 = points[i]\n\n p2 = (x1 + a, y1)\n p3 = (x1, y1 + b)\n p4 = (x1 + a, y1 + b)\n\n if (p2 in points_set) and (p3 in points_set) and (p4 in 
points_set):\n cnt += 1\n\nprint(cnt)\n\n\n'''\nNOTE:\n\npython의 set도 해시의 일종이라는 사실을 알았다!!\n\n순간 tuple이랑 헷갈려서 좀 막혔는데 이 기회에 개념을 확실히 익혔당 오예\n\n'''","sub_path":"season2/season2/week1/chaewon/2121.py","file_name":"2121.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499734956","text":"from django.test import TestCase\n\nimport features\nfrom .utils import *\nfrom .models import *\n\n\nclass UtilsTest(TestCase):\n fixtures = ['test.json', 'features']\n\n def setUp(self):\n Feature.objects.update(enabled=True)\n\n def test_allenabled(self):\n '''\n Everything should be enabled.\n '''\n for f in Feature.objects.all():\n self.assertTrue(is_enabled(f.title))\n\n def test_noexistant(self):\n '''\n If a feature slug is not found, it should be enabled.\n '''\n self.assertTrue(is_enabled('this_doesnt_exist'))\n\n\nclass ModelTest(TestCase):\n def test_createnew(self):\n '''\n Test all model functionality\n '''\n f = Feature.objects.create(title='Test')\n self.assertEquals(f.slug, 'test')\n self.assertTrue(f.enabled)\n self.assertEquals(unicode(f), 'Test is enabled.')\n f.enabled = False\n f.save()\n self.assertEquals(unicode(f), 'Test is disabled.')\n\n def test_dependencies(self):\n '''\n Test all model functionality\n '''\n f = Feature.objects.create(title='Test')\n f2 = Feature.objects.create(title='Depends_on_test')\n f2.depends_on.add(f)\n f2.save()\n self.assertTrue(f.is_enabled)\n self.assertTrue(f2.is_enabled)\n f.enabled = False\n f.save()\n self.assertTrue(f2.enabled)\n self.assertTrue(not f2.is_enabled)\n","sub_path":"features/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"276798588","text":"\n# Creation of summation class\nclass Summation(object):\n\n # Initializes an instance of the Summation class\n def __init__ (self,number_stop):\n self.number = number_stop\n\n # Function that sums a list of numbers from 1 to (including) the users input\n def sum (self):\n total = 0\n for num in range(self.number):\n total += num\n total = total + (num + 1)\n return total\n\n# User input that is stored in the Summation class\nif __name__ == \"__main__\":\n user_number = Summation(int(input(\"Please enter a number to which you would like to sum: \")))\n print(user_number.sum())","sub_path":"Assignments/Assignment 4/Assgnment 4.py","file_name":"Assgnment 4.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"400582413","text":"from odoo import models,fields,api,_ \nfrom odoo.exceptions import ValidationError\nfrom . 
import amount_to_text as amount_to_text_ar\n\nclass Clearance(models.Model):\n\t_name = 'ratification.petty.cash.clearance'\n\t_description = 'Petty Cash Clearance'\n\t_order = 'id desc'\n\t_rec_name = 'ratification_id'\n\t_inherit = ['mail.thread','mail.activity.mixin', 'portal.mixin']\n\n\tname = fields.Char(string='Description', related='ratification_id.the_name',track_visibility='always')\n\tratification_id = fields.Many2one('ratification.ratification', string='Petty Cash',track_visibility='always')\n\tpartner_id = fields.Many2one('res.partner', string='Employee', related='ratification_id.partner_id',track_visibility='always')\n\tdate = fields.Date(related='ratification_id.date',track_visibility='always')\n\tnet_amount = fields.Float(related='ratification_id.net_amount', string='Net Amount',track_visibility='always')\n\tnet_amount_in_words = fields.Char(related='ratification_id.net_amount_in_words',track_visibility='always')\n\tpetty_cash_amount = fields.Float(related='ratification_id.petty_cash_amount')\n\n\t\n\tjournal_id = fields.Many2one('account.journal', string='Clearance Bank/Cash', domain=[('type','in',['bank','cash'])],track_visibility='always')\n\tclearance_date = fields.Date(default=fields.date.today(),track_visibility='always')\n\n\tremaining_amount = fields.Float(compute='compute_remaining_amount',track_visibility='always')\n\tremaining_journal_id = fields.Many2one('account.journal', string='Return Amount to Bank/Cash', domain=[('type','in',['bank','cash'])],track_visibility='always')\n\tremaining_account_id = fields.Many2one('account.account', string='Return Amount to Account',track_visibility='always')\n\n\tclipboard_number = fields.Char(string='Clipboard Number/39 Receipt Number',track_visibility='always')\n\tstate = fields.Selection( [('draft','Draft'),('cleared','Cleared'),('canceled','Canceled')],default='draft' ,track_visibility='always')\n\n\tline_ids = fields.One2many('ratification.petty.cash.clearance.line','clearance_id')\n\n\tbranch_id = fields.Many2one('res.company', string='Branch',default= lambda self:self.env.user.company_id.id,track_visibility='always')\n\tcompany_id = fields.Many2one('res.company', default= lambda self:self.env.user.company_id.id)\n\n\n\t\n\n\n\n\t@api.multi\n\t@api.onchange('ratification_id')\n\tdef onchange_ratification(self):\n\t\tfor record in self:\n\t\t\tif record.ratification_id:\n\t\t\t\tfor payment in self.env['ratification.payment'].search([('ratification_id','=',record.ratification_id.id)]):\n\t\t\t\t\trecord.journal_id = payment.journal_id\n\n\t@api.multi\n\t@api.depends('line_ids')\n\tdef compute_remaining_amount(self):\n\t\tfor record in self:\n\t\t\tlines_amount = 0\n\t\t\tfor line in record.line_ids:\n\t\t\t\tlines_amount = lines_amount + line.amount\n\t\t\t# if lines_amount > record.net_amount:\n\t\t\t# \traise ValidationError(_('Details Can not be Bigger than Petty Cash Amount'))\n\t\t\tif lines_amount < record.net_amount:\n\t\t\t\trecord.remaining_amount = record.petty_cash_amount - lines_amount\n\n\t@api.multi\n\tdef show_journal_moves(self):\n\t\treturn {\n\t\t\t'type' : 'ir.actions.act_window',\n\t\t\t'view_mode' : 'tree,form',\n\t\t\t'res_model' : 'account.move',\n\t\t\t'domain' : [('petty_cash_clearance_id','=', self.id )],\n\t\t}\n\n\n\t@api.multi\n\tdef show_bank_cash_money_supply(self):\n\t\taccount_id = False\n\t\tfor line in self.ratification_id.line_ids:\n\t\t\tif line.ratification_type == 'petty_cash':\n\t\t\t\taccount_id = line.account_id.id\n\n\t\treturn {\n\t\t\t'type' : 
'ir.actions.act_window',\n\t\t\t'view_mode' : 'tree,form',\n\t\t\t'res_model' : 'money.supply',\n\t\t\t'domain' : [('petty_cash_clearance_id','=', self.id )],\n\t\t\t'context' : {'default_petty_cash_clearance_id': self.id, \n\t\t\t\t\t'default_amount' : self.remaining_amount,\n\t\t\t\t\t'default_name' : self.ratification_id.name,\n\t\t\t\t\t'default_partner_id' : self.partner_id.id,\n\t\t\t\t\t'default_account_id' : account_id,\n\t\t\t\t\t}\n\t\t}\n\n\n\n\t\n\n\n\n\t@api.multi\n\tdef do_cancel(self):\n\t\tfor move in self.env['account.move'].search([('petty_cash_clearance_id','=',self.id)]):\n\t\t\tmove.button_cancel()\n\t\t\tmove.unlink()\n\t\t\tself.ratification_id.is_petty_cash_cleared = False\n\t\tself.state = 'canceled'\n\n\n\t@api.multi\n\tdef do_clear(self):\n\t\tfor record in self:\n\t\t\t# if len(record.line_ids) < 1:\n\t\t\t# \traise ValidationError(_('Please Insert Petty cash Details'))\n\n\n\t\t\tlines_amount = 0\n\t\t\tlines_net_amount = 0\n\t\t\tfor line in record.line_ids:\n\t\t\t\tlines_amount = lines_amount + line.amount\n\t\t\t\tlines_net_amount = lines_net_amount + line.net_amount\n\t\t\tif lines_amount > record.net_amount:\n\t\t\t\traise ValidationError(_('Details Can not be Bigger than Petty Cash Amount'))\n\t\t\n\n\t\t\tfor line in record.line_ids:\n\t\t\t\tif line.analytic_account_id:\n\t\t\t\t\tself.env['crossovered.budget'].budget_operations(do_just_check=True, do_reserve=False, do_actual=False, budget_item=line.analytic_account_id, account=line.account_id,amount=line.amount,date=record.date)\n\n\n\t\t\tpetty_cash_account = False\n\t\t\tfor ratification_line in record.ratification_id.line_ids:\n\t\t\t\tif ratification_line.ratification_type == 'petty_cash':\n\t\t\t\t\tpetty_cash_account = ratification_line.account_id\n\n\t\t\tif not petty_cash_account:\n\t\t\t\traise ValidationError(_('Please Make Sure that Petty Cash Type is exists in the Ratification'))\n\n\t\t\tmove_id = self.env['account.move'].create({\n\t\t\t\t'name' : record.name,\n\t\t\t\t'date' : record.date,\n\t\t\t\t'journal_id': record.journal_id.id,\n\t\t\t\t'company_id' : record.branch_id.id,\n\t\t\t\t'petty_cash_clearance_id' : record.id,\n\t\t\t\t})\n\n\t\t\tmove_lines = []\n\t\t\tmove_lines.append((0,0,{\n\t\t\t\t'account_id': petty_cash_account.id ,\n\t\t\t\t'partner_id' : record.partner_id.id ,\n\t\t\t\t'name': record.name ,\n\t\t\t\t'credit' : lines_net_amount ,\n\t\t\t\t'debit' : 0 ,\n\t\t\t\t'move_id' : move_id.id,\n\t\t\t\t}) )\n\n\t\t\tif record.remaining_amount > 0:\n\t\t\t\tmove_lines.append((0,0,{\n\t\t\t\t\t'account_id': petty_cash_account.id,\n\t\t\t\t\t'partner_id' : record.partner_id.id ,\n\t\t\t\t\t'name': record.name ,\n\t\t\t\t\t'credit' : record.remaining_amount ,\n\t\t\t\t\t'debit' : 0 ,\n\t\t\t\t\t'move_id' : move_id.id,\n\t\t\t\t\t}) )\n\n\t\t\t\tmove_lines.append((0,0,{\n\t\t\t\t\t'account_id': record.remaining_journal_id.default_debit_account_id.id,\n\t\t\t\t\t'partner_id' : record.partner_id.id ,\n\t\t\t\t\t'name': record.name ,\n\t\t\t\t\t'debit' : record.remaining_amount ,\n\t\t\t\t\t'credit' : 0 ,\n\t\t\t\t\t'move_id' : move_id.id,\n\t\t\t\t\t}) )\n\t\t\t\t\n\n\t\t\tfor line in record.line_ids:\n\t\t\t\t\n\t\t\t\tfor tax in line.deduction_ids:\n\t\t\t\t\ttax_amount = 0\n\t\t\t\t\tif tax.amount_type == 'fixed':\n\t\t\t\t\t\ttax_amount = tax.amount\n\t\t\t\t\tif tax.amount_type == 'percent':\n\t\t\t\t\t\ttax_amount = (tax.amount * line.amount / 100)\n\t\t\t\t\tmove_lines.append((0,0,{\n\t\t\t\t\t\t'account_id': tax.account_id.id ,\n\t\t\t\t\t\t'name': tax.name 
,\n\t\t\t\t\t\t'credit' : tax_amount,\n\t\t\t\t\t\t'debit' : 0 ,\n\t\t\t\t\t\t'move_id' : move_id.id,\n\t\t\t\t\t\t}) )\n\n\t\t\t\tmove_lines.append((0,0,{\n\t\t\t\t\t'account_id': line.account_id.id ,\n\t\t\t\t\t'partner_id' : record.partner_id.id ,\n\t\t\t\t\t'name': record.name ,\n\t\t\t\t\t'credit' : 0 ,\n\t\t\t\t\t'debit' : line.amount ,\n\t\t\t\t\t'analytic_account_id' :line.analytic_account_id.id,\n\t\t\t\t\t'analytic_tag_ids' : [(6,0, line.analytic_tag_ids._ids )],\n\t\t\t\t\t'cost_center_id' : line.cost_center_id.id,\n\t\t\t\t\t'move_id' : move_id.id,\n\t\t\t\t\t}))\n\n\t\t\tmove_id.line_ids = move_lines\n\t\t\tmove_id.post()\n\t\t\trecord.state = 'cleared'\n\t\t\trecord.ratification_id.is_petty_cash_cleared = True\n\n\nclass ClearanceLine(models.Model):\n\t_name = 'ratification.petty.cash.clearance.line'\n\n\tname = fields.Char(string='Description')\n\tanalytic_account_id = fields.Many2one('account.analytic.account',string='Budget Item')\n\taccount_id = fields.Many2one('account.account', string='Account')\n\tcost_center_id = fields.Many2one('kamil.account.cost.center', string='Cost Center')\n\tanalytic_tag_ids = fields.Many2many('account.analytic.tag',string='Analytic Tags')\n\tamount = fields.Float()\n\tclearance_id = fields.Many2one('ratification.petty.cash.clearance')\n\tbranch_id = fields.Many2one('res.company', string='Branch',default= lambda self:self.env.user.company_id.id,track_visibility='always')\n\n\tdeduction_ids = fields.Many2many('account.tax', string='Deductions')\n\n\tdeduction_amount = fields.Float(compute='compute_deduction_amount')\n\tnet_amount = fields.Float(compute='compute_deduction_amount')\n\tcompany_id = fields.Many2one('res.company', default= lambda self:self.env.user.company_id.id)\n\t\n\n\n\t@api.multi\n\t@api.depends('deduction_ids','amount')\n\tdef compute_deduction_amount(self):\n\t\tfor record in self:\n\t\t\ttax_amount = 0\n\t\t\tfor tax in record.deduction_ids:\n\t\t\t\tif tax.amount_type == 'fixed':\n\t\t\t\t\ttax_amount = tax_amount + tax.amount\n\t\t\t\tif tax.amount_type == 'percent':\n\t\t\t\t\ttax_amount = tax_amount + (tax.amount * record.amount / 100)\n\t\t\trecord.deduction_amount = tax_amount\n\t\t\trecord.net_amount = record.amount - record.deduction_amount\n\n\t\n\t@api.multi\n\t@api.onchange('account_id')\n\tdef onchange_account_id(self):\n\t\tif self.account_id:\n\t\t\tself.analytic_account_id = self.account_id.parent_budget_item_id \n\n\nclass AccountMove(models.Model):\n\t_inherit = 'account.move'\n\t\n\tpetty_cash_clearance_id = fields.Many2one('ratification.petty.cash.clearance')\n\nclass Ratification(models.Model):\n\t_inherit = 'ratification.ratification'\n\n\tis_petty_cash_cleared = fields.Boolean(default=False)\n\n\nclass MoneySupply(models.Model):\n\t_inherit = 'money.supply'\n\n\tpetty_cash_clearance_id = fields.Many2one('ratification.petty.cash.clearance')","sub_path":"kamil_accounting_financial_ratification/models/petty_cash_clearance.py","file_name":"petty_cash_clearance.py","file_ext":"py","file_size_in_byte":9179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"574115619","text":"from django.db import models\n\nfrom service_api.models.articles import Article\nfrom service_api.models.disciplines import Game\n\n\nclass Author(models.Model):\n id = models.BigAutoField(primary_key=True)\n platform_id = models.IntegerField(null=False)\n platform_author_id = models.CharField(max_length=255, null=False)\n name = models.CharField(max_length=255, null=False)\n home_url = 
models.CharField(max_length=255)\n thumbnail_url = models.CharField(max_length=1024)\n enabled = models.BooleanField(null=False)\n view_count = models.BigIntegerField()\n comment_count = models.BigIntegerField()\n subscriber_count = models.BigIntegerField()\n video_count = models.IntegerField()\n modified_at = models.DateTimeField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n db_table = 'video_authors'\n managed = False\n\n\nclass VideoPlatform(models.Model):\n id = models.IntegerField(primary_key=True)\n name = models.CharField(max_length=255, null=False)\n created_at = models.DateTimeField(null=False)\n\n class Meta:\n db_table = 'video_platforms'\n managed = False\n\n\nclass Video(models.Model):\n id = models.BigAutoField(primary_key=True)\n author = models.ForeignKey('Author', on_delete=False)\n platform = models.ForeignKey('VideoPlatform', on_delete=False)\n platform_video_id = models.CharField(max_length=255, null=False)\n platform_category_id = models.IntegerField()\n title = models.TextField(null=False)\n original_title = models.TextField(null=False)\n thumbnail_url = models.CharField(max_length=1024)\n enabled = models.BooleanField(null=False)\n original_url = models.CharField(max_length=1024)\n mp4_url = models.CharField(max_length=1024)\n duration = models.IntegerField()\n width = models.IntegerField(default=640)\n height = models.IntegerField(default=360)\n view_count = models.BigIntegerField()\n like_count = models.BigIntegerField()\n dislike_count = models.BigIntegerField()\n favorite_count = models.BigIntegerField()\n comment_count = models.BigIntegerField()\n modified_at = models.DateTimeField(null=False)\n published_at = models.DateTimeField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n db_table = 'videos'\n managed = False\n\n def __str__(self):\n return self.title\n\n\nclass VideoText(models.Model):\n video = models.OneToOneField('Video', related_name='text', primary_key=True, on_delete=False)\n text = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = 'video_texts'\n managed = False\n\n\nclass VideoAttribute(models.Model):\n video = models.OneToOneField(Video, related_name='attribute', primary_key=True, on_delete=False)\n article = models.ForeignKey(Article, on_delete=False)\n game = models.ForeignKey(Game, on_delete=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n db_table = 'video_attributes'\n managed = False\n","sub_path":"service_api/models/videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"551488642","text":"#!/usr/bin/env python\n\"\"\"\nan experimental python implementation of the picaso algorithm. expects a multi-\nshell diffusion-weighted image and mask as input. 
this implementation will\nautomatically use all CPU cores available on your system.\n\noutputs a 4D image file (.nii.gz or .nrrd) with the following sub-bricks:\n[disturb_per, disturb_par, diff_per, diff_par].\n\"\"\"\n\nimport os, sys\nimport datetime\nimport argparse\nimport logging\nimport warnings\nimport numpy as np\nimport nrrd as nd # also known as pynrrd\nimport nibabel as nib\nfrom scipy.optimize import least_squares\nfrom multiprocessing import Pool\n\n\nlogging.basicConfig(level=logging.WARN, format=\"[%(name)s] %(levelname)s: %(message)s\")\nlogger = logging.getLogger(os.path.basename(__file__))\n\n\nclass DiffusionData:\n \"\"\"Contains the various attributes of the input diffusion data\"\"\"\n def __init__(self, filename, maskname, normalize=True):\n\n self.filename = filename # inputs\n self.maskname = maskname #\n self.data = None # images\n self.mask = None #\n self.clipmask = None #\n self.header = None # metadata\n self.affine = None #\n self.gradients = None #\n self.b = None #\n self.x = None #\n self.y = None #\n self.z = None #\n self.n = None #\n self.idx_b0 = None #\n self.filetype = None # state variables\n self.normalized = False #\n self.clipped = False #\n\n # returns gradients, index of b0 volumes, index of mask,\n # data and mask in x,y,z,n orientation\n if self._is_nrrd():\n self._import_nrrd()\n else:\n self._import_nifti()\n\n self.data = self.data.astype(np.float)\n\n if normalize:\n self._normalize() # divide all directions by b0\n self._clip() # remove impossible vales (all data 0 < x < 1)\n\n\n def _is_nrrd(self):\n if '.nrrd' in os.path.splitext(self.filename)[1]:\n return True\n else:\n return False\n\n\n def _import_nrrd(self):\n \"\"\"imports nrrd data using pynrrd\"\"\"\n self.filetype = 'nrrd'\n self.data, self.header = nd.read(self.filename)\n self.mask, _ = nd.read(self.maskname)\n\n # import diffusion matrix, gradient matrix, bval\n self.n, self.x, self.y, self.z = self.data.shape\n\n # converts the gradients as formatted in the nrrd header as a n_gradient\n # by direction (xyz) matrix.\n gradient_keys = self.header['keyvaluepairs'].keys()\n gradient_keys.sort()\n gradient_keys = filter(lambda x: 'gradient' in x, gradient_keys)\n\n self.gradients = []\n for key in gradient_keys:\n # converts whitespace delimited list to numpy vector\n self.gradients.append(\n np.array(self.header['keyvaluepairs'][key].split()).astype(np.float))\n self.gradients = np.vstack(self.gradients)\n\n # find b0 volumes\n self.idx_b0 = np.zeros(len(self.gradients)).astype(np.bool)\n for i, gradient in enumerate(self.gradients):\n if np.sum(np.abs(i)) == 0:\n self.idx_b0[i] = 1\n else:\n self.idx_b0[i] = 0\n\n # get bvalue from header, divided by 1000 (not clear why)\n self.b = float(self.header['keyvaluepairs']['DWMRI_b-value'])/1000\n\n # orientation: voxels x directions\n self.data = np.reshape(self.data, (self.n, self.x*self.y*self.z)).T\n self.mask = np.reshape(self.mask, (1, self.x*self.y*self.z)).T\n\n\n def _import_nifti(self):\n \"\"\"imports nifti data using nibabel\"\"\"\n self.filetype = 'nifti'\n file_nib = nib.load(self.filename)\n mask_nib = nib.load(self.maskname)\n\n self.data = file_nib.get_data()\n self.header = file_nib.header\n self.affine = file_nib.affine\n self.mask = mask_nib.get_data()\n\n self.x, self.y, self.z, self.n = file_nib.shape\n\n # import gradients and bvals, assume only name difference is extension\n if self.filename.endswith('nii.gz'):\n stem = os.path.splitext(os.path.splitext(self.filename)[0])[0]\n else:\n stem = 
os.path.splitext(self.filename)[0]\n\n self.gradients = np.genfromtxt('{}.bvec'.format(stem))\n self.b = np.genfromtxt('{}.bval'.format(stem)) / 1000 # not clear why we divide\n self.idx_b0 = ~self.b.astype(np.bool)\n\n # orientation: voxels x directions\n self.data = np.reshape(self.data, (self.x*self.y*self.z, self.n))\n self.mask = np.reshape(self.mask, (self.x*self.y*self.z, 1))\n\n\n def _normalize(self):\n \"\"\"\n takes the mean value across all b0 volumes, and then normalizes all\n diffusion values by the b0 estimate. Removes b0 volumes and gradients.\n \"\"\"\n if not self.normalized:\n # seperate b0 mean from gradient direction volumes\n b0_mean = np.mean(self.data[:, self.idx_b0], axis=1)\n self.data = self.data[:, ~self.idx_b0]\n self.gradients = self.gradients[~self.idx_b0, :]\n self.b = self.b[~self.idx_b0]\n\n # stop annoying divide by zero warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # normalize gradient direction volumes by b0 mean\n self.data = self.data / np.atleast_2d(b0_mean).T\n\n # set all infs and nans to 0\n self.data[np.isinf(self.data)] = 0\n self.data[np.isnan(self.data)] = 0\n\n self.normalized = True\n\n\n def _clip(self):\n \"\"\"clips all values to be 0 < x < 1\"\"\"\n if not self.clipped:\n self.clipmask = np.zeros((self.x*self.y*self.z, self.data.shape[1]))\n self.data[(self.mask == 0).flatten(), :] = 0 # mask non-brain regions\n\n idx = self.data > 1\n logger.debug('removing {} values > 1'.format(np.sum(idx)))\n self.data[idx] = 1\n self.clipmask[idx] = 1\n\n idx = self.data < 0\n logger.debug('removing {} values < 0'.format(np.sum(idx)))\n self.data[idx] = 0\n self.clipmask[idx] = 1\n\n self.clipmask = np.atleast_2d(np.sum(self.clipmask, axis=1)).T\n self.clipped = True\n\n\n def write(self, data, filename):\n if self.filetype == 'nifti':\n try:\n data = np.reshape(data, (self.x, self.y, self.z, data.shape[1]))\n except:\n raise IndexError('input data does not have the same x,y,z dimensions as input DWI file')\n\n output = nib.nifti1.Nifti1Image(data, self.affine, self.header)\n output.update_header()\n output.header_class(extensions=())\n output.to_filename(filename)\n\n\ndef null(a, rtol=1e-5):\n \"\"\"\n is an orthonormal basis for the null space of A obtained from the singular\n value decomposition. That is, a.dot(n) has negligible elements,\n np.shape(n, 2) is the nullity of a, and n.T.dot(n) = I.\n \"\"\"\n u, s, v = np.linalg.svd(a)\n rank = (s > rtol*s[0]).sum()\n n = v[rank:].T.copy()\n\n return n\n\n\ndef estimate_tensor(x, g):\n \"\"\"\n computes DTI tensor using q values and the normalized dMRI signals.\n details in: Introduction to diffusion tensor imaging mathematics: part III.\n Tensor calculation, noise, simulations, and optimization. Peter B. Kingsley.\n 2005. Concepts in Magnetic Resonance Part A, Vol 28A(2)\n\n x = input diffusion weightings for a voxel\n g = normalized gradient components\n \"\"\"\n idx = np.where(x > 1e-4)[0] # get rid of extremely small diff weighted vols\n x = x[idx] #\n g = g[idx, :] #\n\n # apparent diffusion coefficients (ADCs), eqn 8\n x = -np.log(x)\n\n # represents tensor [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz], eqn 10\n d = np.array([g[:, 0]**2,\n g[:, 1]**2,\n g[:, 2]**2,\n g[:, 0]*g[:, 1],\n g[:, 0]*g[:, 2],\n g[:, 1]*g[:, 2]]).T\n\n # TODO: DOUBLE CHECK THIS LINE\n # calculate pseudoinverse of H, multiply with data s, eqn. 
37\n # collapses X gradient directions down to 9 numbers that represent cardinal\n # directions x,y,z\n d = np.linalg.pinv(d.T.dot(d)).dot(d.T).dot(np.atleast_2d(x).T)\n d = np.real(d)\n\n # initial diffusion matrix\n D = np.array([[d[0], d[3], d[4]],\n [d[3], d[1], d[5]],\n [d[4], d[5], d[2]]])\n\n # find the eigenvalues, eigenvectors of d\n # NB: order of outputs from eig is flipped w.r.t. MATLAB\n e, U = np.linalg.eig(D[:, :, 0]) # I have numpy matrix issues, hence the 0\n e[e < 5e-6] = 5e-6 # ensure all values are positive (clip near zero)\n e = np.diag(e) # makes square with off-diagnonal of zero\n\n # final pseudoinverse of H, eqn. 47\n D = U.dot(e).dot(U.T)\n\n return D\n\n\ndef bloch_to_rrey_signal(x, g, b, u):\n \"\"\"TODO: get docstring from Lipeng\"\"\"\n u_perp = null(np.atleast_2d(u))\n U = np.hstack((np.atleast_2d(u).T, u_perp))\n U2D = U.dot(np.diag([x[0], x[1], x[1]])).dot(U.T)\n diff = U.dot(np.diag([x[2], x[3], x[3]])).dot(U.T)\n signal = np.sum(g.dot(U2D)*g, axis=1) + (1-np.sum(g.dot(U2D)*g, axis=1)) * np.exp(-b*np.sum(g.dot(diff)*g, axis=1))\n\n return signal\n\n\ndef bloch_wrapper(x, x_subj, g, b, u):\n \"\"\"\n designed to be fed to least_squares for nonlinear fit\n returns error between fit estimate and real data\n x: x estimate for least_squares fitting\n x_subj: the voxel's diffusion data\n g: the gradients for all directions (non-zero b's only)\n b: the b values for all directions (non-zero b's only)\n u: the first eigenvector from the single-tensor estimation\n \"\"\"\n return(bloch_to_rrey_signal(x, g, b, u) - x_subj)\n\n\ndef picaso(args):\n \"\"\"\n arglist = [x, g, b]\n accepts x (normalized diffusion signal from one voxel),\n g (gradient direction vectors), and\n b (a vector of b-values)\n returns ?\n\n Computes the 'structural disturbance' or 'axon density and volume' measure\n as well as the mean diffusivity in the directions parallel and perpendicular\n to the fiber orientation. 
(U2_parallel, U2_perp, D_parallel, D_perp).\n \"\"\"\n x = args[0]\n g = args[1]\n b = args[2]\n\n g = np.tile(np.sqrt(b), (3, 1)).T * g # modulates gradients by sqrt of bval\n T = estimate_tensor(x, g)\n U, S, V = np.linalg.svd(T)\n u = U[:, 0] # first eig\n\n # nonlinear lsq fit between initial paramaters and the actual diffusion data\n X0 = np.array([0.1, 0.1, 0.5, 0.5]) # column vector\n lb = np.array([0, 0, 0, 0]) #\n ub = np.array([1, 1, 3, 3]) #\n # check loss function -- currently using 'linear' but other methods that are\n # robust to outliers might help here.\n fit = least_squares(bloch_wrapper, X0, bounds=(lb, ub), args=(x, g, b, u))\n f_coef = fit['x']\n\n u_perp = null(np.atleast_2d(u))\n U = np.hstack((np.atleast_2d(u).T, u_perp))\n U2 = U.dot(np.diag([f_coef[0]*f_coef[2], f_coef[1]*f_coef[3], f_coef[1]*f_coef[3]])).dot(U.T)\n diff = U.dot(np.diag([f_coef[2], f_coef[3], f_coef[3]])).dot(U.T)\n s_estimate = bloch_to_rrey_signal(f_coef, g, b, u)\n\n return(U2, diff, f_coef, s_estimate)\n\n\ndef main(filename, maskname, outputname):\n\n logger.debug('loading {} and {}'.format(\n os.path.basename(filename), os.path.basename(maskname)))\n # normalizes and clips input data by default\n dmri = DiffusionData(filename, maskname)\n\n logger.debug('initializing output arrays')\n disturb_per = np.zeros((dmri.x*dmri.y*dmri.z, 1))\n disturb_par = np.zeros((dmri.x*dmri.y*dmri.z, 1))\n diff_per = np.zeros((dmri.x*dmri.y*dmri.z, 1))\n diff_par = np.zeros((dmri.x*dmri.y*dmri.z, 1))\n\n idx = np.where(dmri.mask == 1)[0]\n n_voxels = len(idx)\n\n logger.debug('initializing parallel pool')\n pool = Pool()\n\n # gather all inputs for picaso\n arglist = []\n for i, voxel in enumerate(idx):\n arglist.append([dmri.data[voxel, :], dmri.gradients, dmri.b])\n\n # run picaso across all available cores using map\n try:\n logger.debug('beginning picaso estimation on all voxels')\n start = datetime.datetime.now()\n # http://xcodest.me/interrupt-the-python-multiprocessing-pool-in-graceful-way.html\n # https://stackoverflow.com/questions/35908987/python-multiprocessing-map-vs-map-async\n picaso_outputs = np.array(pool.map_async(picaso, arglist).get(9999999))\n pool.close()\n end = datetime.datetime.now()\n elapsed = end-start\n logger.debug('calculated picaso on all voxels in {} minutes'.format(elapsed.total_seconds()/60.0))\n except KeyboardInterrupt:\n pool.terminate()\n pool.join()\n sys.exit(1)\n\n # distribute outputs to output arrays\n for i, voxel in enumerate(idx):\n f_coef = picaso_outputs[i][2]\n disturb_per[voxel] = f_coef[1]*f_coef[3]\n disturb_par[voxel] = f_coef[0]*f_coef[2]\n diff_per[voxel] = f_coef[3]\n diff_par[voxel] = f_coef[2]\n\n # output data has 4 3D volumes: (disturb_per, disturb_par, diff_per, diff_par)\n output_data = np.hstack((disturb_per, disturb_par, diff_per, diff_par))\n dmri.write(output_data, outputname)\n\n\nif __name__ == \"__main__\":\n\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument('dwi', help='diffusion-weighted image (multi-shell)')\n argparser.add_argument('mask', help='mask files (same dimensions as dwi)')\n argparser.add_argument('output', help='output filename')\n argparser.add_argument('-v', '--verbose', action='count', help='turns on debug messages')\n args = argparser.parse_args()\n\n # set debugging\n logger.info('starting')\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n\n main(args.dwi, args.mask, 
args.output)\n\n","sub_path":"pycasso.py","file_name":"pycasso.py","file_ext":"py","file_size_in_byte":14024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"340444927","text":"import os\nimport itertools\nimport cStringIO\n\nimport dulwich, dulwich.patch\nfrom diff import prepare_udiff\n\ndef pairwise(iterable):\n \"\"\"\n Yields the items in `iterable` pairwise:\n\n >>> list(pairwise(['a', 'b', 'c', 'd']))\n [('a', 'b'), ('b', 'c'), ('c', 'd')]\n \"\"\"\n prev = None\n for item in iterable:\n if prev is not None:\n yield prev, item\n prev = item\n\nclass RepoWrapper(dulwich.repo.Repo):\n def get_branch_or_commit(self, id):\n \"\"\"\n Returns a `(commit_object, is_branch)` tuple for the commit or branch\n identified by `id`.\n \"\"\"\n try:\n return self[id], False\n except KeyError:\n return self.get_branch(id), True\n\n def get_branch(self, name):\n \"\"\" Returns the commit object pointed to by the branch `name`. \"\"\"\n return self['refs/heads/'+name]\n\n def get_default_branch(self):\n return self.get_branch('master')\n\n def get_branch_names(self, exclude=()):\n \"\"\" Returns a sorted list of branch names. \"\"\"\n branches = []\n for ref in self.get_refs():\n if ref.startswith('refs/heads/'):\n name = ref[len('refs/heads/'):]\n if name not in exclude:\n branches.append(name)\n branches.sort()\n return branches\n\n def get_tag_names(self):\n \"\"\" Returns a sorted list of tag names. \"\"\"\n tags = []\n for ref in self.get_refs():\n if ref.startswith('refs/tags/'):\n tags.append(ref[len('refs/tags/'):])\n tags.sort()\n return tags\n\n def history(self, commit=None, path=None, max_commits=None, skip=0):\n \"\"\"\n Returns a list of all commits that infected `path`, starting at branch\n or commit `commit`. `skip` can be used for pagination, `max_commits`\n to limit the number of commits returned.\n\n Similar to `git log [branch/commit] [--skip skip] [-n max_commits]`.\n \"\"\"\n if not isinstance(commit, dulwich.objects.Commit):\n commit, _ = self.get_branch_or_commit(commit)\n commits = self._history(commit)\n path = path.strip('/')\n if path:\n commits = (c1 for c1, c2 in pairwise(commits)\n if self._path_changed_between(path, c1, c2))\n return list(itertools.islice(commits, skip, skip+max_commits))\n\n def _history(self, commit):\n \"\"\" Yields all commits that lead to `commit`. \"\"\"\n if commit is None:\n commit = self.get_default_branch()\n while commit.parents:\n yield commit\n commit = self[commit.parents[0]]\n yield commit\n\n def _path_changed_between(self, path, commit1, commit2):\n \"\"\"\n Returns `True` if `path` changed between `commit1` and `commit2`,\n including the case that the file was added or deleted in `commit2`.\n \"\"\"\n path, filename = os.path.split(path)\n try:\n blob1 = self.get_tree(commit1, path)\n if not isinstance(blob1, dulwich.objects.Tree):\n return True\n blob1 = blob1[filename]\n except KeyError:\n blob1 = None\n try:\n blob2 = self.get_tree(commit2, path)\n if not isinstance(blob2, dulwich.objects.Tree):\n return True\n blob2 = blob2[filename]\n except KeyError:\n blob2 = None\n if blob1 is None and blob2 is None:\n # file present in neither tree\n return False\n return blob1 != blob2\n\n def get_tree(self, commit, path, noblobs=False):\n \"\"\" Returns the Git tree object for `path` at `commit`. 
\"\"\"\n tree = self[commit.tree]\n if path:\n for directory in path.strip('/').split('/'):\n if directory:\n tree = self[tree[directory][1]]\n return tree\n\n def commit_diff(self, commit):\n from klaus import guess_is_binary, force_unicode\n\n if commit.parents:\n parent_tree = self[commit.parents[0]].tree\n else:\n parent_tree = None\n\n changes = self.object_store.tree_changes(parent_tree, commit.tree)\n for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in changes:\n try:\n if newsha and guess_is_binary(self[newsha].chunked) or \\\n oldsha and guess_is_binary(self[oldsha].chunked):\n yield {\n 'is_binary': True,\n 'old_filename': oldpath or '/dev/null',\n 'new_filename': newpath or '/dev/null',\n 'chunks': [[{'line' : 'Binary diff not shown'}]]\n }\n continue\n except KeyError:\n # newsha/oldsha are probably related to submodules.\n # Dulwich will handle that.\n pass\n\n stringio = cStringIO.StringIO()\n dulwich.patch.write_object_diff(stringio, self.object_store,\n (oldpath, oldmode, oldsha),\n (newpath, newmode, newsha))\n files = prepare_udiff(force_unicode(stringio.getvalue()),\n want_header=False)\n if not files:\n # the diff module doesn't handle deletions/additions\n # of empty files correctly.\n yield {\n 'old_filename': oldpath or '/dev/null',\n 'new_filename': newpath or '/dev/null',\n 'chunks': []\n }\n else:\n yield files[0]\n\n\ndef Repo(name, path, _cache={}):\n repo = _cache.get(path)\n if repo is None:\n repo = _cache[path] = RepoWrapper(path)\n repo.name = name\n return repo\n","sub_path":"repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"326206448","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import ORJSONResponse\n\nfrom .resources import shutdown, startup\nfrom .routers import projects\n\nrouters = [\n projects.router,\n]\n\norigins = [\n \"http://localhost:8080\",\n]\n\n\ndef create_app() -> FastAPI:\n\n app = FastAPI(default_response_class=ORJSONResponse)\n for router in routers:\n app.include_router(router)\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n @app.on_event('startup')\n async def startup_event():\n await startup()\n\n @app.on_event('shutdown')\n async def shutdown_event():\n await shutdown()\n\n return app\n","sub_path":"backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"423106995","text":"# -*- coding: utf-8 -*-\n# !/usr/bin python\n\"\"\"\n@author: fzj\n@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.\n@time: 20200320 11:13\n@desc: 此脚本用于生成数据仓库中的日期维度表\n\"\"\"\n\nimport logging\nfrom datetime import datetime, timedelta\n\nimport pymysql\n\n\ndef get_console_logger(level=logging.INFO,\n log_format='%(asctime)s-%(filename)s[line:%(lineno)d]-%(levelname)s: %(message)s'):\n \"\"\"python的log日志,此logger只会将日志打印到控制台\"\"\"\n logger = logging.getLogger()\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n formatter = logging.Formatter(log_format)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\nlogger = get_console_logger()\n\n\ndef get_conn(host=\"127.0.0.1\", user=\"root\", password=\"root\", database=\"mysql\"):\n conn = 
pymysql.connect(host=host, user=user, password=password, database=database, charset=\"utf8\")\n return conn\n\n\ndef create_table(conn):\n drop_table_sql = \"DROP TABLE IF EXISTS `dim_date`\"\n cursor = conn.cursor()\n logger.info(drop_table_sql)\n cursor.execute(drop_table_sql)\n create_table_sql = \"\"\"\n CREATE TABLE `dim_date` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `ymd` varchar(25) NOT NULL COMMENT 'yyyymmdd',\n `ymd_long_desc` varchar(25) NOT NULL COMMENT 'yyyy年mm月dd日',\n `ymd_short_desc` varchar(25) NOT NULL COMMENT 'yyyy-mm-dd',\n `ymd_dt` timestamp NOT NULL COMMENT '时间戳类型',\n `ym` varchar(25) NOT NULL COMMENT 'yyyymm',\n `y` varchar(25) NOT NULL COMMENT 'yyyy',\n `m` varchar(25) NOT NULL COMMENT 'mm',\n `d` varchar(25) NOT NULL COMMENT 'dd',\n `w_y` int(11) NOT NULL COMMENT '当年的第几周',\n `w` int(11) NOT NULL COMMENT '周几(周日=7)',\n `q` int(11) NOT NULL COMMENT '第几季度',\n PRIMARY KEY (`id`)\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='日期维度表';\n \"\"\"\n logger.info(create_table_sql)\n cursor.execute(create_table_sql)\n cursor.close()\n\n\ndef insert_date(conn, str_start_time=\"20100101\", str_end_time=\"20351231\", str_parse=\"%Y%m%d\"):\n insert_sql = \"insert into `dim_date` ( `ymd`,`ymd_long_desc`,`ymd_short_desc`, `ymd_dt` ,`ym` ,`y`,`m`,`d`,`w_y`,`w`,`q` ) values \"\n values = []\n end_time = datetime.strptime(str_end_time, str_parse)\n start_time = datetime.strptime(str_start_time, str_parse)\n cursor = conn.cursor()\n while True:\n if start_time > end_time:\n break\n ymd = start_time.strftime(\"%Y%m%d\")\n y = ymd[0:4]\n m = ymd[4:6]\n d = ymd[6:8]\n ymd_long_desc = f\"{y}年{m}月{d}日\"\n ymd_short_desc = f\"{y}-{m}-{d}\"\n ymd_dt = start_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n ym = y + m\n isocalendar = start_time.isocalendar()\n w_y = isocalendar[1]\n w = isocalendar[2]\n q = (start_time.month - 1) // 3 + 1\n value = f\"('{ymd}', '{ymd_long_desc}','{ymd_short_desc}', '{ymd_dt}', '{ym}', '{y}', '{m}', '{d}', '{w_y}','{w}', '{q}')\"\n values.append(value)\n start_time = start_time + timedelta(days=1)\n if len(values) > 1000:\n sql = insert_sql + \",\".join(values)\n logging.info(sql)\n cursor.execute(sql)\n values = []\n sql = insert_sql + \",\".join(values)\n logging.info(sql)\n cursor.execute(sql)\n conn.commit()\n cursor.close()\n\n\nif __name__ == '__main__':\n conn = get_conn(database=\"fosun\")\n logger.info(\"获取mysql连接成功\")\n create_table(conn)\n logger.info(\"创建日期维度表成功: dim_date\")\n insert_date(conn)\n logger.info(\"填充数据成功\")\n conn.close()\n","sub_path":"scripts/dim_date.py","file_name":"dim_date.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"567713272","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.stattools import kpss\nfrom statsmodels.tsa.stattools import adfuller\n\n#Import Data\ndata = pd.read_csv('DAXFTSE.csv',sep=';').drop(columns=['Name'])\n\n#ADF function which inputs a dataframe, calculates test statistics and \n#assigns * if result is significant at 5% level, outputs marked values of\n#ADF test statistic\n\ndef ADF(df,maxlag,regression):\n\tres=[]\n\tfor col in df.columns:\n\t\ttest_results=adfuller(df[col],maxlag=maxlag,regression=regression)\n\t\tpvalue=test_results[1]\n\t\tif pvalue<=0.05:#test 
decision\n\t\t\ttest_stat=str(test_results[0].round(4))+'*'\n\t\telse:\n\t\t\ttest_stat=str(test_results[0].round(4))\n\t\tres.append([test_stat])\n\treturn(pd.DataFrame(res,columns=['ADF_'+str(maxlag)+'_'+str(regression)]))\n\n#KPSS function which inputs a dataframe, calculates test statistics and \n#assigns * if result is significant at 5% level, outputs marked values of\n#KPSS test statistic\ndef KPSS(df,lags,regression):\n\tres=[]\n\tfor col in df.columns:\n\t\ttest_results=kpss(df[col],lags=lags,regression=regression)\n\t\tpvalue=test_results[1]\n\t\tif pvalue<=0.05:#test decision\n\t\t\ttest_stat=str(test_results[0].round(4))+'*'\n\t\telse:\n\t\t\ttest_stat=str(test_results[0])\n\t\tres.append([test_stat])\n\treturn(pd.DataFrame(res,columns=['KPSS_'+str(lags)+'_'+str(regression)]))\n\n#ignore warnings from statsmodels package related to the displayed p-value\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n#\n\nADF_summary=pd.concat([ADF(data,0,'c'),ADF(data,4,'c'),ADF(data,0,'ct'),ADF(data,4,'ct')],axis=1)\nADF_summary.index=data.columns\n\nKPSS_summary=pd.concat([KPSS(data,8,'c'),KPSS(data,12,'c'),KPSS(data,8,'ct'),KPSS(data,12,'ct')],axis=1)\nKPSS_summary.index=data.columns\npd.set_option('display.max_columns',10)\nprint(ADF_summary)\nprint(KPSS_summary)\n","sub_path":"SFEAdfKpss/SFEAdfKpss.py","file_name":"SFEAdfKpss.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"633233153","text":"from image.image_in_set import *\nfrom ann.ann import *\nfrom live_feed.live_feed import *\nfrom pymongo import errors\n\n\nclass CapturedFrame(ImageInSet):\n\n #ann_model = InceptionResnetV1(classify=True, pretrained='vggface2', num_classes=len(id_to_name_dict_load.keys()))\n #ann_model.load_state_dict(torch.load(\"ann_model.pth\",map_location=torch.device(\"cpu\")))\n ann_model = NewNet(num_classes=len(id_to_name_dict_load.keys()))\n ann_model.load_state_dict(torch.load('..\\\\ann\\\\ann_model.pth',map_location=torch.device(\"cpu\")))\n\n\n def __init__(self,values):\n self.values=values\n self.name=\"\"\n self.path=None\n self.face_indexes=None\n self.face_image=None\n self.face_detected=False\n self.id_detected=None\n self.recognition_probability=None\n\n\n def set_face_image(self,live_feed):\n self.face_indexes=self.get_face_indexes()\n self.face_detected=True if (self.face_indexes is not None) and not (isinstance(self.face_indexes, type(None))) else False\n if self.face_detected:\n #self.save(\"face_image_temp.jpg\")\n self.face_image = self.get_face_image(self.face_indexes)\n #os.remove(\"face_image_temp.jpg\")\n #self.face_image=self.values[int(indexes_box[1]):int(indexes_box[3]), int(indexes_box[0]):int(indexes_box[2])]\n live_feed.number_of_faces_detected+=1\n else:\n live_feed.number_of_face_not_detected += 1\n\n\n def identify(self):\n if not self.face_detected:\n raise FrameException(\"face must be detected in order to perform identification\")\n img = self.norm_without_aug()\n with torch.no_grad():\n CapturedFrame.ann_model.eval()\n output = CapturedFrame.ann_model(img)\n self.recognition_probability=float(torch.max(F.softmax(output,dim=1),1)[0].item())\n self.id_detected = int(torch.max(F.softmax(output, dim=1), 1)[1].item())\n print(\"recognition probability: \" + str(self.recognition_probability))\n\n\n def set_name(self,id_to_name_dict):\n if self.id_detected is None:\n raise FrameException(\"id must be detected in order to set name\")\n 
self.name=id_to_name_dict[self.id_detected]\n\n\n def save_image_to_db(self,db,number_of_employee_images=None):\n if number_of_employee_images is None:\n number_of_employee_images=db.images_collection.count_documents({\"employee_id\":self.id_detected})\n if len(str(number_of_employee_images)):\")\ntab = []\npodz = []\nnotpodz = []\nfor i in range (0,1000):\n tab.append(random.randrange(1,50))\ndef even():\n for i in tab:\n if i%2 == 0:\n podz.append(i)\n result = (len(podz)/1000)*100\n return result\nprint(\"Liczby parzyste to: \",even(),\"%\")\ndef noteven():\n for i in tab:\n if i%2 != 0:\n notpodz.append(i)\n result = (len(notpodz)/1000)*100\n return result\nprint(\"Liczby nieparzyste to: \",noteven(),\"%\")","sub_path":"04-Subroutines/After Class/zad.30.py","file_name":"zad.30.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577821804","text":"import sys\n\nfrom oauth2client import client\nfrom googleapiclient import sample_tools\n\ndef main(argv):\n \n # Authenticate and construct service.\n service, flags = sample_tools.init(\n argv, 'calendar', 'v3', __doc__, __file__,\n scope='https://www.googleapis.com/auth/calendar')\n \n try:\n \n page_token = None\n \n calendar = \"ecor0om461gru7koq0bmujv10c@group.calendar.google.com\"\n \n service.calendars().delete(calendarId=calendar).execute()\n \n print('Calendar {} is deleted.').format(calendar)\n \n except client.AccessTokenRefreshError:\n print ('The credentials have been revoked or expired, please re-run'\n 'the application to re-authorize.')\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"code/delete_calendar.py","file_name":"delete_calendar.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"449322149","text":"def factorial(n):\n total = 1\n for num in range(1, n+1):\n total *= num\n return total\n\n#main\n#help with my math factorial questions\nfor n in range(1,10):\n n_fact = factorial(n)\n print(str(n) + \"! is \" + str(n_fact)) \n","sub_path":"week of 10-14/factorial_function.py","file_name":"factorial_function.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"475248465","text":"# Desenvolva um programa que leia o primeiro termo e a razão de uma PA. 
\r\n# No final, mostre os 10 primeiros termos dessa progressão.\r\n\r\nprimTermo=int(input('Digite o primeiro termo inteiro: '))\r\nrazao=int(input('Digite a razao: '))\r\n\r\ndecimoTermo= primTermo+(10-1)*razao\r\n\r\nfor i in range(primTermo,decimoTermo+razao,razao):\r\n print(i,end=' -> ')\r\n\r\nprint('ACABOU')","sub_path":"ex051_ProgressaoAritmetica.py","file_name":"ex051_ProgressaoAritmetica.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"341292761","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django import forms\nfrom .models import Course\nfrom .models import Student\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, logout\n# Create your views here.\nclass NewTaskForm(forms.Form):\n task = forms.CharField(label=\"New Task\")\n\ndef index(request):\n return render(request, \"subjects/test.html\", {\n \"subjects\": Course.objects.all()\n })\n\n\ndef course(request, course_idsubject):\n course = Course.objects.get(idsubject=course_idsubject)\n return render(request, \"subjects/course.html\", {\n \"course\": course\n })\n\ndef test(request):\n return render(request, \"subjects/test.html\", {\n \"subjects\": Course.objects.all()\n })\n\ndef student(request, student_first):\n student = Student.objects.get(first=student_first)\n return render(request, \"subjects/student.html\", {\n \"student\": student\n })\n\ndef login_view(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request,username=username, password=password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"users/login.html\", {\n \"message\": \"Invalid credentials\"\n })\n return render(request, \"users/login.html\")\n\ndef logout_view(request):\n logout(request)\n return render(request, \"users/login.html\", {\n \"message\": \"Logged out.\"\n })\n\ndef studentinfo(request,sID):\n print(sID)\n student_info = Student.objects.get(sID=sID)\n class_info = Course.objects.filter(attendStd = student_info)\n non_classinfo = Course.objects.exclude(attendStd = student_info).all()\n logged = logout_view\n context = {'student_info': student_info, \"class_info\":class_info, \"non_classinfo\":non_classinfo, \"logged1\":logged,}\n\n return context\n\ndef add(request):\n if request.method == \"POST\":\n addsubject = request.POST[\"Course.object.first()\"]\n addsubject.append(addsubject)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return HttpResponseRedirect(reverse(\"index\"))","sub_path":"subjects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585897734","text":"import struct\nimport numpy as np\n\ndef Proc_Scop_Wav(Waveform):\n ##\n Gain_Bin=struct.pack('bbbb',Waveform[156],Waveform[157],Waveform[158],Waveform[159])\n Vertical_Gain=struct.unpack('f', Gain_Bin)[0]\n ##\n Offset_Bin=struct.pack('bbbb',Waveform[160],Waveform[161],Waveform[162],Waveform[163])\n Vertical_Offset=struct.unpack('f', Offset_Bin)[0]\n ##\n Hor_Int_Bin=struct.pack('bbbb',Waveform[176],Waveform[177],Waveform[178],Waveform[179])\n Horiz_Interval=struct.unpack('f', Hor_Int_Bin)[0]\n ##\n 
Hor_off_Bin=struct.pack('bbbbbbbb',Waveform[180],Waveform[181],Waveform[182],Waveform[183]\n ,Waveform[184],Waveform[185],Waveform[186],Waveform[187])\n Hor_offset=struct.unpack('d', Hor_off_Bin)[0]\n ##\n Data=np.array(Waveform[346:])\n Voltage=Data*Vertical_Gain-Vertical_Offset\n ##\n Time=np.linspace(Hor_offset,Hor_offset+(len(Voltage)-1)*Horiz_Interval\n ,len(Voltage))\n ##\n return [Time,Voltage]\n ","sub_path":"read_scope.py","file_name":"read_scope.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370066331","text":"import datetime\nfrom recipe import Recipe\n\nclass Book:\n\tdef __init__(self, name):\n\t\tif len(name) == 0:\n\t\t\traise Exception(\"Book need name\")\n\t\tself.name = name\n\t\tself.creation_date = datetime.datetime.now()\n\t\tself.last_update = datetime.datetime.now()\n\t\tself.recipe_list = {\n\t\t\t\"starter\" : [],\n\t\t\t\"lunch\" : [],\n\t\t\t\"dessert\" : [],\n\t\t}\n\t\n\tdef is_recipe_exist(self, name):\n\t\tif name in self.recipe_list[\"starter\"]:\n\t\t\traise Exception(\"recipe already exist in starter list\")\n\t\tif name in self.recipe_list[\"lunch\"]:\n\t\t\traise Exception(\"recipe already exist in lunch list\")\n\t\tif name in self.recipe_list[\"dessert\"]:\n\t\t\traise Exception(\"recipe already exist in dessert list\")\n\n\n\tdef get_recipe_by_name(self, name):\n\t\t\"\"\"Print a recipe with the name `name` and return the instance\"\"\"\n\t\tfor item in self.recipe_list[\"starter\"]:\n\t\t\tif item.get_name() == name:\n\t\t\t\tprint(item)\n\t\t\t\treturn\n\t\tfor item in self.recipe_list[\"lunch\"]:\n\t\t\tif item.get_name() == name:\n\t\t\t\tprint(item)\n\t\t\t\treturn\n\t\tfor item in self.recipe_list[\"dessert\"]:\n\t\t\tif item.get_name() == name:\n\t\t\t\tprint(item)\n\t\t\t\treturn \n\n\tdef get_recipes_by_types(self, recipe_type):\n\t\t\"\"\"Get all recipe names for a given recipe_type \"\"\"\n\t\tfor item in self.recipe_list[recipe_type]:\n\t\t\tprint(item.get_name())\n\n\tdef add_recipe(self, recipe):\n\t\t\"\"\"Add a recipe to the book and update last_update\"\"\"\n\t\tif not isinstance(recipe, Recipe):\n\t\t\traise Exception(\"recipe is not an instance of Recipe\")\n\t\tself.recipe_list[recipe.get_recipe_type()].append(recipe)\n\t\tself.last_update = datetime.datetime.now() \n\t\t\n","sub_path":"bootcamp_python/Day01/ex00/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"7329177","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\n\n\nclass Screenshots():\n\n def test1(self):\n driver = webdriver.Chrome()\n driver.get(\"http://letskodeit.teachable.com/\")\n driver.maximize_window()\n driver.implicitly_wait(3)\n\n driver.find_element(By.LINK_TEXT, \"Login\").click()\n driver.find_element(By.ID, \"user_email\").send_keys(\"tralala@gmail.com\")\n driver.find_element(By.ID, \"user_password\").send_keys(\"bambam\")\n driver.find_element(By.XPATH, \"//input[@type='submit']\").click()\n destination_file_name = \"/home/rade/PycharmProjects/BasicActions/Screenshots/test.png\"\n # destinatio\n\n time.sleep(3)\n\n try:\n driver.save_screenshot(destination_file_name)\n print(\"Screenshots saved to directory --> ::\" + destination_file_name)\n except NotADirectoryError:\n print(\"Not a directory issue\")\n\n\n\n\n\n\nff = 
Screenshots()\nff.test1()","sub_path":"Exercises/Taking_Screenshots.py","file_name":"Taking_Screenshots.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"131561184","text":"import pytest\nfrom izigraph import Link, Node\n\n\ndef test_link_can_be_created():\n node1 = Node('node_1')\n node2 = Node('node_2')\n l = Link(node1, node2, weight=10)\n assert isinstance(l, Link)\n\n\ndef test_link_raises_value_error_when_created_with_not_valid_node():\n with pytest.raises(ValueError):\n Link(source=None, destination=None)\n Link(source='node_1', destination='node_2')\n Link(source=123, destination=456)\n Link(source=True, destination=False)\n\n\ndef test_link_raises_value_error_when_created_with_unvalid_weight():\n with pytest.raises(ValueError):\n node1 = Node('node_1')\n node2 = Node('node_2')\n Link(node1, node2, weight='asd')\n Link(node1, node2, weight=None)\n Link(node1, node2, weight=True)\n\n\ndef test_link_can_be_created_with_default_weight():\n node1 = Node('node_1')\n node2 = Node('node_2')\n l = Link(node1, node2)\n assert isinstance(l, Link)\n\n\ndef test_link_can_return_node():\n node1 = Node('node_1')\n node2 = Node('node_2')\n l = Link(node1, node2)\n assert isinstance(l.source(), Node)\n assert isinstance(l.destination(), Node)\n assert l.source().label() == node1.label()\n assert l.destination().label() == node2.label()\n\n\ndef test_link_can_return_weight():\n node1 = Node('node_1')\n node2 = Node('node_2')\n weight = 12345\n l = Link(node1, node2, weight=weight)\n assert l.weight() == weight\n","sub_path":"izigraph/tests/link_test.py","file_name":"link_test.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"506014440","text":"#!/usr/bin/env python\n\nimport unittest\nimport itertools\n\nimport tdl\n\nclass MapTests(unittest.TestCase):\n\n MAP = (\n '############',\n '# ### #',\n '# ### #',\n '# ### ####',\n '## #### # ##',\n '## ####',\n '############',\n )\n\n WIDTH = len(MAP[0])\n HEIGHT = len(MAP)\n\n POINT_A = (2, 2)\n POINT_B = (9, 2)\n POINT_C = (9, 4)\n\n POINTS_AB = POINT_A + POINT_B\n POINTS_AC = POINT_A + POINT_C\n\n @classmethod\n def map_is_transparant(cls, x, y):\n try:\n return cls.MAP[y][x] == ' '\n except IndexError:\n return False\n\n @classmethod\n def path_cost(cls, src_x, src_y, dest_x, dest_y):\n if cls.map_is_transparant(dest_x, dest_y):\n return 1\n return 0\n\n\n def setUp(self):\n self.map = tdl.map.Map(self.WIDTH, self.HEIGHT)\n for y, line in enumerate(self.MAP):\n for x, ch in enumerate(line):\n trans = ch == ' '\n self.map.transparent[x,y] = self.map.walkable[x,y] = trans\n self.assertEquals(self.map.transparent[x,y], trans)\n self.assertEquals(self.map.walkable[x,y], trans)\n\n def test_map_compute_fov(self):\n fov = self.map.compute_fov(*self.POINT_A)\n self.assertTrue(list(fov), 'should be non-empty')\n fov = self.map.compute_fov(*self.POINT_A, fov='PERMISSIVE8')\n self.assertTrue(list(fov), 'should be non-empty')\n with self.assertRaises(tdl.TDLError):\n self.map.compute_fov(*self.POINT_A, fov='invalid option')\n\n def test_map_compute_path(self):\n self.assertTrue(self.map.compute_path(*self.POINTS_AB),\n 'should be non-empty')\n self.assertFalse(self.map.compute_path(*self.POINTS_AC),\n 'invalid path should return an empty list')\n\n def test_map_specials(self):\n for x,y in self.map:\n self.assertTrue((x, y) in self.map)\n self.assertFalse((-1, -1) 
in self.map)\n\n def test_quick_fov(self):\n fov = tdl.map.quick_fov(self.POINT_B[0], self.POINT_B[1],\n self.map_is_transparant, radius=2.5)\n self.assertTrue(fov, 'should be non-empty')\n\n def test_bresenham(self):\n for x1, x2, y1, y2 in itertools.permutations([-4, -2, 4, 4], 4):\n self.assertTrue(tdl.map.bresenham(x1, x2, y1, y2),\n 'should be non-empty')\n\n def test_astar(self):\n pathfinder = tdl.map.AStar(self.WIDTH, self.HEIGHT,\n self.map_is_transparant)\n self.assertTrue(pathfinder.get_path(*self.POINTS_AB))\n\n pathfinder = tdl.map.AStar(self.WIDTH, self.HEIGHT,\n self.path_cost, None, True)\n self.assertTrue(pathfinder.get_path(*self.POINTS_AB))\n self.assertFalse(pathfinder.get_path(*self.POINTS_AC),\n 'invalid path should return an empty list')\n","sub_path":"tests/test_map.py","file_name":"test_map.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"99131345","text":"from math import cos, atan2, pi, radians, copysign\n\nfrom rlbot.agents.base_agent import SimpleControllerState\nfrom rlutilities.linear_algebra import vec3, dot, norm, xy, look_at\nfrom rlutilities.mechanics import Drive as RLUDrive, AerialTurn\n\nfrom util import sign, cap\n\n\nclass CustomDrive:\n\n def __init__(self, car):\n self.car = car\n self.target = vec3(0, 0, 0)\n self.speed = 2300\n self.controls = SimpleControllerState()\n self.finished = False\n self.rlu_drive = RLUDrive(self.car)\n self.update_rlu_drive()\n self.power_turn = True # Handbrake while reversing to turn around quickly\n self.aerial_turn = AerialTurn(car)\n self.kickoff = False\n\n def step(self, dt: float):\n self.speed = abs(self.speed)\n car_to_target = (self.target - self.car.location)\n local_target = dot(car_to_target, self.car.rotation)\n angle = atan2(local_target[1], local_target[0])\n vel = norm(self.car.velocity)\n in_air = (not self.car.on_ground)\n on_wall = (self.car.location[2] > 250 and not in_air)\n\n reverse = (cos(angle) < 0 and not (on_wall or in_air or self.kickoff))\n\n get_off_wall = (on_wall and local_target[2] > 450)\n if get_off_wall:\n car_to_target[2] = -self.car.location[2]\n local_target = dot(car_to_target, self.car.rotation)\n angle = atan2(local_target[1], local_target[0])\n\n max_speed = self.determine_max_speed(local_target)\n\n self.update_rlu_drive(reverse, max_speed)\n self.rlu_drive.step(dt)\n self.finished = self.rlu_drive.finished\n\n self.controls = self.rlu_drive.controls\n self.controls.handbrake = False\n \n if reverse:\n angle = -invert_angle(angle)\n if self.power_turn and not on_wall:\n angle *= -1\n self.controls.handbrake = (vel > 200)\n self.controls.steer = cap(angle * 3, -1, 1)\n self.controls.boost = False\n if not self.controls.handbrake:\n self.controls.handbrake = (abs(angle) > radians(70) and vel > 500 and not on_wall)\n if self.controls.handbrake:\n self.controls.handbrake = (dot(self.car.velocity, car_to_target) > -150)\n\n if in_air:\n self.aerial_turn.target = look_at(xy(car_to_target), vec3(0, 0, 1))\n self.aerial_turn.step(dt)\n aerial_turn_controls = self.aerial_turn.controls\n self.controls.pitch = aerial_turn_controls.pitch\n self.controls.yaw = aerial_turn_controls.yaw\n self.controls.roll = aerial_turn_controls.roll\n self.controls.boost = False\n\n def update_rlu_drive(self, reverse: bool = False, max_speed: float = 2200):\n self.target = self.target\n self.rlu_drive.target = self.target\n self.rlu_drive.speed = cap(self.speed * (-1 if reverse else 1), -max_speed, 
max_speed)\n\n def determine_max_speed(self, local_target):\n low = 100\n high = 2200\n if self.kickoff:\n return high\n for i in range(5):\n mid = (low + high) / 2\n radius = (1 / RLUDrive.max_turning_curvature(mid))\n local_circle = vec3(0, copysign(radius, local_target[1]), 0)\n dist = norm(local_circle - xy(local_target))\n if dist < radius:\n high = mid\n else:\n low = mid\n return high\n\n\ndef invert_angle(angle: float):\n return angle - sign(angle) * pi\n","sub_path":"bot/custom_drive.py","file_name":"custom_drive.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"61413121","text":"__author__= \"Rohit Ravishankar\"\n__email__= \"rr9105@rit.edu\"\n\nimport sys\nfrom DataPreProcessing import *\nfrom train import *\n\n\ndef main():\n \"\"\"\n Main calling function of the program\n :return: None\n \"\"\"\n if sys.argv[1] == 'train':\n build_training_data()\n main_trainer()\n\n if sys.argv[1] == 'predict' and len(sys.argv) > 2:\n from predict import classification\n from AdaboostPredict import decision_stumps\n input_file = open(sys.argv[2])\n data = input_file.readlines()\n print(\"Decision Tree prediction\")\n for i in data:\n print(classification(i, i.strip().split()))\n\n print(\"\\nAdaboost prediction\")\n\n for i in data:\n print(decision_stumps(i, i.strip().split()))\n\n elif sys.argv[1] == 'predict':\n print('Wrong usage for prediction. Please supply a file after predict')\n\n\nif __name__ == '__main__':\n main()","sub_path":"CSCI-630/Codes/FIS_Lab_2/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"326086173","text":"def partition(list_, l, r):\n p_idx = l\n for i in range(l+1, r):\n if list_[p_idx] > list_[i]:\n l += 1\n tmp = list_[l]\n list_[l] = list_[i]\n list_[i] = tmp\n else:\n pass\n tmp = list_[p_idx]\n list_[p_idx] = list_[l]\n list_[l] = tmp\n return l + 1\n\ndef quicksort(list_, l, u):\n if l < u:\n m = partition(list_, l, u)\n quicksort(list_, l, m-1)\n quicksort(list_, m, u)\nlist_ = [31,0,45,2,23,9,21,54,99,32,4,8,5]\nprint(list_)\nquicksort(list_, 0, len(list_))\nprint(list_)\n","sub_path":"old/sort/RQuickSort.py","file_name":"RQuickSort.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"373287226","text":"import os\n\nimport sys\nimport argparse\nparser=argparse.ArgumentParser()\n# parser.add_argument(\"--imclass\",help=\"Image Class\")\nparser.add_argument(\"--image_file\", help=\"Specify ImageFile to be predicted\")\nargs = parser.parse_args()\n\nfrom torch.autograd import Variable\nimport torch.utils.data\nfrom torch.nn import DataParallel\nfrom config import BATCH_SIZE, PROPOSAL_NUM,test_model\nfrom core import model, dataset\nfrom core.utils import progress_bar\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\nif not test_model:\n raise NameError('please set the test_model file to choose the checkpoint!')\n\ntest_model = 'fine_grained.ckpt'\n\n# read dataset\n# print(\"Loading Data\")\ntestset = dataset.Predict_data(image_file=args.image_file, data_len=None)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=8, drop_last=False)\n# print(\"Data Loaded\")\n# define model\nnet = model.attention_net(topN=PROPOSAL_NUM)\nckpt = 
torch.load(test_model)\nnet.load_state_dict(ckpt['net_state_dict'])\nnet = net.cuda()\nnet = DataParallel(net)\ncreterion = torch.nn.CrossEntropyLoss()\n\n\n\nnet.eval()\n\ntest_loss = 0\ntest_correct = 0\ntotal = 0\nout = []\nfor i, data in enumerate(testloader):\n with torch.no_grad():\n img, label = data[0].cuda(), data[1].cuda()\n # print(data[1])\n batch_size = img.size(0)\n _, concat_logits, _, _, _ = net(img)\n # calculate loss\n concat_loss = creterion(concat_logits, label)\n # calculate accuracy\n _, concat_predict = torch.max(concat_logits, 1)\n output = concat_predict.data\n # print(concat_loss.item())\n out += list(output.to(\"cpu\").numpy())\n # total += batch_size\n # test_correct += torch.sum(concat_predict.data == label.data)\n # test_loss += concat_loss.item() * batch_size\n # progress_bar(i, len(testloader), 'eval on test set')\n# out = out + 1\nwith open('class_labels.txt') as f:\n class_labels = f.read().splitlines()\nclass_labels = [i.split(' ')[1] for i in class_labels]\nout = [class_labels[int(i)] for i in out]\n# print(out)\nf3 = open('output.txt', 'a')\nimg_txt_file = [line.rstrip('\\n') for line in open(args.image_file)]\nassert (len(out) == len(img_txt_file))\n\nfor index, label in enumerate(out):\n if (label[0] == 'a'):\n f3.write(img_txt_file[index] + \" \" + \"aircrafts \" + label[1:] + '\\n')\n if (label[0] == 'b'):\n f3.write(img_txt_file[index] + \" \" + \"birds \" + label[1:] + '\\n')\n if (label[0] == 'c'):\n f3.write(img_txt_file[index] + \" \" + \"cars \" + label[1:] + '\\n')\n if (label[0] == 'd'):\n f3.write(img_txt_file[index] + \" \" + \"dogs \" + label[1:] + '\\n')\n if (label[0] == 'f'):\n f3.write(img_txt_file[index] + \" \" + \"flowers \" + label[1:] + '\\n')\n\n","sub_path":"Assignment2/submission/NTS-Net/test_predict.py","file_name":"test_predict.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313228313","text":"from flickrapi import FlickrAPI\r\nfrom urllib.request import urlretrieve\r\nimport os, time, sys\r\nfrom tqdm import tqdm\r\n\r\n# APIキーとシークレットを設定\r\nAPI_KEY = \"XXXXXXXXXXXXXXXXXXXX\"\r\nSECRET = \"XXXXXXXXXXXXXXXXXXXX\"\r\n\r\nPER_PAGE = 300 # ダウンロードする画像の枚数\r\n\r\nwait_time = 1 # インターバル\r\n\r\nif len(sys.argv) < 2:\r\n print('ダウンロードする画像のキーワードが未指定です。')\r\nelse:\r\n # ダウンロードする画像のキーワードを取得する\r\n keyword = sys.argv[1]\r\n savedir = \"./\" + keyword\r\n\r\n # キーワードのフォルダーが無ければ作成\r\n if os.path.exists(savedir) == False:\r\n os.mkdir(keyword)\r\n\r\n # FlickrAPIを使って画像情報を取得する\r\n flickr = FlickrAPI(API_KEY, SECRET, format='parsed-json')\r\n result = flickr.photos.search(\r\n text = keyword, # 画像検索用のキーワード\r\n per_page = PER_PAGE, # 取得する画像の枚数\r\n media = 'photos',\r\n sort = 'relevance', # 「関連度が高い順」を指定\r\n safe_search = 1, # 1=「不適切コンテンツの除外」を指定\r\n extras = 'url_q, licence' # 追加で取得したい情報の指定\r\n )\r\n # プログレスバーの作成\r\n pbar = tqdm(range(len(result['photos']['photo'])))\r\n\r\n # URLが存在しなくてダウンロードできない画像をカウント\r\n er = 0\r\n for photo, i in zip(result['photos']['photo'], pbar):\r\n try:\r\n # 画像のURLを取得\r\n url_q = photo['url_q']\r\n # 保存するファイル名を作成\r\n filepath = savedir + '/' + photo['id'] + '.jpg'\r\n # 既にファイルが存在するときは何もしない\r\n if os.path.exists(filepath): continue\r\n # 画像をダウンロードする\r\n urlretrieve(url_q, filepath)\r\n # インターバルをあける\r\n time.sleep(wait_time)\r\n except:\r\n # 画像のURLが存在しない\r\n er += 1\r\n continue\r\n\r\n # 結果の表示\r\n pbar.update()\r\n print('ダウンロード数:'+str(i+1))\r\n if 0 < er:\r\n 
print('URLの情報無し:'+str(er))\r\n\r\n\r\n","sub_path":"日経DCGAN/getImages.py","file_name":"getImages.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"396705781","text":"import heapq\r\n\r\ndef main():\r\n \r\n list1 = [34, 25, 12, 99, 87, 63, 58, 78, 65, 92]\r\n list2 = [\r\n {'name': 'A', 'shares': 100, 'price': 97.2},\r\n {'name': 'B', 'shares': 80, 'price': 197.22},\r\n {'name': 'C', 'shares': 30, 'price': 96.32},\r\n {'name': 'D', 'shares': 120, 'price': 27.32},\r\n {'name': 'E', 'shares': 70, 'price': 64.91},\r\n {'name': 'F', 'shares': 154, 'price': 77.2}\r\n ]\r\n \r\n print(heapq.nlargest(3, list1))\r\n print(heapq.nsmallest(3, list1))\r\n print(heapq.nlargest(2, list2, key = lambda x: x['price']))\r\n print(heapq.nlargest(2, list2, key = lambda x: x['shares']))\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"python_study/小算法/heapq.py","file_name":"heapq.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"484555732","text":"import sys\n\nimport pandas as pd\nimport requests\nimport json\nimport numpy as np\n\nREG_MAPPING = {\n \"москва\": \"Москва\",\n \"челябинская\": \"Челябинская область\",\n \"орловская\": \"Орловская область\",\n \"омская\": \"Омская область\",\n \"липецкая\": \"Липецкая область\",\n \"курская\": \"Курская область\",\n \"рязанская\": \"Рязанская область\",\n \"брянская\": \"Брянская область\",\n \"кировская\": \"Кировская область\",\n \"архангельская\": \"Архангельская область\",\n \"мурманская\": \"Мурманска�� область\",\n \"санкт\": \"Санкт-Петербург\",\n \"ярославская\": \"Ярославская область\",\n \"ульяновская\": \"Ульяновская область\",\n \"новосибирская\": \"Новосибирская область\",\n \"тюменская\": \"Тюменская область\",\n \"свердловская\": \"Свердловская область\",\n \"новгородская\": \"Новгородская область\",\n \"курганская\": \"Курганская область\",\n \"калининградская\": \"Калининградская область\",\n \"ивановская\": \"Ивановская область\",\n \"астраханская\": \"Астраханская область\",\n \"хабаровский\": \"Хабаровский край\",\n \"чеч\": \"Чеченская республика\",\n \"удмур\": \"Удмуртская республика\",\n \"осет\": \"Республика Северная Осетия\",\n \"мордов\": \"Республика Мордовия\",\n \"карел\": \"Республика Карелия\",\n \"калмык\": \"Республика Калмыкия\",\n \"ингуш\": \"Республика Ингушетия\",\n \"башкор\": \"Республика Башкортостан\",\n \"адыг\": \"Республика Адыгея\",\n \"крым\": \"Республика Крым\",\n \"севастоп\": \"Севастополь\",\n \"коми\": \"Республика Коми\",\n \"пензенская\": \"Пензенская область\",\n \"тамбовская\": \"Тамбовская область\",\n \"ленинградская\": \"Ленинградская область\",\n \"вологодская\": \"Вологодская область\",\n \"костромская\": \"Костромская область\",\n \"псковская\": \"Псковская область\",\n \"ямало\": \"Ямало-Ненецкий АО\",\n \"воронежская\": \"Воронежская область\",\n \"чукот\": \"Чукотский АО\",\n \"еврей\": \"Еврейская автономская область\",\n \"тыва\": \"Республика Тыва\",\n \"сахалин\": \"Сахалинская область\",\n \"амур\": \"Амурская область\",\n \"бурят\": \"Республика Бурятия\",\n \"хакас\": \"Республика Хакасия\",\n \"кемеровская\": \"Кемеровская область\",\n \"алтайский\": \"Алтайский край\",\n \"алтай\": \"Республика Алтай\",\n \"дагест\": \"Республика Дагестан\",\n \"балкар\": \"Кабардино-Балкарская республика\",\n \"черкес\": \"Карачаевая-Черкесская республика\",\n \"краснодарский\": 
\"Краснодарский край\",\n \"ростовская\": \"Ростовская область\",\n \"самарская\": \"Самарская область\",\n \"татарстан\": \"Республика Татарстан\",\n \"марий\": \"Республика Марий Эл\",\n \"чуваш\": \"Чувашская республика\",\n \"нижегород\": \"Нижегородская область\",\n \"владимирс\": \"Владимирская область\",\n \"владимиро\": \"Владимирская область\",\n \"московская\": \"Московская область\",\n \"калужская\": \"Калужская область\",\n \"белгородская\": \"Белгородская область\",\n \"забайкальский\": \"Забайкальский край\",\n \"приморский\": \"Приморский край\",\n \"камчатский\": \"Камачатский край\",\n \"магаданская\": \"Магаданская область\",\n \"саха\": \"Республика Саха\",\n \"красноярский\": \"Красноярский край\",\n \"оренбургская\": \"Оренбургская область\",\n \"саратовская\": \"Саратовская область\",\n \"волгоградская\": \"Волгоградская область\",\n \"ставропольский\": \"Ставропольский край\",\n \"смоленская\": \"Смоленская область\",\n \"тверская\": \"Тверская область\",\n \"пермская\": \"Пермский край\",\n \"пермский\": \"Пермский край\",\n \"ханты\": \"Ханты-Мансийский АО\",\n \"томская\": \"Томская облас��ь\",\n \"иркутская\": \"Иркутская область\",\n \"ненецкий\": \"Ненецскй АО\",\n \"тульская\": \"Тульская область\"\n}\n\n\ndef download_resource(label, index):\n headers = {'Content-Type': 'application/json;charset=UTF-8', }\n data = '{\"withFederalDistricts\":false,\"serviceType\":\"' + f'{label}' + '\",\"territories\":[\"' + f'{index}' + '\"],\"territoryCategory\":\"ADMINISTRATIVE\",\"operationYearFrom\":1700,\"operationYearTo\":2020}'\n \n response = requests.post('https://dom.gosuslugi.ru/interactive-reports/api/rest/services/commonMetersReport/table',\n headers=headers, data=data)\n resource = response.json()\n return resource\n\ndef convert_resource(resource):\n table_raw = resource[0]['children']\n data = []\n for area in table_raw:\n indicators = []\n indicators.append(area['territory']['name'])\n indicators.append(area['housesWithServiceType'])\n indicators.append(area['housesWithDevices'])\n indicators.append(area['percentHouseWithDevices'])\n data.append(indicators)\n return data\n\ndef fun(former_name):\n lower = former_name.lower()\n for key in REG_MAPPING.keys():\n if key in lower:\n return REG_MAPPING[key]\n print(f\"Failed to map {former_name}\")\n return former_name\n\n\ncolumns = ['territory', 'housesWithServiceType', 'housesWithDevices', 'percentHouseWithDevices']\nfor label in ['COLD_WATER', 'HOT_WATER', 'ELECTRICITY', 'THERMAL_ENERGY']:\n data_array = []\n for x in range(1, 77):\n resource = download_resource(label, str(x).zfill(2))\n area_array = convert_resource(resource)\n name = fun(resource[0]['territory']['name'])\n data_array.append({name:area_array})\n \n dictionary = {'columns': columns, 'data': data_array}\n \n with open(f\"zhkh_cities_{label}.json\", 'wt', encoding='utf-8') as f:\n json.dump(dictionary, f, ensure_ascii=False, indent=2)\n","sub_path":"download_zhkh_cities.py","file_name":"download_zhkh_cities.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"177907900","text":"# -- coding: utf-8 --\n#!python3\n\n#^^^^ Keep this line if you plan to use french characters (even in comments)\n#CBonsoir à tous,\n\n#En conclusion du cours de ce matin voici des ressources pour approfondir les concepts abordés:\n#- API http://readwrite.com/2013/09/19/api-defined, http://www.quora.com/In-laymans-terms-what-is-an-API-1\n#- Verbes HTTP: 
http://micheltriana.com/2013/09/30/http-verbs-in-a-rest-web-api/\n#- JSON http://stackoverflow.com/questions/383692/what-is-json-and-why-would-i-use-it\n#- OAuth https://developers.google.com/identity/protocols/OAuth2\n#- Un must see pour montrer les potentialités de Pandas https://vimeo.com/59324550\n\n#L'exercice pour le cours prochain est le suivant: \n#- Récupérer via crawling la liste des 256 top contributors sur cette page https://gist.github.com/paulmillr/2657075 \n#- En utilisant l'API github https://developer.github.com/v3/ récupérer pour chacun de ces users le nombre moyens de stars des repositories qui leur appartiennent. Pour finir classer ces 256 contributors par leur note moyenne.\n\n# list of most activre user https://gist.github.com/paulmillr/2657075\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport json\nimport base64\nimport operator\nimport sys\nfrom requests.auth import HTTPBasicAuth\nimport numpy as np\nimport threading\nfrom queue import Queue\nimport time\n\n\nURL_PAUL_MILLR = \"https://gist.github.com/paulmillr/2657075\"\ngithub_token = \"6ccaca814c800646fdb0714caaf656552839569b\"\ngit_login = \"AurelienGalicher\"\nGITHUB_API = 'https://api.github.com'\ngoogle_token='AIzaSyDwgxHe0JysgEK9T99qUlRyr42f7IX_jKU'\ngoogle_token_matrix ='AIzaSyCAK_q-slfWI2FnyQa9PZMI9qYNGMNmaoQ'\n\ndef crawlGitUrlList(url):\n '''\n Récupérer via crawling la liste des 256 top contributors sur cette page https://gist.github.com/paulmillr/2657075 \n Retourne une dataframe avec nom et url\n '''\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n index_list = []\n url_list = []\n for row in soup.findAll('table')[0].tbody.findAll('tr'):\n classement = int(row.findAll('th')[0].contents[0].replace('#',''))\n url_column = row.findAll('td')[0].select('a')[0].attrs['href']\n url_list.append(url_column)\n index_list.append(classement)\n user_df= pd.DataFrame(url_list,index=index_list)\n user_df.columns=['url']\n user_df['user']=user_df['url'].str.replace('https://github.com/','')\n return user_df\n\n\n\ndef retrieveStarGazersCountMean(user):\n '''\n En utilisant l'API github https://developer.github.com/v3/ \n récupére pour 'user' le nombre moyens de stars des repositories\n '''\n user_repos_url = GITHUB_API+'/users/'+user+'/repos'\n r= requests.get(user_repos_url, auth=HTTPBasicAuth(git_login, github_token))\n repos_json_list = json.loads(r.text)\n list_stargazers_count=[]\n for repo in repos_json_list :\n if repo[\"stargazers_count\"]:\n list_stargazers_count.append(repo[\"stargazers_count\"])\n mean_repos_list = sum(list_stargazers_count)/ float(len(list_stargazers_count))\n return mean_repos_list\n\n\n## utilisation de tread pour paralléliser la récupération des moyennes\n\ndef retrieveAndSortStarGazersCountMeanList(user_df, NB_THREADS=50):\n\n #intialiazing result dict of type { user: meanStarGazers } \n result_dict = {}\n # lock to serialize console output\n lock = threading.Lock()\n\n def do_work(dict_params):\n # updating dict with the Mean of StarGazersCount of all repos of user\n user= dict_params['user']\n dict_params['dict'].update({user: retrieveStarGazersCountMean(user)})\n # pretend to do some lengthy work.\n # Make sure the whole print completes or threads can mix up output in one line.\n with lock:\n print(threading.current_thread().name,dict_params['user'])\n\n # The worker thread pulls an item from the queue and processes it\n def worker():\n while True:\n dict_params = q.get()\n do_work(dict_params)\n q.task_done()\n\n\n # Create the 
queue and thread pool.\n q = Queue()\n for i in range(min(NB_THREADS, len(user_df.index))):\n t = threading.Thread(target=worker)\n t.daemon = True # thread dies when main thread (only non-daemon thread) exits.\n t.start()\n\n # stuff work items on the queue (in this case a dict with the user adn global dict to update).\n start = time.perf_counter()\n for user in user_df['user']:\n dict_params = { 'user' : user, 'dict' : result_dict}\n q.put(dict_params)\n\n q.join() # block until all tasks are done\n\n print('time:',time.perf_counter() - start)\n df = pd.Series(result_dict, name='average StarGazers TOP 256 popular git users')\n df.index.name = 'user'\n df.columns=['average StarGazers']\n df.reset_index()\n return df.sort_values(ascending=0)\n\n\n\n### main ###\n\ndf_ville = pd.read_csv('villes.csv')\n\n\n\ndef retrieve_lat_lng(ville):\n address = 'paris'\n\n fgeo_s ='https://maps.googleapis.com/maps/api/geocode/json'\n\n param = {'address': address}\n\n response = requests.get(fgeo_s, params=param, auth=HTTPBasicAuth(git_login, google_token))\n\n json_dict = response.json()\n #print(json_dict)\n\n lat = json_dict['results'][0]['geometry']['location']['lat']\n lng = json_dict['results'][0]['geometry']['location']['lng']\n\n return [lat, lng]\n\nimport googlemaps\n\ngmaps = googlemaps.Client(key=google_token)\n\norigins = list(df_ville['Ville'].values)[0:10]\nprint(origins)\ndestinations = list(df_ville['Ville'].values)[0:10]\nprint(destinations)\n\nmatrix = gmaps.distance_matrix(origins, destinations)\nprint(matrix)\nmatrix.rows\n\ndef extract_value(row):\n return list(map( lambda x: x['duration']['text'], row['elements']))\n\nmm = list(map(extract_value, matrix['rows']))\n\n#user_df= crawlGitUrlList(URL_PAUL_MILLR)\n#user_df = pd.read_csv('user.csv')\n#df = retrieveAndSortStarGazersCountMeanList(user_df)\n#print(df)\n","sub_path":"Aurelien_Galicher/Lesson4/exo_cc_lesson_04.py","file_name":"exo_cc_lesson_04.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"45508536","text":"#!/usr/env/bin python3\n\"\"\"\nYH ~ 8.8.2018\n[centroid_select.py]\nExtracts the centroids from Uclust's centroid output, based on a input threshold\nthen randomly selects an input percentage of the centroids of length below threshold\nfor further analysis with jackhmmer.\n\nMeant to be run after Uclust, to prepare input for jackhmmer, and reduce computational load\n\"\"\"\n### IMPORTS\nfrom sys import argv\nfrom math import ceil\nimport textwrap\nimport random\nimport numpy as np\n\n### FUNCTIONS\ndef fasta_parser_to_dict(inputfile):\n \"\"\"Parses fasta file into dictionary of format {seqID:seq}\"\"\"\n seqdict = {}\n for line in inputfile:\n if not line.strip():\n continue\n if line.startswith(\">\"):\n lbl = line.split(\">\")[1][0:4] # for PDB labels! 
\n seqdict[lbl] = []\n else:\n seq = line.split(\"\\n\")[0]\n seqdict[lbl].append(seq)\n\n for key, frag in seqdict.items():\n seqdict[key] = ''.join(frag) \n\n return seqdict\n\ndef print_output(sizeslist, centroidlist, seqdict):\n \"\"\"Prints list of centroids and sizes out to centroid_sizes.txt\n and new fasta file with selected centroids based on parameters to \n centroids_sele.txt\"\"\"\n sizefile = open(\"centroid_sizes_tot.txt\", 'w+')\n seleseqfile = open(\"centroids_sele.txt\", 'w+')\n selesizefile = open(\"centroid_sizes_sele.txt\", 'w+')\n\n for i in range(len(sizeslist)):\n sizefile.write('{}\\t{}'.format(sizeslist[i][0], sizeslist[i][1]))\n if i != len(sizeslist):\n sizefile.write('\\n')\n sizefile.close()\n\n for i in range(len(centroidlist)):\n prot = centroidlist[i][0]\n seq = seqdict[prot]\n seleseqfile.write(\">{}\\n\".format(prot))\n seleseqfile.write(\"{}\".format(textwrap.fill(seq,80)))\n selesizefile.write(\"{}\\t{}\".format(centroidlist[i][0], centroidlist[i][1]))\n if i != len(centroidlist):\n seleseqfile.write('\\n')\n selesizefile.write('\\n')\n seleseqfile.close()\n selesizefile.close()\n\n\n### MAIN\nif __name__ == \"__main__\": \n\n # read input from command line\n inputfile = argv[1]\n\n # parse fasta file into a dictionary\n SeqDict = fasta_parser_to_dict(open(inputfile))\n\n # list size of each of the sequences\n sizesdict = {}\n for key, frag in SeqDict.items():\n sizesdict[key] = len(frag)\n\n SizesList = sorted(list(sizesdict.items()), key=lambda x:x[1], reverse=True)\n\n # prompt user to enter threshold size and percentage, give max and min sizes\n print(\"The max and min sequence lengths are:\")\n print(SizesList[0][1])\n print(SizesList[len(SizesList)-1][1])\n print(\"The total number of sequences is:\")\n print(len(SizesList))\n thresh_size = int(input(\"Enter your threshold size: \"))\n percentage = float(input(\"Enter your percentage of the set: \"))\n \n # take selection of sequences only under a certain threshold size \n FinalSizesList = []\n for elem in SizesList:\n if elem[1] <= thresh_size:\n FinalSizesList.append(elem) \n\n # select percentage of list of final sizes\n total = ceil(percentage*len(FinalSizesList))\n SelectedCentroids = []\n for idx in random.sample(list(np.arange(len(FinalSizesList))),total):\n SelectedCentroids.append(FinalSizesList[idx])\n\n SelectedCentroids = sorted(SelectedCentroids, key=lambda x:x[1], reverse=True)\n print(\"Number of centroid sequences in your final set:\")\n print(len(SelectedCentroids))\n # print output\n print_output(SizesList, SelectedCentroids, SeqDict)\n","sub_path":"centroid_select.py","file_name":"centroid_select.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"35090756","text":"# flake8: noqa\n\n# pandas versioning\nimport pandas\n\nfrom distutils.version import LooseVersion\n\npv = LooseVersion(pandas.__version__)\n\nif pv < \"0.19.0\":\n raise ValueError(\"mbf_pandas_msgpack requires at least pandas 0.19.0\")\n_is_pandas_legacy_version = pv.version[1] == 19 and len(pv.version) == 3\n\nfrom .packers import to_msgpack, read_msgpack\n\n# versioning\n#from ._version import get_versions\n\n#versions = get_versions()\n#__version__ = versions.get(\"closest-tag\", versions[\"version\"])\n#__git_revision__ = versions[\"full-revisionid\"]\n\n__version__ = \"0.1\"\n\n\npandas.DataFrame.to_msgpack = lambda self, path=None, **kwargs: to_msgpack(path, self, **kwargs)\n\ndel pv, LooseVersion, 
pandas#get_versions, versions, \n\n","sub_path":"mbf_pandas_msgpack/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95690483","text":"\"\"\"\nhttps://leetcode.com/problems/reverse-linked-list/\n\nReverse Linked List\n\nReverse a singly linked list.\n A linked list can be reversed either iteratively or recursively. Could you implement both\n\"\"\"\n\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reverseList(self, head):\n if head is None:\n return head\n curr = head\n stack = []\n while curr:\n stack.append(curr)\n curr = curr.next\n reverse_head = stack.pop()\n rev_curr = reverse_head\n while len(stack) > 0:\n node = stack.pop()\n rev_curr.next = node\n rev_curr = rev_curr.next\n rev_curr.next = None\n return reverse_head\n\n\nimport list_node\nhead = list_node.generate_list(range(1,11))\nlist_node.print_list(head)\n\nsol = Solution()\nreverse = sol.reverseList(head)\nlist_node.print_list(reverse)\n","sub_path":"leet-code/reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"524184096","text":"import pandas as pd\nimport PerformanceMetrics as performanceMetrics\nfrom sklearn.ensemble import RandomForestClassifier \nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import KFold\nimport numpy as np\n\n\nprocessedDatas = pd.read_csv(\"../processedDataset.csv\")[[\"video_id\",\"trending_date\",\"title\",\"channel_title\",\"category\",\"category_id\",\"publish_time\",\"tags\",\"views\",\"likes\",\n\"dislikes\",\"comment_count\",\n\"comments_disabled\",\"ratings_disabled\",\"video_error_or_removed\",\"country_abb\", \"views_scaled\", \"likes_scaled\", \"dislikes_scaled\", \"comment_count_scaled\", \"music_cat\", \n\"comedy_cat\",\"entertainment_cat\", \"news&politics_cat\", \"people&blogs_cat\", \"howto&style_cat\", \"film&animation_cat\", \"science&technology_cat\", \"gaming_cat\", \"sports_cat\",\n\"pets&animals_cat\", \"travel&events_cat\", \"autos&vehicles_cat\", \"education_cat\", \"shows_cat\", \"movies_cat\", \"trailers_cat\", \"ca_country\", \"de_country\", \"fr_country\",\n\"gb_country\", \"in_country\", \"jp_country\", \"kr_country\", \"mx_country\", \"ru_country\",\"us_country\"\n]]\n\n\n\n\n#region Verilen view sayısına gore videonun entertainment kategorisine ait olup olmadığını buluyor.\n\nmodelUS = RandomForestClassifier()\n\npredictionViews = processedDatas[[\"views\"]].to_numpy()\n\npredictionCat = processedDatas[[\"entertainment_cat\"]].to_numpy()\n\npredictionUS = processedDatas[[\"us_country\"]].to_numpy()\n\nX = predictionViews\ny = predictionCat\nkf = KFold(n_splits=10,shuffle=False)\n\nbestResult = 0\nbestConfusion = []\n\nfor train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n modelUS.fit(X_train, y_train.ravel())\n prediction = modelUS.predict(X_test)\n confusionMatrix = confusion_matrix(y_test, prediction)\n accuracyScore = performanceMetrics.accuracy(confusionMatrix)\n if(accuracyScore > bestResult):\n bestResult = accuracyScore\n bestConfusion = confusionMatrix \n\n\n# Metrics for entertainment category prediction\naccuracyScore = 
performanceMetrics.accuracy(bestConfusion)\nprint(\"entertainmentViewsPrediction / Accuracy Score: {}\".format(round(accuracyScore, 3)))\n\nprecisionScore = performanceMetrics.precision(bestConfusion)\nprint(\"entertainmentViewsPrediction / Precision Score: {}\".format(round(precisionScore, 3)))\n\nrecallScore = performanceMetrics.recall(bestConfusion)\nprint(\"entertainmentViewsPrediction / Recall Score: {}\".format(round(recallScore, 3)))\n\nfMeasureScore = performanceMetrics.fmeasure(bestConfusion)\nprint(\"entertainmentViewsPrediction / F-Mesaure Score: {}\".format(round(fMeasureScore, 3)))\n\n#endregion\n\n\n#region Verilen view sayısına gore videonun lokasyonunun US olup olmadığını buluyor.\n\nmodelUS = RandomForestClassifier()\n\nX = predictionViews\ny = predictionUS\nkf = KFold(n_splits=10,shuffle=False)\n\nbestResult = 0\nbestConfusion = []\n\nfor train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n modelUS.fit(X_train, y_train.ravel())\n prediction = modelUS.predict(X_test)\n confusionMatrix = confusion_matrix(y_test, prediction)\n accuracyScore = performanceMetrics.accuracy(confusionMatrix)\n if(accuracyScore > bestResult):\n bestResult = accuracyScore\n bestConfusion = confusionMatrix \n\n\n# Metrics for entertainment category prediction\naccuracyScore = performanceMetrics.accuracy(bestConfusion)\nprint(\"USViewsPrediction / Accuracy Score: {}\".format(round(accuracyScore, 3)))\n\nprecisionScore = performanceMetrics.precision(bestConfusion)\nprint(\"USViewsPrediction / Precision Score: {}\".format(round(precisionScore, 3)))\n\nrecallScore = performanceMetrics.recall(bestConfusion)\nprint(\"USViewsPrediction / Recall Score: {}\".format(round(recallScore, 3)))\n\nfMeasureScore = performanceMetrics.fmeasure(bestConfusion)\nprint(\"USViewsPrediction / F-Mesaure Score: {}\".format(round(fMeasureScore, 3)))\n\n#endregion","sub_path":"ImplementingTheAlgorithms/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"604723611","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n# Examples:\n# url(r'^$', 'gettingstarted.views.home', name='home'),\n# url(r'^blog/', include('blog.urls')),\n\nurlpatterns = [\n\turl(r'^',include('start.urls',namespace='start')),\n\turl(r'^account/',include('account.urls',namespace='account')),\n\turl(r'^contest/',include('contest.urls',namespace='contest')),\n\turl(r'^code',include('codecheck.urls',namespace='code')),\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"gettingstarted/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"437646315","text":"import os\nimport numpy as np\n\nfrom legendre import _sch_lpmv\n\nlpkrLayouts = {} # Initialise a blank dictionary\n\nlpkrLayouts[\"quad\"] = {'az': np.asfarray([45.0, 135.0, 225.0, 315.0]),\n 'el': np.asfarray([0.0, 0.0, 0.0, 0.0]),\n 'name': \"Quad\"}\n\nlpkrLayouts[\"octah\"] = {'az': np.asfarray([0.0, 45.0, 135.0, 225.0, 315.0, 0.0]),\n 'el': np.asfarray([90.0, 0.0, 0.0, 0.0, 0.0, -90.0]),\n 'name': \"Octahedron\"}\n\nlpkrLayouts[\"cube\"] = {'az': np.asfarray([45.0, 135.0, 225.0, 315.0, 45.0, 135.0, 225.0, 315.0]),\n 'el': np.asfarray([35.0, 35.0, 35.0, 35.0, -35.0, -35.0, -35.0, -35.0]),\n 
'name': \"Cube\"}\n\nlpkrLayouts[\"birect\"] = {'az': np.asfarray([90, 270, 45, 135, 225, 315, 90, 270]),\n 'el': np.asfarray([45, 45, 0, 0, 0, 0, -45, -45]),\n 'name': \"Birectangle\"}\n\nlpkrLayouts[\"hex\"] = {'az': np.asfarray([0, 60, 120, 180, 240, 300]),\n 'el': np.asfarray([0, 0, 0, 0, 0, 0]),\n 'name': \"Hexagon\"}\n\nlpkrLayouts[\"dodeca\"] = {'az': np.asfarray([180, 50, 310, 118, 242, 0, 180, 62, 298, 130, 230, 0]),\n 'el': np.asfarray([63, 46, 46, 16, 16, 0, 0, -16, -16, -46, -46, -63]),\n 'name': \"Icosehedron\"}\n\nlpkrLayouts[\"octag\"] = {'az': np.asfarray([0, 45, 90, 135, 180, 225, 270, 315]),\n 'el': np.asfarray([0, 0, 0, 0, 0, 0, 0, 0]),\n 'name': \"Octagon\"}\n\nlpkrLayouts[\"9chcircular\"] = {'az': np.asfarray([0, 40, 80, 120, 160, 200, 240, 280, 320]),\n 'el': np.asfarray([0, 0, 0, 0, 0, 0, 0, 0, 0]),\n 'name': \"9 Channel Circular\"}\n\nlpkrLayouts[\"12chcircular\"] = {'az': np.asfarray([0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330]),\n 'el': np.asfarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n 'name': \"12 Channel Circular\"}\n\ndef encode_SH(source, M):\n \"\"\"\n encode_SH(source_dict, order)\n This function performs Ambisonic encoding of the input source positions up to order M,\n using SN3D normalisation and ACN sequence.\n \n :param source: dictionary containing azimuth and elevation angles. This must be formated\n by the convention used throughout this implementation: name_of_dict{\"az\": phi, \"el\": theta}, \n where phi is a numpy array of azimuth angles, and theta is the numpy array of elevation angles\n in the same sequence as the azimuth angles.\n :param M: encoding order setting which will later determine K, \n the number of spherical harmonics used for ambisonic encoding up to the order M.\n :return: Y: the source encoded matrix\n \"\"\"\n # ensure that M is an integer\n M = int(M)\n\n # Calculate the height matrix for future legendre coefficient calculation\n z = np.sin(source[\"el\"])\n # Calculate number of spherical harmonics from the Ambisonic order\n # Note: we force this to be 3D even if the layout is horizontal\n # in order to simplify the writing of .config files\n # the 2D equivalent would be 2(M+1)\n K = (M + 1) ** 2 # number of spher harms, or Ambisonic channels\n # Number of source directions.\n\n if isinstance(source[\"az\"], (int, float)):\n N = 1\n else:\n N = len(source[\"az\"])\n\n # Prepare the encoding matrix\n Y = np.zeros([K, N])\n\n\n \"\"\" \n We now switch to mathematical notions of \"order\" and \"degree\", given that\n this is the makes sense when calculating spherical harmonics when legendre\n calculations are explicitly included.\n degrees: n: 0 <= n <= M (integers)\n orders : m: -M <= m <= M (integers)\n \"\"\"\n # np.arange returns an \"exclusive range\": a list of integers\n # ranging from 0 to the argument - 1. 
We therefore have to add 1 to\n # the stop value\n degrees = np.arange(M + 1, dtype=int) # integer indices\n\n \"\"\"\n For each degree, calculate the Schmitt Semi Normalised\n legendre coefficients.\n Then, for each order within the degree, calculate the\n the value of the spherical harmonic.\n The, place the result in Y at index determined by the ACN sequence\n \"\"\"\n for n in degrees:\n # Get the legendre coefficients for the degree\n\n P = _sch_lpmv(n, z)\n\n # Case for 0th degree necessary - otherwise,\n # calculation of orders list (below) returns\n # a list which has no elements and can therefore not\n # be used as an iterator for the orders for loop.\n if n == 0:\n if P.size == 1:\n # this is true when N = 1, therefore z is a float\n # which causes P to be a float\n Y[0, :] = P\n else: # P is an array of floats and must be indexed\n Y[0, :] = P[0]\n else:\n\n # list orders from -n to +n, accounting for exclusive\n # range\n orders = np.arange(-n, n+1, dtype=int)\n\n # For each spherical harmonic in the degree\n for m in orders:\n # Sinusoidal term calculation\n if m >= 0:\n sinusoidalTerm = np.cos(m * source[\"az\"])\n else:\n sinusoidalTerm = np.sin(np.abs(m) * source[\"az\"])\n\n # ACN index according to current order and degree\n i = m + n**2 + n\n\n # Get the legendre coefficient and sinusoidal term\n # for the current order and place in Y at ACN position i\n\n Y[i, :] = P[abs(m)]*sinusoidalTerm\n\n return Y\n\ndef calc_order_and_nchannels(layout, force_3D):\n \"\"\"\n \n :param layout: \n :param for_ambix: boolean. When true, sets this function to return max_num_channels for a 3D layout\n even if the layout is 2D. This is required for AmbiX config files\n :return: ambisonic order and the max number of channels\n \"\"\"\n nLpkrs = len(layout[\"az\"])\n if isHorizontalOnly(layout):\n # 2D Order\n order = int(np.floor((nLpkrs-1)/2))\n if force_3D:\n maxNumChannels = (order + 1) ** 2\n else:\n maxNumChannels = 2*(order+1)\n else: # periphonic reproduction\n order = int(np.floor(np.sqrt(nLpkrs)) - 1)\n maxNumChannels = (order + 1) ** 2\n\n return order, maxNumChannels\n\ndef isHorizontalOnly(layout):\n # if array of elevation angles is equal to a an array of zeros of the same shape\n if np.array_equal(layout[\"el\"], np.zeros_like(layout[\"el\"])):\n return True\n else:\n return False\n\ndef get_supported_layouts_as_list():\n \"\"\"\n :return: list containing name, angles, order and path to the image of each loudspeaker layout \n \"\"\"\n supported_layouts = lpkrLayouts.copy()\n layouts_list = []\n\n\n for k in supported_layouts.keys():\n name = supported_layouts[k]['name']\n\n az = supported_layouts[k]['az'].tolist()\n el = supported_layouts[k]['el'].tolist()\n\n order, nchannels = calc_order_and_nchannels(supported_layouts[k], force_3D=True)\n\n image_name = supported_layouts[k]['name'].replace(\" \", \"\")\n path_to_image = os.path.join(root_dir,\n \"database\", \"img\",\n \"{}.png\".format(image_name))\n\n layouts_list.append([name, az, el, order, path_to_image])\n\n return layouts_list\n\ndef gen_decoder_matrix(L, decoding_method, angular_units='rad'):\n \"\"\"\n :param L: dictionary containing the loudspeaker azimuth and elevation angles\n dict = {\"az\": np.array(), \"el\": np.array()} (floats)\n :param decoding_method: otherwise known as decoder 'flavour', sets the operation\n carried out on the spherical harmonic projection of L\n :param angular_units: degrees or radians\n :ivar C: L projected on spherical harmonics\n :return: D: the decoding matrix for the layout L, 
produced by the decoding_method\n \"\"\"\n if angular_units in [\"deg\", \"degrees\", \"degree\"]:\n L[\"az\"] = np.deg2rad(L[\"az\"])\n L[\"el\"] = np.deg2rad(L[\"el\"])\n elif angular_units not in [\"rad\", \"radians\", \"radian\"]:\n print(\"angular unit not recognised, use \\\"rad\\\" or \\\"deg\\\"\")\n\n ############################################################################\n # Ambisonic Projection of the Loudspeaker Layout\n ############################################################################\n\n order, max_n_channels = calc_order_and_nchannels(L, force_3D=True)\n\n # C = SHT_v1(L, max_num_channels)\n C = encode_SH(L, order)\n\n\n ############################################################################\n # Generate decoding matrix with the specified decoding method\n ############################################################################\n\n if decoding_method == \"Pseudoinverse\":\n D = np.linalg.pinv(C)\n if decoding_method == \"Projection\":\n nLpkrs = len(L[\"az\"])\n D = (1/nLpkrs) * np.transpose(C)\n if decoding_method == \"undefined\":\n D = pinv(C)\n\n return D\n\ndef read_decoder_matrix_from_config_file(file):\n matrix_found = False\n with open(file) as fp:\n for i, line in enumerate(fp):\n if line == \"#DECODERMATRIX\\n\":\n skip_header = i\n matrix_found = True\n if matrix_found and (\"#END\" in line):\n max_rows = i - skip_header\n\n D = np.genfromtxt(file, skip_header=skip_header, max_rows=max_rows)\n return D\n\ndef write_ambix_config(save_path, layout_name, method, D, hrir_paths, optional_string = \"\"):\n\n # File header\n s = \"// Ambix configuration\\n\"\n\n s += \"\\n//-------- HRIR information ---------//\\n\"\n s += \"\\n\".join(optional_string)\n s += \"\\n\"\n # Decoder setup information\n s += \"\\n//------- decoder information -------//\\n\"\n\n # GLOBAL values\n s += \"\\n#GLOBAL\\n\"\n s += \"\\n#END\\n\"\n\n # HRTF\n s += \"\\n#HRTF\\n\"\n hrtf_str = \"\\n\".join(hrir_paths)\n s += hrtf_str\n s += \"\\n#END\\n\"\n\n # DECODER MATRIX\n # build the string from the numpy array\n # TODO learn how this works\n s += \"\\n#DECODERMATRIX\\n\\t\"\n D_str = '\\n\\t'.join('\\t'.join('% 0.6f' % x for x in y) for y in D)\n s += D_str + \"\\n\"\n s += \"#END\\n\"\n\n file = save_path + \"/{}_{}.config\".format(layout_name, method)\n\n f = open(file, \"w\")\n f.write(s)\n f.close()\n\n return file\n\n\nprojectFolder = os.getcwd()\nreferenceDir = os.path.join(projectFolder, 'dev', 'reference')\nambiDir = os.path.join(projectFolder, 'static', 'ambidec')\n\ndef test_against_reference():\n for subdir, dirs, files in os.walk(referenceDir):\n for file in files:\n if \"_MM_pinv\" in file:\n\n # extract the shape from the filename\n # the lowered string will be used to index our layout.lprkLayouts dict\n shape = file.split(\"_MM_pinv\")[0]\n shape = shape.split(\"_\")[2]\n shape = shape.lower()\n\n # search through the lpkrLayout items, if the shape matches one of the\n # items, compare the reference decode matrix to the one generated by\n # gen_decoder_matrix\n for key, value in lpkrLayouts.items():\n if shape.startswith(key):\n\n print(shape)\n layout = lpkrLayouts[key].copy()\n\n # reference decode matrix from the file\n D_ref = read_decoder_matrix_from_config_file(os.path.join(referenceDir, file))\n # decode matrix generated from our function\n D_adg = gen_decoder_matrix(lpkrLayouts[key], \"Pseudoinverse\", \"deg\")\n # round to the precision of the reference decode matrix\n\n\n diff = D_ref-D_adg\n precision = diff.max()\n 
print(precision)","sub_path":"ambisonic.py","file_name":"ambisonic.py","file_ext":"py","file_size_in_byte":11972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319914368","text":"from PyQt5.QtWidgets import (\n QDialog\n)\n\nfrom ui.settingsDialog import Ui_SettingsDialog\n\nclass SettingsDialog(Ui_SettingsDialog):\n def __init__(self):\n self.window = QDialog()\n self.setupUi(self.window)\n \n def connectSignalsSlots(self):\n pass\n \n def validateForm(self):\n formValid = True\n energyCost = self.energyCostDoubleSpinBox.value()\n laborRate = self.laborCostDoubleSpinBox.value()\n failureRate = self.failureRateDoubleSpinBox.value()\n \n return formValid\n\n def clearForm(self):\n energyCost = self.energyCostDoubleSpinBox.setValue(0.00)\n laborRate = self.laborCostDoubleSpinBox.setValue(0.00)\n failureRate = self.failureRateDoubleSpinBox.setValue(0.00)\n\n\n def getFormData(self):\n energyCost = self.energyCostDoubleSpinBox.value()\n laborRate = self.laborCostDoubleSpinBox.value()\n failureRate = self.failureRateDoubleSpinBox.value()\n \n formData = {\n \"Energy Cost\": energyCost,\n \"Labor Rate\": laborRate,\n \"Failure Rate\": failureRate\n }\n \n return formData\n \n def loadData(self, settings):\n self.clearForm()\n energyCost = settings[\"Energy Cost\"]\n laborRate = settings[\"Labor Rate\"]\n failureRate = settings[\"Failure Rate\"]\n \n energyCost = self.energyCostDoubleSpinBox.setValue(energyCost)\n laborRate = self.laborCostDoubleSpinBox.setValue(laborRate)\n failureRate = self.failureRateDoubleSpinBox.setValue(failureRate)","sub_path":"settingsDialog.py","file_name":"settingsDialog.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440418260","text":"import configobj as cfg\nfrom webinterface.models import Turbine, Datafile\nfrom .ppaTypes import *\nfrom .powerCurve import PowerCurve\n\n\nclass Project(object):\n def __init__(self, name=None, directory=None):\n self.name = name\n self.directory = directory\n self.datafiles = []\n self.turbine = None\n self.siteCalibrationFactors = {}\n print(\"Project: \" + self.name)\n\n def defineTurbine(self, turbine):\n self.turbine = turbine\n print(\"Turbine: \" + self.turbine.name)\n\n def addDatafile(self, name=None, containingDirectory=None, fileType=None, rowsToSkip=[], columnSeparator='\\t', badDataValues=[]):\n self.datafiles.append(name)\n return Datafile(name, containingDirectory, fileType, rowsToSkip, columnSeparator, badDataValues)\n\n def stringifySiteCalibrationFactors(self):\n siteCalibrationFactorsAsStrings = {}\n for scf, value in self.siteCalibrationFactors.items():\n slope = str(value['slope'])\n offset = str(value['offset'])\n siteCalibrationFactorsAsStrings.update({str(scf): {'slope': slope, 'offset': offset}})\n return siteCalibrationFactorsAsStrings\n\n def deStringifySiteCalibrationFactors(self, factorDict):\n siteCalibrationFactorsDict = {}\n for scf, value in factorDict.items():\n slope = float(value['slope'])\n offset = float(value['offset'])\n siteCalibrationFactorsDict.update({int(scf): {'slope': slope, 'offset': offset}})\n return siteCalibrationFactorsDict\n\n def saveMetadata(self):\n config = cfg.ConfigObj()\n config['name'] = self.name\n config['directory'] = self.directory\n config['turbine'] = '' if self.turbine is None else self.turbine.name\n config['datafiles'] = self.datafiles\n config['siteCalibrationFactors'] = 
self.stringifySiteCalibrationFactors()\n\n config.filename = self.directory + '/' + self.name + '.cfg'\n config.write()\n print(\"Project metadata saved\")\n\n def configFile(self):\n return self.directory + '/' + self.name + \".cfg\"\n\n def loadMetadata(self):\n config = cfg.ConfigObj(self.configFile())\n self.name = config['name']\n self.directory = config['directory']\n self.turbine = Turbine(config['turbine'])\n self.datafiles = config['datafiles']\n self.siteCalibrationFactors = self.deStringifySiteCalibrationFactors(config['siteCalibrationFactors'])\n print(\"Loaded project metadata: \" + self.name)\n\n def makeMeasuredPowerCurve(self, data, windSpeedColumn, powerColumn, binColumn, binWidth=0.5, airDensity=1.225):\n grouped = data.groupby(binColumn).aggregate({windSpeedColumn: 'mean',\n powerColumn: 'mean',\n binColumn: 'count'})\n grouped = grouped.rename(columns={binColumn: 'recordsPerBin', windSpeedColumn: 'meanWindSpeed', powerColumn: 'powerInKilowatts'})\n grouped['bin'] = grouped.index\n grouped['binStatus'] = BinStatus.EXCLUDED\n grouped.index = list(range(len(grouped)))\n\n powerCurve = PowerCurve(grouped.to_dict(orient='list'),\n cutin=self.turbine.warrantedPowerCurve().cutin,\n cutout=self.turbine.warrantedPowerCurve().cutout,\n windSpeedStep=binWidth,\n referenceAirDensity=airDensity)\n\n return powerCurve.validated().padded()\n","sub_path":"GroupProject/windAnalysis/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"495458622","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport pymongo\nfrom re import sub\nfrom decimal import Decimal\nfrom pymongo import MongoClient\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport json\nimport sys\nimport ast\nfrom datetime import datetime\nimport os\nfrom sqlalchemy import *\nimport argparse\nfrom pymongo import MongoClient, errors\nfrom google.cloud import storage\nfrom google.cloud.exceptions import GoogleCloudError, NotFound\nimport os\nfrom google.cloud import datastore\nfrom google.cloud import bigquery\n\n\n#python RSI_analysis_BQ.py 'mysql+pymysql://igenie_readwrite:igenie@127.0.0.1/dax_project' 'PARAM_FINANCIAL_KEY_COLLECTION' 'igenie-project-key.json' 'pecten_dataset_test.RSI'\n\ndef RSI_main(args):\n sys.path.insert(0, args.python_path)\n from Database.BigQuery.backup_table import backup_table, drop_backup_table # Feature PECTEN-9\n from Database.BigQuery.data_validation import before_insert, after_insert # Feature PECTEN-9\n from Database.BigQuery.rollback_object import rollback_object # Feature PECTEN-9\n from utils.Storage import Storage # Feature PECTEN-9\n\n # Feature PECTEN-9\n backup_table_name = backup_table(args.service_key_path, args.table_storage.split('.')[0],\n args.table_storage.split('.')[1])\n\n RSI_table = pd.DataFrame()\n project_name, constituent_list,table_store,table_historical = get_parameters(args)\n from_date, to_date = get_timerange(args)\n from_date = pd.Timestamp(from_date).to_pydatetime()\n to_date = pd.Timestamp(to_date).to_pydatetime()\n\n table_store = args.table_storage\n from_date = datetime.strftime(from_date,'%Y-%m-%d %H:%M:%S') #Convert to the standard time format\n to_date = datetime.strftime(to_date,'%Y-%m-%d %H:%M:%S') \n date = datetime.strftime(datetime.now().date(),'%Y-%m-%d %H:%M:%S') #Current time of analysis\n \n for constituent in constituent_list:\n \n if 
constituent=='M\\xc3\\xbcnchener R\\xc3\\xbcckversicherungs-Gesellschaft':\n constituent = 'Münchener Rückversicherungs-Gesellschaft'\n elif constituent=='Deutsche B\\xc3\\xb6rse':\n constituent = 'Deutsche Börse'\n\n \n constituent_name = get_constituent_id_name(constituent)[1]\n constituent_id = get_constituent_id_name(constituent)[0] \n his = get_historical_price(project_name,table_historical,constituent,to_date)\n RSI_current,overbought_pct,oversold_pct,RSI_score = RSI_calculate(his,21)\n RSI_table = RSI_table.append(pd.DataFrame({'Constituent':constituent,'Constituent_name':constituent_name, 'Constituent_id':constituent_id, 'Current_RSI':round(RSI_current,2),'percentage_days_overbought':round(overbought_pct*100,2),'percentage_days_oversold':round(oversold_pct*100,2),'RSI_bull_score':RSI_score,'Table':'RSI analysis','Date_of_analysis':date,'From_date':from_date,'To_date':to_date,'Status':'active'},index=[0]),ignore_index=True)\n \n\n print (\"table done\")\n\n #Feature PECTEN-9\n try:\n before_insert(args.service_key_path,args.table_storage.split('.')[0],table_store.split('.')[1],\n from_date,to_date,Storage(args.service_key_path))\n except AssertionError as e:\n drop_backup_table(args.service_key_path, args.table_storage.split('.')[0], backup_table_name)\n e.args += (\"Data already exists\",)\n raise\n\n update_result(args,table_store)\n print (\"update done\")\n store_result(args,project_name, table_store,RSI_table)\n\n #Feature PECTEN-9\n try:\n after_insert(args.service_key_path, args.table_storage.split('.')[0], table_store.split('.')[1],\n from_date, to_date, Storage(args.service_key_path))\n except AssertionError as e:\n e.args += (\"No data was inserted.\",)\n rollback_object(args.service_key_path,'table',args.table_storage.split('.')[0],None,\n table_store.split('.')[1],backup_table_name)\n raise\n\n drop_backup_table(args.service_key_path, args.table_storage.split('.')[0], backup_table_name)\n\n print (\"all done\")\n \n\ndef RSI_calculate(his,n):\n \n delta = his['closing_price'].diff()\n dUp, dDown = delta.copy(), delta.copy()\n dUp[dUp < 0] = 0\n dDown[dDown > 0] = 0 \n #RolUp = pd.rolling_mean(dUp,window=n,center=False) \n #RolDown = pd.rolling_mean(dDown,window=n,center=False).abs()\n RolUp = dUp.rolling(window=n).mean()\n RolDown = dDown.rolling(window=n).mean().abs()\n RS = RolUp/RolDown+0.0\n a=RS.shape[0]\n RSI = np.zeros(a)\n for i in np.arange(n,a):\n RSI[i] = 100-100/(1.0+RS[i])\n #If > 70: overbought signal, <30: oversold signal\n RSI_last_year = RSI[-252:]\n #print RSI[-90:]\n overbought = (RSI_last_year>=70)\n oversold = (RSI_last_year<=30)\n overbought_count = RSI_last_year[overbought].shape[0]\n oversold_count = RSI_last_year[oversold].shape[0]\n \n #Indicating the bullish signal\n if RSI[-1] > 70:\n RSI_score = 2\n elif RSI[-1] < 30:\n RSI_score = 1\n else: \n RSI_score =0\n \n if overbought_count>oversold_count:\n RSI_score = RSI_score+1\n else: \n RSI_score=RSI_score\n #print (float(RSI[-1]))\n return float(RSI[-1]),overbought_count/252.0,oversold_count/252.0,RSI_score\n\n\ndef get_parameters(args):\n query = 'SELECT * FROM'+' '+ args.parameter_table + ';'\n query = 'SELECT * FROM'+' '+ args.parameter_table + ';'\n #print (query)\n parameter_table = pd.read_sql(query, con=args.sql_connection_string)\n project_name = parameter_table[\"PROJECT_NAME_BQ\"].loc[parameter_table['SCRIPT_NAME']=='RSI_analysis'].values[0]\n \n #Obtain the constituent_list\n a = parameter_table['CONSTITUENT_LIST'].loc[parameter_table['SCRIPT_NAME']=='RSI_analysis']\n #print a\n 
constituent_list=np.asarray(ast.literal_eval((a.values[0])))\n \n #Obtain the table storing historical price\n table_historical = parameter_table[\"TABLE_COLLECT_HISTORICAL_BQ\"].loc[parameter_table['SCRIPT_NAME']=='RSI_analysis'].values[0]\n #print (table_historical)\n table_store = parameter_table['TABLE_STORE_ANALYSIS_BQ'].loc[parameter_table['SCRIPT_NAME']=='RSI_analysis'].values[0]\n return project_name, constituent_list,table_store,table_historical\n\n\n#this makes all the out-dated data in the collection 'inactive'\n##alter the status of collection\ndef update_result(args,table_store):\n from utils.Storage import Storage # Feature PECTEN-9\n #import os\n #os.system(\"Storage.py\")\n storage = Storage(args.service_key_path)\n query = 'UPDATE `' + table_store +'` SET Status = \"inactive\" WHERE Status = \"active\"'\n\n try:\n result = storage.get_bigquery_data(query)\n except Exception as e:\n print(e) \n\ndef store_result(args,project_name,table_store,result_df):\n from utils.Storage import Storage # Feature PECTEN-9\n from Database.BigQuery.backup_table import backup_table, drop_backup_table # Feature PECTEN-9\n from Database.BigQuery.data_validation import before_insert, after_insert # Feature PECTEN-9\n from Database.BigQuery.rollback_object import rollback_object # Feature PECTEN-9\n storage_client = Storage(args.service_key_path)\n\t\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = args.service_key_path\n client = bigquery.Client()\n #Store result to bigquery\n #result_df.to_gbq(table_store, project_id = project_name, chunksize=10000, verbose=True, reauth=False, if_exists='append',private_key=None)\n\t\n result_list = result_df.to_dict(orient='records')\n\n try:\n print(\"Inserting into BQ\")\n #Feature PECTEN-9\n if storage_client.insert_bigquery_data(table_store.split('.')[0],\n table_store.split('.')[1], result_list):\n print(\"Data inserted to BQ\")\n else:\n print(\"Data not inserted\")\n except Exception as e:\n print(e)\n\n \n\n#this obtains the historical price data as a pandas dataframe from source for one constituent. 
\ndef get_historical_price(project_name,table_historical,constituent,to_date):\n #Obtain project name, table for historical data in MySQL\n #QUERY ='SELECT closing_price, date FROM '+ table_historical + ' WHERE Constituent= \"'+constituent+'\"'+ \" AND date between TIMESTAMP ('2008-01-01 00:00:00 UTC') and TIMESTAMP ('2017-12-11 00:00:00 UTC') ;\"\n QUERY ='SELECT closing_price, date FROM '+ table_historical + ' WHERE Constituent= \"'+constituent+'\"'+ \" AND date between TIMESTAMP ('2009-01-01 00:00:00 UTC') and TIMESTAMP ('\" + to_date + \" UTC') ;\"\n \n #print (QUERY)\n his=pd.read_gbq(QUERY, project_id=project_name,verbose=False)\n his['date'] = pd.to_datetime(his['date'],format=\"%Y-%m-%dT%H:%M:%S\") #read the date format\n his = his.sort_values('date',ascending=1).reset_index(drop=True)\n return his\n\n\ndef get_timerange(args):\n query = 'SELECT * FROM PARAM_READ_DATE WHERE STATUS = \"active\";'\n timetable = pd.read_sql(query, con=args.sql_connection_string)\n from_date = timetable['FROM_DATE'].loc[timetable['ENVIRONMENT'] == args.environment]\n to_date = timetable['TO_DATE'].loc[timetable['ENVIRONMENT'] == args.environment]\n return from_date.values[0], to_date.values[0]\n\n\ndef get_constituent_id_name(old_constituent_name):\n mapping = {}\n mapping[\"BMW\"] = (\"BMWDE8170003036\" , \"BAYERISCHE MOTOREN WERKE AG\")\n mapping[\"Allianz\"] = (\"ALVDEFEI1007380\" , \"ALLIANZ SE\")\n mapping[\"Commerzbank\"] = (\"CBKDEFEB13190\" , \"COMMERZBANK AKTIENGESELLSCHAFT\")\n mapping[\"adidas\"] = (\"ADSDE8190216927\", \"ADIDAS AG\")\n mapping[\"Deutsche Bank\"] = (\"DBKDEFEB13216\" , \"DEUTSCHE BANK AG\")\n mapping[\"EON\"] = (\"EOANDE5050056484\" , \"E.ON SE\")\n mapping[\"Lufthansa\"] = (\"LHADE5190000974\" ,\"DEUTSCHE LUFTHANSA AG\")\n mapping[\"Continental\"] = (\"CONDE2190001578\" , \"CONTINENTAL AG\")\n mapping[\"Daimler\"] = (\"DAIDE7330530056\" , \"DAIMLER AG\")\n mapping[\"Siemens\"] = (\"SIEDE2010000581\" , \"SIEMENS AG\")\n mapping[\"BASF\"] = (\"BASDE7150000030\" , \"BASF SE\")\n mapping[\"Bayer\"] = (\"BAYNDE5330000056\" , \"BAYER AG\")\n mapping[\"Beiersdorf\"] = (\"BEIDE2150000164\" , \"BEIERSDORF AG\")\n mapping[\"Deutsche Börse\"] = (\"DB1DEFEB54555\" , \"DEUTSCHE BOERSE AG\")\n mapping[\"Deutsche Post\"] = (\"DPWDE5030147191\" , \"DEUTSCHE POST AG\")\n mapping[\"Deutsche Telekom\"] = (\"DTEDE5030147137\" , \"DEUTSCHE TELEKOM AG\")\n mapping[\"Fresenius\"] = (\"FREDE6290014544\" , \"FRESENIUS SE & CO.KGAA\")\n mapping[\"HeidelbergCement\"] = (\"HEIDE7050000100\" , \"HEIDELBERGCEMENT AG\")\n mapping[\"Henkel vz\"] = (\"HEN3DE5050001329\" , \"HENKEL AG & CO. 
KGAA\")\n mapping[\"Infineon\"] = (\"IFXDE8330359160\" , \"INFINEON TECHNOLOGIES AG\")\n mapping[\"Linde\"] = (\"LINDE8170014684\" , \"LINDE AG\")\n mapping[\"Merck\"] = (\"MRKDE6050108507\" , \"MERCK KGAA\")\n mapping[\"ProSiebenSat1 Media\"] = (\"PSMDE8330261794\" , \"PROSIEBENSAT.1 MEDIA SE\")\n mapping[\"RWE\"] = (\"RWEDE5110206610\" , \"RWE AG\")\n mapping[\"SAP\"] = (\"SAPDE7050001788\" , \"SAP SE\")\n mapping[\"thyssenkrupp\"] = (\"TKADE5110216866\" , \"THYSSENKRUPP AG\")\n mapping[\"Vonovia\"] = (\"VNADE5050438829\" , \"VONOVIA SE\")\n mapping[\"DAX\"] = (\"DAX\", \"DAX\")\n mapping[\"Fresenius Medical Care\"] = (\"FMEDE8110066557\" , \"FRESENIUS MEDICAL CARE AG & CO.KGAA\")\n mapping[\"Volkswagen (VW) vz\"] = (\"VOW3DE2070000543\" , \"VOLKSWAGEN AG\")\n mapping[\"Münchener Rückversicherungs-Gesellschaft\"] = (\"MUV2DEFEI1007130\" , \"MUNCHENER RUCKVERSICHERUNGS - GESELLSCHAFT AKTIENGESELLSCHAFT IN MUNCHEN\")\n\n if old_constituent_name in mapping:\n return mapping[old_constituent_name]\n else:\n return old_constituent_name\n\n\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('sql_connection_string', help='The connection string to mysql for parameter table') \n parser.add_argument('parameter_table',help=\"The name of the parameter table in MySQL\")\n parser.add_argument('python_path', help=\"The name of the parameter table in MySQL\")\n parser.add_argument('service_key_path',help='google service key path')\n parser.add_argument('table_storage',help='BigQuery table where the new data is stored')\n parser.add_argument('environment')\n args = parser.parse_args()\n RSI_main(args)\n","sub_path":"Data_analytics/Fundamental_Analysis/Kefei/Fundamental_BQ/RSI_analysis_BQ.py","file_name":"RSI_analysis_BQ.py","file_ext":"py","file_size_in_byte":12231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"276430738","text":"from Parameters import *\n\nclass Bullet:\n def __init__(self, x, y, width, height, bul_width, bul_height, speed, image):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.speed = speed\n self.image = image\n self.bul_width = bul_width\n self.bul_height = bul_height\n self.top = self.y\n self.bottom = self.y + self.bul_height\n self.fow = self.x + self.bul_width\n self.back = self.x\n\n def move(self):\n if self.x <= self.width:\n screen.blit(self.image, (self.x, self.y))\n self.x += self.speed\n self.back += self.speed","sub_path":"Для теста/Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499647136","text":"import pytz\nfrom datetime import datetime, timedelta\n\nfrom django.core.management import call_command\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nWEEKDAYS = (\n (1, 'Monday'),\n (2, 'Tuesday'),\n (3, 'Wednesday'),\n (4, 'Thursday'),\n (5, 'Friday'),\n (6, 'Saturday'),\n (7, 'Sunday'),\n)\n\nMONTHS = (\n (1, 'January'),\n (2, 'February'),\n (3, 'March'),\n (4, 'April'),\n (5, 'May'),\n (6, 'June'),\n (7, 'July'),\n (8, 'August'),\n (9, 'September'),\n (10, 'October'),\n (11, 'November'),\n (12, 'December'),\n)\n\n\nclass Event(models.Model):\n name = models.CharField(max_length=100)\n bio = models.TextField()\n start_time = models.DateTimeField(null=True)\n max_tickets = 
models.IntegerField(null=True)\n tickets_sold = models.IntegerField(default=0)\n ticket_price = models.DecimalField(decimal_places=2, max_digits=5)\n banner = models.ForeignKey('pages.BannerWidget', null=True, blank=True)\n videos = models.ManyToManyField('pages.VideoWidget', blank=True)\n\n class Meta:\n ordering = ['-start_time']\n\n def __str__(self):\n return self.name\n\n @property\n def name_with_date(self):\n return '{}: {}'.format(self.name, self.event_day())\n\n @property\n def slug(self):\n return slugify(self.name)\n\n def get_api_url(self):\n return reverse('event', kwargs={'event_id': self.pk})\n\n def get_absolute_url(self):\n return reverse('event_wrapper', kwargs={'event_id': self.pk})\n\n def event_day(self):\n \"\"\"\n Provide a user-friendly representation of the event start day\n \"\"\"\n day = ''\n start_time = timezone.localtime(self.start_time)\n if start_time.date() == datetime.today().date():\n return 'TONIGHT'\n elif start_time.date() == datetime.today().date() + timedelta(days=1):\n return 'Tomorrow'\n else:\n day_index = start_time.date().weekday()\n day = WEEKDAYS[day_index][1]\n month_index = start_time.month - 1\n month = MONTHS[month_index][1]\n day += ', {month} {date}'.format(month=month,\n date=start_time.day)\n return day\n\n def event_time(self):\n pm = False\n hour = timezone.localtime(self.start_time).time().hour\n if hour >= 12:\n pm = True\n hour -= 12\n return '{} pm'.format(hour)\n else:\n return '{} am'.format(hour)\n\n def to_data(self):\n data = {\n \"id\": self.id,\n \"name\": self.name,\n \"bio\": self.bio,\n \"event_time\": self.event_time(),\n \"event_day\": self.event_day(),\n \"ticket_price\": self.ticket_price,\n \"name_with_date\": self.name_with_date,\n \"tickets_left\": self.max_tickets - self.tickets_sold\n }\n if self.banner:\n data['banner_url'] = self.banner.image.url\n\n return data\n\n def save(self, *args, **kwargs):\n \"\"\"\n We collect static when banners change because it seems easier than\n implementing webpack\n \"\"\"\n collectstatic = False\n\n if self.pk is not None:\n orig = Event.objects.get(pk=self.pk)\n if orig.banner != self.banner:\n print('banner changed')\n collectstatic = True\n else:\n collectstatic = True # this is a newly created instance\n\n super(Event, self).save(*args, **kwargs)\n if collectstatic:\n call_command('collectstatic', verbosity=1, interactive=False)\n","sub_path":"the_ape/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549725899","text":"from nilearn.image import resample_img\nimport nibabel as nib\nimport os\nimport numpy as np\n\ndatafolder = \"/Users/Joke/Desktop/validating-fmri/data\"\n\nsubs = list(np.unique([x.split(\"_\")[0] for x in os.listdir(os.path.join(datafolder,\"CNP_rest\"))]))\n\nfor sub in subs:\n anatfile = os.path.join(datafolder,\"CNP_rest/%s_T1w_space-MNI152NLin2009cAsym_preproc.nii.gz\"%sub)\n anatfile_reduced = os.path.join(datafolder,\"CNP_rest/%s_T1w_space-MNI152NLin2009cAsym_preproc_reduced.nii.gz\"%sub)\n restfile = os.path.join(datafolder,\"CNP_rest/%s_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz\"%sub)\n anat = nib.load(anatfile)\n rest = nib.load(restfile)\n anat_resampled = resample_img(anat,target_affine=rest.affine,target_shape=rest.shape[:3])\n anat_resampled.to_filename(anatfile_reduced)\n\natlasfile = os.path.join(datafolder,\"MSDL_rois\",\"msdl_rois.nii\")\natlas = nib.load(atlasfile)\natlas_resampled = 
resample_img(atlas,target_affine=anat_resampled.affine,target_shape=anat_resampled.shape[:3])\n\ndata = atlas_resampled.get_data()\nnewdata = np.zeros(data.shape[:3])\nfor x in range(data.shape[0]):\n for y in range(data.shape[1]):\n for z in range(data.shape[2]):\n if np.max(data[x,y,z])<0.1:\n newdata[x-1.5,y-1.5,z-0] = 0\n else:\n newdata[x-1.5,y-1.5,z-0] = np.where(data[x,y,z]==np.max(data[x,y,z]))[0][0]+1\n\nimg = nib.Nifti1Image(newdata,affine=anat_resampled.affine,header=anat_resampled.header)\natlasfile_reduced = os.path.join(datafolder,\"MSDL_rois\",\"msdl_rois_reduced.nii.gz\")\nimg.to_filename(atlasfile_reduced)\n","sub_path":"validating_fmri/preparation/resample.py","file_name":"resample.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"484209698","text":"from kivy.app import App\nfrom kivy.factory import Factory\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.properties import ObjectProperty, StringProperty, NumericProperty\n\nfrom storage import storage\n\n\nclass StatsLabel(Label):\n\n def set_lbl_size(self, obj, size):\n self.font_size = min(size[0], size[1]) * .04\n\n\nclass StatsSection(BoxLayout):\n _tl_text = StringProperty()\n _tl_font_size = NumericProperty(0)\n _stats_table = ObjectProperty(None)\n\n def set_section_size(self, obj, size):\n self._tl_font_size = min(size[0], size[1]) * .09\n\n\nclass StatsScreen(Screen):\n\n # Here we'll keep the last computed amount\n # for every action and it's corresponding widget\n cache = {}\n\n def __init__(self, **kwargs):\n super(StatsScreen, self).__init__(**kwargs)\n self.screen_layout = Factory.StatsScreenLayout()\n self.add_widget(self.screen_layout)\n\n self.initialize_cache()\n self.populate_cache()\n\n self.screen_layout._content.add_widget(\n self.create_new_section('Instants', storage.instants)\n )\n self.screen_layout._content.add_widget(\n self.create_new_section('Quantity-based', storage.quantity_based)\n )\n self.screen_layout._content.add_widget(\n self.create_new_section('Time-based', storage.time_based)\n )\n\n self.set_total_lbl()\n\n storage.bind(store=self.update)\n\n def initialize_cache(self):\n def initialize_cache_by_category(actions):\n for action in actions:\n self.cache[action] = {}\n self.cache[action]['total_amount'] = 0\n\n initialize_cache_by_category(storage.instants)\n initialize_cache_by_category(storage.quantity_based)\n initialize_cache_by_category(storage.time_based)\n\n def populate_cache(self):\n for entity in storage.store:\n action = entity['action']\n total_amount = int(entity['total_amount'])\n self.cache[action]['total_amount'] += total_amount\n\n def create_new_section(self, category, actions):\n new_section = Factory.StatsSection(_tl_text=category)\n self.bind(size=new_section.set_section_size)\n self.bind(size=new_section._action_header.set_lbl_size)\n self.bind(size=new_section._amount_header.set_lbl_size)\n self.populate_table(new_section, actions)\n\n return new_section\n\n def populate_table(self, section, actions):\n for action in actions:\n action_lbl = self.create_new_stats_label(action)\n\n amount = self.cache[action]['total_amount']\n amount_lbl = self.create_new_stats_label(str(amount))\n\n self.cache[action]['amount_lbl'] = amount_lbl\n section._stats_table.add_widget(action_lbl)\n section._stats_table.add_widget(amount_lbl)\n\n def 
create_new_stats_label(self, text):\n lbl = Factory.StatsLabel(text=str(text))\n self.bind(size=lbl.set_lbl_size)\n\n return lbl\n\n def update(self, _, new_list):\n new_entity = new_list[-1]\n action = new_entity['action']\n total_amount = int(new_entity['total_amount'])\n\n self.cache[action]['total_amount'] += total_amount\n self.cache[action]['amount_lbl'].text = str(\n self.cache[action]['total_amount'])\n\n self.set_total_lbl()\n\n def set_total_lbl(self):\n total = sum(self.cache[action]['total_amount']\n for action in self.cache)\n self.screen_layout._total_lbl.text = 'Total amount (ml):' + str(total)\n","sub_path":"components/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"606546022","text":"class Solution(object):\n def toHex(self, num):\n if num < 0:\n num += 0x100000000\n ref = '0123456789abcdef'\n ans = []\n while(num):\n ans.append(ref[int(num % 16)])\n num = num // 16\n return ''.join(ans[::-1]) if ans else '0'\n","sub_path":"python/405 Convert a Number to Hexadecimal.py","file_name":"405 Convert a Number to Hexadecimal.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"60732217","text":"#!/usr/bin/env python\nfrom socket import *\nHOST = '118.228.168.70' #服务器的地址\nPORT = 20000\nBUFSIZE = 1024\nADDR = (HOST,PORT)\ntcpCliSock = socket(AF_INET, SOCK_STREAM) \ntcpCliSock.connect(ADDR)\nwhile True:\n data = input('>')\n if data == 'exit':\n break\n tcpCliSock.send(data.encode()) #发送给服务器的数据\n data = tcpCliSock.recv(1024).decode() #接收数据\n if data == 'exit':\n break\n print (data)\ntcpCliSock.close()\n","sub_path":"win-linux_client.py","file_name":"win-linux_client.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"455327416","text":"#func to print the pattern\ndef patt(order):\n pattern=\"*\"\n for i in order: #no of rows\n for j in range(i): #no of cols\n print(pattern,end=\"\")\n print()\n \ntry:\n n=int(input(\"enter the boolean value 0 or 1\")) #pattern result as per 1 or 0\n order1=[1,2,3,4,5]\n order2=[5,4,3,2,1]\n\n if n==1:\n patt(order1)\n else:\n \n patt(order2)\nexcept Exception as e:\n print(e)\n","sub_path":"pattern with coice.py","file_name":"pattern with coice.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"417932761","text":"#\n# @lc app=leetcode.cn id=312 lang=python3\n#\n# [312] 戳气球\n#\n\n# @lc code=start\nclass Solution:\n def maxCoins(self, nums: List[int]) -> int:\n ball = [1] + nums + [1]\n @lru_cache(None)\n # 求出(left, right)获得最大的硬币量\n def solve(left, right):\n # 区间不存在,返回\n if left + 1 == right:\n return 0\n # 区间存在\n # i是介于left + 1到right之间,左开右闭\n best = 0\n for i in range(left + 1, right):\n total = ball[left] * ball[i] * ball[right]\n total += solve(left, i) + solve(i, right)\n best = max(best, total)\n return best\n return solve(0, len(nums) + 1)\n# @lc code=end\n\n","sub_path":"Week06/312.戳气球.py","file_name":"312.戳气球.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"245296191","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n 
dependencies = [\n ('tickle', '0004_auto_20150413_1759'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='person',\n name='special_nutrition',\n field=models.ManyToManyField(help_text='Specify any special nutritional needs or habits.', to='tickle.SpecialNutrition', verbose_name='special nutrition', blank=True),\n ),\n migrations.AlterField(\n model_name='product',\n name='categories',\n field=models.ManyToManyField(to='tickle.Category', verbose_name='categories', blank=True),\n ),\n ]\n","sub_path":"tickle/migrations/0005_auto_20150413_1956.py","file_name":"0005_auto_20150413_1956.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"210685065","text":"import numpy as np\nimport pickle, sys, argparse\nfrom keras.models import load_model\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageTransform, ImageChops\nimport cProfile\nfrom CharacterSource import AlphaNumericCharacterSource\nimport Levenshtein\n#pr = cProfile.Profile()\n#pr.enable()\n\nimage_height = 32\nnum_char_columns = 2\nnum_char_rows = 32\nimage_width = num_char_columns * 32 * 8\nsegmentation_width = 16\nsegmentation_height = 32\n\nprint(\"Loading segmentation classifier...\")\nsegmentation_classifier = load_model(\"models/segmentation.hdf5\")\n\nprint(\"Loading character classifier...\")\ncharacter_classifier = load_model(\"models/classifier.hdf5\")\n\ndef prepare_image_for_classification(image):\n w,h = image.size\n image_data = np.array(image).astype('float32') / 255.0\n image_data = image_data.reshape(1,h,w,1)\n return image_data\n\n\ndef check_segmentation(img):\n x = 0\n (w,h) = img.size\n data = []\n while x < w - segmentation_width:\n window_image = img.crop((x,0,x+segmentation_width,segmentation_height))\n window_data = np.array(window_image).astype('float32')/255.0\n window_data = window_data.reshape(segmentation_height,segmentation_width,1)\n data.append(window_data)\n x += 1\n\n return segmentation_classifier.predict(np.array(data))\n\n\ndef expand_image_for_segmentation(img):\n (w,h) = img.size\n left_edge = img.crop((0,0,1,image_height))\n right_edge = img.crop((w-1,0,w,image_height))\n\n expanded_image = Image.new(\"L\", (w + segmentation_width, h), 0)\n (w,h) = expanded_image.size\n\n for x in range(0,segmentation_width / 2):\n expanded_image.paste(left_edge, (x,0))\n expanded_image.paste(right_edge, (w - x - 1,0))\n\n expanded_image.paste(img, (segmentation_width/2,0))\n #expanded_image.save(\"expanded.png\")\n\n return expanded_image\n\n\ndef segment_characters(img, threshold=0.35):\n expanded_image = expand_image_for_segmentation(img)\n (w,h) = expanded_image.size\n x = 0\n is_in_run = False\n x_start = 0\n last_score = 0\n a = 0.5\n b = 1.0 - a\n filtered_score = 0\n seg_array = []\n score_array = []\n filtered_score_array = []\n score_array = check_segmentation(expanded_image)\n i = 0\n while x < w - segmentation_width:\n score = score_array[i]\n filtered_score = score * a + last_score * b\n last_score = filtered_score\n if filtered_score >= threshold and not is_in_run:\n is_in_run = True\n x_start = x\n elif filtered_score < 0.5 * threshold and is_in_run:\n is_in_run = False\n seg_array.append((x_start + x - 1) / 2)\n\n filtered_score_array.append((x,32 * (1.0 - filtered_score)))\n x += 1\n i += 1\n if is_in_run:\n seg_array.append((x_start + x - 2) / 2)\n\n return seg_array, filtered_score_array\n\n\ndef draw_segmentation(img, seg_array):\n draw = ImageDraw.Draw(img)\n for x in 
seg_array:\n draw.line(((x,0),(x,32)), fill=(128,128,255), width=2)\n\n\ndef draw_score(img, score_array):\n draw = ImageDraw.Draw(img)\n\n for i in range(0,len(score_array)-1):\n x1,y1 = score_array[i]\n x2,y2 = score_array[i+1]\n draw.line(((x1,y1),(x2,y2)), fill=(255,0,0))\n\n\ndef draw_answer(img, text, predicted_text):\n draw = ImageDraw.Draw(img)\n color = (255,0,0) if text != predicted_text else (0,255,0)\n draw.text((0,20), predicted_text, fill=color)\n\n\ndef classify_character(img, x1, x2):\n char_image = img.crop((x1,0,x2,32)).resize((32,32), resample=Image.BILINEAR)\n char_data = prepare_image_for_classification(char_image)\n ans_vector = character_classifier.predict(char_data)[0]\n ans_index = ans_vector.argmax()\n probability = ans_vector[ans_index]\n ans = classify_character.char_source.char_for_index(ans_index)\n return ans, probability\n\nclassify_character.char_source = AlphaNumericCharacterSource()\n\ndef classify_characters(img, seg_array, threshold=0.1):\n text = \"\"\n for i in range(0,len(seg_array)-1):\n char, probability = classify_character(img, seg_array[i], seg_array[i+1])\n if probability > threshold:\n text += char\n else:\n text += 'X'\n\n return text\n\n\ndef preprocess_image(image):\n width, height = image.size\n min_dim = min(width,height) / 2\n image = image.crop()\n\n left = (width - min_dim)/2\n top = (height - min_dim)/2\n right = (width + min_dim)/2\n bottom = (height + min_dim)/2\n clip_rect = (left, top, right, bottom)\n\n image = image.crop(clip_rect).resize((char_size, char_size), Image.BILINEAR)\n# image = image.filter(ImageFilter.GaussianBlur(radius=1.0))\n\n image_data = np.array(image).astype('float32')\n m = np.mean(image_data, axis=(0,1))\n s = np.std(image_data, axis=(0,1))\n image_data = (image_data - m) / s\n image_data = image_data.reshape(1,char_size,char_size,1)\n return image_data, clip_rect\n\n\n\ndef draw_chars(img, seg_array):\n result = Image.new(\"RGB\", (image_width/2, image_height), (255,255,255))\n draw = ImageDraw.Draw(result)\n xi = 0\n char_width = 32\n char_height = 32\n\n for i in range(0,len(seg_array)-1):\n x1 = seg_array[i]\n x2 = seg_array[i+1]\n char_image = img.crop((x1,0,x2,char_height))\n char_image = char_image.resize((char_width,char_height),resample=Image.BILINEAR)\n result.paste(char_image, (xi, 0))\n draw.line(((xi,0),(xi,32)), fill=(128,128,255), width=2)\n xi += char_width\n\n return result\n\n\ndef predict_word(img):\n seg_array, score_array = segment_characters(img)\n return classify_characters(img, seg_array), seg_array\n\n\ndef test_segmentation(max_num=1024*1024, visualize=False, data_dir=\"data\"):\n options={'min_color_delta':16.0, 'min_blur':0.5, 'max_blur':0.5, 'max_rotation':2.5, 'min_noise':4, 'max_noise':4, 'add_background_lines':False}\n n = 0\n correct_predictions = 0\n file = open(data_dir + '/' + 'labels.pickle', 'rb')\n labels = pickle.load(file)\n num_digits = 0\n num_correct_digits = 0\n\n if visualize:\n num_examples = min(max_num, len(labels))\n num_char_rows = num_examples / num_char_columns + num_examples % num_char_columns\n overview_image = Image.new(\"RGB\", (image_width, 2 * image_height * num_char_rows), (255,255,255))\n overview_draw = ImageDraw.Draw(overview_image)\n\n num_examples = min(len(labels),max_num)\n\n for id,text in labels.iteritems():\n img = Image.open(data_dir + '/' + id + \".png\")\n seg_array, score_array = segment_characters(img)\n predicted_text = classify_characters(img, seg_array)\n\n if visualize:\n x = (n % num_char_columns) * image_width / 
num_char_columns\n y = (n / num_char_columns) * image_height * 2\n img = img.convert(mode='RGB')\n char_image = draw_chars(img, seg_array)\n draw_score(img, score_array)\n draw_segmentation(img, seg_array)\n draw_answer(char_image, text, predicted_text)\n overview_image.paste(img, (x, y))\n overview_image.paste(char_image, (x, y + image_height))\n\n num_digits += len(text)\n distance = Levenshtein.distance(text, predicted_text)\n num_correct_digits += len(text) - min(distance,len(text))\n accuracy = float(num_correct_digits)/num_digits\n\n n+=1\n sys.stdout.write(\"\\r%d/%d acc: %f\" % (n, num_examples, accuracy))\n sys.stdout.flush()\n\n if n >= max_num:\n break\n\n #overview_image.paste(Image.fromarray((batch[0][i].reshape(image_height,image_width) * 255).astype('uint8'),mode=\"L\"), (image_width*i, image_height*j))\n\n if visualize:\n overview_image.save(\"overview.png\")\n\n print(\"\")\n print(\"Accuracy: %f\" % (accuracy))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', help=\"max number of test cases\", action=\"store\", dest=\"n\", type=int, default=1024*1024)\n parser.add_argument('--directory', help=\"directory of test cases\", action='store', dest='data_dir', default='data')\n parser.add_argument(\"--visualize\", help=\"save visualization of char segmentation and classification\", action=\"store_true\", default=False)\n args = parser.parse_args()\n test_segmentation(max_num=args.n, visualize=args.visualize, data_dir=args.data_dir)\n\n\n#pr.disable()\n#pr.print_stats(sort='time')\n","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"548108919","text":"from node import Node \n \nclass DoubleLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.count = 0\n\n def isEmpty(self):\n return self.head == None\n\n def append(self, data):\n if self.tail == None:\n self.head = Node(data)\n self.tail = self.head\n else:\n self.tail.next = Node(data)\n self.tail = self.tail.next\n self.count += 1\n \n def insert(self,node,data):\n if node == None:\n return\n oldNode = Node(node)\n oldNext = oldNode.getNext()\n newNode = Node(data)\n newNode.setNext(oldNext)\n newNode.previous = oldNode\n oldNode.setNext(newNode)\n self.count += 1\n\n def size(self):\n return self.count\n\n def list(self):\n output = []\n current = self.head\n while current != None:\n output.append(current.getData())\n current = current.getNext()\n return output\n \n def find(self,data):\n current = self.head\n found = False\n while current != None:\n if current.getData() == data:\n found = True\n break\n current = current.getNext()\n return found\n \n def remove(self,node):\n if node == None:\n return\n\n ##If node is the head\n if node.previous == None:\n self.head = node.nex\n\n ##If node is the tail\n if node.next == Node:\n self.tail = node.previous\n\n if node.previous != None:\n node.previous.next = node.next\n\n if node.next != None:\n node.next.previous = node.previous\n self.count -= 1\n \n \n def pop(self):\n self.tail.delData()\n current = self.head\n while current != None:\n if current.next.getData() == None:\n current.delNext() \n current = current.getNext()\n","sub_path":"doubleLinkedList.py","file_name":"doubleLinkedList.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"634426063","text":"import functools as ft\r\nimport itertools as it\r\nfrom models import node\r\n\r\nclass Graph :\r\n\r\n def __init__(self,name,nodes=[]):\r\n self.name=name\r\n self.nodes = {}\r\n for _node in nodes:\r\n self.add_node(_node)\r\n #for node in nodes:\r\n # if node not in self.nodes:\r\n # nodekey=node.name\r\n # nodevalues=node.edges\r\n # self.nodes[nodekey]= nodevalues\r\n\r\n #concat 2 graphs\r\n def __add__(self,other_graph):\r\n\r\n #create empty graph , name = concat garph1+ graph2\r\n new_graph = Graph('{}_{}'.format(other_graph.name,self.name),[])\r\n\r\n # add local graph nodes to new graph\r\n for k,v in self.nodes.items():\r\n new_graph.nodes[str(k)]=v\r\n\r\n #add other graph nodes to new graph\r\n for k,v in other_graph.nodes.items():\r\n new_graph.nodes[str(k)]=v\r\n\r\n return new_graph\r\n\r\n def __str__(self):\r\n return(self.printnodes())\r\n\r\n #def __getitem_(self,name):\r\n # for k, v in self.nodes.items():\r\n # if k==name:\r\n # self.nodes[k]\r\n # else:\r\n # raise KeyError\r\n\r\n def __len__(self):\r\n count=0\r\n for x in self.nodes:\r\n count= count+1\r\n return count\r\n\r\n def printnodes(self):\r\n\r\n print('\\n\\n graph {} , content :'.format(self.name))\r\n\r\n content=''\r\n\r\n for k,v in self.nodes.items():\r\n content=content+ ('\\nNode {}, contains {} edges'.format(k, len(v)))\r\n for vk,vv in self.nodes[k].edges.items():\r\n content=content+('\\nKey:{}, value:{}'.format(vk , vv))\r\n content = content + '\\n'\r\n return content\r\n\r\n def add_node(self,_node):\r\n\r\n is_node_exist = False\r\n #check if a node with the same name already exists in the graph\r\n # then existing edges should not be overwritten, but new edges should be added\r\n for kk,vv in self.nodes.items():\r\n if kk == _node.name:\r\n is_node_exist=True\r\n #found item\r\n for k,v in _node.edges.items():\r\n self.nodes[kk].add_edge(k,v)\r\n\r\n #No Node Found then add to Graph\r\n if is_node_exist== False:\r\n #cast to Node Model\r\n newnode= node.Node(_node.name,{})\r\n if len(_node.edges.items())>0:\r\n for k,v in _node.edges.items():\r\n newnode.add_edge(k,v)\r\n self.nodes[_node.name]=newnode\r\n else:\r\n self.nodes[_node.name] = newnode\r\n\r\n def remove_node(self, name):\r\n key_to_remove = ''\r\n for k in self.nodes.keys():\r\n if k == name:\r\n key_to_remove = k\r\n\r\n if key_to_remove != '':\r\n self.nodes.pop(key_to_remove)\r\n print(' {} Removed successfully from Node {}'.format(key_to_remove, self.name))\r\n elif key_to_remove == '':\r\n print(' {} is not found'.format(name))\r\n\r\n # adds an edge making to_name a neighbor of frm_name.\r\n def add_edge(self, frm_name, to_name, weight=1):\r\n for k in self.nodes.keys():\r\n #if k.name == frm_name:\r\n if k == frm_name:\r\n self.nodes[k].edges[to_name] = weight\r\n\r\n # adds an edge making to_name a neighbor of frm_name.*\r\n def remove_edge(self, frm_name, to_name):\r\n key_to_remove=''\r\n for k in self.nodes.keys():\r\n #if k.name == frm_name:\r\n if k == frm_name:\r\n key_to_remove=k\r\n self.nodes[k].edges.pop(to_name)\r\n\r\n # returns True if to_name is a neighbor of frm_name\r\n def is_edge(self, frm_name, to_name):\r\n for k in self.nodes.keys():\r\n if k == frm_name:\r\n if to_name in self.nodes[k].edges.keys():\r\n return True\r\n else:\r\n return False\r\n\r\n # returns True if to_name is reachable from frm_name\r\n def is_reachable(self, frm_name, to_name):\r\n\r\n path=[]\r\n if len(self.find_all_paths(frm_name,to_name,path))>0:\r\n return True\r\n else:\r\n return 
False\r\n \r\n # Recursive - mapping all paths from frm_name to to_name\r\n def find_all_paths(self, frm_name, to_name, path=[]):\r\n path = path + [frm_name]\r\n if frm_name == to_name:\r\n return [path]\r\n if frm_name not in self.nodes:\r\n return []\r\n\r\n paths = []\r\n\r\n if len(self.nodes[frm_name])>0:\r\n for node in self.nodes[frm_name].edges:\r\n x=len(self.nodes[frm_name])\r\n #print('self.nodes[frm_name]={} ,\\n items {} \\n,len :{}'.format(frm_name,self.nodes[frm_name],x))\r\n #print (node)\r\n if node not in path:\r\n # print('{} not in {}'.format(node, path))\r\n newpaths = self.find_all_paths( node, to_name,path)\r\n #print ('newpaths : {}'.format(newpaths))\r\n for newpath in newpaths:\r\n paths.append(newpath)\r\n return paths\r\n\r\n #find the short path = less edges\r\n def find_shortest_path(self, frm_name, to_name):\r\n allpaths= []\r\n allpaths= self.find_all_paths(frm_name,to_name)\r\n\r\n paths=[]\r\n items=[]\r\n shortpaths=[]\r\n\r\n for _item in allpaths:\r\n #({k:v})\r\n paths.append(len(_item))\r\n\r\n #Get min value\r\n minvalue= str(min(paths))\r\n\r\n\r\n for _item in allpaths:\r\n if str(len(_item))== minvalue:\r\n shortpaths.append(_item)\r\n\r\n return shortpaths\r\n\r\n # get edge weight - return none\r\n def get_edge_weight(self,frm_name,to_name):\r\n for node in self.nodes:\r\n if node == frm_name:\r\n for ek,ev in self.nodes[node].edges.items():\r\n if ek==to_name:\r\n return ev\r\n\r\n #calculate weight of give path- usually list of nodes\r\n def get_path_weight(self, path):\r\n\r\n length = len (path)\r\n i=0\r\n sedge=0\r\n stotal=0\r\n\r\n while length>=i:\r\n sedge=self.get_edge_weight(str(path[i]), str(path[i+1]))\r\n stotal= stotal+sedge\r\n\r\n i = i + 1\r\n length = length - 1\r\n\r\n else:\r\n return stotal\r\n\r\n","sub_path":"models/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"653113694","text":"#coding:utf-8\nimport json\nimport numpy as np\nfrom scipy import stats\n\nf = open('logs.json', 'r')\no = json.load(f)\n\nfor _type, varis in o.items():\n for key, arr in varis.items():\n mtx = {\n \"yellow\": {\"yellow\": [],\"lime\": [],\"magenta\": [],\"cyan\": []},\n \"lime\": {\"yellow\": [],\"lime\": [],\"magenta\": [],\"cyan\": []},\n \"magenta\": {\"yellow\": [],\"lime\": [],\"magenta\": [],\"cyan\": []},\n \"cyan\": {\"yellow\": [],\"lime\": [],\"magenta\": [],\"cyan\": []} }\n for elm in arr:\n if len(elm[\"distribute\"]) == 0: continue\n distances = elm[\"distribute\"][0][\"json\"][\"distances\"]\n if len(distances.keys()) != 4: continue\n for color1, v1 in distances.items():\n for color2, v2 in v1.items():\n mtx[color1][color2].append(v2)\n s = set()\n print(\"\")\n print(_type\n ,\"\\t\",key\n # ,\"\\t\",len(arr)\n )\n for color1, v1 in mtx.items():\n s.add(color1)\n for color2, v2 in v1.items():\n if color2 in s: continue\n #if color1 == \"cyan\" or color1 == \"lime\": color1 += \"___\"\n #if color2 == \"cyan\" or color2 == \"lime\": color2 += \"___\"\n #if color1 == \"yellow\": color1 += \"_\"\n #if color2 == \"yellow\": color2 += \"_\"\n a = np.array(v2)\n # 端末1 & 端末2 & 平均 & 最小値 & 第2四分位 & 中央値 & 第3四分位 & 最大値 & 標準偏差 \\\\\n print(\n #_type\n #+\"-\"+key\n #+\"-\"+str(len(arr))\n #+\"-\"+\n #str(len(v2))\n #,\"\\t\",\n color1\n ,\"&\",color2\n #,\"\\t\",np.average(a) ## 平均\n ,\"&\",np.mean(a) ## 算術平均\n #,\"\\t\",np.median(a) ## 中央値\n ,\"&\",np.amin(a) ## 最小値\n ,\"&\",stats.scoreatpercentile(a, 25) 
#第2四分位\n ,\"&\",stats.scoreatpercentile(a, 50) #中央値\n ,\"&\",stats.scoreatpercentile(a, 75) #第3四分位\n ,\"&\",np.amax(a) ## 最大値\n #,\"\\t\",np.ptp(a) ## 値の範囲(最大値-最小値)\n #,\"\\t\",np.var(a) ## 分散\n ,\"&\",np.std(a) ## 標準偏差\n ,\"\\\\\\\\\"\n )\n\n\nf.close()\n","sub_path":"tools/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"646972092","text":"#3-1\r\nyear_list = [1981,1982,1983,1984,1985]\r\n\r\n#3-2\r\nyear_list[3]\r\n\r\n#3-3\r\nyear_list[-1:]\r\n\r\n#3-4\r\nthings = [\"mozzarella\",\"cinderella\",\"salmonella\"]\r\n\r\n#3-5\r\nthings[1].capitalize()\r\n\r\n#3-6\r\nthings[0].upper()\r\n\r\n#3-7\r\nthings.pop()\r\nthings\r\n#別解\r\n#things.remove(\"salmonella\")\r\n#del things[2]\r\n\r\n#3-8\r\nsurprise = [\"Groucho\",\"Chico\",\"Harpo\"]\r\n\r\n#3-9\r\nsurprise[-1].lower()[::-1].capitalize()\r\n\r\n#3-10\r\ne2f = {\r\n \"dog\" : \"chine\",\r\n \"cat\" : \"chat\",\r\n \"walrus\" :\"morse\"\r\n}\r\n\r\n#3-11\r\ne2f['walrus']\r\n\r\n#3-12\r\nf2e = {}\r\nfor English , French in e2f.items():\r\n f2e[French] = English\r\n\r\n#3-13\r\nf2e[\"chine\"]\r\n\r\n#3-14\r\nset(e2f.keys())\r\n\r\n#3-15\r\nlife = {\r\n 'animals':{\r\n 'cat':[\r\n 'Henri','Grumpy','Lucy'\r\n ],\r\n 'octop':{},\r\n 'emus':{},\r\n },\r\n 'plants':{},\r\n 'other':{}\r\n}\r\n\r\n#3-16\r\nlife.keys()\r\n\r\n#3-17\r\nlife['animals'].keys()\r\n\r\n#3-18\r\nlife['animals']['cat']","sub_path":"Chapter03/Review03.py","file_name":"Review03.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122531959","text":"import os\nimport sys\nimport tarfile\n\nimport numpy\nimport six.moves.cPickle as pickle\n\nfrom chainer.dataset import download\nfrom chainer.datasets import tuple_dataset\nimport chainer.functions as F\n\nimport sys\nimport os, sys, tarfile, errno\nimport numpy as np\nimport cupy as cp\nimport matplotlib.pyplot as plt\n\nif sys.version_info >= (3, 0, 0):\n import urllib.request as urllib # ugly but works\nelse:\n import urllib\n\n# image shape\nHEIGHT = 96\nWIDTH = 96\nDEPTH = 3\n\n# size of a single image in bytes\nSIZE = HEIGHT * WIDTH * DEPTH\n\n# path to the directory with the data\nDATA_DIR = os.path.expanduser('~/dataset')\n\n# url of the binary data\nDATA_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'\n\n# path to the binary train file with image data\nDATA_PATH = DATA_DIR + '/stl10_binary/train_X.bin'\n\n# path to the binary train file with labels\nLABEL_PATH = DATA_DIR + '/stl10_binary/train_y.bin'\n\n# path to the binary train file with image data\nTEST_DATA_PATH = DATA_DIR + '/stl10_binary/test_X.bin'\n\n# path to the binary train file with labels\nTEST_LABEL_PATH = DATA_DIR + '/stl10_binary/test_y.bin'\n\n# path to the binary train file with image data\nUNLABELED_DATA_PATH = DATA_DIR + '/stl10_binary/unlabeled_X.bin'\n\n# all\nAll_DATA_PATH = DATA_DIR + '/stl10_binary/all.npy'\n\ndef read_labels(path_to_labels):\n \"\"\"\n :param path_to_labels: path to the binary file containing labels from the STL-10 dataset\n :return: an array containing the labels\n \"\"\"\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8)\n return labels\n\n\ndef read_all_images(path_to_data):\n \"\"\"\n :param path_to_data: the file containing the binary images from the STL-10 dataset\n :return: an array containing all the images\n \"\"\"\n\n with open(path_to_data, 'rb') 
as f:\n # read whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n\n # We force the data into 3x96x96 chunks, since the\n # images are stored in \"column-major order\", meaning\n # that \"the first 96*96 values are the red channel,\n # the next 96*96 are green, and the last are blue.\"\n # The -1 is since the size of the pictures depends\n # on the input file, and this way numpy determines\n # the size on its own.\n\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n # Now transpose the images into a standard image format\n # readable by, for example, matplotlib.imshow\n # You might want to comment this line or reverse the shuffle\n # if you will use a learning algorithm like CNN, since they like\n # their channels separated.\n # images = np.transpose(images, (0, 3, 2, 1))\n return images\n\ndef download_and_extract():\n \"\"\"\n Download and extract the STL-10 dataset\n :return: None\n \"\"\"\n dest_directory = DATA_DIR\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\rDownloading %s %.2f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.urlretrieve(DATA_URL, filepath, reporthook=_progress)\n print('Downloaded', filename)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef getSTL(withlabel=False, ndim=3, scale=1.):\n # download data if needed\n download_and_extract()\n if not os.path.exists(All_DATA_PATH):\n unlabeled_x = read_all_images(UNLABELED_DATA_PATH)\n train_x = read_all_images(DATA_PATH)\n test_x = read_all_images(TEST_DATA_PATH)\n\n train = _preprocess_STL(train_x, 0, False, ndim, scale)\n test = _preprocess_STL(test_x, 0, False, ndim, scale)\n unlabeled = _preprocess_STL(unlabeled_x, 0, False, ndim, scale)\n\n alldata = np.concatenate((train, test, unlabeled), axis=0)\n np.save(All_DATA_PATH, alldata)\n else:\n alldata = np.load(All_DATA_PATH)\n\n return alldata\n\ndef _preprocess_STL(images, labels, withlabel, ndim, scale):\n print(\"preprocess image\")\n if ndim == 1:\n images = images.reshape(-1, SIZE)\n elif ndim == 3:\n images = images.reshape(-1, 3, 96, 96)\n else:\n raise ValueError('invalid ndim for dataset')\n print(\"==========================\")\n images = images.astype(numpy.float32)\n print(\"==========================\")\n images *= scale / 255.\n print(\"==========================\")\n images = F.resize_images(images,(48,48)).data\n print(\"==========================\")\n\n if withlabel:\n labels = labels.astype(numpy.int32)\n return tuple_dataset.TupleDataset(images, labels)\n else:\n return images\n","sub_path":"common/getSTL.py","file_name":"getSTL.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"6362469","text":"from threading import Thread\nimport time\nimport sqlite3\nimport telebot\nimport pyowm\nfrom colorama import init\nfrom colorama import Fore, Back, Style\ninit()\nprint(Back.GREEN)\nprint(Fore.BLACK)\ncity = []\nimport codecs\nwith codecs.open( \"cities.txt\", \"r\", \"utf_8_sig\" ) as f:\n\tfor line in f:\n\t\tfor word in line.split():\n\t\t\tcity.append(word) \nbot = telebot.TeleBot(\"803245127:AAEdeY4Q9RMRG8xgfUlJ_r_QduIzXxuxcQc\")\nowm = 
pyowm.OWM('6d00d1d4e704068d70191bad2673e0cc', language = \"ru\")\ndef send_weather(bot):\n\tspam = False\n\twhile(True):\n\t\tminut = time.strftime(\"%Y%m%d%H %M %S\", time.localtime()).split()\n\t\tif (minut[1] == '00') and (spam == False):\n\t\t\tspam = True\n\t\t\tconn = sqlite3.connect('my.db')\n\t\t\tdb_c = conn.cursor()\n\t\t\tdb_c.execute('SELECT * FROM users')\n\t\t\trow = db_c.fetchone()\n\t\t\twhile row is not None:\n\t\t\t\tobservation = owm.weather_at_place(row[2])\n\t\t\t\tw = observation.get_weather()\n\t\t\t\tanswer = \"В городе \" + row[2] +\" сейчас \" + w.get_detailed_status() + \"\\nТемпература: \" + str(round(w.get_temperature('celsius')[\"temp\"])) + \" градусов\" + \"\\nВлажность: \" + str(w.get_humidity()) + \"%\" + \"\\nСкорость ветра: \" + str(w.get_wind()[\"speed\"]) + \" м/с\"\n\t\t\t\tbot.send_message(row[1], answer)\n\t\t\t\trow = db_c.fetchone()\n\t\t\tdb_c.close()\n\t\t\tconn.close()\n\t\tif (minut[1]=='01'):\n\t\t\tspam = False\nThread(target=send_weather, args=(bot,)).start()\n@bot.message_handler(content_types=['text'])\ndef send_echo(message):\n\tinputed = message.text.split()\n\tprint(message.chat.id)\n\tprint(inputed[0])\n\tcheck = False\n\tcheck2 = False\n\tfor c in city:\n\t\tif (len(inputed)==1):\n\t\t\tif ((inputed[0] == c)):\n\t\t\t\tcheck = True\n\t\t\t\tplace = inputed[0]\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcheck = False\n\t\telif(len(inputed)==2):\n\t\t\tif ((inputed[1] == c)):\n\t\t\t\tcheck2 = True\n\t\t\t\tplace = inputed[1]\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcheck2 = False\n\t\telse:\n\t\t\tcheck = False\n\t\t\tcheck2 = False\n\tif check:\n\t\tobservation = owm.weather_at_place(place)\n\t\tw = observation.get_weather()\n\t\tanswer = \"В городе \" + place +\" сейчас \" + w.get_detailed_status() + \"\\nТемпература: \" + str(round(w.get_temperature('celsius')[\"temp\"])) + \" градусов\" + \"\\nВлажность: \" + str(w.get_humidity()) + \"%\" + \"\\nСкорость ветра: \" + str(w.get_wind()[\"speed\"]) + \" м/с\"\n\t\tbot.send_message(message.chat.id, answer)\t\t\t\t\n\telif (inputed[0] == \"/start\") or (inputed[0] == \"/help\"):\n\t\tprint(inputed[0])\n\t\tbot.send_message(message.chat.id, \"Здравтсвуйте, я погодный флекс бот или же просто бот для Игоря.\\nЯ могу подсказать вам погоду в любом(практически) городе России.\\nЕсли вы желаете подключить рассылку, то напишите мне '/addme (название города с большой буквы)'. Подключить можно только один город, но мой хозяин работает над тем, что бы увеличить колиство подключаемых городов \\nЕсли вы уверенны что указали город верно, но я почему-то не знаю этот город, то сообщите об этом моему создателю по почте: nikita.yurikof@yandex.ru\\nЧто бы увидеть это сообщение еще раз отправьте мне '/help'\")\n\telif ((inputed[0] == '/addme') and check2):\n\t\tconn = sqlite3.connect('my.db')\n\t\tdb_c = conn.cursor()\n\t\tprint('Новый')\n\t\tparam = (int(message.chat.id),inputed[1])\n\t\tdb_c.execute(\"INSERT INTO users (name, city) VALUES (?,?)\",param)\n\t\tconn.commit()\n\t\tdb_c.close()\n\t\tconn.close()\n\t\tprint('complete')\n\t\tbot.send_message(message.chat.id, 'Поздравляем, теперь вы будете каждый час получать данные о погоде в городе ' + inputed[1] +'. 
\\nЕсли вы хотите отказаться от рассылки просто напишите мне /removeme и я все сделаю сам')\n\telif(inputed[0]=='/removeme'):\n\t\tconn = sqlite3.connect('my.db')\n\t\tdb_c = conn.cursor()\n\t\tdb_c.execute(\"DELETE FROM users WHERE name=?\",(int(message.chat.id),))\n\t\tconn.commit()\n\t\tdb_c.close()\n\t\tconn.close()\n\t\tbot.send_message(message.chat.id, 'Теперь вам не будут приходить уведомления о погоде')\n\telse:\t\n\t\tbot.send_message(message.chat.id, \"Пожалуйста введите название города\")\nbot.polling(none_stop = True)\ninput()","sub_path":"tele-bot228.py","file_name":"tele-bot228.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42208365","text":"import gdb\nimport bson\nfrom pprint import pprint\n\n# Usage examples:\n# coll_dh = get_data_handle(conn, 'file:collection-5550--7194480883124807592.wt')\n# dump_handle(coll_dh)\n# index_dh = get_data_handle(conn, 'file:index-5558--7194480883124807592.wt')\n# dump_handle(index_dh)\n\ndef dbg(ident, var):\n print('----------')\n if type(var) == gdb.Value:\n print('{}: ({}*){}'.format(ident, var.type, var.address))\n else:\n print(ident)\n print(' ' + str(type(var)))\n methods = dir(var)\n out = [name for name in methods if not name.startswith(\"__\")]\n for item in out:\n print(' ' + item)\n\n if type(var) == gdb.Value:\n print('\\n Fields:')\n print('\\t' + '\\n\\t'.join(str(var).split('\\n')))\n\nconn_impl_ptr = gdb.lookup_type(\"WT_CONNECTION_IMPL\").pointer()\ndbg('impl', conn_impl_ptr)\n\nconn = gdb.parse_and_eval(\"session->iface->connection\").reinterpret_cast(conn_impl_ptr).dereference()\ndbg('conn', conn)\n\ndef walk_wt_list(lst):\n ret = []\n node = conn['dhqh']['tqh_first']\n #dbg('node', node)\n while True:\n if not node:\n break\n ret.append(node.dereference())\n node = node['q']['tqe_next']\n\n return ret\n\ndef get_data_handle(conn, handle_name):\n #dbg('datahandles', conn['dhqh'])\n ret = None\n for handle in walk_wt_list(conn['dhqh']):\n #print(\"Handle: \" + str(handle['name']))\n if handle['name'].string() == handle_name:\n ret = handle\n\n return ret\n\ndef get_btree_handle(dhandle):\n btree = gdb.lookup_type('WT_BTREE').pointer()\n return dhandle['handle'].reinterpret_cast(btree).dereference()\n\ndef dump_update_chain(update_chain):\n while True:\n if not update_chain:\n print(' λ')\n break\n #dbg('update', update_chain)\n wt_val = update_chain.dereference()\n obj = None\n #dbg('wt_val', wt_val)\n val_bytes = gdb.selected_inferior().read_memory(wt_val['data'], wt_val['size'])\n can_bson = wt_val['type'] == 3\n if can_bson:\n try:\n obj = bson.decode_all(val_bytes)[0]\n except:\n pass\n print(' ' + '\\n '.join(str(wt_val).split('\\n')) + \" \" + str(obj) + \" =>\")\n\n update_chain = update_chain['next']\n\ndef dump_insert_list(wt_insert):\n key_struct = wt_insert['u']['key']\n key = gdb.selected_inferior().read_memory(int(wt_insert.address) + key_struct['offset'], key_struct['size']).tobytes()\n print('Key: ' + str(key))\n print('Value:')\n update_chain = wt_insert['upd']\n dump_update_chain(update_chain)\n\ndef dump_skip_list(wt_insert_head):\n wt_insert = wt_insert_head['head'][0]\n idx = 0\n while True:\n if not wt_insert:\n break\n dump_insert_list(wt_insert.dereference())\n #dbg('insert' + str(idx), wt_insert.dereference())\n idx+=1\n wt_insert = wt_insert['next'][0]\n\ndef dump_modified(leaf_page):\n print(\"Modify:\")\n if not leaf_page['modify']:\n print(\"No modifies\")\n return\n\n leaf_modify = 
leaf_page['modify'].dereference()\n #dbg('modify', leaf_modify)\n row_leaf_insert = leaf_modify['u2']['row_leaf']['insert']\n #dbg('row store', row_leaf_modify)\n if not row_leaf_insert:\n print(\"No insert list\")\n else:\n print(\"Insert list:\")\n dump_skip_list(row_leaf_insert.dereference().dereference())\n \n row_leaf_update = leaf_modify['u2']['row_leaf']['update']\n if not row_leaf_update:\n print(\"No update list\")\n else:\n print(\"Update list:\")\n leaf_num_entries = int(leaf_page['entries'])\n for i in range(0, leaf_num_entries):\n dump_update_chain(row_leaf_update[i])\n\ndef dump_disk(leaf_page):\n leaf_num_entries = int(leaf_page['entries'])\n dbg('in-memory page:', leaf_page)\n dsk = leaf_page['dsk'].dereference()\n if int(dsk.address) == 0:\n print(\"No page loaded from disk.\")\n return\n dbg('on-disk page:', dsk)\n wt_page_header_size = 28\n wt_block_header_size = 12\n page_bytes = gdb.selected_inferior().read_memory(int(dsk.address) + wt_page_header_size + wt_block_header_size, int(dsk['mem_size'])).tobytes()\n print(\"Dsk:\\n\" + str(page_bytes))\n\ndef dump_handle(dhandle):\n print(\"Dumping: \" + dhandle['name'].string())\n btree = get_btree_handle(dhandle)\n root = btree['root']\n root_page = root['page'].dereference()\n dbg('btree', get_btree_handle(user))\n dbg('root', btree['root'])\n dbg('root page', root_page)\n rpindex = root_page['u']['intl']['__index'].dereference()\n dbg('rpindex', rpindex)\n dbg('rp-pre-index', rpindex['index'].dereference().dereference())\n leaf_page = rpindex['index'].dereference().dereference()['page'].dereference()\n dbg('leaf', leaf_page)\n dump_disk(leaf_page)\n dump_modified(leaf_page)\n","sub_path":"src/third_party/wiredtiger/tools/gdb/wt_debug_script_update.py","file_name":"wt_debug_script_update.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160259383","text":"# Resolve the problem!!\nimport re\n\npatron = re.compile(r'[a-z]')\n\n\ndef run():\n # Start coding here\n with open('E:\\Cursos\\Curso_Basico_Python\\Challenges\\Challenge3\\challenge-python-03\\src\\encoded.txt', 'r', encoding='utf-8') as f:\n linea = f.read()\n \n mensaje = ''.join(patron.findall(linea))\n print(f'El mensaje oculto es: {mensaje}')\n\n\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"104057219","text":"import os, sys\nimport numpy as np\nfrom numpy import linalg as LA\nimport math\n# from operator import sub\nimport operator\nfrom preprocess import load_batch, obj_loader\n\n\nFOLDER_SIZE = 100\n\n# coarse mesh holding points\nholdings = [1, 188]\n\n# x y must be in same dimention\ndef custom_add(x, y):\n return x + y\n\n# load the column-major matrix file (upsample.txt, out.txt) \ndef load_weights(filename):\n u = []\n with open(filename) as f:\n for line in f:\n s = line.strip().split(' ')\n row = list(map(float, s))\n u.append(row)\n \n # convert to np.array\n return np.transpose(np.array(u))\n\n\n####################################################################\n# load coarse mesh for prediction\n# load the delta for x_pred\n# the orignal coarse mesh for adding to the prediction result\n####################################################################\ndef load_input_only(path_coarse, rest_pos):\n batch_delta_coarse = []\n batch_x_coarse = []\n \n # # load the 
rest mesh \n # if rest_file:\n # rest_pos = load_pos(rest_file)\n\n # for x in range(1, FOLDER_SIZE + 1):\n for dirName, subdirList, fileList in os.walk(path_coarse):\n for x in range(1, FOLDER_SIZE + 1):\n file_name = str(x).zfill(5) + '_00.obj'\n coarse_file = path_coarse + '/' + file_name\n # get x_train input as coarse positions subtracting the rest positions\n obj2tri(coarse_file, batch_delta_coarse, rest_pos)\n # getting the real vertex position without subtracting the rest pos\n obj2tri(coarse_file, batch_x_coarse)\n\n x_pred = np.array(batch_delta_coarse) \n x_coarse = np.array(batch_x_coarse)\n\n return x_pred, x_coarse\n\n####################################################################\n# load the vertex position\n# return the vertex array\n####################################################################\ndef load_pos(file_name):\n vert = []\n vel = []\n edges = {}\n faces = {}\n\n obj_loader(file_name, vert, vel, edges, faces)\n return vert\n\n\n\n# extend dataset x y\ndef load_data(path_coarse, path_tracking, rest_pos, tdelta=True):\n batch_coarse = []\n batch_fine = []\n \n for x in range(1, FOLDER_SIZE + 1):\n file_name = str(x).zfill(5) + '_00.obj'\n coarse_file = path_coarse + '/' + file_name\n fine_file = path_tracking + '/' + file_name\n obj2tri(coarse_file, batch_coarse, rest_pos)\n obj2tri(fine_file, batch_fine, rest_pos)\n\n # print \"load data:\", len(batch_coarse)\n # Trains the model for a fixed number of epochs \n x_train = np.array(batch_coarse) \n if tdelta:\n # print \"target data: delta \"\n y_list = []\n for i in range(len(batch_fine)):\n y_row = np.array(batch_fine[i]) - np.array(batch_coarse[i])\n y_list.append(y_row)\n y_train = np.array(y_list)\n else:\n # print \"target data: batch fine \"\n y_train = np.array(batch_fine)\n \n return x_train, y_train\n\n\n# face index to index matrix\ndef face2mtx(objfile, dim):\n if not os.path.isfile(objfile):\n print(\"file not exist\")\n return\n \n mtx = np.array([np.zeros(dim[0]*3) for item in range(dim[1])])\n count = np.zeros((dim[1], 1))\n # print \">>> mtx shape: \"\n # print mtx.shape\n tri_id = 0\n\n with open(objfile, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'f':\n id1 = int(s[1].strip().split('/')[0]) - 1 # index start at 0\n id2 = int(s[2].strip().split('/')[0]) - 1\n id3 = int(s[3].strip().split('/')[0]) - 1\n #TODO\n # set the weight as 1\n mtx[id1][tri_id * 3] = 1\n mtx[id2][tri_id*3+1] = 1\n mtx[id3][tri_id*3+2] = 1\n tri_id = tri_id + 1\n\n count[id1][0] += 1.0\n count[id2][0] += 1.0\n count[id3][0] += 1.0\n\n mtx_1 = mtx\n mtx = mtx_1 / count\n # print \"=== count ===========================================\"\n # print count\n # print \"=== mtx_1 ===========================================\"\n # print mtx_1\n # print \"=== mtx ===========================================\"\n # print mtx\n\n return mtx, mtx_1\n\n\n# load data to (pos vel) 6N dimention\n# or: load data to (pos) 3N dimention\ndef obj_parser(file_name, batch_data):\n # position v, velocity nv\n if not os.path.isfile(file_name):\n print(\"file not exist\")\n return\n \n vert = []\n sample = []\n with open(file_name, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'v':\n vert.extend(map(float, s[1:]))\n # elif s[0] == 'nv':\n # vert.extend(map(float, s[1:]))\n # if len(vert) == 6:\n if len(vert) == 3:\n sample.append(vert)\n vert = []\n\n batch_data.append(sample)\n\n\n# parse data as (p1 p2 p3) 9*tri_num (position only)\ndef obj2tri(file_name, batch_data, respos_vert = 
[]):\n if not os.path.isfile(file_name):\n print(file_name + \" file not exist\")\n return\n if not file_name.endswith('.obj'):\n print(file_name + \" Wrong file format, expect obj.\")\n return\n\n vert = []\n data = []\n dim = [0, 0]\n count = 0\n with open(file_name, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'v':\n v = list(map(float, s[1:]))\n # subtract the rest pos\n if respos_vert:\n v = list(map(operator.sub, v, respos_vert[count]))\n vert.append(v)\n count += 1\n elif s[0] == 'f':\n face = []\n id1 = int(s[1].strip().split('/')[0]) - 1 # index start at 1\n id2 = int(s[2].strip().split('/')[0]) - 1\n id3 = int(s[3].strip().split('/')[0]) - 1\n face.extend(vert[id1])\n face.extend(vert[id2])\n face.extend(vert[id3])\n data.append(face)\n\n dim[0] = len(data)\n dim[1] = len(vert)\n batch_data.append(data)\n\n return dim\n\n\n# output: position velocity cotangent length area\ndef obj2tri2(file_name, batch_data, addvel=True, addcot=True, adddist=False, addarea=False):\n if not os.path.isfile(file_name):\n print(file_name + \" file not exist\")\n return\n if not file_name.endswith('.obj'):\n print(file_name + \" Wrong file format, expect obj.\")\n return\n\n vert = []\n vel = [] # velocity\n data = []\n dim = [0, 0]\n with open(file_name, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'v':\n vert.append(map(float, s[1:]))\n elif s[0] == 'nv':\n vel.extend(map(float, s[1:]))\n elif s[0] == 'f':\n face = []\n id1 = int(s[1].strip().split('/')[0]) - 1 # index start at 1\n id2 = int(s[2].strip().split('/')[0]) - 1\n id3 = int(s[3].strip().split('/')[0]) - 1\n # position\n p1 = vert[id1]\n p2 = vert[id2]\n p3 = vert[id3]\n face.extend(p1)\n face.extend(p2)\n face.extend(p3)\n # velocity\n if addvel:\n face.extend([vel[id1], vel[id2], vel[id3]])\n # edge vectors\n v1 = np.array(p3) - np.array(p2) # p3 .- p2 \n v2 = np.array(p1) - np.array(p3)\n v3 = np.array(p2) - np.array(p1)\n if adddist:\n dist1 = LA.norm(v1)\n dist2 = LA.norm(v2)\n dist3 = LA.norm(v3)\n face.extend([dist1, dist2, dist3])\n if addcot:\n ca1 = np.dot(-v2, v3) / LA.norm(np.cross(-v2, v3)) # cot\n ca2 = np.dot(v1, -v3) / LA.norm(np.cross(v1, -v3))\n ca3 = np.dot(-v1, v2) / LA.norm(np.cross(-v1, v2))\n face.extend([ca1, ca2, ca3])\n if addarea:\n area = 0.5 * LA.norm(np.cross(v1, v2)); # triangle area\n face.extend([area, area, area])\n #add current line\n data.append(face)\n\n dim[0] = len(data)\n dim[1] = len(vert)\n batch_data.append(data)\n return dim\n\n#===only get faces once, store the indices/faces info instead of opening the file everytime\ndef getfaces(obj_in):\n indices =[]\n faces = []\n # read from original obj\n with open(obj_in, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'f':\n id1 = int(s[1].strip().split('/')[0]) - 1 # index start at 1\n id2 = int(s[2].strip().split('/')[0]) - 1\n id3 = int(s[3].strip().split('/')[0]) - 1\n indices.append([id1, id2, id3])\n faces.append(line)\n\n return indices, faces\n\n\n# convert result from NN to obj\n# input: pos1 pos2 pos3 --> tri (v9*N)\n# v_dim: total vertices num\ndef tri2obj(input, v_dim, indices, faces, obj_out):\n vertices = np.array([np.empty(3) for item in range(v_dim)])\n tri_id = 0\n dim = len(indices)\n\n for i in range(0,dim):\n vertices[indices[i][0]] = input[tri_id][0:3]\n vertices[indices[i][1]] = input[tri_id][3:6]\n vertices[indices[i][2]] = input[tri_id][6:9]\n tri_id = tri_id + 1\n\n if vertices.shape == 0:\n print(\"Error: vertices.shape=0\")\n\n # write to new 
file\n with open(obj_out, \"w+\") as f2:\n for x in range(v_dim):\n line = \"v {} {} {}\\n\".format(vertices[x][0], vertices[x][1], vertices[x][2])\n f2.write(line)\n for face in faces:\n f2.write(face)\n\n\n\n# input is 3*n position\ndef write_obj(inputs, faces, obj_out):\n # write to new file\n with open(obj_out, \"w+\") as f:\n for x in range(0,700):\n line = \"v {} {} {}\\n\".format(inputs[x][0], inputs[x][1], inputs[x][2])\n f.write(line)\n for face in faces:\n f.write(face)\n\n\n# Adapting the learning rate\n# 1. Decrease the learning rate gradually based on the epoch.\n# 2. Decrease the learning rate using punctuated large drops at specific epochs.\n# learning rate schedule\ndef step_decay(epoch):\n initial_lrate = 0.1\n drop = 0.5\n epochs_drop = 10.0\n lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\n return lrate\n\n\ndef save_u(upsm):\n row = len(upsm)\n col = len(upsm[0])\n with open('upsample.txt','w') as f:\n for x in range(0,col):\n line = [str(upsm[y][x]) for y in range(0,row)]\n f.writelines([\"%s \" % item for item in line])\n f.writelines(\"\\n\")\n\n\n","sub_path":"smooth_pool_modified/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":10888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"30960618","text":"from rest_framework import viewsets\nfrom .models import Team, Student\nfrom .serializers import TeamSerializer, StudentSerializer\nfrom rest_framework.permissions import AllowAny\n\nclass TeamViewSet(viewsets.ModelViewSet):\n permission_classes = [AllowAny]\n queryset = Team.objects.all().order_by('-totalScore')\n serializer_class = TeamSerializer\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n\n def get_queryset(self):\n queryset = Student.objects.all()\n teamid = self.request.query_params.get(\"teamid\", None)\n if teamid is not None:\n queryset = queryset.filter(team_id=teamid)\n return queryset\n\n\n","sub_path":"checklionproj/people/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"74910599","text":"\nlineId = '1'\n\n\ndef itemset_format(eid, itemset):\n return eid + ':' + \",\".join(itemset)\n\n\ndef build_seq(sid, sequence):\n result = []\n if len(sequence) == 0:\n return ';'.join(result)\n\n eid = sequence[0][0]\n itemset = []\n\n for item in sequence:\n if item[0] == eid:\n itemset.append(item[1])\n else:\n result.append(itemset_format(eid, itemset))\n itemset = []\n eid = item[0]\n itemset.append(item[1])\n result.append(itemset_format(eid, itemset))\n return ';'.join(result)\n\nif __name__ == '__main__':\n\n f = open('format.txt', 'w')\n seq = []\n for line in open('testData.txt', 'r'):\n words = line.rstrip('\\n').split(' ')\n if words[0] == lineId:\n seq.append(words[1:3])\n else:\n result = build_seq(lineId, seq)\n f.write(result + '\\n')\n lineId = words[0]\n seq = []\n seq.append(words[1:3])\n\n f.write(build_seq(lineId, seq) + '\\n')\n\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49008346","text":"from pytest import fixture\nimport vcr\n\nmy_vcr = 
vcr.VCR(\n\tserializer='yaml',\n\tcassette_library_dir='tests/vcr_cassettes',\n\tpath_transformer=vcr.VCR.ensure_suffix('.yaml'),\n\trecord_mode='once',\n\tfilter_headers=[('authorization', None)]\n)\n\n@my_vcr.use_cassette()\ndef test_supervisor_list(session):\n\tresponse = session.Users.AllSupervisors()\n\tassert isinstance(response, dict)\n\tassert 'Users' in response.keys(), \"The Users should be in the response\"\n\n@my_vcr.use_cassette()\ndef test_interviewer_list(session, params):\n\tresponse = session.Users.AllInterviewers(params['SupervisorId'])\n\tassert isinstance(response, dict)\n\tassert 'Users' in response.keys(), \"The Users should be in the response\"\n\n\n@my_vcr.use_cassette()\ndef test_user_info(session, params):\n\tresponse = session.Users.GetInfo(params['SupervisorId'])\n\tassert isinstance(response, dict)\n\tassert response['UserId'] == params['SupervisorId'], \"The userid should be in the response\"\n\n@my_vcr.use_cassette()\ndef test_user_archive(session, params):\n\tresponse = session.Users.Archive(params['SupervisorId'])\n\tassert response == 200, \"did we archive someone?\"\n\n\n@my_vcr.use_cassette()\ndef test_user_unarchive(session, params):\n\tresponse = session.Users.Unarchive(params['SupervisorId'])\n\tassert response == 200, \"did we unarchive someone?\"\n","sub_path":"tests/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"254603753","text":"import numpy as np\nimport matplotlib.pyplot as plt\nplt.figure(1)\n# ax1=plt.subplot(211)\nx = np.linspace(-60,0,50) \ny = 2/(1+np.exp((-x-15)/10))\nfor i in xrange(1,50):\n\tplt.figure(1)\n\tplt.plot(x,y)\nplt.show()","sub_path":"src/test/07/time4.py","file_name":"time4.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160771067","text":"x = int (input('Вводите деньги: '))\nif x < 0:\n x = int (input('Пожалуйста , Вводите деньги больше 0 :'))\nelif x >= 0 :\n Деньги_50 = x // 50\n money = x - 50 * Деньги_50\n Деньги_10 = money // 10\n money1 = money - 10 * Деньги_10\n Деньги_5= money1 // 5\n money2 = money1 - 5 * Деньги_5\n Деньги_1 = money2 // 1\n\nprint('Количество(50р):',Деньги_50,',''количество(10р):',Деньги_10,',''количество(5р):',Деньги_5,',''количество(1р):',Деньги_1)","sub_path":"Задание- №3.py","file_name":"Задание- №3.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34588388","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\n\nos.makedirs('plots/15-seaborn_pairplot', exist_ok=True)\n\n#darkgrid, whitegrid, dark, white, ticks\nsns.set(style='darkgrid', palette='coolwarm')\n\n#Diabetes Pairplot\ndiabetes_df = pd.read_csv('data/diabetes.data',\n sep='\\s+',\n header=0)\nsns.pairplot(diabetes_df, hue='SEX', diag_kind='hist')\nplt.savefig('plots/15-seaborn_pairplot/diabetes_pairplot.png')\nplt.clf()\n\n# Boston Pairplot\nboston_df = pd.read_csv('data/boston/housing.data',\n sep='\\s+',\n header=None)\nboston_df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT',\n 'MEDV']\nsns.pairplot(boston_df, hue='CHAS', diag_kind='hist')\nplt.savefig('plots/15-seaborn_pairplot/boston_pairplot.png')\nplt.clf()\n\n# Cancer Pairplot\ncancer_df = 
pd.read_csv('data/breast-cancer/wdbc.data',\n sep=',',\n header=0)\ncancer_df.columns = ['id', 'diagnosis', 'mean radius', 'mean texture', 'mean perimeter', 'mean area',\n 'mean smoothness', 'mean compactness', 'mean concavity',\n 'mean concave points', 'mean symmetry', 'mean fractal dimension',\n 'radius error', 'texture error', 'perimeter error', 'area error',\n 'smoothness error', 'compactness error', 'concavity error',\n 'concave points error', 'symmetry error', 'fractal dimension error',\n 'worst radius', 'worst texture', 'worst perimeter', 'worst area',\n 'worst smoothness', 'worst compactness', 'worst concavity',\n 'worst concave points', 'worst symmetry', 'worst fractal dimension']\ncancer_df['encoded_diagnosis'] = cancer_df['diagnosis'].map({'B': 0, 'M': 1})\nsns.pairplot(cancer_df, hue='encoded_diagnosis', diag_kind='hist')\nplt.savefig('plots/15-seaborn_pairplot/cancer_pairplot.png')\nplt.clf()\n\n\n#Iris Pairplot\niris_df = pd.read_csv('data/iris/iris-encoded.data',\n sep=',',\n header=0)\niris_df.columns = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)', 'class']\nsns.pairplot(iris_df, hue='class', diag_kind='hist')\nplt.savefig('plots/15-seaborn_pairplot/iris_pairplot.png')\nplt.clf()\n\n\n# Wine Pairplot\nwine_df = pd.read_csv('data/wine.data',\n sep=',',\n header=0)\nwine_df.columns = ['class', 'alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols',\n 'flavanoids',\n 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280 od315_of_diluted_wines',\n 'proline']\nsns.pairplot(wine_df, hue='class', diag_kind='hist')\nplt.savefig('plots/15-seaborn_pairplot/wine_pairplot.png')\n\nplt.close()\n","sub_path":"class6-notebook/15-seaborn_pairplot.py","file_name":"15-seaborn_pairplot.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"527847634","text":"#################################################################################\r\n## MSP430G2xx3 based BattLab-One Production Version 1.01\r\n##\r\n## Doug Peters\r\n## Bluebird Labs LLC.\r\n## www.bluebird-labs.com\r\n## February 2019\r\n## Built with CCS V7.0\r\n##\r\n## Copyright (c) 2020, Bluebird Labs LLC\r\n## All rights reserved.\r\n##\r\n## Redistribution and use in source and binary forms, with or without\r\n## modification, are permitted provided that the following conditions\r\n## are met:\r\n##\r\n## * Redistributions of source code must retain the above copyright\r\n## notice, this list of conditions and the following disclaimer.\r\n##\r\n## * Redistributions in binary form must reproduce the above copyright\r\n## notice, this list of conditions and the following disclaimer in the\r\n## documentation and/or other materials provided with the distribution.\r\n##\r\n## * Neither the name of Bluebird Labs LLC nor the names of\r\n## its contributors may be used to endorse or promote products derived\r\n## from this software without specific prior written permission.\r\n##\r\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\r\n## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r\n## PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\r\n## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\r\n## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r\n## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\r\n## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\r\n## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\r\n## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\r\n## EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n##\r\n#################################################################################\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\nimport csv\r\nfrom csv import reader\r\nimport time\r\nimport datetime\r\nimport serial\r\nimport serial.tools.list_ports\r\nfrom serial.tools import list_ports\r\nimport matplotlib\r\nfrom matplotlib.widgets import Cursor\r\nimport matplotlib.pyplot as plt\r\nfrom tkinter import colorchooser\r\nfrom tkinter import font\r\nimport os\r\nimport pkg_resources.py2_warn\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nimport webbrowser\r\n\r\nmatplotlib.use('TkAgg')\r\n\r\n#from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom matplotlib.figure import Figure\r\n\r\n################################################################################# \r\n### SETUP ROOT, FRAMES, & MATPLOTLIB CANVAS\r\n#################################################################################\r\n\r\nroot = tk.Tk()\r\nroot.wm_title('BattLab One')\r\nroot.resizable(False,False)\r\nroot.grid_rowconfigure(0, weight=1)\r\nroot.grid_columnconfigure(0, weight=1)\r\nroot.iconbitmap('bbirdlogo.ico')\r\n\r\ns = ttk.Style() \r\ns.configure('TLabelframe', background='dark gray')\r\n\r\nprofile_frame = ttk.Frame(root,style='TLabelframe', width = 800, height = 600)\r\nprofile_frame.grid(row=0, column=0, padx=5,pady=(1,1),sticky = 'n')\r\n\r\ndef openBBL():\r\n webbrowser.open_new('http://bluebird-labs.com')\r\n\r\nimg = PhotoImage(file='bbirdlogo_png1.png')\r\nlogo_button = tk.Button(profile_frame,image=img,command=openBBL,state=tk.NORMAL)\r\nlogo_button.grid(row=25,column=0,rowspan=4, padx=(5,4),pady=(5,5),sticky = 'sw')\r\n\r\nframe = ttk.Frame(profile_frame, borderwidth=5, relief='sunken', width=800, height=450)\r\nframe.grid(row=0, column=1, rowspan=20, padx=(3,10),pady=(1,1), sticky='n')\r\n\r\nframe1 = ttk.Frame(profile_frame, borderwidth=5, relief='sunken', width=800, height=600)\r\nframe1.grid(row=20, column=1, rowspan=10, padx=(10,10),pady=(5,15), sticky='e')\r\n\r\nw = Canvas(frame, width=800, height=400)\r\nw.config(background='black')\r\nw.grid(row=0,column = 3,padx=5,pady=(5,5),sticky = 'n')\r\n\r\nw1 = Canvas(frame1, width=250, height=200)\r\nw1.config(background='black')\r\nw1.grid(padx=5,pady=(5,5),sticky = 'nsew')\r\n\r\n#################################################################################\r\n### SETUP GLOBAL VARIABLES\r\n#################################################################################\r\nglobal si,x,y, avg_active_event_I, line_color, sleep_timer,soc_file\r\n\r\nx = []\r\ny = []\r\nsi = []\r\nsoc = []\r\nocv = []\r\nesr = []\r\nsoc_tab = []\r\nocv_tab = []\r\nesr_tab = []\r\ndc = []\r\ndc1 = 
[]\r\n\r\nhi_offset=tk.DoubleVar()\r\nhi_offset.set(1.0)\r\n\r\nlo_offset=tk.DoubleVar()\r\nlo_offset.set(0)\r\n\r\nsleep_timer=tk.IntVar()\r\nsleep_timer.set(10)\r\n \r\nLSB=0.0025\r\n\r\nline_color = StringVar()\r\nline_color.set(\"blue\")\r\n\r\nline_width = DoubleVar()\r\nline_width.set(0.5)\r\n\r\nreading = StringVar()\r\n\r\nCAL_12_data = StringVar()\r\nCAL_15_data = StringVar()\r\nCAL_24_data = StringVar()\r\nCAL_30_data = StringVar()\r\nCAL_36_data = StringVar()\r\nCAL_37_data = StringVar()\r\nCAL_42_data = StringVar()\r\nCAL_45_data = StringVar()\r\n\r\nOffset_12_data = StringVar()\r\nOffset_15_data = StringVar()\r\nOffset_24_data = StringVar()\r\nOffset_30_data = StringVar()\r\nOffset_36_data = StringVar()\r\nOffset_37_data = StringVar()\r\nOffset_42_data = StringVar()\r\nOffset_45_data = StringVar()\r\n\r\nsense_resistor = tk.DoubleVar()\r\nsense_resistor.set(0.160)\r\n\r\nsense_resistor_LO = tk.DoubleVar()\r\nsense_resistor_LO.set(99)\r\n\r\ndata = tk.StringVar()\r\nversion = tk.StringVar()\r\n\r\nsoc_file = tk.StringVar()\r\nsoc_file.set('SOC_profiles/AA_AAA.csv')\r\n\r\nsamples= tk.IntVar()\r\nsamples.set(4)\r\n\r\n#################################################################################\r\n### GET SERIAL PORT AND CONNECT\r\n#################################################################################\r\n\r\nbaud_rate = 115200\r\ncom_port = \"NONE\"\r\n\r\ndef get_ports():\r\n ports = list(serial.tools.list_ports.comports())\r\n cpl = []\r\n for p in ports:\r\n cpl.append(p.device) \r\n return cpl\r\n\r\ndef popupmsg(msg):\r\n popup = tk.Tk()\r\n popup.iconbitmap('bbirdlogo.ico')\r\n popup.wm_title(\"About\")\r\n label = ttk.Label(popup, text=msg,)\r\n label.grid(row=0,column=0, padx=(10,10),pady= (10,10),sticky = 'nsew')\r\n pop_up_button = tk.Button(popup, text=\"Okay\",command = popup.destroy,state=tk.NORMAL)\r\n pop_up_button.grid(row=3,column=0,padx=30,pady=(10,10),sticky = 'w')\r\n popup.mainloop()\r\n\r\ndef popupmsg1(msg):\r\n popup1 = tk.Tk()\r\n popup1.iconbitmap('bbirdlogo.ico')\r\n popup1.wm_title(\"Sleep Capture Duration\")\r\n label2 = ttk.Label(popup1, text=\"Current Duration in Seconds (10 recommended) = \"+ str(sleep_timer.get()))\r\n label2.grid(row=1,column=0, padx=(10,10),pady= (10,10),sticky = 'nsew')\r\n sleep_dur__combo_box = ttk.Combobox(popup1, width=5, textvariable=sleep_timer)\r\n sleep_dur__combo_box['values'] = (1, 3, 5, 10) \r\n sleep_dur__combo_box.grid(row=3, column=0,padx=(150,4),sticky = 'w')\r\n sleep_dur__combo_box.insert(0,sleep_timer.get())\r\n def set_sleep_time():\r\n sleep_timer.set(float(sleep_dur__combo_box.get()))\r\n popup1.destroy()\r\n label1 = ttk.Label(popup1, text=msg,)\r\n label1.grid(row=0,column=0, padx=(10,10),pady= (10,10),sticky = 'nsew')\r\n pop_up_button2 = tk.Button(popup1, text=\"Set Time\",command = set_sleep_time,state=tk.NORMAL)\r\n pop_up_button2.grid(row=3,column=0,padx=80,pady=(10,10),sticky = 'w')\r\n popup1.mainloop()\r\n\r\ndef popupmsg2(msg):\r\n popup2 = tk.Tk()\r\n popup2.iconbitmap('bbirdlogo.ico')\r\n popup2.wm_title(\"Sleep Current Error\")\r\n label2 = ttk.Label(popup2, text=msg,)\r\n label2.grid(row=0,column=0, padx=(10,10),pady= (10,10),sticky = 'nsew')\r\n pop_up_button2 = tk.Button(popup2, text=\"Okay\",command = popup2.destroy,state=tk.NORMAL)\r\n pop_up_button2.grid(row=3,column=0,padx=30,pady=(10,10),sticky = 'w')\r\n popup2.mainloop()\r\n \r\ndef samplesmsg():\r\n smp = tk.Tk()\r\n def set_samples():\r\n samples.set(int(smp_combo_box.get()))\r\n \r\n if smp_combo_box.get() == 
'4':\r\n cmd = 't'\r\n bytes_returned = ser.write(cmd.encode())\r\n elif smp_combo_box.get() == '16':\r\n cmd = 'u'\r\n bytes_returned = ser.write(cmd.encode())\r\n elif smp_combo_box.get() == '64':\r\n cmd = 'v'\r\n bytes_returned = ser.write(cmd.encode())\r\n smp.destroy()\r\n smp.iconbitmap('bbirdlogo.ico')\r\n smp.wm_title(\"Set Sample Number\")\r\n label2 = ttk.Label(smp, text=\"Select the number of samples for averaging \\n\\r Number of samples: \",)\r\n label2.grid(row=0,column=0, padx=(10,10),pady= (10,10),sticky = 'nsew')\r\n smp_combo_box = ttk.Combobox(smp, width=3, textvariable=samples)\r\n smp_combo_box['values'] = (4,16,64) \r\n smp_combo_box.grid(row=1, column=0,padx=(110,4),pady=(1,1),sticky = 'w')\r\n smp_combo_box.insert(0,samples.get())\r\n smp_button = tk.Button(smp, text=\"Set Samples\",command = set_samples,state=tk.NORMAL)\r\n smp_button.grid(row=3,column=0,padx=80,pady=(10,10),sticky = 'w')\r\n \r\n smp.mainloop()\r\n\r\nser_port_combo_box = ttk.Combobox(profile_frame, values = get_ports(), width=10)\r\nser_port_combo_box.grid(row=2, column=0,padx=(10,4),sticky = 'w')\r\n\r\nser_port_cct = Label(profile_frame, text=\"BB1 Port = \",background='dark gray')\r\nser_port_cct.grid(row=1, column=0, padx=(10,4),pady=0,sticky = 'w')\r\n\r\nreset_comm = tk.Button(profile_frame,text='Connect',command=lambda:init(str(ser_port_combo_box.get())),state=tk.NORMAL)\r\nreset_comm.grid(row=2,column=0, padx=(160,4),pady= 0,sticky = 'w')\r\n\r\nreset_list = tk.Button(profile_frame,text='Refresh',command=lambda:update_ports(),state=tk.NORMAL)\r\nreset_list.grid(row=2,column=0, padx=(100,4),pady= 0,sticky = 'w')\r\n \r\ndef init(ser_port):\r\n global ser, sense_low, \\\r\n CAL_ADJ_12, CAL_ADJ_15, CAL_ADJ_24, CAL_ADJ_30, CAL_ADJ_36, CAL_ADJ_37, CAL_ADJ_42, CAL_ADJ_45, \\\r\n Offset_12, Offset_15, Offset_24, Offset_30, Offset_36, Offset_37, Offset_42, Offset_45\r\n \r\n try:\r\n ser = serial.Serial(ser_port, baud_rate, timeout= None)\r\n ser_port_cct.configure(text = \"Battlab Connected(\" + ser.name+\")\", foreground='green', font=('Arial Bold',10))\r\n ser_port_combo_box.delete(0,END)\r\n ser_port_combo_box.insert(0,ser.name)\r\n\r\n #Get Calibration data\r\n cmd = 'j'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\n CAL_12_data.set(ser.readline(2).hex())\r\n CAL_12 = int(CAL_12_data.get(),16)/1000\r\n\r\n CAL_15_data.set(ser.readline(2).hex())\r\n CAL_15 = int(CAL_15_data.get(),16)/1000\r\n\r\n CAL_24_data.set(ser.readline(2).hex())\r\n CAL_24 = int(CAL_24_data.get(),16)/1000\r\n\r\n CAL_30_data.set(ser.readline(2).hex())\r\n CAL_30 = int(CAL_30_data.get(),16)/1000\r\n\r\n CAL_36_data.set(ser.readline(2).hex())\r\n CAL_36 = int(CAL_36_data.get(),16)/1000\r\n\r\n CAL_37_data.set(ser.readline(2).hex())\r\n CAL_37 = int(CAL_37_data.get(),16)/1000\r\n\r\n CAL_42_data.set(ser.readline(2).hex())\r\n CAL_42 = int(CAL_42_data.get(),16)/1000\r\n\r\n CAL_45_data.set(ser.readline(2).hex())\r\n CAL_45 = int(CAL_45_data.get(),16)/1000\r\n\r\n #Get Low Current Offset calibration\r\n Offset_12_data.set(ser.readline(2).hex())\r\n Offset_12 = int(Offset_12_data.get(),16)/100000\r\n\r\n Offset_15_data.set(ser.readline(2).hex())\r\n Offset_15 = int(Offset_15_data.get(),16)/100000\r\n\r\n Offset_24_data.set(ser.readline(2).hex())\r\n Offset_24 = int(Offset_24_data.get(),16)/100000\r\n\r\n Offset_30_data.set(ser.readline(2).hex())\r\n Offset_30 = int(Offset_30_data.get(),16)/100000\r\n\r\n Offset_36_data.set(ser.readline(2).hex())\r\n Offset_36 = int(Offset_36_data.get(),16)/100000\r\n\r\n 
Offset_37_data.set(ser.readline(2).hex())\r\n Offset_37 = int(Offset_37_data.get(),16)/100000\r\n\r\n Offset_42_data.set(ser.readline(2).hex())\r\n Offset_42 = int(Offset_42_data.get(),16)/100000\r\n\r\n Offset_45_data.set(ser.readline(2).hex())\r\n Offset_45 = int(Offset_45_data.get(),16)/100000\r\n\r\n CAL_ADJ_12 = CAL_12\r\n CAL_ADJ_15 = CAL_15\r\n CAL_ADJ_24 = CAL_24\r\n CAL_ADJ_30 = CAL_30\r\n CAL_ADJ_36 = CAL_36\r\n CAL_ADJ_37 = CAL_37\r\n CAL_ADJ_42 = CAL_42\r\n CAL_ADJ_45 = CAL_45\r\n \r\n except serial.SerialException as e:\r\n messagebox.showinfo(\"Error No Battlab Device Connected\",e)\r\n pass\r\n \r\nports = list(serial.tools.list_ports.comports())\r\n\r\nfor p in ports:\r\n \r\n if p.vid == 0x0403 and p.pid == 0x6001:\r\n ser_num_prefix = p.serial_number[0] + p.serial_number[1]\r\n if ser_num_prefix == 'BB':\r\n com_port = list(list_ports.grep(\"0403:6001\"))[0][0] \r\n init(com_port)\r\n\r\nif com_port == 'NONE':\r\n ser_port_cct.configure(text=\"Not Connected!\", foreground='red')\r\n\r\ndef update_ports():\r\n ports = list(serial.tools.list_ports.comports())\r\n cpl = []\r\n for p in ports:\r\n cpl.append(p.device)\r\n ser_port_combo_box.configure(values=cpl)\r\n\r\nsense_resistor_LO.set(99)\r\n\r\n################################################################################# \r\n### GET DEVICE SPECIFIC CALIBRATION DATA\r\n#################################################################################\r\n\r\n#CAL_ADJ_12 = CAL_12 + 0.0254\r\n#CAL_ADJ_15 = CAL_15 + 0.0003\r\n#CAL_ADJ_24 = CAL_24 + 0.0029\r\n#CAL_ADJ_30 = CAL_30 + 0.0015\r\n#CAL_ADJ_36 = CAL_36 + 0.0016\r\n#CAL_ADJ_37 = CAL_37 + 0.0036\r\n#CAL_ADJ_42 = CAL_42 + 0.0047\r\n#CAL_ADJ_45 = CAL_45 + 0.0058\r\n\r\n\r\n#################################################################################\r\n### SETUP MENU FUNCTIONS\r\n#################################################################################\r\n\r\n \r\ndef OpenFile():\r\n name = filedialog.askopenfilename(initialdir = 'C:/Users/dwpete3/Desktop',title = 'Select file', \\\r\n filetypes = (('CSV Files','*.csv'),('all files','*.*')))\r\n\r\n with open(name, newline='') as csvfile:\r\n load_profile = csv.reader(csvfile, delimiter=',')\r\n header = next(load_profile, None)\r\n if header[0] != 'Battery Chemistry':\r\n popupmsg(\"File error! 
\\n\\r Does not appear to be a Battlab-One file.\")\r\n else:\r\n for row in load_profile:\r\n batt_chem.set(row[0])\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,row[1])\r\n batt_cells.set(row[2])\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,row[3])\r\n voltage.set(row[4])\r\n rt_dur.delete(0,END)\r\n rt_dur.insert(0,row[5])\r\n sl_duration_entry_3.delete(0,END)\r\n sl_duration_entry_3.insert(0,row[6])\r\n ae_captured_current_label_4.configure(text=row[7])\r\n ae_captured_duration_label_4.configure(text=row[8])\r\n sl_captured_current_label_4.configure(text=row[9])\r\n sl_captured_duration_label_4.configure(text=row[10])\r\n dut_cutoff_captured_label_4.configure(text=row[11])\r\n batt_cap_captured_label_4.configure(text=row[12]),\\\r\n average_current_profile_captured_label_4.configure(text=row[13])\r\n ae_optimized_current_entry_4.delete(0,END)\r\n ae_optimized_current_entry_4.insert(0,row[14])\r\n ae_optimized_duration_entry_4.delete(0,END)\r\n ae_optimized_duration_entry_4.insert(0,row[15])\r\n sl_optimized_current_entry_4.delete(0,END)\r\n sl_optimized_current_entry_4.insert(0,row[16])\r\n sl_optimized_duration_entry_4.delete(0,END) \r\n sl_optimized_duration_entry_4.insert(0,row[17])\r\n dut_cutoff_optimized_entry_4.delete(0,END) \r\n dut_cutoff_optimized_entry_4.insert(0,row[18])\r\n batt_cap_optimized_entry_4.delete(0,END)\r\n batt_cap_optimized_entry_4.insert(0,row[19])\r\n average_current_profile_optimized_label_4.configure(text=row[20])\r\n captured_battery_life_hours_graph.configure(text=row[21])\r\n captured_battery_life_days_graph.configure(text=row[22])\r\n optimized_battery_life_hours_graph.configure(text=row[23])\r\n optimized_battery_life_days_graph.configure(text=row[24])\r\n\r\n frame.update()\r\n\r\ndef SaveFile():\r\n root.filename = filedialog.asksaveasfilename(initialdir = 'C:/Users/dwpete3/Desktop',title = 'Select file',\r\n filetypes = (('CSV Files','*.csv'),('all files','*.*')))\r\n \r\n with open((root.filename+'.csv'), 'w', newline='') as analysis:\r\n writer = csv.writer(analysis, delimiter=',') \r\n\r\n writer.writerow([\"Battery Chemistry\",\\\r\n \"Battery Capacity (mAh)\",\\\r\n \"Number of Cells\",\\\r\n \"DUT Cutoff Voltage (V)\",\\\r\n \"PSU Voltage (V)\",\\\r\n \"Entered Active Event Duration (S)\",\\\r\n \"DUT Sleep Duration (S)\",\\\r\n \"Captured Active Event Current (mA)\",\\\r\n \"Captured Active Event Duration(S)\",\\\r\n \"Captured Sleep Current (uA)\",\\\r\n \"Captured DUT Sleep Duration (S)\",\\\r\n \"Captured DUT Cutoff Voltage (V)\",\\\r\n \"Captured DUT Effective Battery Capacity (mAh)\",\\\r\n \"Captured Total Current Profile (mA)\",\\\r\n \"Optimized Active Event Current (S)\",\\\r\n \"Optimized Active Event Duration (mA)\",\\\r\n \"Optimized Sleep Event Current (uA)\",\\\r\n \"Optimized Sleep Event Duration (S)\",\\\r\n \"Optimized DUT Cutoff Voltage (V)\",\\\r\n \"Optimized Effective Battery Capacity (mAh)\",\\\r\n \"Optimized Total Current Profile (mA)\",\\\r\n \"Captured Battery Life (Hours)\",\\\r\n \"Captured Battery Life (Days)\",\\\r\n \"Optimized Battery Life (Hours)\",\\\r\n \"Optimized Battery Life (Days)\"])\r\n \r\n writer.writerow([batt_chem.get(), \\\r\n battery_capactity_entry_1.get(),\\\r\n batt_cells.get(),\\\r\n dut_cutoff_voltage_entry.get(),\\\r\n psu_combo_box.get(),\\\r\n runtime_duration.get(),\\\r\n sleep_duration.get(),\\\r\n ae_captured_current_label_4.cget(\"text\"),\\\r\n ae_captured_duration_label_4.cget(\"text\"),\\\r\n 
sl_captured_current_label_4.cget(\"text\"),\\\r\n sl_captured_duration_label_4.cget(\"text\"),\\\r\n dut_cutoff_captured_label_4.cget(\"text\"),\\\r\n batt_cap_captured_label_4.cget(\"text\"),\\\r\n average_current_profile_captured_label_4.cget(\"text\"),\\\r\n ae_optimized_current_entry_4.get(),\\\r\n ae_optimized_duration_entry_4.get(),\\\r\n sl_optimized_current_entry_4.get(),\\\r\n sl_optimized_duration_entry_4.get(),\\\r\n dut_cutoff_optimized_entry_4.get(),\\\r\n batt_cap_optimized_entry_4.get(),\\\r\n average_current_profile_optimized_label_4.cget(\"text\"),\\\r\n captured_battery_life_hours_graph.cget(\"text\"),\\\r\n captured_battery_life_days_graph.cget(\"text\"),\\\r\n optimized_battery_life_hours_graph.cget(\"text\"),\\\r\n optimized_battery_life_days_graph.cget(\"text\")]) \r\n \r\n analysis.close\r\n \r\ndef Linecolor():\r\n ln_color, hex_color = colorchooser.askcolor(parent=frame,initialcolor=(255, 0, 0)) \r\n line_color.set(hex_color)\r\n\r\ndef SleepDurationTime():\r\n popupmsg1(\"Please choose the duration for sleep capture mode in seconds\")\r\n\r\ndef set_sample_number():\r\n samplesmsg()\r\n\r\n#def get_config():\r\n # ser.reset_input_buffer()\r\n # cmd = 'm'\r\n # bytes_returned = ser.write(cmd.encode())\r\n # data.set(ser.readline(2).hex())\r\n # print('MFR CAL ',data.get())\r\n # data.set(ser.readline(2).hex())\r\n# print('ADC CONFIG ',data.get())\r\n \r\ndef About():\r\n #messagebox.showinfo(\"Battlab-One Version 1.0 \\n\\r Contact www.bluebird-labs.com/support for issues\")\r\n ser.reset_input_buffer()\r\n cmd = 'p'\r\n bytes_returned = ser.write(cmd.encode())\r\n version.set(ser.readline(2).hex())\r\n popupmsg(\"Battlab-One Version 1.0 \\n\\r Contact www.bluebird-labs.com/support for issues\" + \"\\n\\r Firmware Version \" + str(int(version.get(),16)/1000))\r\n\r\ndef Disclaimer():\r\n popupmsg(' \\n \\\r\n Bluebird Labs LLC. \\n\\\r\n Copyright (c) 2020, Bluebird Labs LLC \\n\\\r\n All rights reserved.\\n\\\r\n \\n\\\r\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \\n\\\r\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \\n\\\r\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \\n\\\r\n PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR \\n\\\r\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \\n\\\r\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \\n\\\r\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; \\n\\\r\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \\n\\\r\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \\n\\\r\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, \\n\\\r\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.')\r\n\r\n\r\ndef quitapp():\r\n root.destroy()\r\n#################################################################################\r\n### SETUP MENU SYSTEM\r\n#################################################################################\r\nmenu = Menu(root)\r\n\r\nroot.config(menu=menu)\r\n\r\nfilemenu = Menu(menu)\r\nmenu.add_cascade(label='File', menu=filemenu)\r\nfilemenu.add_command(label='Open', command=OpenFile)\r\nfilemenu.add_command(label='Save', command=SaveFile)\r\nfilemenu.add_separator()\r\nfilemenu.add_command(label='Exit', command=quitapp)\r\n\r\noptionsmenu = Menu(menu)\r\nmenu.add_cascade(label='Options', menu=optionsmenu)\r\noptionsmenu.add_command(label='Line Color', command=Linecolor)\r\noptionsmenu.add_command(label='Sleep Current Capture Duration', command=SleepDurationTime)\r\noptionsmenu.add_command(label='Sample Number', command=set_sample_number)\r\n\r\n#optionsmenu.add_command(label='Get CONFIG...', command=get_config)\r\n\r\nhelpmenu = Menu(menu)\r\nmenu.add_cascade(label='Help', menu=helpmenu)\r\nhelpmenu.add_command(label='About', command=About)\r\nhelpmenu.add_command(label='Disclaimer', command=Disclaimer)\r\n\r\n#filemenu.entryconfigure(4, state=tk.DISABLED)\r\n\r\n#################################################################################\r\n#### MATPLOTLIB FOR ACTIVE CURRENT PLOTS\r\n#################################################################################\r\ndef data_plot(x,y,voltage, minimum,maximum,ae_duration,ae_current):\r\n #PLOT THE DATA\r\n\r\n global ax, f\r\n \r\n f = plt.figure(figsize=(8, 4), dpi=100,clear=True)\r\n ax = f.add_subplot(111)\r\n plt.style.use('fast')\r\n \r\n ax.set_title('Voltage: ' + voltage + ' volts ' +'Capture Time: '+ ae_duration + ' seconds' + \\\r\n '\\n' + 'Minimum: ' + minimum + 'mA ' + 'Maximum: '+ maximum + 'mA ' + 'Average Active Current: ' + ae_current + 'mA ', fontsize=10)\r\n\r\n ax.set_xlabel('Milliseconds')\r\n ax.set_ylabel('Milliamps')\r\n \r\n ax.plot(x, y, color=line_color.get(), linewidth=line_width.get())\r\n\r\n ax.grid(b=True, which='major', axis='both',color='black', linestyle='-', linewidth=.1)\r\n \r\n cursor = Cursor(ax,color=line_color.get(), linewidth=.3)\r\n \r\n canvas = FigureCanvasTkAgg(f, master=w)\r\n canvas.get_tk_widget().grid(row=0,column=6)\r\n\r\n toolbarFrame = ttk.Frame(master=frame)\r\n toolbarFrame.grid(row=14, column=3, sticky='w' )\r\n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)\r\n #toolbar = NavigationToolbar2TkAgg(canvas, toolbarFrame)\r\n canvas.draw()\r\n\r\n#################################################################################\r\n### SETUP STATE OF CHARGE GRAPH\r\n################################################################################# \r\ndef update_soc_chart(eventObject):\r\n global ax1, f1\r\n\r\n try:\r\n float(dut_cutoff.get())\r\n except ValueError:\r\n popupmsg(\"Please only enter 0123456789.\")\r\n \r\n f1 = plt.figure(figsize=(4, 2), 
dpi=100,clear=False)\r\n ax1 = f1.add_subplot(111)\r\n\r\n # Clear the plot\r\n soc_tab[:] = []\r\n ocv_tab[:] = []\r\n esr_tab[:] = []\r\n dc[:] = []\r\n dc1[:] = []\r\n soc_state = 0\r\n curve = 0\r\n \r\n ax1.plot(soc_tab, ocv_tab)\r\n ax1.cla()\r\n \r\n with open(soc_file.get(), 'r')as csvfile:\r\n inp1 = csv.reader(csvfile, delimiter = ',')\r\n headers = next(inp1, None)\r\n headers1= next(inp1, None)\r\n headers2 = next(inp1, None) \r\n\r\n for row in inp1:\r\n soc_tab.append(int(row[0]))\r\n ocv_tab.append(float(row[1]))\r\n esr_tab.append(float(row[2]))\r\n\r\n for i in range(len(ocv_tab)-1):\r\n if float(ocv_tab[i]) < float(dut_cutoff.get()):\r\n soc_state=soc_state+1\r\n curve = ocv_tab[soc_state]\r\n \r\n csvfile.close()\r\n \r\n #PLOT THE DATA\r\n plt.style.use('fast')\r\n \r\n ax1.set_title(soc_file.get())\r\n \r\n \r\n ax1.grid(b=True, which='major', axis='both',color='black', linewidth=.1)\r\n\r\n ax1.plot(soc_tab,ocv_tab, color='blue', linewidth=.5)\r\n \r\n ax1.plot(soc_state,float(curve), marker='o',color='red', linewidth=1)\r\n \r\n ax1.set_xlabel('SOC(%)')\r\n ax1.set_ylabel('OCV(V)')\r\n plt.ylim([min(ocv_tab)-.2,max(ocv_tab)+.2])\r\n ax1.minorticks_on()\r\n f1.tight_layout()\r\n ax1.invert_xaxis()\r\n\r\n cursor1 = Cursor(ax1,color=line_color.get(), linewidth=.3)\r\n \r\n canvas1 = FigureCanvasTkAgg(f1, master=w1)\r\n canvas1.get_tk_widget().grid(row=1,column=1,sticky='w')\r\n\r\n #toolbar = NavigationToolbar2TkAgg(canvas, toolbarFrame)\r\n canvas1.draw()\r\n#################################################################################\r\n### SETUP VOLTAGE PARAMETERS\r\n#################################################################################\r\n \r\ndef set_voltage(eventObject):\r\n\r\n try:\r\n float(batt_cap.get())\r\n except ValueError:\r\n popupmsg(\"Please only enter 0123456789.\")\r\n \r\n if ((batt_chem.get() == 'AA-Alkaline' or batt_chem.get() == 'AAA-Alkaline') and batt_cells.get() == '1'):\r\n psu_combo_box.current(1)\r\n soc_file.set('SOC_profiles/AA_AAA.csv') \r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,1500) \r\n voltage.set(1.5)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,1.0)\r\n elif((batt_chem.get() == 'AA-Alkaline' or batt_chem.get() == 'AAA-Alkaline') and batt_cells.get() == '2'):\r\n psu_combo_box.current(3)\r\n soc_file.set('SOC_profiles/AA_AAA.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,3000)\r\n voltage.set(3.0)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,2.0)\r\n elif((batt_chem.get() == 'AA-Alkaline' or batt_chem.get() == 'AAA-Alkaline') and batt_cells.get() == '3'):\r\n psu_combo_box.current(6)\r\n soc_file.set('SOC_profiles/AA_AAA.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,4500)\r\n voltage.set(4.5)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,3.0)\r\n elif(batt_chem.get() == 'LI-Ion' and batt_cells.get() == '1'):\r\n psu_combo_box.current(5)\r\n soc_file.set('SOC_profiles/LiIon.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,2600)\r\n voltage.set(4.2)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,3.2)\r\n elif(batt_chem.get() == 'LiFepo4' and batt_cells.get() == '1'):\r\n psu_combo_box.current(5)\r\n soc_file.set('SOC_profiles/LiIon.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n 
battery_capactity_entry_1.insert(0,2300)\r\n voltage.set(3.6)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,3.0)\r\n elif(batt_chem.get() == 'AA-NiMH/NiCd' or batt_chem.get() == 'AAA-NiMH/NiCd' and batt_cells.get() == '1'):\r\n psu_combo_box.current(0)\r\n soc_file.set('SOC_profiles/NiMH_AA_AAA.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,1500)\r\n voltage.set(1.2)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,1.0)\r\n elif(batt_chem.get() == 'AA-NiMH/NiCd' or batt_chem.get() == 'AAA-NiMH/NiCd' and batt_cells.get() == '2'):\r\n psu_combo_box.current(1)\r\n soc_file.set('SOC_profiles/NiMH_AA_AAA.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,3000)\r\n voltage.set(2.4)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,2.0)\r\n elif(batt_chem.get() == 'AA-NiMH/NiCd' or batt_chem.get() == 'AAA-NiMH/NiCd' and batt_cells.get() == '3'):\r\n psu_combo_box.current(4)\r\n soc_file.set('SOC_profiles/NiMH_AA_AAA.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,4500)\r\n voltage.set(3.6)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,3.0)\r\n elif(batt_chem.get() == batt_chem.get() == 'CR123' and batt_cells.get() == '1'):\r\n psu_combo_box.current(4)\r\n soc_file.set('SOC_profiles/CR123.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,225)\r\n voltage.set(3.0)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,1.8)\r\n elif(batt_chem.get() == 'Li-Coin' and batt_cells.get() == '1'):\r\n psu_combo_box.current(4)\r\n soc_file.set('SOC_profiles/CoinCell.csv')\r\n battery_capactity_entry_1.delete(0,END)\r\n battery_capactity_entry_1.insert(0,225)\r\n voltage.set(3.0)\r\n dut_cutoff_voltage_entry.delete(0,END)\r\n dut_cutoff_voltage_entry.insert(0,1.8)\r\n else:\r\n popupmsg('Battery Info Error. 
\\n\\r Check number of cells \\n\\r Battlab-One has a maximum 4.5V output')\r\n\r\n update_soc_chart(0)\r\n\r\n#################################################################################\r\n#### STEP1 SET BATTERY PARAMETERS AND PSU OUTPUT\r\n#################################################################################\r\n\r\ndef set_battery_voltage():\r\n if trig_PSU_var.get() == 0:\r\n #Turn OFF low current sense resistor\r\n cmd = 'l'\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n if int(p_radio.get()) == 1:\r\n p_rad.config(foreground='green')\r\n p_rad1.config(foreground='black')\r\n\r\n if voltage.get()== '1.2':\r\n cmd = 'a'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_12)\r\n lo_offset.set(Offset_12)\r\n elif voltage.get()== '1.5':\r\n cmd = 'b'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_15)\r\n lo_offset.set(Offset_15)\r\n elif voltage.get()== '2.4':\r\n cmd = 'c'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_24)\r\n lo_offset.set(Offset_24)\r\n elif voltage.get()== '3.0':\r\n cmd = 'd'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_30)\r\n lo_offset.set(Offset_30)\r\n elif voltage.get()== '3.2':\r\n cmd = 'o'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_30)\r\n lo_offset.set(Offset_30)\r\n elif voltage.get()== '3.6':\r\n cmd = 'n'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_36)\r\n lo_offset.set(Offset_36)\r\n elif voltage.get()== '3.7':\r\n cmd = 'e'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_37)\r\n lo_offset.set(Offset_37)\r\n elif voltage.get()== '4.2':\r\n cmd = 'f'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_42)\r\n lo_offset.set(Offset_42)\r\n elif voltage.get()== '4.5':\r\n cmd = 'g'\r\n bytes_returned = ser.write(cmd.encode())\r\n sense_resistor.set(CAL_ADJ_45)\r\n lo_offset.set(Offset_45)\r\n\r\n cmd = 'h'\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n elif int(p_radio.get()) == 0:\r\n p_rad.config(foreground='black')\r\n p_rad1.config(foreground='red')\r\n cmd = 'i'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\n update_soc_chart(0)\r\n \r\n capture_active_event_button.configure(state=tk.NORMAL)\r\n trigger_box.config(state=tk.NORMAL)\r\n battery_chemistry_combo_box.configure(state=tk.DISABLED)\r\n battery_cells_combo_box.configure(state=tk.DISABLED)\r\n psu_combo_box.configure(state=tk.DISABLED)\r\n rt_dur.configure(state=tk.NORMAL)\r\n \r\n step1_label.configure(foreground='black')\r\n step2_label.configure(foreground='blue')\r\n\r\n\r\n#################################################################################\r\n### STEP2 CAPTURE ACTIVE EVENT\r\n#################################################################################\r\ndef TrigArmed():\r\n capture_active_event_button.configure(text=\"ARM Trigger\", foreground='red')\r\n #Start the data logger\r\n cmd = 'x'\r\n bytes_returned = ser.write(cmd.encode())\r\n time.sleep(0.5)\r\n \r\ndef capture_profile():\r\n\r\n try:\r\n float(runtime_duration.get())\r\n \r\n except ValueError:\r\n popupmsg(\"Please only enter 0123456789.\")\r\n\r\n global ae_captured_average_current_2, \\\r\n ae_captured_duration_2, max_current, min_current, offset\r\n\r\n if trig_PSU_var.get() == 1:\r\n trig_PSU_var.set(0)\r\n set_battery_voltage()\r\n \r\n ser.reset_input_buffer()\r\n ser.reset_output_buffer()\r\n \r\n ser.send_break(duration=0.25)\r\n \r\n 
ae_captured_average_current_2 = 0\r\n ae_captured_duration_2 = 0\r\n counter = 0\r\n cntr = DoubleVar()\r\n reading = StringVar()\r\n u = 0\r\n t=0\r\n\r\n prev_value='NONE'\r\n \r\n my_file=open('raw_byte_file.txt', mode='w+', buffering =(10*1024*1024))\r\n\r\n\r\n #USE TRIGGER FOR CAPTURE\r\n if trig_var.get()==1:\r\n \r\n progress_label.config(text = 'Capturing...' )\r\n progress_label.config(foreground = 'red' )\r\n\r\n #Start the data logger\r\n cmd = 'x'\r\n bytes_returned = ser.write(cmd.encode())\r\n #time.sleep(0.5)\r\n \r\n while (True):\r\n if ser.in_waiting > 0: \r\n my_file.write(str(t))\r\n my_file.write(',')\r\n reading.set(ser.readline(2).hex())\r\n my_file.write(str(reading.get()))\r\n \r\n my_file.write('\\n')\r\n t=t+1\r\n root.update()\r\n if str(reading.get()) == '0000' and prev_value == 'ffff':\r\n break\r\n\r\n prev_value = str(reading.get())\r\n\r\n cmd = 'y'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\n my_file.close()\r\n\r\n progress_label.config(text = 'Complete' )\r\n progress_label.config(foreground = 'green' )\r\n capture_active_event_button.configure(text=\"Capture Active\", foreground='black')\r\n\r\n #USE TIME (DURATION) FOR CAPTURE\r\n else:\r\n time.sleep(0.5)\r\n \r\n pb_vd = ttk.Progressbar(profile_frame, orient='horizontal', mode='determinate',\r\n maximum = float(runtime_duration.get()),length=100, variable=cntr)\r\n \r\n pb_vd.grid(row=10,column = 0,columnspan=1,padx=(215,1),sticky = 'w')\r\n pb_vd.start()\r\n \r\n progress_label.config(text = 'Capturing...' )\r\n progress_label.config(foreground = 'red' )\r\n \r\n cmd = 'z'\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n \r\n offset=time.clock()\r\n \r\n while counter < (float(runtime_duration.get())*0.81): \r\n my_file.write(str(t))\r\n my_file.write(',')\r\n reading.set(ser.readline(2).hex())\r\n my_file.write(str(reading.get()))\r\n my_file.write('\\n')\r\n\r\n #t=int(time.clock()*1000)\r\n t=t+1\r\n root.update()\r\n \r\n counter = time.clock()-offset\r\n cntr.set(counter)\r\n profile_frame.update()\r\n\r\n cmd = 'y'\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n pb_vd.stop()\r\n pb_vd.destroy()\r\n \r\n progress_label.config(text = 'Complete')\r\n progress_label.config(foreground = 'green')\r\n \r\n my_file.close()\r\n \r\n with open('raw_byte_file.txt', 'r')as csvfile:\r\n inp = csv.reader(csvfile, delimiter = ',')\r\n for row in inp: \r\n if round((int(row[1],16)*LSB)/float(sense_resistor.get()),5) < 500 \\\r\n and round((int(row[1],16)*LSB)/float(sense_resistor.get()),5) != 0.0:\r\n x.append(int(row[0]))\r\n y.append(round((int(row[1],16)*LSB*hi_offset.get())/float(sense_resistor.get()),5))\r\n else:\r\n u=u+1 #Do Nothing\r\n\r\n csvfile.close()\r\n \r\n x.pop()\r\n y.pop()\r\n \r\n #Gather data and statistics\r\n ae_captured_average_current_2 = sum(y)/(len(y))\r\n ae_captured_duration_2 = (max(x)-min(x))/1000\r\n max_current = round(max(y),2)\r\n min_current = round(min(y),2)\r\n average_current_profile_data.configure(text=str(round(ae_captured_average_current_2,2)))\r\n max_data.configure(text=str(max_current))\r\n min_data.configure(text=str(min_current))\r\n avg_active_event_I_2.configure(text=str(round(ae_captured_average_current_2,2)))\r\n\r\n #Fill out partial Step 4 Actuals \r\n ae_captured_current_label_4.configure(text=str(round(float(ae_captured_average_current_2),2)), foreground='black',background='dark gray') \r\n ae_captured_duration_label_4.configure(text=str(round(float(ae_captured_duration_2),2)), foreground='black',background='dark gray')\r\n 
dut_cutoff_captured_label_4.configure(text=str(float(dut_cutoff_voltage_entry.get())), foreground='black',background='dark gray')\r\n\r\n\r\n data_plot(x,y,voltage.get(),str(min_current),str(round(max_current,2)),str(round(ae_captured_duration_2,)), str(round(ae_captured_average_current_2,2)))\r\n\r\n #Copy partial Step 4 Actuals to Optimized\r\n ae_optimized_current_entry_4.delete(0,END)\r\n ae_optimized_current_entry_4.insert(0,str(round(float(ae_captured_average_current_2),2)))\r\n ae_optimized_duration_entry_4.delete(0,END)\r\n ae_optimized_duration_entry_4.insert(0,str(round(float(ae_captured_duration_2),2)))\r\n dut_cutoff_optimized_entry_4.delete(0,END)\r\n dut_cutoff_optimized_entry_4.insert(0,str(float(dut_cutoff_voltage_entry.get())))\r\n \r\n avg_active_event_I_2.configure(foreground='green',state=tk.NORMAL)\r\n avg_active_event_I_units.configure(foreground='green',state=tk.NORMAL)\r\n\r\n capture_active_event_button.config(state=tk.DISABLED)\r\n capture_sleep_btn_3.configure(state=tk.NORMAL)\r\n shunt_button.configure(state=tk.NORMAL)\r\n shunt_button1.configure(state=tk.NORMAL)\r\n \r\n step2_label.configure(foreground='black')\r\n step3_label.configure(foreground='blue')\r\n sl_duration_entry_3.configure(state=tk.NORMAL)\r\n \r\n os.remove('raw_byte_file.txt')\r\n \r\n#################################################################################\r\n### STEP3 CAPTURE SLEEP CURRENT\r\n#################################################################################\r\n\r\ndef capture_sleep_profile():\r\n global sl_captured_average_current_3, total_event_duration,battery_life_hours, battery_life_days,progress_label_s\r\n\r\n try:\r\n float(sleep_duration.get())\r\n \r\n except ValueError:\r\n popupmsg(\"Please only enter 0123456789.\")\r\n \r\n soc_state = 0\r\n si[:] = []\r\n\r\n avg_sleep_event_I_units.configure(state=tk.NORMAL)\r\n\r\n ser.reset_input_buffer()\r\n ser.reset_output_buffer()\r\n \r\n sleep_reading = StringVar()\r\n counter1 = 0 \r\n cntr1 = DoubleVar()\r\n\r\n pb_vd_s = ttk.Progressbar(profile_frame, orient='horizontal', mode='determinate',\r\n maximum = sleep_timer.get(),length=100, variable=cntr1)\r\n \r\n pb_vd_s.grid(row=13,column = 0,columnspan=1,padx=(215,1),sticky = 'w')\r\n pb_vd_s.start()\r\n \r\n progress_label_s.config(text = 'Capturing...' 
)\r\n progress_label_s.config(foreground = 'red' )\r\n \r\n if shunt_var.get() == 1: #Lo current shunt is OFF\r\n ser.reset_input_buffer()\r\n ser.reset_output_buffer()\r\n sleep_file=open('sleep_current.txt', mode='w', buffering =(10*1024*1024))\r\n offset1=time.clock()\r\n time.sleep(0.5)\r\n \r\n cmd = 'z'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\n while counter1 < sleep_timer.get():\r\n sleep_reading.set(ser.readline(2).hex())\r\n if round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor.get()),6) < 500 \\\r\n and round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor.get()),6) != 0.0:\r\n si.append(round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor.get()),6))\r\n sleep_file.write(str(round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor.get()),6)))\r\n sleep_file.write('\\n')\r\n root.update()\r\n counter1 = time.clock()-offset1\r\n cntr1.set(counter1)\r\n profile_frame.update()\r\n\r\n pb_vd_s.stop()\r\n pb_vd_s.destroy()\r\n \r\n cmd = 'y'\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n time.sleep(1)\r\n sleep_file.close()\r\n progress_label_s.config(text = 'Complete')\r\n progress_label_s.config(foreground = 'green')\r\n \r\n #sl_captured_average_current_3 = (sum(si)/(len(si)))\r\n sl_captured_average_current_3 = (sum(si)/(len(si)))- lo_offset.get() #Input bias current minus Offset voltage calibration\r\n\r\n if sl_captured_average_current_3 < .811:\r\n avg_sleep_event_I_units.configure(text='')\r\n progress_label_s.config(foreground='red',text=\"Overflow\")\r\n avg_sleep_event_I.configure(foreground='red')\r\n popupmsg2(\"Sleep current out of range. \\n\\r Try using the 10uA - 800uA range. \\n\\r Captured sleep current = \" + str(round(sl_captured_average_current_3,3)) + \"mA\")\r\n sl_captured_average_current_3 = 0\r\n else:\r\n avg_sleep_event_I.configure(text=str(round(sl_captured_average_current_3,2)))\r\n avg_sleep_event_I.configure(state=tk.NORMAL)\r\n avg_sleep_event_I.configure(foreground='green')\r\n avg_sleep_event_I_units.configure(text='mA')\r\n avg_sleep_event_I_units.configure(foreground='green')\r\n \r\n else:\r\n\r\n #Lo current shunt is ON\r\n ser.reset_input_buffer()\r\n ser.reset_output_buffer()\r\n sleep_file=open('sleep_current.txt', mode='w', buffering =(10*1024*1024))\r\n \r\n time.sleep(0.5)\r\n\r\n offset1=time.clock()\r\n cmd = 'z'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\n while counter1 < sleep_timer.get():\r\n sleep_reading.set(ser.readline(2).hex())\r\n if round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor_LO.get()),6) < 812 \\\r\n and round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor_LO.get()),6) != 0.0:\r\n si.append(round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor_LO.get()),6))\r\n sleep_file.write(str(round((int(sleep_reading.get(),16)*LSB)/float(sense_resistor_LO.get()),6)))\r\n sleep_file.write('\\n')\r\n root.update()\r\n counter1 = time.clock()-offset1\r\n cntr1.set(counter1)\r\n profile_frame.update()\r\n\r\n pb_vd_s.stop()\r\n pb_vd_s.destroy()\r\n \r\n cmd = 'y'\r\n bytes_returned = ser.write(cmd.encode())\r\n sleep_file.close()\r\n \r\n progress_label_s.config(text = 'Complete')\r\n progress_label_s.config(foreground = 'green')\r\n \r\n #sl_captured_average_current_3 = (sum(si)/(len(si)))\r\n sl_captured_average_current_3 = (sum(si)/(len(si)))- lo_offset.get() #Input bias current minus Offset voltage calibration\r\n\r\n if sl_captured_average_current_3 *1000 > 811 or sl_captured_average_current_3 *1000 < 0:\r\n 
avg_sleep_event_I_units.configure(state=tk.DISABLED)\r\n progress_label_s.config(foreground='red', text=\"Overflow\")\r\n avg_sleep_event_I.configure(foreground='red')\r\n avg_sleep_event_I_units.configure(text='')\r\n popupmsg2(\"Sleep current out of range.\\n\\r Try using the 800uA - 500mA range.\\n\\r Captured sleep current = \" + str(round(sl_captured_average_current_3,3)) + \"uA\")\r\n sl_captured_average_current_3 = 0\r\n else:\r\n avg_sleep_event_I.configure(text=str(round(sl_captured_average_current_3*1000,2)))\r\n avg_sleep_event_I.configure(foreground='green')\r\n avg_sleep_event_I_units.configure(state=tk.NORMAL)\r\n avg_sleep_event_I_units.configure(text='uA')\r\n avg_sleep_event_I_units.configure(foreground='green')\r\n \r\n with open(soc_file.get(), 'r')as csvfile:\r\n inp = csv.reader(csvfile, delimiter = ',')\r\n headers = next(inp, None)\r\n headers1= next(inp, None)\r\n headers2 = next(inp, None)\r\n my_list = list(inp)\r\n\r\n for row in inp:\r\n soc.append(int(row[0]))\r\n ocv.append(float(row[1]))\r\n esr.append(float(row[2]))\r\n\r\n for i in range(len(my_list)-1):\r\n if (float(my_list[i][1])) < float(dut_cutoff_optimized_entry_4.get()):\r\n soc_state=soc_state+1\r\n new_bat_cap = float(1-(soc_state/100))*float(battery_capactity_entry_1.get()) \r\n \r\n #Update Results Step4 data\r\n \r\n total_event_duration = float(sleep_duration.get()) + ae_captured_duration_2\r\n\r\n average_current_all_events = (sl_captured_average_current_3 * float(sleep_duration.get()) \\\r\n + (ae_captured_average_current_2*ae_captured_duration_2))/float(total_event_duration)\r\n\r\n #Complete Step 4 Captured Labels\r\n sl_captured_current_label_4.configure(text=str(round(float(sl_captured_average_current_3),2)), foreground='black',background='dark gray')\r\n sl_captured_duration_label_4.configure(text=str(round(float(sleep_duration.get()),2)), foreground='black',background='dark gray')\r\n average_current_profile_captured_label_4.configure(text=str(round(average_current_all_events,2)),foreground='black',background='dark gray')\r\n batt_cap_captured_label_4.configure(text=str(int(new_bat_cap)), foreground='black',background='dark gray')\r\n\r\n #Copy Step 4 Captured Labels to Optimized Entrys\r\n sl_optimized_current_entry_4.delete(0,END)\r\n sl_optimized_current_entry_4.insert(0,str(round(float(sl_captured_average_current_3),2)))\r\n sl_optimized_duration_entry_4.delete(0,END)\r\n sl_optimized_duration_entry_4.insert(0,str(round(float(sleep_duration.get()),2)))\r\n average_current_profile_optimized_label_4.configure(text=str(round(average_current_all_events,2)),foreground='black',background='dark gray')\r\n batt_cap_optimized_entry_4.delete(0,END)\r\n batt_cap_optimized_entry_4.insert(0,str(int(new_bat_cap)))\r\n\r\n si.clear()\r\n\r\n #Calculate Captured Battery Life Hours\r\n\r\n battery_life_hours = float(new_bat_cap)/float(average_current_all_events) \r\n battery_life_days = float(battery_life_hours/24)\r\n \r\n captured_battery_life_hours_graph.configure(text=str(round(float(battery_life_hours),2)),foreground='blue',background='dark gray')\r\n captured_battery_life_days_graph.configure(text=str(round(float(battery_life_days),2)),foreground='blue',background='dark gray')\r\n\r\n optimized_battery_life_hours_graph.configure(text=str(round(float(battery_life_hours),2)),foreground='blue',background='dark gray')\r\n optimized_battery_life_days_graph.configure(text=str(round(float(battery_life_days),2)),foreground='blue',background='dark gray') \r\n\r\n filemenu.entryconfigure(4, 
state=tk.NORMAL)\r\n reset_button.configure(state=tk.NORMAL)\r\n\r\n step3_label.configure(foreground='black')\r\n step4_label.configure(foreground='blue')\r\n\r\n os.remove('sleep_current.txt')\r\n\r\n#################################################################################\r\n### STEP4 OPTIMIZED RESULTS \r\n#################################################################################\r\ndef optimize_profile():\r\n global optimized_battery_life_hours, optimized_battery_life_days, optimized_average_current_all_events\r\n\r\n\r\n try:\r\n float(batt_cap_optimized_entry_4.get())\r\n float(dut_cutoff_optimized_entry_4.get())\r\n float(sl_optimized_duration_entry_4.get())\r\n float(sl_optimized_current_entry_4.get())\r\n float(ae_optimized_duration_entry_4.get())\r\n float(ae_optimized_current_entry_4.get())\r\n \r\n except ValueError:\r\n popupmsg(\"Please only enter 0123456789.\")\r\n\r\n soc_state = 0\r\n \r\n if batt_chem.get() == 'AA-Alkaline' or batt_chem.get() == 'AAA-Alkaline':\r\n soc_file = 'SOC_profiles/AA_AAA.csv' \r\n elif batt_chem.get() == 'LI-Ion' or batt_chem.get()=='LiFepo4':\r\n soc_file = 'SOC_profiles/LiIon.csv'\r\n elif batt_chem.get() == 'NiMH/NiCd':\r\n soc_file = 'SOC_profiles/NiMH_AA_AAA.csv'\r\n elif batt_chem.get() == 'Li-Coin':\r\n soc_file = 'SOC_profiles/CoinCell.csv'\r\n elif batt_chem.get() == 'CR123':\r\n soc_file = 'SOC_profiles/CR123.csv'\r\n \r\n \r\n with open(soc_file, 'r')as csvfile:\r\n inp = csv.reader(csvfile, delimiter = ',')\r\n headers = next(inp, None)\r\n headers1= next(inp, None)\r\n headers2 = next(inp, None)\r\n my_list = list(inp)\r\n\r\n for row in inp:\r\n soc.append(int(row[0]))\r\n ocv.append(float(row[1]))\r\n esr.append(float(row[2]))\r\n\r\n for i in range(len(my_list)-1):\r\n if (float(my_list[i][1])) < float(dut_cutoff_optimized_entry_4.get()):\r\n soc_state=soc_state+1\r\n new_bat_cap = float(1-(soc_state/100))*float(battery_capactity_entry_1.get()) \r\n\r\n batt_cap_optimized_entry_4.delete(0,END) \r\n batt_cap_optimized_entry_4.insert(0,str(int(new_bat_cap)))\r\n optimized_average_current_all_events = (float(sl_optimized_duration_entry_4.get())* float(sl_optimized_current_entry_4.get()) \\\r\n + (float(ae_optimized_duration_entry_4.get())* \\\r\n float(ae_optimized_current_entry_4.get())))/(float(sl_optimized_duration_entry_4.get()) \\\r\n + float(ae_optimized_duration_entry_4.get()))\r\n\r\n average_current_profile_optimized_label_4.configure(text=str(round(float(optimized_average_current_all_events),2)),\\\r\n foreground='black',background='dark gray')\r\n\r\n #battery_life_hours = capacity in mAH / mA\r\n optimized_battery_life_hours = float(new_bat_cap)/float(optimized_average_current_all_events)\r\n optimized_battery_life_days = float(optimized_battery_life_hours/24)\r\n\r\n optimized_battery_life_hours_graph.configure(text=str(round(float(optimized_battery_life_hours),2)),foreground='blue',background='dark gray')\r\n optimized_battery_life_days_graph.configure(text=str(round(float(optimized_battery_life_days),2)),foreground='blue',background='dark gray')\r\n\r\n filemenu.entryconfigure(4, state=tk.NORMAL)\r\n reset_button = tk.Button(profile_frame,text='Reset',command=reset,state=tk.NORMAL)\r\n \r\n#################################################################################\r\n### RESET\r\n#################################################################################\r\n\r\ndef reset():\r\n\r\n step1_label.configure(foreground='blue')\r\n step2_label.configure(foreground='black')\r\n 
step3_label.configure(foreground='black')\r\n step4_label.configure(foreground='black')\r\n\r\n #Turn off low current sense resistor\r\n select_lo_shunt()\r\n #Set sense resistor to low resistance\r\n shunt_var.set(1)\r\n shunt_button1.configure(state=tk.DISABLED)\r\n \r\n optimize_button.configure(state=tk.NORMAL)\r\n\r\n capture_active_event_button.configure(state=tk.NORMAL)\r\n psu_combo_box.configure(state=tk.NORMAL)\r\n trigger_box.config(state=tk.NORMAL)\r\n battery_chemistry_combo_box.configure(state=tk.NORMAL)\r\n battery_cells_combo_box.configure(state=tk.NORMAL)\r\n\r\n cmd = 'i' #turn voltage off\r\n bytes_returned = ser.write(cmd.encode())\r\n \r\n p_rad.configure(value=1,foreground='black',background='dark gray')\r\n p_rad.deselect()\r\n p_rad1.configure(value=0,foreground='red',background='dark gray')\r\n p_rad1.select()\r\n\r\n trig_PSU_var.set(1)\r\n\r\n capture_active_event_button.config(text = 'Capture Active' )\r\n capture_active_event_button.config(state=tk.DISABLED)\r\n capture_sleep_btn_3.configure(state=tk.DISABLED)\r\n shunt_button.configure(state=tk.DISABLED)\r\n shunt_button1.configure(state=tk.DISABLED)\r\n\r\n #offset = 0\r\n x[:] = []\r\n y[:] = []\r\n si[:] = []\r\n\r\n ax.plot(x, y)\r\n ax.cla()\r\n plt.close('all')\r\n \r\n data_plot(x,y,str(0),str(0),str(0),str(0),str(0))\r\n \r\n ae_captured_average_current_2 = 0\r\n sl_captured_average_current_3 = 0\r\n ae_captured_duration_2 = 0\r\n soc_state = 0\r\n\r\n progress_label.configure(text='',background='dark gray')\r\n progress_label_s.configure(text='',background='dark gray')\r\n trig_var.set(0)\r\n \r\n #Clear all of the Labels and Entry Fields\r\n ae_captured_current_label_4.configure(text='-',background='dark gray')\r\n ae_captured_duration_label_4.configure(text='-',background='dark gray')\r\n sl_captured_current_label_4.configure(text='-',background='dark gray')\r\n sl_captured_duration_label_4.configure(text='-',background='dark gray')\r\n dut_cutoff_captured_label_4.configure(text='-',background='dark gray')\r\n batt_cap_captured_label_4.configure(text='-',background='dark gray')\r\n average_current_profile_captured_label_4.configure(text='-',background='dark gray')\r\n captured_battery_life_hours_graph.configure(text='-',background='dark gray')\r\n captured_battery_life_days_graph.configure(text='-',background='dark gray')\r\n\r\n ae_optimized_current_entry_4.delete(0,END)\r\n ae_optimized_duration_entry_4.delete(0,END)\r\n dut_cutoff_optimized_entry_4.delete(0,END)\r\n batt_cap_optimized_entry_4.delete(0,END)\r\n sl_optimized_current_entry_4.delete(0,END)\r\n sl_optimized_duration_entry_4.delete(0,END)\r\n sl_optimized_current_entry_4.delete(0,END)\r\n sl_optimized_duration_entry_4.delete(0,END)\r\n \r\n average_current_profile_optimized_label_4.configure(text='-')\r\n optimized_battery_life_hours_graph.configure(text='-')\r\n optimized_battery_life_days_graph.configure(text='-')\r\n\r\n average_current_profile_data.configure(text='-')\r\n max_data.configure(text='-')\r\n min_data.configure(text='-')\r\n \r\n avg_active_event_I_2.configure(text='0.00', foreground = 'black')\r\n avg_active_event_I_units.configure(foreground = 'black')\r\n avg_sleep_event_I.configure(text='0000', foreground = 'black')\r\n avg_sleep_event_I_units.configure(foreground = 'black')\r\n\r\n reset_button = tk.Button(profile_frame,text='Reset',command=reset,state=tk.DISABLED)\r\n \r\n#################################################################################\r\n### SETUP LABELS AND 
BUTTONS\r\n#################################################################################\r\n\r\nheaderFont = font.Font(family=\"TkDefaultFont\",size=9,underline=1)\r\n\r\n# STEP 1 - INPUT BATTERY INFO & PSU OUTPUT\r\n#Battery Information\r\nbattery_chemistry_label_1 = Label(profile_frame, text='Battery Chemistry',background='dark gray')\r\nbattery_chemistry_label_1.grid(row=4, column=0, padx=10,pady=0,sticky = 'w')\r\nbatt_chem = tk.StringVar()\r\nbattery_chemistry_combo_box = ttk.Combobox(profile_frame, width=16, textvariable=batt_chem)\r\nbattery_chemistry_combo_box['values'] = ('AA-Alkaline', 'AAA-Alkaline', 'LI-Ion','LiFepo4','AA-NiMH/NiCd', 'AAA-NiMH/NiCd','Li-Coin','CR123') \r\nbattery_chemistry_combo_box.grid(row=4, column=0,padx=(150,4),pady=(1,1),sticky = 'w')\r\nbattery_chemistry_combo_box.current(0)\r\nbattery_chemistry_combo_box.bind('<<ComboboxSelected>>', set_voltage)\r\n\r\nbattery_capacity_label_1 = Label(profile_frame, text='Battery Capacity',background='dark gray')\r\nbattery_capacity_label_1.grid(row=5, column=0, padx=10,pady=0,sticky = 'w')\r\nbatt_cap = tk.StringVar()\r\nbattery_capactity_entry_1 = Entry(profile_frame, width=6,textvariable=batt_cap)\r\nbattery_capactity_entry_1.grid(row=5, column=0, padx=(150,4),pady=(1,1), sticky = 'w')\r\nbattery_capactity_entry_1.focus_set()\r\nbattery_capactity_entry_1.insert(0,1500)\r\nbattery_capactity_entry_1.bind('', set_voltage)\r\nbattery_capactity_entry_1.bind('', set_voltage)\r\nbattery_capactity_units = Label(profile_frame, text='mAh',background='dark gray')\r\nbattery_capactity_units.grid(row=5, column=0, padx=(190,4),pady=(1,1),sticky = 'w')\r\n\r\nbattery_cells = Label(profile_frame, text='Number of Cells',background='dark gray')\r\nbattery_cells.grid(row=6, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nbatt_cells = tk.StringVar()\r\nbattery_cells_combo_box = ttk.Combobox(profile_frame, width=3, textvariable=batt_cells)\r\nbattery_cells_combo_box['values'] = (1, 2, 3) \r\nbattery_cells_combo_box.grid(row=6, column=0,padx=(150,4),pady=(1,1),sticky = 'w')\r\nbattery_cells_combo_box.current(0)\r\nbattery_cells_combo_box.bind('<<ComboboxSelected>>', set_voltage)\r\nbattery_cells_units = Label(profile_frame, text='cells',background='dark gray')\r\nbattery_cells_units.grid(row=6, column=0, padx=(190,4),pady=(1,1),sticky = 'w')\r\n \r\n#Voltage\r\ndut_cutoff_voltage = Label(profile_frame, text='DUT Cutoff Voltage',background='dark gray')\r\ndut_cutoff_voltage.grid(row=7, column=0, padx=10,pady=(1,1),sticky = 'w')\r\ndut_cutoff = tk.StringVar()\r\ndut_cutoff_voltage_entry = Entry(profile_frame, width=6, textvariable = dut_cutoff)\r\ndut_cutoff_voltage_entry.grid(row=7, column=0, padx=(150,4),pady=(1,1), sticky = 'w')\r\ndut_cutoff_voltage_entry.insert(0,1.0)\r\ndut_cutoff_voltage_entry.bind('', update_soc_chart)\r\ndut_cutoff_voltage_entry.focus_set()\r\ndut_cutoff_voltage_units = Label(profile_frame, text='volts',background='dark gray')\r\ndut_cutoff_voltage_units.grid(row=7, column=0, padx=(190,4),pady=(1,1),sticky = 'w')\r\n\r\nvolt_lab = Label(profile_frame, text='PSU Voltage Output',background='dark gray')\r\nvolt_lab.grid(row=8, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nvoltage = tk.StringVar()\r\npsu_combo_box = ttk.Combobox(profile_frame, width=3, textvariable=voltage)\r\npsu_combo_box['values'] = (1.2, 1.5, 2.4, 3.0, 3.2, 3.6, 3.7, 4.2, 4.5) \r\npsu_combo_box.grid(row=8, column=0,padx=(150,4),pady=(1,1),sticky = 'w')\r\npsu_combo_box.current(0)\r\npsu_combo_box_units = Label(profile_frame, text='volts',background='dark 
gray')\r\npsu_combo_box_units.grid(row=8, column=0, padx=(190,4),pady=(1,1),sticky = 'w')\r\n\r\npsu_lab = Label(profile_frame, text='PSU Output',background='dark gray')\r\npsu_lab.grid(row=9, column=0, padx=10,pady=(1,1),sticky = 'w')\r\np_radio = IntVar()\r\np_rad = Radiobutton(profile_frame, text = 'On', variable = p_radio, value = 1,\r\n command = set_battery_voltage,background='dark gray')\r\np_rad.grid(row=9, column=0, padx=80, pady=(1,1),sticky = 'w')\r\n\r\np_rad1 = Radiobutton(profile_frame, text = 'Off', variable = p_radio, value = 0, command = set_battery_voltage,\r\n foreground='red',background='dark gray')\r\np_rad1.grid(row=9, column=0, padx=(120,10),pady=(1,1), sticky = 'w')\r\n\r\ntrig_PSU_var = IntVar()\r\ntrigger_box = tk.Checkbutton(profile_frame, text='Trigger PSU on Capture', variable=trig_PSU_var,background='dark gray', state=tk.NORMAL)\r\ntrigger_box.grid(row=9, column=0, padx=(180,4),pady=(1,1),sticky='w')\r\ntrig_PSU_var.set(1)\r\n\r\n#STEP2 - SET UP CAPTURE TIME AND EXECUTE CAPTURE\r\nrt_c = Label(profile_frame, text='Capture Duration (s)',background='dark gray')\r\nrt_c.grid(row=11, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nruntime_duration = tk.StringVar()\r\nrt_dur = Entry(profile_frame, width=6,textvariable=runtime_duration)\r\nrt_dur.grid(row=11, column=0, padx=(130,4),pady=(1,1), sticky = 'w')\r\nrt_dur.focus_set()\r\nrt_dur.insert(0,10)\r\nrt_dur.configure(state=tk.DISABLED)\r\n\r\nchoice_lab = Label(profile_frame, text=' -OR- ',background='dark gray')\r\nchoice_lab.grid(row=11, column=0, padx=(170,2),pady=(1,1),sticky = 'w')\r\n\r\ntrig_var = IntVar()\r\ntrigger_box = tk.Checkbutton(profile_frame, text='Ext Trig', variable=trig_var,command = TrigArmed,background='dark gray', state=tk.DISABLED)\r\ntrigger_box.grid(row=11, column=0, padx=(205,4),pady=(1,1),sticky='w')\r\n\r\ntrig_label = ttk.Label(profile_frame, text='', background='dark gray')\r\ntrig_label.grid(row=11, column = 0, padx=(200,4),pady=(1,1),sticky='w')\r\n\r\nprogress_label = ttk.Label(profile_frame, text='',background='dark gray')\r\nprogress_label.grid(row=10, column = 0, padx=(230,4),sticky='w')\r\n \r\ncapture_active_event_button = tk.Button(profile_frame,text='Capture Active',command=capture_profile,state=tk.DISABLED)\r\ncapture_active_event_button.grid(row=12,column=0, padx=(10,4),pady=(1,1),sticky = 'w')\r\navg_active_event_I_2 = ttk.Label(profile_frame, text='0.00',font=('Arial Bold',10), foreground = 'black',background='dark gray',state=tk.DISABLED)\r\navg_active_event_I_2.grid(row=12, column = 0, padx= (120,4),pady=(1,1), sticky = 'w')\r\navg_active_event_I_units = ttk.Label(profile_frame, text='mA',font=('Arial Bold',10), foreground = 'black',background='dark gray',state=tk.DISABLED)\r\navg_active_event_I_units.grid(row=12, column = 0, padx=(170,4),pady=(1,1), sticky = 'w')\r\n\r\n#STEP3 - CAPTURE SLEEP EVENT CURRENT WIDGETS\r\nsl_range_labe1_3 = Label(profile_frame, text='Current Range',background='dark gray')\r\nsl_range_labe1_3.grid(row=14, column=0, padx=(210,2) ,pady=(1,1),sticky = 'w')\r\n\r\ndef select_lo_shunt():\r\n #Turn OFF low current sense resistor\r\n cmd = 'l'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\ndef select_hi_shunt():\r\n #Turn ON low current sense resistor\r\n cmd = 'k'\r\n bytes_returned = ser.write(cmd.encode())\r\n\r\nshunt_var = IntVar()\r\nshunt_button = Radiobutton(profile_frame, text=\"800uA - 500mA\", command=select_lo_shunt, variable=shunt_var, value=1, state=tk.DISABLED)\r\nshunt_button.grid(row=14, column=0, 
padx=(205,4),pady=(1,1),sticky='w')\r\nshunt_button1 = Radiobutton(profile_frame, text=\"10uA - 800uA\", command=select_hi_shunt,variable=shunt_var, value=2, state=tk.DISABLED)\r\nshunt_button1.grid(row=15, column=0, padx=(205,4),pady=(1,1),sticky='w')\r\nshunt_var.set(1)\r\n\r\nsl_duration_labe1_3 = Label(profile_frame, text='DUT Sleep Duration',background='dark gray')\r\nsl_duration_labe1_3.grid(row=14, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nsleep_duration=tk.StringVar()\r\nsl_duration_entry_3 = Entry(profile_frame, width=6,textvariable=sleep_duration)\r\nsl_duration_entry_3.grid(row=14, column=0,padx=(120,4),pady=(1,1),sticky = 'w')\r\nsl_duration_entry_3.insert(0,60)\r\nsl_duration_entry_3.configure(state=tk.DISABLED)\r\nsl_duration_units_lab_3 = Label(profile_frame, text='Sec',background='dark gray')\r\nsl_duration_units_lab_3.grid(row=14, column=0, padx=(160,2),pady=(1,1),sticky = 'w')\r\n\r\nprogress_label_s = ttk.Label(profile_frame, text='',background='dark gray')\r\nprogress_label_s.grid(row=13, column = 0, padx=(230,4),sticky='w')\r\n \r\ncapture_sleep_btn_3 = tk.Button(profile_frame,text='Capture Sleep',command=capture_sleep_profile,state=tk.DISABLED)\r\ncapture_sleep_btn_3.grid(row=15,column=0, padx=(10,4),pady=(1,1),sticky = 'w')\r\n\r\navg_sleep_event_I = ttk.Label(profile_frame, text='0000',font=('Arial Bold',10), foreground = 'black',background='dark gray',state=tk.DISABLED)\r\navg_sleep_event_I.grid(row=15, column = 0, padx= (120,4),pady=(1,1), sticky = 'w')\r\n\r\navg_sleep_event_I_units = ttk.Label(profile_frame, text='uA',font=('Arial Bold',10), foreground = 'black',background='dark gray')\r\navg_sleep_event_I_units.grid(row=15, column = 0, padx=(170,4),pady=(1,1), sticky = 'w')\r\n\r\n\r\n#STEP4 - RESULTS AND OPTIMIZE WIDGETS\r\n#Section 4 Fields\r\n\r\n#Profile Headers\r\ncaptured_profile_label = ttk.Label(profile_frame, text='Captured' ,font=headerFont,foreground='blue',background='dark gray')\r\ncaptured_profile_label.grid(row=17, column = 0, padx=(150,4),pady=(1,1),sticky = 'w')\r\noptimized_profile_label = Label(profile_frame, text='Optimized',foreground='blue',font=headerFont,background='dark gray')\r\noptimized_profile_label.grid(row=17, column=0, padx=(210,1),pady=(1,1),sticky = 'w')\r\n\r\n#Active Event Current\r\nae_current_label_4 = Label(profile_frame, text='Active Event Current',background='dark gray')\r\nae_current_label_4.grid(row=18, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nae_captured_current_label_4 = Label(profile_frame, text='-', background='dark gray')\r\nae_captured_current_label_4.grid(row=18, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\nae_optimized_current_entry_4 = Entry(profile_frame, width=7)\r\nae_optimized_current_entry_4.grid(row=18, column=0,padx=(210,4),pady=(1,1),sticky = 'w')\r\nae_current_units_lab_4 = Label(profile_frame, text='mA',background='dark gray')\r\nae_current_units_lab_4.grid(row=18, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Active Event Duration\r\nae_duration_label_4 = Label(profile_frame, text='Active Event Duration',background='dark gray')\r\nae_duration_label_4.grid(row=19, column=0, padx=10,pady=4,sticky = 'w')\r\nae_captured_duration_label_4 = Label(profile_frame, text='-',background='dark gray')\r\nae_captured_duration_label_4.grid(row=19, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\nae_optimized_duration_entry_4 = Entry(profile_frame, width=7)\r\nae_optimized_duration_entry_4.grid(row=19, column=0,padx=(210,4),pady=(1,1),sticky = 'w')\r\nae_duration_units_lab_4 = 
Label(profile_frame, text='S',background='dark gray')\r\nae_duration_units_lab_4.grid(row=19, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Sleep Event Current\r\nsl_current_label_4 = Label(profile_frame, text='Sleep Current',background='dark gray')\r\nsl_current_label_4.grid(row=20, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nsl_captured_current_label_4 = Label(profile_frame, text='-',background='dark gray')\r\nsl_captured_current_label_4.grid(row=20, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\nsl_optimized_current_entry_4 = Entry(profile_frame, width=7)\r\nsl_optimized_current_entry_4.grid(row=20, column=0,padx=(210,4),pady=(1,1),sticky = 'w')\r\nsl_current_units_lab_4 = Label(profile_frame, text='mA',background='dark gray')\r\nsl_current_units_lab_4.grid(row=20, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Sleep Event Duration\r\nsl_duration_labe1_4 = Label(profile_frame, text='Sleep Duration',background='dark gray')\r\nsl_duration_labe1_4.grid(row=21, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nsl_captured_duration_label_4 = Label(profile_frame, text='-',background='dark gray')\r\nsl_captured_duration_label_4.grid(row=21, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\nsl_optimized_duration_entry_4 = Entry(profile_frame, width=7)\r\nsl_optimized_duration_entry_4.grid(row=21, column=0,padx=(210,4),pady=(1,1),sticky = 'w')\r\nsl_duration_units_lab_4 = Label(profile_frame, text='S',background='dark gray')\r\nsl_duration_units_lab_4.grid(row=21, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n \r\n#DUT Cutoff Voltage\r\ndut_cutoff_label_4 = Label(profile_frame, text='DUT Cutoff Voltage',background='dark gray')\r\ndut_cutoff_label_4.grid(row=22, column=0, padx=10,pady=(1,1),sticky = 'w')\r\ndut_cutoff_captured_label_4 = Label(profile_frame, text='-',background='dark gray')\r\ndut_cutoff_captured_label_4.grid(row=22, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\ndut_cutoff_optimized_entry_4 = Entry(profile_frame, width=7)\r\ndut_cutoff_optimized_entry_4.grid(row=22, column=0, padx=(210,4),pady=(1,1), sticky = 'w')\r\ndut_cutoff_units_lab_4 = Label(profile_frame, text='mAh',background='dark gray')\r\ndut_cutoff_units_lab_4.grid(row=22, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Battery Capacity\r\nbatt_cap_label_4 = Label(profile_frame, text='Effective Battery Capacity',background='dark gray')\r\nbatt_cap_label_4.grid(row=23, column=0, padx=10,pady=(1,1),sticky = 'w')\r\nbatt_cap_captured_label_4 = Label(profile_frame, text='-',background='dark gray')\r\nbatt_cap_captured_label_4.grid(row=23, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\nbatt_cap_optimized_entry_4 = Entry(profile_frame, width=7)\r\nbatt_cap_optimized_entry_4.grid(row=23, column=0, padx=(210,4),pady=(1,1), sticky = 'w')\r\nbatt_cap_units_lab_4 = Label(profile_frame, text='mAh',background='dark gray')\r\nbatt_cap_units_lab_4.grid(row=23, column=0, padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Average Current Profile\r\naverage_current_profile_label_4 = Label(profile_frame, text='Average Current Profile',background='dark gray')\r\naverage_current_profile_label_4.grid(row=24, column=0, padx=10,pady=(1,1),sticky = 'w')\r\naverage_current_profile_captured_label_4 = Label(profile_frame, text='-',background='dark gray')\r\naverage_current_profile_captured_label_4.grid(row=24, column=0, padx=(150,4),pady=(1,1),sticky = 'w')\r\naverage_current_profile_optimized_label_4 = Label(profile_frame, text='-',foreground='black', background='dark 
gray')\r\naverage_current_profile_optimized_label_4.grid(row=24, column=0, padx=(210,4),pady=(1,1),sticky = 'w')\r\naverage_current_profile_units_lab_4 = Label(profile_frame, text='mA',foreground='black',background='dark gray')\r\naverage_current_profile_units_lab_4.grid(row=24, column=0,padx=(260,2),pady=(1,1),sticky = 'w')\r\n\r\n#Statistics\r\nstatistics_label = Label(profile_frame, font=headerFont,text='Statistics',background='dark gray')\r\nstatistics_label.grid(row=20, column=1, padx=5,pady=(1,1),sticky = 'w')\r\naverage_current_profile = Label(profile_frame, text='Average Active Event Current (mA) = ',background='dark gray')\r\naverage_current_profile.grid(row=21, column=1, padx=5,pady=(1,1),sticky = 'w')\r\naverage_current_profile_data = Label(profile_frame, text='-',background='dark gray')\r\naverage_current_profile_data.grid(row=21, column=1, padx=(240,2),pady=(1,1),sticky = 'w')\r\nmax_label = ttk.Label(profile_frame, text='Max active event current captured (mA) = ',background='dark gray')\r\nmax_label.grid(row=22, column = 1, rowspan=1, padx=5,pady=(2,2),sticky = 'w')\r\nmax_data = Label(profile_frame, text='-',background='dark gray')\r\nmax_data.grid(row=22, column=1, rowspan=1,padx=(240,2),pady=(2,2),sticky = 'w')\r\nmin_label = ttk.Label(profile_frame, text='Min active event current captured (mA) = ',background='dark gray')\r\nmin_label.grid(row=23, column = 1,padx=5,pady=(2,2),sticky = 'w')\r\nmin_data = Label(profile_frame, text='-',background='dark gray')\r\nmin_data.grid(row=23,column=1,padx=(240,2),pady=(2,2),sticky = 'w')\r\n\r\n#Profile Headers\r\ncaptured_profile_label1 = ttk.Label(profile_frame, text='Captured' ,font=headerFont,foreground='blue',background='dark gray')\r\ncaptured_profile_label1.grid(row=25, column = 1, padx=(180,4),pady=(1,1),sticky = 'w')\r\n\r\noptimized_profile_label1 = Label(profile_frame, text='Optimized',font=headerFont,foreground='blue',background='dark gray')\r\noptimized_profile_label1.grid(row=25, column=1, padx=(240,1),pady=(1,1),sticky = 'w')\r\n\r\n\r\n#Battery Life Hours\r\nbatt_life_hours_captured_graph_label = ttk.Label(profile_frame, text='Estimated Battery Life (hours) ',foreground = 'blue',background='dark gray')\r\nbatt_life_hours_captured_graph_label.grid(row=26, column=1, padx=5,pady=(1,1), sticky = 'w')\r\ncaptured_battery_life_hours_graph = ttk.Label(profile_frame, text='-',foreground='blue',background='dark gray')\r\ncaptured_battery_life_hours_graph.grid(row=26, column = 1,padx=(180,4),pady=(1,1),sticky = 'w')\r\noptimized_battery_life_hours_graph = ttk.Label(profile_frame, text='-',foreground='blue',background='dark gray')\r\noptimized_battery_life_hours_graph.grid(row=26, column = 1,padx=(240,4),pady=(1,1),sticky = 'w')\r\noptimized_battery_life_hours_units_lab_4 = Label(profile_frame, text='hours',foreground = 'blue',background='dark gray')\r\noptimized_battery_life_hours_units_lab_4.grid(row=26, column=1,padx=(300,2),pady=(1,1),sticky = 'w')\r\n\r\n#Battery Life Days\r\nbatt_life_days_captured_graph_label = ttk.Label(profile_frame, text='Estimated Battery Life (days) ', foreground = 'blue',background='dark gray')\r\nbatt_life_days_captured_graph_label.grid(row=27, column = 1,padx=5,pady=(1,1),sticky = 'w')\r\ncaptured_battery_life_days_graph = ttk.Label(profile_frame, text='-' ,foreground='blue',background='dark gray')\r\ncaptured_battery_life_days_graph.grid(row=27, column = 1,padx=(180,4),pady=(1,1),sticky = 'w')\r\noptimized_battery_life_days_graph = ttk.Label(profile_frame, 
text='-',foreground='blue',background='dark gray')\r\noptimized_battery_life_days_graph.grid(row=27, column = 1,padx=(240,4),pady=(1,1),sticky = 'w')\r\noptimized_battery_life_days_units_lab_4 = Label(profile_frame, text='days',foreground = 'blue',background='dark gray')\r\noptimized_battery_life_days_units_lab_4.grid(row=27, column=1,padx=(300,2),pady=(1,1),sticky = 'w')\r\n\r\n#OPTIMIZE BUTTON\r\noptimize_button = tk.Button(profile_frame,text='Optimize',command=optimize_profile,state=tk.NORMAL)\r\noptimize_button.grid(row=25,column=0, rowspan=5,padx=(210,4),pady=(1,1),sticky = 'nw')\r\n\r\n##RESET BUTTON\r\nreset_button = tk.Button(profile_frame,text='Reset',command=reset,state=tk.NORMAL)\r\nreset_button.grid(row=26,column=0,padx=(90,4),pady=(1,1),sticky = 'w')\r\n\r\n##SAVE BUTTON\r\nsave_button = tk.Button(profile_frame,text='Save Results',command=SaveFile,state=tk.NORMAL)\r\nsave_button.grid(row=27,column=0,padx=(90,4),pady=(1,1),sticky = 'sw')\r\n\r\n#STEP LABELS\r\nstep1_label = ttk.Label(profile_frame, text='Step1 - Battery Info and PSU Output',font=('Arial Bold',11), foreground = 'blue',background='dark gray')\r\nstep1_label.grid(row=3, column = 0, padx=2,pady=(1,1), sticky = 'w',columnspan=2)\r\n\r\nstep2_label = ttk.Label(profile_frame, text='Step2 - Active Event Current',font=('Arial Bold',11), background='dark gray')\r\nstep2_label.grid(row=10, column = 0, padx=2,pady=(1,1),sticky = 'w',columnspan=2)\r\n\r\nstep3_label = ttk.Label(profile_frame, text='Step3 - Sleep Event Current',font=('Arial Bold',11), background='dark gray')\r\nstep3_label.grid(row=13, column = 0, padx=2, pady=(1,1), sticky = 'w')\r\n\r\nstep4_label = ttk.Label(profile_frame, text='Step4 - Results and Optimization',font=('Arial Bold',11), background='dark gray')\r\nstep4_label.grid(row=16, column = 0, padx=2, pady=(1,1), sticky = 'w')\r\n\r\nupdate_soc_chart(0)\r\ndata_plot(x,y,str(0),str(0),str(0),str(0),str(0))\r\n#cmd = 'i' #turn voltage off\r\n#bytes_returned = ser.write(cmd.encode())\r\n \r\nroot.mainloop()\r\n","sub_path":"Older_BattLab_One_Versions/BattLab_One_V1.01.py","file_name":"BattLab_One_V1.01.py","file_ext":"py","file_size_in_byte":75768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"559591415","text":"import string\n\nfrom art import logo\nalphabet = []\nalphabet_letters =string.ascii_lowercase\nfor letter in alphabet_letters:\n alphabet += letter\nalphabet = alphabet * 2\nwanna_continue = True\nprint(logo)\nwhile wanna_continue:\n direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\").lower()\n text = input(\"Type your message:\\n\").lower()\n shift = int(input(\"Type the shift number:\\n\"))\n shift = shift % 26\n def ceaser(start_text, shift_amount, cypher_direction):\n end_text = \"\"\n if cypher_direction == \"decode\":\n shift_amount *= -1\n for letter in start_text:\n if letter in alphabet:\n position = alphabet.index(letter)\n new_position = position + shift_amount\n end_text += alphabet[new_position]\n else:\n end_text += letter\n print(end_text)\n\n ceaser(start_text=text, shift_amount=shift, cypher_direction=direction)\n choice = input(\"yes or no\").lower()\n if choice == \"no\":\n wanna_continue = False\n print(\"bye\")","sub_path":"caesar_project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"625437175","text":"import os\nfrom flask import Flask, render_template, request, 
redirect\nimport requests\nimport logging\nimport json\n\nURL = os.environ.get(\"SERVER\", \"http://localhost:8082\")\nDEBUG = os.environ.get(\"DEBUG\", True)\nPORT = os.environ.get(\"PORT\", 5000)\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n logging.info('home page requested')\n try:\n data = requests.get(URL + '/events').content\n # Parse JSON into a python object with attributes corresponding to dict keys.\n model = json.loads(data)\n except Exception:\n # backend is down, so provide alternative data\n model = {}\n return render_template(\"home.html\", model=model)\n\n@app.route(\"/event\", methods=['POST'])\ndef create_happening():\n logging.info('event submitted')\n headers = {\n 'Content-Type': 'application/json'\n }\n data = request.form.to_dict(flat=True)\n requests.post(URL + '/event', headers=headers, data=json.dumps(data))\n return redirect('/')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=PORT, debug=DEBUG)\n","sub_path":"sample-pythonclient-master/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"394795255","text":"# coding: utf-8\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import unicode_literals\n\nfrom six import string_types\n\nfrom .decisions import MergeDecisionBuilder\nfrom .chunks import make_merge_chunks\nfrom ..diffing import diff\nfrom ..diff_format import (DiffOp, as_dict_based_diff, op_patch, op_addrange)\nfrom ..diffing.notebooks import notebook_predicates, notebook_differs\nfrom ..utils import star_path\n\n\n# Set to true to enable some expensive debugging assertions\nDEBUGGING = 0\n\n# Sentinel to allow None value\nMissing = object()\n\n\n# =============================================================================\n#\n# Decision-making code follows\n#\n# =============================================================================\n\n\ndef _merge_dicts(base, local_diff, remote_diff, path, decisions):\n \"\"\"Perform a three-way merge of dicts. See docstring of merge.\"\"\"\n assert isinstance(base, dict)\n\n # Converting to dict-based diff format for dicts for convenience\n # This step will be unnecessary if we change the diff format to work this\n # way always\n local_diff = as_dict_based_diff(local_diff)\n remote_diff = as_dict_based_diff(remote_diff)\n\n # Summary of diff entry cases with (#) references to below code\n # r\\l | N/A - + : !\n # ----|----------------------\n # N/A | (1) (2)---------(2)\n # - | (3) (4) (5)-----(5)\n # + | | (5) (6) (5) (5)\n # : | | | (5) (7) (5)\n # ! 
| (3) (5) (5 (5) (8)\n\n # Get diff keys\n bldkeys = set(local_diff.keys())\n brdkeys = set(remote_diff.keys())\n dkeys = bldkeys | brdkeys\n\n # (1) Use base values for all keys with no change\n for key in sorted(set(base.keys()) - dkeys):\n pass\n\n # (2)-(3) Apply one-sided diffs\n for key in sorted(bldkeys ^ brdkeys):\n decisions.onesided(path,\n local_diff.get(key),\n remote_diff.get(key))\n\n # (4) (5) (6) (7) (8)\n # Then we have the potentially conflicting changes\n for key in sorted(brdkeys & bldkeys):\n # Get diff entries for this key (we know both sides have an\n # entry here because all other cases are covered above)\n ld = local_diff[key]\n rd = remote_diff[key]\n\n # Get values (using Missing as a sentinel to allow None as a value)\n bv = base.get(key, Missing)\n\n # Switch on diff ops\n lop = ld.op\n rop = rd.op\n if lop != rop:\n # Note that this means the below cases always have the same op\n # (5) Conflict: removed one place and edited another, or edited in\n # different ways\n decisions.conflict(path, ld, rd)\n elif lop == DiffOp.REMOVE:\n # (4) Removed in both local and remote, just don't add it to merge\n # result\n decisions.agreement(path, ld, rd)\n elif lop in (DiffOp.ADD, DiffOp.REPLACE, DiffOp.PATCH) and ld == rd:\n # If inserting/replacing/patching produces the same value, just use\n # it\n decisions.agreement(path, ld, rd)\n elif lop == DiffOp.ADD:\n # (6) Insert in both local and remote, values are different\n # This can possibly be resolved by recursion, but leave that to\n # autoresolve\n decisions.conflict(path, ld, rd)\n elif lop == DiffOp.REPLACE:\n # (7) Replace in both local and remote, values are different,\n # record a conflict against original base value\n decisions.conflict(path, ld, rd)\n elif lop == DiffOp.PATCH:\n # (8) Patch on both local and remote, values are different\n # Patches produce different values, try merging the substructures\n # (a patch command only occurs when the type is a collection, so we\n # can safely recurse here and know we won't encounter e.g. an int)\n _merge(bv, ld.diff, rd.diff, path + (key,), decisions)\n else:\n raise ValueError(\"Invalid diff ops {} and {}.\".format(lop, rop))\n\n\ndef _split_addrange(key, local, remote, path):\n \"\"\"Compares two addrange value lists, and splits decisions on similarity\n\n Uses diff of value lists to identify which items to align. Identical,\n aligned inserts are decided as in agreement, while inserts that are aligned\n without being identical are treated as conflicts (possibly to be resolved\n by autoresolve). 
Non-aligned inserts are treated as conflict free,\n one-sided inserts.\n \"\"\"\n # FIXME: This uses notebook predicates and differs, which\n # doesn't really belong in a generic merge algorithm...\n\n # First, find diff between local and remote insertion values.\n # This will align common subsequences according to the similarity\n # measures defined in notebook predicates.\n intermediate_diff = diff(local, remote, path=star_path(path),\n predicates=notebook_predicates.copy(),\n differs=notebook_differs.copy())\n\n # Next, translate the diff into decisions\n decisions = MergeDecisionBuilder()\n taken = 0\n offset = 0 # Offset between diff keys (ref local) and remote\n for i, d in enumerate(intermediate_diff):\n # This should only occur after (*) marked below:\n if d.key < taken:\n continue\n if taken < d.key:\n # No diff, which means elements are inserted on both sides\n overlap = [op_addrange(key, local[taken:d.key])]\n decisions.agreement(path, overlap, overlap)\n taken = d.key\n\n # Either (1) conflicted, (2) local onesided, or (3) remote onesided\n # First checks whether the next op is a removal on the same key\n # as the current one (i.e. a range substitution).\n if (i + 1 < len(intermediate_diff) and\n intermediate_diff[i+1].op == DiffOp.REMOVERANGE and\n intermediate_diff[i+1].key == d.key):\n # This indicates a non-similar sub-sequence, according\n # to the predicates.\n # (1) Conflicted addition\n local_len = intermediate_diff[i+1].length\n ld = [op_addrange(key, local[d.key:d.key+local_len])]\n rd = [op_addrange(key, d.valuelist)]\n decisions.conflict(path, ld, rd)\n offset += len(d.valuelist) - local_len\n # (*) Here we treat two ops in one go, which we mark\n # by setting taken beyond the key of the next op:\n taken += local_len\n elif d.op == DiffOp.REPLACE:\n # Same as above, but length of one, so simpler\n # (1) Conflict (one element each)\n ld = [op_addrange(key, [local[d.key]])]\n rd = [op_addrange(key, [d.value])]\n decisions.conflict(path, ld, rd)\n taken += 1\n elif d.op in (DiffOp.REMOVE, DiffOp.REMOVERANGE):\n # (2) Local onesided\n if d.op == DiffOp.REMOVE:\n vl = [local[d.key]]\n else:\n vl = local[d.key:d.key + d.length]\n decisions.onesided(path, [op_addrange(key, vl)], None)\n offset -= len(vl)\n taken += len(vl)\n elif d.op in (DiffOp.ADD, DiffOp.ADDRANGE):\n # (3) Remote onesided\n if d.op == DiffOp.ADD:\n vl = [d.value]\n else:\n vl = d.valuelist\n decisions.onesided(path, None, [op_addrange(key, vl)])\n offset += len(vl)\n elif d.op == DiffOp.PATCH:\n # Predicates indicate that local and remote are similar!\n # Mark as conflcit, possibly for autoresolve to deal with\n decisions.conflict(path,\n [op_addrange(key, [local[d.key]])],\n [op_addrange(key, [remote[d.key + offset]])])\n taken += 1\n else:\n raise ValueError(\"Invalid diff op: %s\" % d.op)\n\n # We have made at least one split\n if taken < len(local):\n # Have elements that are inserted on both sides\n overlap = [op_addrange(key, local[taken:])]\n decisions.agreement(path, overlap, overlap)\n if len(decisions.decisions) > 1 or not decisions.decisions[0].conflict:\n return decisions.decisions\n else:\n return None\n\n\ndef _merge_concurrent_inserts(base, ldiff, rdiff, path, decisions):\n \"\"\"Merge concurrent inserts, optionally with one or more removeranges.\n\n This method compares the addition/removals on both sides, and splits it\n into individual agreement/onesided/conflict decisions.\n \"\"\"\n # Assume first ops are always inserts\n assert ldiff[0].op == DiffOp.ADDRANGE and rdiff[0].op == 
DiffOp.ADDRANGE\n\n subdec = _split_addrange(ldiff[0].key, ldiff[0].valuelist, rdiff[0].valuelist, path)\n\n if subdec:\n # We were able to split insertion [+ onesided removal]\n decisions.decisions.extend(subdec)\n\n # Add potential one-sided removal at end\n if len(ldiff) > 1 or len(rdiff) > 1:\n decisions.onesided(path, ldiff[1:], rdiff[1:])\n else:\n # Were not able to infer any additional information,\n # simply add as they are (conflicted).\n decisions.conflict(path, ldiff, rdiff)\n\n\ndef _merge_lists(base, local_diff, remote_diff, path, decisions):\n \"\"\"Perform a three-way merge of lists. See docstring of merge.\"\"\"\n assert isinstance(base, list)\n\n # Split up and combine diffs into chunks\n # format: [(begin, end, localdiffs, remotediffs)]\n chunks = make_merge_chunks(base, local_diff, remote_diff)\n\n # Loop over chunks of base[j:k], grouping insertion at j into\n # the chunk starting with j\n for (j, k, d0, d1) in chunks:\n if not (bool(d0) or bool(d1)):\n # Unmodified chunk\n pass # No-op\n\n elif not (bool(d0) and bool(d1)):\n # One-sided modification of chunk\n decisions.onesided(path, d0, d1)\n\n elif d0 == d1:\n # Exactly the same modifications\n decisions.agreement(path, d0, d1)\n\n # Below notation: A: addition, R: removal, P: patch\n # Double operations: AP and AR (addition followed by patch or removal)\n # 15 combinations to cover below:\n # A/R: addition on one side, removal on other\n # AR/R: addtion and removal on one side, removal on other\n # etc.\n # These can be partially resolved, except for the case of\n # AR/AR, which is basically a sequence replacement. These are often\n # complex enough that they are best left as conflicts!\n\n elif (len(d0) == len(d1) == 1 and\n not d0[0].op == d1[0].op == DiffOp.ADDRANGE):\n # A/R, A/P, R/P or P/P\n # (R/R will always agree above because of chunking)\n ld, rd = d0[0], d1[0]\n ops = (ld.op, rd.op)\n\n if ld.op == rd.op == DiffOp.PATCH:\n # P/P, recurse\n assert ld.key == rd.key\n key = ld.key\n bv = base[key]\n _merge(bv, ld.diff, rd.diff,\n path + (key,), decisions)\n elif DiffOp.REMOVERANGE in ops and DiffOp.PATCH in ops:\n # R/P, always conflict\n decisions.conflict(path, d0, d1)\n else:\n # A/R or A/P, by eliminiation\n # Simply perform addition first, then patch/removal\n # Mark conflicted, as this is suspect\n assert DiffOp.ADDRANGE in ops\n if ld.op == DiffOp.ADDRANGE:\n # Addition locally\n decisions.local_then_remote(path, d0, d1, conflict=True)\n else:\n # Addition remotely\n decisions.remote_then_local(path, d0, d1, conflict=True)\n\n elif d0[0].op != d1[0].op:\n # AR/R, AP/R, AR/P or AP/P\n if len(d0) > 1:\n ddouble = d0\n dsingle = d1[0]\n else:\n dsingle = d0[0]\n ddouble = d1\n double_ops = [d.op for d in ddouble]\n if double_ops[1] != dsingle.op:\n # AR/P or AP/R\n # Onesided addition + conflicted patch/delete as above\n # TODO: Should we make addition conflicted as well?\n if dsingle == d1[0]:\n # Addition is locally\n decisions.onesided(path, d0[0:1], None)\n decisions.conflict(path, d0[1:], d1)\n else:\n # Addtion is remotely\n decisions.onesided(path, None, d1[0:1])\n decisions.conflict(path, d0, d1[1:])\n elif dsingle.op == DiffOp.REMOVERANGE:\n # AR/R\n # As chunking assures identical Rs, there is no conflict\n # here! 
Simply split into onesided A + agreement R\n if dsingle == d1[0]:\n # Addition is locally\n decisions.onesided(path, d0[0:1], None)\n decisions.agreement(path, d0[1:], d1)\n else:\n # Addtion is remotely\n decisions.onesided(path, None, d1[0:1])\n decisions.agreement(path, d0, d1[1:])\n else:\n # AP/P, by eliminiation\n assert dsingle.op == DiffOp.PATCH\n # Simply mark as conflict, and let auto resolve deal with this\n decisions.conflict(path, d0, d1)\n\n else:\n # A/AR, A/AP, AR/AP, AR/AR, AP/AP\n ops = [d.op for d in d0 + d1]\n if DiffOp.PATCH in ops:\n # A/AP, AR/AP or AP/AP:\n # In these cases, simply merge the As, then conflict remaining\n # op for autoresolve to deal with\n _merge_concurrent_inserts(\n base, d0[:1], d1[:1], path, decisions)\n decisions.conflict(path, d0[1:], d1[1:])\n elif len(d0) < 2 or len(d1) < 2:\n # A/A or A/AR\n _merge_concurrent_inserts(base, d0, d1, path, decisions)\n else:\n # AR/AR\n # This is in principle a range substitution!\n decisions.conflict(path, d0, d1)\n\n\ndef _merge_strings(base, local_diff, remote_diff,\n path, decisions):\n \"\"\"Perform a three-way merge of strings. See docstring of merge.\"\"\"\n assert isinstance(base, string_types)\n\n # This functions uses a (static) state variable to track recursion.\n # The first time it is called, base can (potentially) be a\n # multi-line string. If so, we split this string on line endings, and merge\n # it as a list of lines (giving line-based chunking). However, if\n # there are conflicting edits (patches) of a line, we will re-enter this\n # function. If so, we simply mark it as conflicted lines.\n\n if _merge_strings.recursion:\n # base is a single line with differing edits. We could merge as list of\n # characters, but this is unreliable, and will conflict with line-based\n # chunking.\n\n # Mark as a conflict on parent (line):\n k = path[-1]\n decisions.conflict(path[:-1],\n [op_patch(k, local_diff)],\n [op_patch(k, remote_diff)])\n else:\n # Merge lines as lists\n _merge_strings.recursion = True\n base = base.splitlines(True)\n\n try:\n _merge_lists(\n base, local_diff, remote_diff, path, decisions)\n finally:\n _merge_strings.recursion = False\n\n_merge_strings.recursion = False\n\n\ndef _merge(base, local_diff, remote_diff, path, decisions):\n if isinstance(base, dict):\n return _merge_dicts(\n base, local_diff, remote_diff, path, decisions)\n elif isinstance(base, list):\n return _merge_lists(\n base, local_diff, remote_diff, path, decisions)\n elif isinstance(base, string_types):\n return _merge_strings(\n base, local_diff, remote_diff, path, decisions)\n else:\n raise ValueError(\"Cannot handle merge of type {}.\".format(type(base)))\n\n\ndef decide_merge_with_diff(base, local, remote, local_diff, remote_diff):\n \"\"\"Do a three-way merge of same-type collections b, l, r with given diffs\n b->l and b->r.\"\"\"\n path = ()\n decisions = MergeDecisionBuilder()\n _merge(base, local_diff, remote_diff, path,\n decisions)\n return decisions.validated(base)\n\n\ndef decide_merge(base, local, remote):\n \"\"\"Do a three-way merge of same-type collections b, l, r.\n\n Terminology:\n\n collection = list | dict | string\n value = int | float | string\n\n (string is a collection of chars or an atomic value depending on parameters)\n\n (an alternative way to handle string parameters would be a pre/postprocessing\n splitting/joining of strings into lists of lines, lists of words, lists of chars)\n\n Input:\n\n b - base collection\n l - local collection\n r - remote collection\n bld - base-local diff\n 
brd - base-remote diff\n\n ### Output:\n\n md - list of merge decisions\n\n The merge result can be computed by applying the decisions to the base.\n If any decisions have the conflict field set to True, the merge result will\n use the suggested action, which might not always be correct.\n\n ## Trying to figure out problem with diff vs diff entry in recursion:\n\n decide_merge(b, l, r) -> compute bld,brd and call _merge\n _merge(b, bld, brd) -> switch on type of b,l,r\n _merge_dicts(b, bld, brd)\n _merge_lists(b, bld, brd)\n _merge_strings(b, bld, brd)\n\n Case: b,l,r are dicts, bld,brd are dict diffs, keys of bld,brd correspond to keys in b,l,r.\n Case: b,l,r are lists, bld,brd are list diffs, indices in bld,brd entries correspond to indices in b(,l,r).\n\n Case: purely nested dicts of values. Alternatives for each dict key:\n\n One sided ops always ok:\n N,-\n N,!\n N,:\n N,+\n -,N\n !,N\n :,N\n +,N\n\n Two sided equal ops ok if argument is the same:\n -,- = ok (agree on delete)\n +,+ = ok if equal inserts, otherwise conflict (two sided insert)\n !,! = ok if equal patches, otherwise conflict (two sided patch)\n :,: = ok if equal replacement value, otherwise conflict (two sided replace)\n\n Different op always conflicts:\n !,- = conflict (delete and patch)\n -,! = conflict (delete and patch)\n :,- = conflict (delete and replace)\n -,: = conflict (delete and replace)\n :,! = conflict (patch and replace)\n !,: = conflict (patch and replace)\n\n Conflict situations (symmetric, only listing from one side):\n delete / replace or delete / patch -- manual resolution needed\n replace / replace with different value -- manual resolution needed\n insert / insert with different value -- manual resolution needed - recursion will not have a base value for further merging.\n patch / patch with different diff -- recurse!\n replace / patch -- manual resolution needed, will only happen if collection type changes in replace\n\n\n Takeaways:\n - Ensure that diff always uses patch on collections unless the type changes and replace on values.\n - The only recursion will happen on the patch / patch op of equal type collections!\n - Patch op is [DiffOp.PATCH, key, subdiff], providing subdiff for both sides, and meaning values exist on both sides.\n\n\n ## Next trying to figure out list situations:\n\n Case: purely nested lists of values. Alternatives for each base item:\n\n One sided ops always ok:\n N,-\n N,+\n N,!\n\n Delete and patch is a conflict:\n -,! = conflict (delete and patch)\n\n Two sided equal ops ok if argument is the same:\n -,- = ok (agree on deleting this item)\n -,+ = ok (delete this item and insert new values)\n +,+ = ok (always insert both, or pick one if new values are equal?)\n !,! 
= ok (recurse)\n !,+ = ok (patch this item and insert new values)\n\n Conflict situations (symmetric, only listing from one side):\n delete / replace or delete / patch -- manual resolution needed\n replace / replace with different value -- manual resolution needed\n insert / insert with different value -- manual resolution needed - recursion will not have a base value for further merging.\n patch / patch with different diff -- recurse!\n replace / patch -- manual resolution needed, will only happen if collection type changes in replace\n\n \"\"\"\n local_diff = diff(base, local)\n remote_diff = diff(base, remote)\n return decide_merge_with_diff(base, local, remote, local_diff, remote_diff)\n","sub_path":"nbdime/merging/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":20933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"223400123","text":"#! python3\n#\n# Find the hypotenuse\n# Your program will ask the user to enter in the 2 short sides of a right triangle.\n# You will calculate the length of the hypotenuse and display the result.\n# You will need to use the math module to use the command that finds the square root.\n#\n# Inputs:\n# side, side\n#\n# Outputs:\n# hypotenuse\n#\n# Test output\n# input sides of 5 and 7 should give hypotenuse of 8.60232526704\nimport math\na = input(\"Enter side 1\")\nb = input(\"Enter side 2\")\na=float(a)\nb=float(b)\nc = a**2\nx = c + b**2\nprint(\"math.sqrt(x) : \", math.sqrt(x))\n","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85787098","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 23 16:27:38 2015\n\n@author: stephane\n\"\"\"\n\n'''measurement of the taylor scale'''\n\nimport numpy as np\n# import turbulence.analysis.strain_tensor as strain_tensor\nimport turbulence.display.graphes as graphes\n\n\ndef compute(M, i, Dt=50, display=False):\n # compute the taylor scale by averaging U dU/dx over space.\n # the derivative can be taken either in the direction of U or in the direction perpendicular to it\n # call functions from the derivative module, that compute spatial derivative accurately\n nx, ny, nt = M.shape()\n\n start = max(0, i - Dt // 2)\n end = min(nt, i + Dt // 2)\n\n n = end - start\n Ux = M.Ux[:, :, start:end]\n Uy = M.Uy[:, :, start:end]\n\n # compute the strain tensor from Ux and Uy components\n edge = 3;\n d = 2\n dU = np.zeros((nx - edge * 2, ny - edge * 2, d, d, n))\n\n fx = max([np.mean(np.diff(M.x)), np.mean(np.diff(M.x))]) # in mm/box\n\n for k in range(n):\n U = np.transpose(np.asarray([Ux[..., k], Uy[..., k]]),\n (1, 2, 0)) # shift the dimension to compute the strain tensor along axis 0 and 1\n dU[..., k] = fx * strain_tensor.strain_tensor(U, d=2, step=1) # strain tensor computed at the box size\n\n # naive length scale, computed from Ux dUx/dx\n index = (slice(3, -3, None), slice(3, -3, None), slice(None))\n\n E_dE = Ux[index] * dU[..., 0, 0, :] + Uy[index] * dU[..., 1, 1, :]\n E = np.power(Ux[index], 2) + np.power(Uy[index], 2)\n\n if display:\n graphes.hist(E_dE / np.std(E_dE), num=1000, label='ko--', fignum=1)\n graphes.hist(E / np.std(E), num=1000, label='r^-', fignum=1)\n graphes.set_axes(-10, 10, 1, 10 ** 5)\n graphes.legende('E', 'pdf(E)', '')\n\n lambda_R0 = np.mean(E) / np.std(E_dE)\n print('')\n print((str(M.t[i]) + ' : ' + str(lambda_R0)))\n # input()\n\n dtheta = np.pi / 100\n angles = 
np.arange(0, np.pi, dtheta)\n\n E_dE_l = []\n E_dE_t = []\n E_theta = []\n\n lambda_R_l = []\n lambda_R_t = []\n\n for j, theta in enumerate(angles):\n U_theta = Ux[index] * np.cos(theta) + Uy[index] * np.sin(theta)\n\n dU_l = dU[..., 0, 0, :] * np.cos(theta) + dU[..., 1, 1, :] * np.sin(theta)\n dU_t = dU[..., 1, 0, :] * np.cos(theta) + dU[..., 0, 1, :] * np.sin(\n theta) # derivative of the same component, but in the normal direction\n\n # longitudinal of U dU\n E_dE_l.append(np.std(U_theta * dU_l))\n E_dE_t.append(np.std(U_theta * dU_t))\n E_theta.append(np.mean(np.power(U_theta, 2)))\n\n lambda_R_l.append(E_theta[j] / E_dE_l[j])\n lambda_R_t.append(E_theta[j] / E_dE_t[j])\n\n lambda_Rl = np.mean(np.asarray(lambda_R_l))\n lambda_Rt = np.mean(np.asarray(lambda_R_t))\n\n lambda_Rl_std = np.std(np.asarray(lambda_R_l))\n lambda_Rt_std = np.std(np.asarray(lambda_R_t))\n\n print((str(M.t[i]) + ' : ' + str(lambda_Rl)))\n print((str(M.t[i]) + ' : ' + str(lambda_Rt)))\n\n # graphes.graph(angles,E_dE_l,fignum=1,label='ko')\n # graphes.graph(angles,E_dE_t,fignum=1,label='r^')\n\n # lambda_R = lambda_Rl\n lambdas = {}\n lambdas['l_moy'] = lambda_Rl\n lambdas['t_moy'] = lambda_R0\n lambdas['l_std'] = lambda_Rl_std\n lambdas['t_std'] = lambda_Rt_std\n\n Urms = np.sqrt(np.std(E)) # E is in mm^2/s^-2\n return lambdas, Urms\n\n\ndef taylor_scale(M, fignum=1, display=True, label='k^'):\n nx, ny, nt = M.shape()\n t = M.t\n Dt = 20\n step = 1\n\n lambda_R = {}\n Urms = []\n t_R = []\n for i in range(Dt, nt - Dt, step):\n t_R.append(t[i])\n lambdas, U = compute(M, i, Dt=Dt)\n Urms.append(U)\n if lambda_R == {}:\n for key in list(lambdas.keys()):\n lambda_R[key] = [lambdas[key]]\n else:\n for key in list(lambdas.keys()):\n lambda_R[key] += [lambdas[key]]\n\n graphes.semilogx(t_R, lambda_R['t_moy'], fignum=fignum, label=label[0] + '^')\n graphes.semilogx(t_R, lambda_R['l_moy'], fignum=fignum, label=label[0] + '>')\n\n graphes.graphloglog(t_R, np.asarray(Urms) * np.asarray(lambda_R['t_moy']), fignum=fignum + 1, label=label)\n graphes.graphloglog(np.asarray(Urms), np.asarray(lambda_R['t_moy']), fignum=fignum + 2, label=label)\n graphes.legende('', 'lambda', '')\n","sub_path":"analysis/taylor_scale.py","file_name":"taylor_scale.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"462264235","text":"from app.model.product import Products\nfrom app import response,db\nfrom flask import request,jsonify\nfrom app import db\n\ndef store():\n try:\n name = request.json['name']\n price = request.json['price']\n\n product = Products(name=name, price=price)\n db.session.add(product)\n db.session.commit()\n return response.ok('', 'Successfully create Product !')\n except Exception as e:\n print(e)\n\ndef index():\n try:\n product = Products.query.all()\n data = transform(product)\n return response.ok(data,\"Data Product Ditemukan!\")\n except Exception as e:\n print(e)\n\ndef update(id):\n try:\n name = request.json['name']\n price = request.json['price']\n\n product = Products.query.filter_by(id = id).first()\n product.name = name\n product.price = price\n\n db.session.commit()\n return response.ok('', 'Successfully update Product !')\n except Exception as e:\n print(e)\n\ndef show(id):\n try:\n product = Products.query.filter_by(id=id).first()\n if not product:\n return response.badRequest([], 'Empty....')\n data = singleTransform(product)\n return response.ok(data,\"Data Product Ditemukan!\")\n except Exception as e:\n 
print(e)\n\ndef delete(id):\n try:\n product = Products.query.filter_by(id = id).first()\n if not product:\n return response.badRequest([], 'Empty....')\n db.session.delete(product)\n db.session.commit()\n return response.ok('', 'Successfully delete Product !')\n except Exception as e:\n print(e)\n\n\ndef transform(product):\n array = []\n for i in product:\n array.append(singleTransform(i))\n return array\n\n\ndef singleTransform(product):\n data = {\n 'id' : product.id,\n 'name' : product.name,\n 'price' : product.price,\n 'created_at' : product.created_at,\n 'updated_at' : product.updated_at\n }\n\n return data","sub_path":"app/controllers/controllerProduct.py","file_name":"controllerProduct.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"31002805","text":"from pyramid.httpexceptions import HTTPFound\n\nfrom alchemist.models.base import DBSession\nfrom alchemist.models.company import BaseCompany\nfrom alchemist.models.tag import Tag\nfrom alchemist.models.user import User\nfrom alchemist.system.route import route\n\n\ndef obj_to_dict(row):\n d = {}\n prefix = ''\n if not row:\n return d\n for column in row.__table__.columns:\n d[prefix + column.name] = getattr(row, column.name)\n\n return d\n\n\n@route(path='/tagmanager/list', permission='admin')\ndef tagmanager_list(request):\n tags = DBSession.query(Tag)\n tags_uses = []\n for tag in tags:\n tag_w_use = obj_to_dict(tag)\n if tag.only_type == 'area_of_expertise':\n uses = DBSession.query(User.id, User.firstname, User.lastname).filter(User.tags.any(Tag.id == tag.id)).all()\n else:\n uses = DBSession.query(BaseCompany.id, BaseCompany.name).filter(\n BaseCompany.tags.any(Tag.id == tag.id)).all()\n link = lambda x: '/%s/%s' % (tag.only_type, x[0])\n name = lambda x: '%s %s' % (\n (x[1] or 'connection', x[2] or x[0]) if tag.only_type == 'area_of_expertise' else (x[1], ''))\n uses = ['%s' % (link(use), name(use)) for use in uses]\n tag_w_use['uses'] = ', '.join(uses)\n tags_uses.append(tag_w_use)\n return {\n 'tags': tags_uses\n }\n\n\n@route(path='/tagmanager/delete/{tag_id}', permission='admin')\ndef tagmanager_delete(request):\n tag_id = request.matchdict.get('tag_id')\n tag = DBSession.query(Tag).filter(Tag.id == int(tag_id)).first()\n DBSession.delete(tag)\n return HTTPFound(request.referer)\n","sub_path":"alchemist/tags/tag_manager.py","file_name":"tag_manager.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"430471515","text":"menu_list =[\r\n\t\t{\"id\": 1, \"name\":\"1.漢堡\",\"price\":70,\"$\":\"元\"},\r\n\t\t{\"id\": 2, \"name\":\"2.薯條\", \"price\": 45,\"$\":\"元\"},\r\n \t{\"id\": 3, \"name\":\"3.雪碧\", \"price\": 30,\"$\":\"元\"},\r\n \t{\"id\": 4, \"name\":\"4.薯餅\", \"price\": 45,\"$\":\"元\"},\r\n \t{\"id\": 5, \"name\":\"5.熱狗\",\"price\":35,\"$\":\"元\"},\r\n {\"id\": 6, \"name\":\"6.洋蔥圈\", \"price\": 45,\"$\":\"元\"},\r\n {\"id\": 7, \"name\":\"7.可樂\", \"price\": 30,\"$\":\"元\"}]\r\nOrder_list = []\r\nprint('==========================歡迎光臨本餐廳,祝您用餐愉快====================================')\r\nprint('今日菜單:')\r\nfor menu in menu_list:\r\n print(menu.get('name'),menu.get('price'),menu.get('$'))\r\nwhile True:\r\n print('='*50)\r\n print(\"1.點餐\\n2.取消點餐\\n3.確認菜單\\n4.結帳\")\r\n server = int(input(\"請選擇服務:\"))\r\n if server == 1:\r\n while True:\r\n menu_add = input(\"請輸入菜名編號或輸入Y结束點菜:\")\r\n if menu_add != 'Y':\r\n for m in menu_list:\r\n if 
m.get('id')== int(menu_add):\r\n Order_list.append(m)\r\n break\r\n else:\r\n print('==================已點菜=====================')\r\n total_price = 0\r\n for order in Order_list:\r\n print(order.get('name'),order.get('price'),order.get('$'))\r\n total_price += int(order.get('price'))\r\n print(' 小計:{}元'.format(total_price))\r\n break\r\n elif server == 2: \r\n menu_del = input(\"請輸入要取消的菜名:\")\r\n Order_list.remove(order)\r\n print('==================已點菜=====================')\r\n total_price = 0\r\n for order in Order_list:\r\n print(order.get('name'),order.get('price'),order.get('$'))\r\n total_price += int(order.get('price'))\r\n print(' 小計:{}元'.format(total_price))\r\n elif server == 3:\r\n print('==================已點菜=====================')\r\n total_price = 0\r\n for order in Order_list:\r\n print(order.get('name'),order.get('price'),order.get('$'))\r\n total_price += int(order.get('price'))\r\n print(' 小計:{}元'.format(total_price))\r\n elif server == 4:\r\n print('=================您的消費菜單=======================')\r\n total_price = 0\r\n for order in Order_list:\r\n print(order.get('name'),order.get('price'),order.get('$'))\r\n total_price += int(order.get('price'))\r\n print(' 總計:{}元'.format(total_price))\r\n print('==================謝謝光臨,歡迎您再次光臨本店!=====================')\r\n break","sub_path":"小組的點餐系統 正版.py","file_name":"小組的點餐系統 正版.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"574841890","text":"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport pdb\nimport os\nimport numpy as np\nfrom scipy import stats\nimport ctypes\nimport popart\nfrom sparse_attention_utils import Heads, Convert\n\n\nso_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"../../custom_ops.so\")\nctypes.cdll.LoadLibrary(so_path)\n\nnp.random.seed(0)\n\n# Sequence Parameters\nn_batch = 1\nn_heads = 4\nn_hidden = 256\nsequence_length = 1024\nblocksize = [16, 16, 16] # 3d b/c specifying all dimension of a matmul (m, n) x (n, k) -> (m, k)\n\n# Attention parameters\nwindow_size = 128\nn_summary_blocks = 1\nn_windows = sequence_length//window_size\nn_block_gram = 4\n\n# ATTENTION PATTERN DEFINITION\n# use the predefined sparse attention heads\nassert sequence_length % window_size == 0, \"Sequence length must be divisible by window length\"\nhead1 = Heads.causal_windows_with_summaries(window_size, n_windows, n_summary_blocks, blocksize[-2:])\nhead2 = Heads.causal_block_gram(sequence_length, n_block_gram, blocksize[-2:])\n\n# Use each head twice (multi-head attention with 4 heads)\nblocks, sparsity, bsr_rhs_lengths_per_2d_plane = \\\n Heads.concatenate_heads([head1, head2], [2, 2])\n\n# BUILD THE MODEL IN POPART\nbuilder = popart.Builder()\n\n# INPUTS\n# in this short demo the queries, keys and values are considered direct inputs\nqueries = builder.addInitializedInputTensor(np.float16(np.random.randn(n_batch, n_heads, sequence_length, n_hidden//n_heads)), 'queries')\nkeys = builder.addInitializedInputTensor(np.float16(np.random.randn(n_batch, n_heads, n_hidden//n_heads, sequence_length)), 'keys')\nvalues_t = builder.addInitializedInputTensor(np.float16(np.random.randn(n_batch, n_heads, n_hidden//n_heads, sequence_length)), 'values_t')\n\n# Multiply the queries and keys in a sparse way\nlogits = builder.customOp(opName = \"BSMatMul\",\n opVersion = 1,\n domain = \"ai.graphcore\",\n inputs = [queries, keys],\n attributes = {\n \"bsr_rhs_lengths_per_2d_plane\": bsr_rhs_lengths_per_2d_plane,\n 
\"matrix_dims\": [sequence_length, n_hidden//n_heads, sequence_length],\n \"block_size\": blocksize,\n \"sparsity_mask\": sparsity,\n \"bsmatmul_type\": 1, # Dense @ Dense -> Sparse out\n \"transpose_rhs\": False, # see the project below for an instance of when True\n \"memory_cycle_ratio\": 0.6, # IF YOU ARE NOT GRAPHCORE -- DO NOT TOUCH\n \"in_type\": \"float16\",\n \"out_type\": \"float16\",\n \"pp_type\": \"float16\"\n })[0]\n\n# The gradients of the logits will have the following name\n# we'll show how to inspect them at the end of the demo\ndlogits = popart.reservedGradientPrefix() + logits\n\n# Self attention scales the logits by head size to control variance.\nvariance_scale = np.array([np.sqrt(1.0/(n_hidden/n_heads))])\nvariance_scale = builder.aiOnnx.constant(np.float16(variance_scale), \"variance_scale\")\nscaled_logits = builder.aiOnnx.mul([logits, variance_scale], \"logits_scaling\")\n\n# Next apply the special BsSoftmax to convert the logits into probabilities\n# this will treat zero-blocks as having 0 probability. There's also on option\n# to apply causal masking (per group), which we use\nprobs = builder.customOp(opName = \"BsSoftmax\",\n opVersion = 1,\n domain = \"ai.graphcore\",\n inputs = [scaled_logits],\n attributes = {\n \"matrixDims\": [n_batch, n_heads, sequence_length, sequence_length],\n \"blockSize\": blocksize,\n \"sparsity\": sparsity,\n \"groupSizes\": bsr_rhs_lengths_per_2d_plane,\n \"subBlockMaskPerGroup\": \"ZeroUpperTriangle \" * len(bsr_rhs_lengths_per_2d_plane)\n })[0]\n\n# The gradients wrt probabilities will have the name (can be visualized in the same way\n# as dlogits\ndprobs = popart.reservedGradientPrefix() + probs\n\n# Multiply the sparse probabilities with the dense values to produce a dense output\n# This requires the operation (values.T @ probs.T).T\n# To transpose the probs we set the transpose_rhs flag to True\n# This means the matmul shapes are [n_hidden/n_heads, (seq] x [seq), seq], in short\nmatmul_dims = [n_hidden//n_heads, sequence_length, sequence_length]\nattendedValues_t = builder.customOp(opName = \"BSMatMul\",\n opVersion = 1,\n domain = \"ai.graphcore\",\n inputs = [values_t, probs],\n attributes = {\n \"bsr_rhs_lengths_per_2d_plane\": bsr_rhs_lengths_per_2d_plane,\n \"matrix_dims\": matmul_dims,\n \"block_size\": blocksize,\n \"sparsity_mask\": sparsity,\n \"bsmatmul_type\": 0, # Dense @ Sparse -> Dense Out\n \"transpose_rhs\": True, # this will transpose the probabilities matrix for (values.T @ probs.T)\n \"memory_cycle_ratio\": 0.5, # IF YOU ARE NOT GRAPHCORE -- DO NOT TOUCH\n \"in_type\": \"float16\",\n \"out_type\": \"float16\",\n \"pp_type\": \"float16\"\n })[0]\nattendedValues = builder.aiOnnx.transpose([attendedValues_t], perm = [0, 1, 3, 2])\n# We've now arrived at the attended values which concludes the forward pass\n\n# LOSS\n# something arbitrary just so the graph compiles and generates grads\nloss = builder.aiGraphcore.l1loss([attendedValues], 1.0)\n\n# SESSION DETAILS\nanchor_desc = {loss: popart.AnchorReturnType(\"ALL\"),\n logits: popart.AnchorReturnType(\"ALL\"),\n dlogits: popart.AnchorReturnType(\"ALL\"),\n probs: popart.AnchorReturnType(\"ALL\"),\n dprobs: popart.AnchorReturnType(\"ALL\")}\ndataFlow = popart.DataFlow(1, anchor_desc)\nsession = popart.TrainingSession(fnModel = builder.getModelProto(),\n loss = loss,\n deviceInfo = popart.DeviceManager().acquireAvailableDevice(1),\n optimizer = popart.ConstSGD(0.01),\n dataFlow = dataFlow)\n\n# COMPILE\nsession.prepareDevice()\n\n# RUN A STEP (FWD, BWD, 
UPDATE)\nsession.weightsFromHost()\nanchors = session.initAnchorArrays()\nsession.run(popart.PyStepIO({}, anchors))\n\n# INSPECT\n# we can use the utilities to convert the sparse probabilities back\n# into a dense numpy array for inspection\nprobBlocks = anchors[probs]\nnumpyShape = [n_batch, n_heads, sequence_length, sequence_length]\nprobArray = Convert.to_np_array(numpyShape, probBlocks, sparsity,\n bsr_rhs_lengths_per_2d_plane, blocksize[-2:])\nprint(f\"Probability mean {np.mean(probArray)}, std {np.std(probArray)}\")\n\n# The same process can be used to inspect the grads\ndlogitsBlocks = anchors[dlogits]\nnumpyShape = [n_batch, n_heads, sequence_length, sequence_length]\ndlogitsArray = Convert.to_np_array(numpyShape, dlogitsBlocks, sparsity,\n bsr_rhs_lengths_per_2d_plane, blocksize[-2:])\nprint(f\"Logits grad mean {np.mean(dlogitsArray)}, std {np.std(dlogitsArray)}\")\n","sub_path":"code_examples/popart/block_sparse/examples/sparse_attention/short_demo.py","file_name":"short_demo.py","file_ext":"py","file_size_in_byte":7652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247974690","text":"# -*- coding: utf-8 -*-\n\n#------------------------------------------------------------------------------#\n# Import modules\n#------------------------------------------------------------------------------#\n\n# To keep track of time:\nimport time\nt=time.time()\n\n# To clear all variables:\nfrom ClearAllVariables import clear_all\n\n# Classes setup:\nfrom classes_setup import Template\n\nfrom write_child import write_vector_of_interval_of_messages\n\nfrom analyze_bins import analyze_bins_and_write\n\nfrom sliding_window import create_chi_tests_count, create_flags, create_distributions, create_bins, create_bins_counter, sliding_window_flags_eff_new, create_parents_distribution, reindexing_bins_cluster\n\nfrom functions_folder.write_stuff import write_templates , write_different_distribution_messages, write_chi_ineffective_messages, write_lines#, write_vector_of_interval_of_messages\nfrom functions_folder.set_log_files import set_log_files \nfrom functions_folder.find_template import find_template\nfrom functions_folder.determine_anomalous_bins import determine_anomalous_bins_parents, anomalous_bins_frequencies\n#from functions_folder.plot_dictionary import plot_dictionary\nfrom functions_folder.find_overlapping_indeces import find_overlapping_indeces_parents \nfrom functions_folder.determine_child_bins import determine_child_bins\n\n\n\n\n\n#------------------------------------------------------------------------------#\n# Clears the variables space from the workspace\n#------------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n clear_all()\n#------------------------------------------------------------------------------#\n \n\n\n\n#------------------------------------------------------------------------------#\n# Set the list of log files to analize\n#------------------------------------------------------------------------------#\n# set syslog to analyze: type and years. 
Example: set_log_files(\"monitor\",[\"2015\",\"2016\"]) \n# type of log files : lab-fs0, monitor, nat (~4 300 sec/ 1h)\naddress = set_log_files(\"monitor\",[\"2015\",\"2016\"]) \n#------------------------------------------------------------------------------#\n\n\n#------------------------------------------------------------------------------#\n# Initialization\n#------------------------------------------------------------------------------#\nmessages_count=0\ntemplates=[]\nTemplates_stats=[]\ni=0\nM_T=[] #message-template \n#storage_distributions=[]\n\nexamining_file_time=[]\n\n\n#gaps for chi tests\ngap_vector=[90,170]\nchi_tests_count={}\ncreate_chi_tests_count(gap_vector,chi_tests_count)\nflags={}\ndistributions = {}\nbins_cluster={}\nbins_count={}\nparents_distribution={}\ncreate_parents_distribution(parents_distribution)\ncreate_flags(gap_vector,flags)\ncreate_distributions(gap_vector, distributions)\ncreate_bins(gap_vector, bins_cluster)\ncreate_bins_counter(gap_vector,bins_count)\n\nrare_threshold = 10\n#------------------------------------------------------------------------------#\n# Algorithm\n#------------------------------------------------------------------------------#\n\n\nfor files in address:\n f = open(files,\"r\")\n \n file_timer=time.time()\n \n FoundMatching= False\n MoreFiles = False#\n \n print(\"\\n Examining \" + f.name + \" ...\\n\")\n \n \n for line in f:\n # Count number of messages analyzed:\n messages_count+=1\n\n #----------------------------------------------------------------------#\n # Find the template of the current message\n #----------------------------------------------------------------------#\n FoundMatching= False\n \n #Split line into its words\n new_message = line.split()\n \n for i in range(len(templates)):\n # If a match has NOT been found, keep comparing the line with the templates:\n comparing_template=templates[i]\n if len(comparing_template) != len(new_message):\n # If the lenghts are different then they are not the same template\n FoundMatching=False\n else:\n # If the lenghts are the same check if there is any matching template (to update):\n FoundMatching=find_template(i,new_message,comparing_template,templates,Templates_stats)\n if FoundMatching == True:\n # Exit loop if matching template was found:\n break\n # If you never found a match add a new template:\n if FoundMatching==False:\n templates.append(new_message)\n Templates_stats.append(Template(i,templates[i]))\n #----------------------------------------------------------------------#\n # Now that we have the message and its template we compute statistics\n #----------------------------------------------------------------------#\n Templates_stats[i]._counts += 1 \n Templates_stats[i]._line_indeces.append(messages_count)\n M_T.append(i)\n #----------------------------------------------------------------------#\n # Use sliding windows that compute CHI SQUARED TESTS to find strange bins of messages\n #----------------------------------------------------------------------# \n #sliding_window_flags_eff(flags,messages_count, M_T, gap_vector,chi_tests_count,storage_distributions)\n sliding_window_flags_eff_new(flags,messages_count, M_T, gap_vector,chi_tests_count,distributions,bins_cluster,bins_count)\n # compute elapsed time for each file\n #examining_file_time.append([len(storage_distributions),time.time()-file_timer])\n \n\nprint(\"\\n\\n Finding interesting bins of messages ... 
\\n\") \n \n# reindexing : \nreindexing_bins_cluster(bins_cluster, distributions, gap_vector)\n\n#----------------------------------------------------------------------#\n# Messages that have a distribution different than usual\n#----------------------------------------------------------------------#\nchi_different_indeces=find_overlapping_indeces_parents(flags,gap_vector,\"different\",bins_cluster,parents_distribution)#find_overlapping_indeces_new(flags,gap_vector,\"different\")\nchi_different_bins, P_different= determine_anomalous_bins_parents(chi_different_indeces, parents_distribution,\"different\") #anomalous_distribution_bins= determine_anomalous_bins(different_distribution_indeces) ###chNGE TO NEW\ndifferent_child_bins = determine_child_bins(bins_cluster,P_different, messages_count, gap_vector)\n\n\nchi_different_frequencies = anomalous_bins_frequencies(M_T, chi_different_bins)\nunique_templates_anomalous, rare_templates_anomalous = analyze_bins_and_write(chi_different_frequencies, Templates_stats, rare_threshold,\"chi_different\")\n\n#----------------------------------------------------------------------#\n# Messages that the CHI tets was not able to tests\n#----------------------------------------------------------------------#\nchi_ineffective_indeces=find_overlapping_indeces_parents(flags,gap_vector,\"ineffective\",bins_cluster, parents_distribution)#find_overlapping_indeces_new(flags,gap_vector,\"ineffective\")\nchi_ineffective_bins, P_ineffective = determine_anomalous_bins_parents(chi_ineffective_indeces,parents_distribution,\"ineffective\")\nineffective_child_bins = determine_child_bins(bins_cluster,P_ineffective, messages_count, gap_vector)# determine_child_bins(bins_cluster,P, messages_count, gap_vector)\n\n\nchi_ineffective_frequencies = anomalous_bins_frequencies(M_T,chi_ineffective_bins)\nunique_templates_ineffective_chi, rare_templates_ineffective_chi = analyze_bins_and_write(chi_ineffective_frequencies, Templates_stats, rare_threshold, \"chi_ineffective\")\n\n\n#------------------------------------------------------------------------------#\n# Write a txt file with all the templates\n#------------------------------------------------------------------------------#\nwrite_templates(templates)\n\n#------------------------------------------------------------------------------#\n# Write txt files with anomalous bins of messages\n#------------------------------------------------------------------------------#\n# write parents bins\nwrite_different_distribution_messages(address,chi_different_indeces)\nwrite_chi_ineffective_messages(address,chi_ineffective_indeces)\n\n# write unique or rare lines\nwrite_lines(address,[unique_templates_ineffective_chi, rare_templates_ineffective_chi])\n\n# write child bins\nwrite_vector_of_interval_of_messages(address, different_child_bins, \"different\")\nwrite_vector_of_interval_of_messages(address, ineffective_child_bins, \"ineffective\")\n#------------------------------------------------------------------------------#\n\n\n\n\n#------------------------------------------------------------------------------#\n# Compute elapsed time\n#------------------------------------------------------------------------------#\nelapsed = time.time() - t\nprint(\"Elapsed time: {0}\".format(elapsed))\n ","sub_path":"StefanoConsole.py","file_name":"StefanoConsole.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"578694848","text":"import os\nfrom flask import 
Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport random\n\nfrom models import setup_db, Question, Category\nfrom random import randrange\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef paginate_questions(request, selection):\n \"\"\"\n Return a list of question objects corresponding to the \n specified page in the request argument.\n \"\"\"\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n\n questions = [question.format() for question in selection]\n current_questions = questions[start:end]\n\n return current_questions\n\n\ndef create_app(test_config=None):\n app = Flask(__name__)\n setup_db(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.add(\n 'Access-Control-Allow-Methods', 'GET, POST, DELETE')\n return response\n\n @app.route('/categories')\n def get_categories():\n \"\"\"\n Return a list of containing all the available categories and\n a success value that determines whether the request was\n successful.\n \"\"\"\n categories_query = Category.query.order_by(Category.id).all()\n\n if len(categories_query) == 0:\n abort(404)\n\n categories = []\n\n for category in categories_query:\n categories.append(category.type)\n\n return jsonify({\n 'success': True,\n 'categories': categories,\n })\n\n @app.route('/questions')\n def retrieve_questions():\n \"\"\"\n Return a list of question objects paginated in group of\n 10 items per page, the total of questions, the list of\n all categories, and the success value.\n \"\"\"\n selection = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n if len(current_questions) == 0:\n abort(404)\n\n categories_query = Category.query.all()\n categories = []\n\n for category in categories_query:\n categories.append(category.type)\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions': len(selection),\n 'current_category': None,\n 'categories': categories\n })\n\n @app.route('/questions/', methods=['DELETE'])\n def delete_question(question_id):\n \"\"\"\n Permanently delete the question with the provided\n id and return a success value indicating whether \n the request was succesful.Return True if successfuly \n deleted.\n \"\"\"\n try:\n question = Question.query.filter(\n Question.id == question_id).one_or_none()\n\n if question is None:\n abort(404)\n\n question.delete()\n\n return jsonify({\n 'success': True,\n 'deleted': question_id,\n })\n\n except:\n abort(422)\n\n @app.route('/questions', methods=['POST'])\n def submit_question():\n \"\"\"\n Add a new question with answer, difficulty and category\n provided in Json format. Return True if succesfully\n added.\n \"\"\"\n body = request.get_json()\n\n question = body.get('question', None)\n answer = body.get('answer', None)\n difficulty = body.get('difficulty', None)\n category = body.get('category', None)\n\n try:\n\n new_question = Question(\n question=question,\n answer=answer,\n difficulty=difficulty,\n category=category\n )\n\n new_question.insert()\n\n return jsonify({\n 'success': True,\n 'created': new_question.id\n })\n\n except:\n abort(422)\n\n @app.route('/search', methods=['POST'])\n def search_question():\n \"\"\"\n Fetches all the questions that matches exactly or partially\n the search term provided. 
Returns the list of questions, the total\n of questions and the current category.\n \"\"\"\n body = request.get_json()\n search = body.get('searchTerm', '')\n\n try:\n selection = Question.query.order_by(Question.id).filter(\n Question.question.ilike('%{}%'.format(search))).all()\n\n questions = []\n\n for question in selection:\n questions.append(question.format())\n\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': None\n })\n\n except:\n abort(422)\n\n @app.route('/categories//questions')\n def retrieve_questions_by_category(category_id):\n \"\"\"\n Retrieve a list of question objects that belong to\n the category id provided along with the total\n of questions and the current category. \n \"\"\"\n try:\n selection = Question.query.filter(\n Question.category == category_id).order_by(Question.id).all()\n\n if len(selection) == 0:\n abort(404)\n\n questions = []\n\n for question in selection:\n questions.append(question.format())\n\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': Category.query.filter(Category.id == category_id).first().type,\n })\n\n except:\n abort(422)\n\n @app.route('/quizzes', methods=['POST'])\n def play_quizz():\n \"\"\"\n Return a random question that belong to the category\n provided and that is not any of the previous\n questions.\n \"\"\"\n body = request.get_json()\n\n previous_questions = body.get('previous_questions', [])\n quiz_category = body.get('quiz_category', None)\n\n try:\n quiz_category['id'] = int(quiz_category['id'])\n except:\n abort(405)\n\n selection = []\n ALL_CATEGORIES = 0\n\n if quiz_category['id'] == ALL_CATEGORIES:\n selection = Question.query.filter(~Question.id.in_(\n previous_questions)).order_by(Question.id).all()\n else:\n selection = Question.query.filter(~Question.id.in_(previous_questions)).filter(\n Question.category == quiz_category['id']).all()\n\n if len(selection) == 0:\n abort(404)\n\n STEP = 1\n index = randrange(0, len(selection), STEP)\n\n return jsonify({\n 'success': True,\n 'question': selection[index].format()\n })\n\n ''' 4xx - Client Error Handlers '''\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"Error: A bad request was made\"\n }), 400\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Error: The requested resource was not found\"\n }), 404\n\n @app.errorhandler(405)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"Error: Forbidden method used\"\n }), 405\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"Error: Cannot proccess request\"\n }), 422\n\n ''' 5xx - Internal Server Error Handler '''\n @app.errorhandler(500)\n def internal_server_error(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \"Error: Internal Server Error ocurred\"\n }), 500\n\n return app","sub_path":"projects/02_trivia_api/starter/backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"408603088","text":"from haystack import connections\n\n\ndef prepare_object(obj, using='default'):\n \"\"\"\n Returns a Python dictionary representation of 
the given object, expected to\n be a Model object with an associated SearchIndex. The optional argument\n `using` specifies the backend to use from the Haystack connections list.\n \"\"\"\n model = obj.__class__\n unified_index = connections[using].get_unified_index()\n index = unified_index.get_index(model)\n prepped_data = index.full_prepare(obj)\n final_data = {}\n for key, value in prepped_data.items():\n final_data[key] = connections[using].get_backend()._from_python(value)\n return final_data\n","sub_path":"elasticstack/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"130034039","text":"from django.shortcuts import render, redirect\nimport bcrypt\nfrom .models import *\nfrom django.contrib import messages\nfrom django.db.models import Q\n\ndef login_regis(request):\n return render(request, 'examapp/index.html')\n\ndef register(request):\n errors = registereduser.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(\"/\")\n else:\n hash = bcrypt.hashpw(request.POST[\"inPW\"].encode(), bcrypt.gensalt())\n regiuser = registereduser.objects.create(firstname=request.POST['inFN'], lastname=request.POST['inLN'], email=request.POST['inEM'],dob=request.POST['inDOB'], hashedpassword=hash)\n request.session['userid']= regiuser.id\n messages.success(request, \"Successfully Registered!\")\n return redirect('/dashboard')\n\ndef login(request):\n try:\n registereduser.objects.get(email=request.POST['inEM'])\n except:\n messages.error(request, \"User does not exist.\")\n return redirect('/')\n match = registereduser.objects.get(email=request.POST['inEM'])\n if bcrypt.checkpw(request.POST['inPW'].encode(), match.hashedpassword.encode()):\n request.session['userid'] = match.id\n messages.success(request, \"Successful Login!\")\n return redirect('/dashboard')\n else:\n messages.error(request, \"Incorrect password.\")\n return redirect('/')\n\ndef dashboard(request):\n if not \"userid\" in request.session:\n messages.error(request, \"Not Logged In.\")\n return redirect('/')\n else:\n user = registereduser.objects.get(id = request.session['userid'])\n nfqts = quotes.objects.all().exclude(quotefaved = request.session['userid'])\n fvs = quotes.objects.filter(quotefaved = request.session['userid'])\n context ={\n \"loggedinuser\": user,\n \"nonfavorites\": nfqts,\n \"favorited\": fvs, \n }\n return render(request, 'examapp/quotes.html', context)\n\ndef createnewquote(request):\n errors = quotes.objects.quoteval(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/dashboard')\n else:\n user = registereduser.objects.get(id=request.session['userid']) \n newquote = user.regadd.create(authorname = request.POST['inAUTH'], quote = request.POST['inQTE'])\n quoteid = newquote.id\n messages.success(request, 'Quote added successfully')\n return redirect('/dashboard')\n \ndef displayuserquotes(request, clickeduser):\n user = registereduser.objects.get(id=clickeduser)\n allquotes = quotes.objects.filter(quotecreator_id = user)\n count = allquotes.count()\n print(count)\n context={\n \"userinfo\": user,\n \"userquotes\": allquotes, \n \"count\": count,\n }\n return render(request, 'examapp/show.html', context)\n\ndef makefavorite (request, makefav):\n addquote = quotes.objects.get(id=makefav)\n loguser = 
registereduser.objects.get(id=request.session['userid'])\n loguser.regfavorite.add(addquote)\n loguser.save()\n return redirect('/dashboard')\n\ndef cancelfavorite (request, cancelfav):\n trip = quotes.objects.get(id=cancelfav)\n trip.quotefaved.remove(registereduser.objects.get(id=request.session['userid']))\n return redirect('/dashboard')\n\ndef logout(request):\n request.session.clear()\n return redirect('/')","sub_path":"apps/examapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160964721","text":"##\n## Imprima por cada fila, la columna 1 y la suma de los valores\n## de la columna 5\n##\n## E,22\n## A,14\n## B,14\n## ....\n## C,8\n## E,11\n## E,16\n##\n\n#Punto q13\nfile = open('data.csv','r').readlines()\nfile = [row[0:-1] for row in file]\nfile = [row.split('\\t') for row in file]\n\ndata = []\ni=0\nfor registro in file:\n data.append([])\n for e in registro:\n a = e.split(',')\n if (len(a) == 1):\n data[i].append(a[0])\n else:\n data[i].append(a)\n i += 1\n\n\nresultado = {}\n\nfor registro2 in data:\n aux = 0\n for a in registro2[4]:\n aux += int(a.split(':')[1])\n print(registro[0] + ',' + str(aux))","sub_path":"q13.py","file_name":"q13.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"483371861","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.urls import reverse\n\nfrom .forms import NewArticle\nfrom .models import Articles\n\n\ndef main(request, pk=None):\n title = 'главная'\n main_article = Articles.objects.order_by('-upload')[:1]\n other_articles_1 = Articles.objects.order_by('-upload').all()[1:4]\n other_articles_2 = Articles.objects.order_by('-upload').all()[4:7]\n other_articles_3 = Articles.objects.order_by('-upload').all()[7:10]\n content = {\n 'title': title,\n 'main_article': main_article,\n 'other_articles_1': other_articles_1,\n 'other_articles_2': other_articles_2,\n 'other_articles_3': other_articles_3,\n }\n return render(request, 'mainapp/index.html', content)\n\n\ndef article(request, pk=None):\n title = 'статья'\n data = Articles.objects.filter(pk=pk)\n content = {\n 'title': title,\n 'data': data,\n }\n return render(request, 'mainapp/article.html', content)\n\n\ndef add(request):\n title = 'опубликовать'\n if request.method == 'POST':\n article_form = NewArticle(request.POST, request.FILES)\n article_form.save()\n return HttpResponseRedirect(reverse('articles:index'))\n else:\n article_form = NewArticle()\n content = {\n 'title': title,\n 'article_form': article_form,\n }\n return render(request, 'mainapp/add.html', content)","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"50474028","text":"from __future__ import annotations\nfrom neo3 import contracts, storage\nfrom neo3.contracts.interop import register\n\n\n@register(\"Neo.Native.Deploy\", 0, contracts.native.CallFlags.ALLOW_MODIFIED_STATES, False, [])\ndef deploy_native(engine: contracts.ApplicationEngine) -> None:\n if engine.snapshot.persisting_block.index != 0:\n raise ValueError(\"Can only deploy native contracts in the genenis block\")\n\n for nc in contracts.NativeContract()._contracts.values():\n 
engine.snapshot.contracts.put(storage.ContractState(nc.script, nc.manifest))\n nc._initialize(engine)\n\n\n@register(\"Neo.Native.Call\", 0, contracts.native.CallFlags.NONE, False, [str])\ndef call_native(engine: contracts.ApplicationEngine, contract_name: str) -> None:\n contracts.NativeContract.get_contract(contract_name).invoke(engine)\n","sub_path":"neo3/contracts/interop/native.py","file_name":"native.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"191063416","text":"#!/usr/bin/env python\nimport json\nimport os\nimport sqlite3\nfrom kafka import KafkaProducer\nfrom flask import Flask, request, jsonify, make_response\n\n# Set the Directory path for the file system\ndirname = os.path.dirname(__file__)\n\napp = Flask(__name__)\nproducer = KafkaProducer(bootstrap_servers='kafka:29092')\n\n\n# Log events to Kafka\ndef log_to_kafka(topic, event):\n event.update(request.headers)\n producer.send(topic, json.dumps(event).encode())\n \n\n#Convert query function to a format that can be transformed into json\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\n#Done\n@app.route(\"/join_a_guild/\")\ndef join_a_guild(guild_id):\n \n \"\"\"\n This function responds to a request for /api/join_a_guild/{guild_id}\n with one matching transaction from quest.db\n \n :param guild_id: ID of the enemy\n :return: quest and attributes matching ID as JSON\n \"\"\"\n \n query = \"SELECT * \\\n FROM guild \\\n WHERE guild_id =\" + str(guild_id) +';'\n\n \n conn = sqlite3.connect(os.path.join(dirname, 'db/guild.db'))\n conn.row_factory = dict_factory\n cur = conn.cursor()\n \n results = cur.execute(query).fetchall()\n \n join_guild_event = results[0]\n \n log_to_kafka('events', join_guild_event)\n return \"Joined Guild!\\n\"\n\n\n@app.route(\"/kill_enemy/\")\ndef kill_enemy(enemy_id):\n \n \"\"\"\n This function responds to a request for /api/kill_enemy/{enemy_id}\n with one matching transaction from quest.db\n \n :param enemy_id: ID of the enemy\n :return: quest and attributes matching ID as JSON\n \"\"\"\n \n query = \"SELECT * \\\n FROM enemy \\\n WHERE enemy_id =\" + str(enemy_id) +';'\n \n conn = sqlite3.connect(os.path.join(dirname, 'db/enemy.db'))\n conn.row_factory = dict_factory\n cur = conn.cursor()\n \n results = cur.execute(query).fetchall()\n \n kill_enemy_event = results[0]\n \n log_to_kafka('events', kill_enemy_event)\n return \"kill_enemy!\\n\"\n\n\n@app.route(\"/take_damage/\")\ndef take_damage(enemy_id):\n \n \"\"\"\n This function responds to a request for /api/take_damage/{enemy_id}\n with one matching transaction from quest.db\n \n :param enemy_id: ID of the quest\n :return: quest and attributes matching ID as JSON\n \"\"\"\n \n query = \"SELECT * \\\n FROM damage \\\n WHERE enemy_id =\" + str(enemy_id) +';'\n \n conn = sqlite3.connect(os.path.join(dirname, 'db/take_damage.db'))\n conn.row_factory = dict_factory\n cur = conn.cursor()\n \n results = cur.execute(query).fetchall()\n \n \n take_damage_event = results[0]\n \n log_to_kafka('events', take_damage_event)\n return \"Took Damage!\\n\"\n\n\n@app.route(\"/accepted_a_quest/\")\ndef accept_quest(quest_id):\n \n \"\"\"\n This function responds to a request for /api/accepted_a_quest/{quest_id}\n with one matching transaction from quest.db\n \n :param quest_id: ID of the quest\n :return: quest and attributes matching ID as JSON\n \"\"\"\n \n query = \"SELECT * \\\n FROM 
quest \\\n WHERE quest_id =\" + str(quest_id) +';'\n \n conn = sqlite3.connect(os.path.join(dirname, 'db/quest.db'))\n conn.row_factory = dict_factory\n cur = conn.cursor()\n \n results = cur.execute(query).fetchall()\n \n accept_quest_event = results[0]\n \n log_to_kafka('events', accept_quest_event)\n return \"Quest Accepted!\\n\"\n\n\n@app.route(\"/transaction/\")\ndef transaction(inventory_id):\n \n \"\"\"\n This function responds to a request for /api/transaction/{transaction_id}\n with one matching transaction from store_transactions.db\n \n :param inventory_id: ID of the line item being transacted\n :return: transaction and attributes matching ID as JSON\n \"\"\"\n \n query = \"SELECT * \\\n FROM inventory \\\n WHERE inventory_id =\" + str(inventory_id) +';'\n \n conn = sqlite3.connect(os.path.join(dirname, 'db/store_transactions.db'))\n conn.row_factory = dict_factory\n cur = conn.cursor()\n \n results = cur.execute(query).fetchall()\n #print(results)\n #print(results[0])\n \n transaction_event = results[0]\n \n log_to_kafka('events', transaction_event)\n return \"Transaction Complete!\\n\" \n","sub_path":"project_3/game_api_v2.py","file_name":"game_api_v2.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"570018923","text":"# Circular Queue Implementation\r\n\r\n# To initialise the queue\r\ndef initialise(s):\r\n global front, rear, size, maxSize, q\r\n front = 0\r\n rear = -1\r\n size = 0\r\n maxSize = s\r\n q = []\r\n for item in range(maxSize):\r\n q.append(None)\r\n\r\n\r\n# To test for an empty queue\r\ndef isEmpty():\r\n global size\r\n return size == 0\r\n\r\n# To test for a full queue\r\ndef isFull():\r\n global size\r\n return size == maxSize\r\n\r\n# To add an element to the queue\r\ndef enqueue(newItem):\r\n global rear, maxSize, q, size\r\n if isFull():\r\n print('Queue is full!')\r\n else:\r\n print('Adding',newItem,'to rear of queue ..')\r\n rear = (rear+1) % maxSize\r\n q[rear] = newItem\r\n size += 1\r\n\r\n# To remove an item from the queue\r\ndef dequeue():\r\n global front, size, q, maxSize\r\n if isEmpty():\r\n print('Queue is empty')\r\n item = None\r\n else:\r\n item = q[front]\r\n print('Removing',item,'from front of queue ..')\r\n q[front] = None\r\n front = (front+1) % maxSize\r\n size -= 1\r\n return item\r\n\r\n# To see what item is at the front of the queue\r\ndef peek():\r\n global q, front\r\n if isEmpty():\r\n print('Queue is empty')\r\n item = None\r\n else:\r\n item = q[front]\r\n print('The item at the front of queue is:',item)\r\n\r\n# To display the queue visually\r\ndef display():\r\n global front, rear, q \r\n for item in range(front,maxSize):\r\n print(q[item],'\\t',end='')\r\n for item in range(0,front):\r\n print(q[item],'\\t',end='') \r\n print('')\r\n\r\n\r\n# Main program to test queue\r\ninitialise(6)\r\ndisplay()\r\nenqueue('Ben')\r\ndisplay()\r\nenqueue('Sue')\r\nenqueue('Tom')\r\nenqueue('Pam')\r\nenqueue('Wes')\r\nenqueue('Sal')\r\ndisplay()\r\ndequeue()\r\ndisplay()\r\npeek()\r\nenqueue('Huw')\r\ndisplay()\r\nenqueue('Zoe')\r\npeek()\r\n\r\n\r\n","sub_path":"circular-queue.py","file_name":"circular-queue.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"468197345","text":"import json\nimport pandas as pd\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom django.db.models import Q\nfrom 
django.db.models import Count\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom rest_framework import status\nfrom rest_framework import mixins\nfrom rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django_pandas.io import read_frame\n\nfrom delphi_rest.models import EventItem, DataType, TimeSeries,\\\n TemporalityChoices\nfrom delphi_rest.serializers import EventItemSerializer, TimeSeriesSerializer,\\\n TemporalityChoicesSerializer\n\nTEMPORALITY_CHOICES = {\n 'daily':\n (['year', 'day'], 'D'),\n 'weekly':\n (['year', 'week'], 'W'),\n 'monthly':\n (['year', 'month'], 'M'),\n 'quartly':\n (['year', 'quarter'], 'Q'),\n 'yearly':\n (['year'], 'AS'),\n}\n\n# views\nclass EventListView(generics.ListAPIView):\n queryset = EventItem.objects.all()\n serializer_class = EventItemSerializer\n\nclass EventDetailView(generics.RetrieveAPIView):\n queryset = EventItem.objects.all()\n serializer_class = EventItemSerializer\n\nclass TimeSeriesView(APIView):\n def get(self, request,\n # get parameters\n data_type, start_date, temporality, observations, unit_level):\n\n start_dt = datetime.strptime(start_date, \"%Y-%m-%d\")\n data_obj = DataType.objects.filter(Q(type = data_type))[0]\n c_type = ContentType.objects.get_for_model(\n data_obj.entry_desc.all()[0].entry_type\n )\n\n if temporality not in TEMPORALITY_CHOICES.keys():\n content = {'temporality ' + temporality + ' not a valid type'}\n return Response(content, status.HTTP_400_BAD_REQUEST)\n else:\n curr_temporality = TEMPORALITY_CHOICES[temporality]\n\n qry = EventItem.objects.\\\n filter(\n content_type__pk=c_type.id,\n date__gte=start_date,\n # Q(unit=unit_level),\n ).\\\n values('date', 'unit').\\\n annotate(events=Count('unit')).\\\n order_by('date')\n\n # make dataframe\n df = read_frame(qry)\n\n for t in curr_temporality[0]:\n df[t] = getattr(pd.to_datetime(df.date).dt, t)\n\n agg_df = df.\\\n groupby(curr_temporality[0] + ['unit']).\\\n sum()['events'].\\\n reset_index()\n\n min_date = min([datetime.date(start_dt), df['date'].min()])\n max_date = min_date + relativedelta(weeks=+int(observations))\n\n # fill in zeros by merging\n temp_df = pd.DataFrame({\n 'date':\n pd.date_range(min_date, max_date, freq=curr_temporality[1])\n })\n\n for t in curr_temporality[0]:\n temp_df[t] = getattr(temp_df['date'].dt, t)\n\n mrg_df = pd.merge(\n temp_df[curr_temporality[0]],\n agg_df,\n how='left',\n )\n mrg_df = mrg_df.fillna(0)\n\n # convert to JSON\n rslt_data = mrg_df.\\\n sort_values(by=curr_temporality[0]).\\\n groupby(['unit'])['events'].\\\n apply(list).\\\n reset_index().\\\n to_json(orient='records')\n\n # cast to an object\n rslt_ts = TimeSeries(\n start=start_date,\n temporality=temporality,\n unit_level=unit_level,\n data=rslt_data\n )\n\n # serialize object\n serialized_rslt = TimeSeriesSerializer(rslt_ts)\n\n return Response(serialized_rslt.data)\n\nclass TemporalityChoicesView(APIView):\n def get(self, request):\n try:\n choices = TemporalityChoices(\n choices=json.dumps(TEMPORALITY_CHOICES.keys())\n )\n serialized_choices = TemporalityChoicesSerializer(choices)\n return Response(serialized_choices.data)\n except NameError:\n return Response('NameError raised', status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response('unknown internal error', 
status.HTTP_500_INTERNAL_SERVER_ERROR)\n","sub_path":"delphi_rest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"22037462","text":"#!/usr/bin/python3\nfrom bs4 import BeautifulSoup\nimport requests\nimport sys\nimport csv\n\nnames, mails = [], []\nfor pg in range(1, 7):\n r = requests.get('https://web.ee.ntu.edu.tw/teacher_index3.php?page_num=' \\\n + str(pg) + '&&dept=&sort=&key=')\n r.encoding = 'utf-8'\n s = BeautifulSoup(r.text, 'html.parser')\n\n for link in s.find_all(class_='teacher_list_title'):\n t = -1\n dat = str(link)\n for i in range(3):\n t = dat.find('<', t+1)\n name = (' '.join(dat[t-3:t].split('\\\"'))).split()[0]\n names.append(name)\n\n for link in s.find_all(style='position:absolute; cursor:default;'):\n dat = str(link)\n hstr = r'?email='\n h = dat.find(hstr)\n h += len(hstr)\n tstr = r'&text_r=232'\n t = dat.find(tstr)\n mails.append(dat[h:t])\n\nwith open('mail_list' if len(sys.argv) == 1 else sys.argv[1], 'w') as csvfile:\n csvf = csv.writer(csvfile, delimiter=' ')\n for name, mail in zip(names, mails):\n csvf.writerow([name, mail])\n","sub_path":"get_pro_email/get_pro_email.py","file_name":"get_pro_email.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"214367605","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 23 15:09:07 2014\n\n@author: jroth\n\"\"\"\n\ndef is_prime(n):\n n=float(n)\n import math\n lim = int(math.ceil(math.sqrt(n)))+1\n for i in xrange(2, lim , 1):\n if n%i == 0:\n return False \n break\n if i == lim - 1:\n return True\n \nimport sys\nx = is_prime(sys.argv[1])\nif x == False:\n print(sys.argv[1], ' is not prime')\nelse:\n print(sys.argv[1], ' is prime')\n \n \nx = is_prime(47) ","sub_path":"gen-misc/is_prime.py","file_name":"is_prime.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"20951025","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom xml.etree.ElementTree import Element, SubElement, Comment, tostring\nfrom xml.dom import minidom\nfrom os import listdir\nfrom os.path import realpath\n\n\ndef prettify(elem):\n \"\"\"\n Return a pretty-printed XML string for the Element.\n \"\"\"\n rough_string = tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")\n\n\ndef get_file_paths(dir_path, extension=None):\n file_names = listdir(dir_path)\n file_paths = list(map(lambda x: \"{}/{}\".format(realpath(dir_path), x), file_names))\n if extension is not None:\n file_paths = list(filter(lambda x: x[-(len(extension)):] == extension, file_paths))\n return file_paths\n\n\ndef create_initialization_launch_file(rosbag=True):\n launch = Element(\"launch\")\n\n launch.append(Comment(\"set parameters\"))\n if rosbag:\n SubElement(launch, \"param\", {\"name\": \"use_sim_time\", \"value\": \"true\"})\n SubElement(launch, \"param\", {\"name\": \"localizer\", \"value\": \"velodyne\"})\n SubElement(launch, \"param\", {\"name\": \"tf_x\", \"value\": \"1.2\"})\n SubElement(launch, \"param\", {\"name\": \"tf_y\", \"value\": \"0.0\"})\n SubElement(launch, \"param\", {\"name\": \"tf_z\", \"value\": \"2.0\"})\n SubElement(launch, \"param\", {\"name\": \"tf_yaw\", \"value\": \"0.0\"})\n SubElement(launch, \"param\", {\"name\": \"tf_pitch\", \"value\": \"0.0\"})\n 
SubElement(launch, \"param\", {\"name\": \"tf_roll\", \"value\": \"0.0\"})\n\n\n launch.append(Comment(\"TF: base_link to velodyne\"))\n SubElement(launch, \"arg\", {\"name\": \"x\", \"value\": \"1.2\"})\n SubElement(launch, \"arg\", {\"name\": \"y\", \"value\": \"0.0\"})\n SubElement(launch, \"arg\", {\"name\": \"z\", \"value\": \"2.0\"})\n SubElement(launch, \"arg\", {\"name\": \"yaw\", \"value\": \"0.0\"})\n SubElement(launch, \"arg\", {\"name\": \"pitch\", \"value\": \"0.0\"})\n SubElement(launch, \"arg\", {\"name\": \"roll\", \"value\": \"0.0\"})\n SubElement(launch, \"arg\", {\"name\": \"frame_id\", \"value\": \"/base_link\"})\n SubElement(launch, \"arg\", {\"name\": \"child_frame_id\", \"value\": \"/velodyne\"})\n SubElement(launch, \"arg\", {\"name\": \"period_in_ms\", \"value\": \"10\"})\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"tf\",\n \"type\": \"static_transform_publisher\",\n \"name\": \"base_link_to_localizer\",\n \"args\": \"$(arg x) $(arg y) $(arg z) $(arg yaw) $(arg pitch) $(arg roll) $(arg frame_id) $(arg child_frame_id) $(arg period_in_ms)\"\n }\n )\n\n\n launch.append(Comment(\"vehicle model\"))\n vehicle_model = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find model_publisher)/launch/vehicle_model.launch\",\n }\n )\n SubElement(vehicle_model, \"arg\", {\"name\": \"model_path\", \"value\": realpath(\"../../../../ros/src/.config/model/default.urdf\")})\n\n\n launch.append(Comment(\"tf2 web republisher\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"tf2_web_republisher\",\n \"type\": \"tf2_web_republisher\",\n \"name\": \"tf2_web_republisher\"\n }\n )\n\n\n launch.append(Comment(\"rosbridge server (websocket)\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find rosbridge_server)/launch/rosbridge_websocket.launch\",\n }\n )\n\n\n launch.append(Comment(\"web_vider_server\"))\n SubElement(\n launch, \"node\", {\"pkg\": \"web_video_server\", \"type\": \"web_video_server\", \"name\": \"web_vide_server\"}\n )\n\n\n launch.append(Comment(\"pc2 downsampler\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"pc2_downsampler\",\n \"type\": \"app.py\",\n \"name\": \"pc2_downsampler\",\n }\n )\n\n with open(\"./res/initialization/initialization.launch\", \"w\") as f:\n f.write(prettify(launch))\n\n\ndef create_map_launch_file():\n launch = Element(\"launch\")\n\n launch.append(Comment(\"TF: world to map\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"tf\",\n \"type\": \"static_transform_publisher\",\n \"name\": \"world_to_map\",\n \"args\": \"14771 84757 -39 0 0 0 /world /map 10\"\n }\n )\n\n launch.append(Comment(\"Point Cloud\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"map_file\",\n \"type\": \"points_map_loader\",\n \"name\": \"points_map_loader\",\n \"args\": \" \".join([\"noupdate\"] + get_file_paths(\"./res/map/points\", \"pcd\"))\n }\n )\n\n launch.append(Comment(\"Vector Map\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"map_file\",\n \"type\": \"vector_map_loader\",\n \"name\": \"vector_map_loader\",\n \"args\": \" \".join(get_file_paths(\"./res/map/vectors\", \"csv\")),\n }\n )\n\n with open(\"./res/map/map.launch\", \"w\") as f:\n f.write(prettify(launch))\n\n return True\n\n\ndef create_localization_launch_file():\n launch = Element(\"launch\")\n\n launch.append(Comment(\"voxel_grid_filter\"))\n voxelGridFilter = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find points_downsampler)/launch/points_downsample.launch\"\n }\n )\n SubElement(voxelGridFilter, \"arg\", {\"name\": 
\"node_name\", \"value\": \"voxel_grid_filter\"})\n\n launch.append(Comment(\"nmea2tfpose\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find gnss_localizer)/launch/nmea2tfpose.launch\"\n }\n )\n\n launch.append(Comment(\"ndt matching\"))\n ndt_matching = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find ndt_localizer)/launch/ndt_matching.launch\"\n }\n )\n SubElement(ndt_matching, \"arg\", {\"name\": \"use_openmp\", \"value\": \"false\"})\n SubElement(ndt_matching, \"arg\", {\"name\": \"get_height\", \"value\": \"true\"})\n\n with open(\"./res/localization/localization.launch\", \"w\") as f:\n f.write(prettify(launch))\n\n return True\n\n\ndef create_sensing_launch_file(rosbag=True):\n launch = Element(\"launch\")\n\n launch.append(Comment(\"calibration file path\"))\n SubElement(launch, \"arg\", {\"name\": \"velodyne_calib\", \"default\": \"$(find velodyne_pointcloud)/params/32db.yaml\"})\n if not rosbag:\n SubElement(launch, \"arg\", {\"name\": \"camera_calib\", \"default\": realpath(\"./res/detection/calibration_camera_lidar_3d_prius_nic-150407.yml\")})\n\n\n launch.append(Comment(\"HDL-32e\"))\n hdl32e = SubElement(launch, \"include\", {\"file\": \"$(find velodyne_pointcloud)/launch/velodyne_hdl32e.launch\"})\n SubElement(hdl32e, \"arg\", {\"name\": \"calibration\", \"value\": \"$(arg velodyne_calib)\"})\n\n\n if not rosbag:\n # launch.append(Comment(\"Javad Delta 3\"))\n # SubElement(launch, \"node\", {\"pkg\": \"javad_navsat_driver\", \"type\": \"gnss.sh\", \"name\": \"javad_driver\"})\n\n\n launch.append(Comment(\"PointGrey Grasshopper3\"))\n pointGrayGrashopper3 = SubElement(launch, \"include\", {\"file\": \"$(find pointgrey)/scripts/grasshopper3.launch\"})\n SubElement(pointGrayGrashopper3, \"arg\", {\"name\": \"CalibrationFile\", \"value\": \"$(arg camera_calib)\"})\n\n\n with open(\"./res/sensing/sensing.launch\", \"w\") as f:\n f.write(prettify(launch))\n\n\ndef create_detection_launch_file():\n launch = Element(\"launch\")\n\n\n launch.append(Comment(\"setting of this launch file\"))\n SubElement(launch, \"arg\", {\"name\": \"car_detection\", \"default\": \"true\"})\n SubElement(launch, \"arg\", {\"name\": \"pedestrian_detection\", \"default\": \"false\"})\n SubElement(launch, \"arg\", {\"name\": \"is_use_gpu\", \"default\": \"true\"})\n SubElement(launch, \"arg\", {\"name\": \"is_register_lidar2camera_tf\", \"default\": \"true\"})\n SubElement(launch, \"arg\", {\"name\": \"is_publish_projection_matrix\", \"default\": \"true\"})\n SubElement(launch, \"arg\", {\"name\": \"is_publish_camera_info\", \"default\": \"true\"})\n SubElement(launch, \"arg\", {\"name\": \"camera_calib\", \"default\": realpath(\"./res/detection/calibration_camera_lidar_3d_prius_nic-150407.yml\")})\n\n\n launch.append(Comment(\"calibration_publisher\"))\n calibration_publisher = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find runtime_manager)/scripts/calibration_publisher.launch\",\n }\n )\n SubElement(calibration_publisher, \"arg\", {\"name\": \"file\", \"value\": \"$(arg camera_calib)\"})\n SubElement(calibration_publisher, \"arg\", {\"name\": \"register_lidar2camera_tf\", \"value\": \"$(arg is_register_lidar2camera_tf)\"})\n SubElement(calibration_publisher, \"arg\", {\"name\": \"publish_extrinsic_mat\", \"value\": \"$(arg is_publish_projection_matrix)\"})\n SubElement(calibration_publisher, \"arg\", {\"name\": \"publish_camera_info\", \"value\": \"$(arg is_publish_camera_info)\"})\n\n\n launch.append(Comment(\"points2image\"))\n SubElement(\n launch,\n 
\"node\",\n {\n \"pkg\": \"points2image\",\n \"type\": \"points2image\",\n \"name\": \"points2image\"\n }\n )\n\n\n launch.append(Comment(\"car and pedestrian detection\"))\n launch.append(Comment(\"dpm_XXX\"))\n dpm_XXX = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find cv_tracker)/launch/dpm_ttic.launch\",\n }\n )\n SubElement(dpm_XXX, \"arg\", {\"name\": \"car\", \"value\": \"$(arg car_detection)\"})\n SubElement(dpm_XXX, \"pedestrian\", {\"name\": \"car\", \"value\": \"$(arg pedestrian_detection)\"})\n SubElement(dpm_XXX, \"use_gpu\", {\"name\": \"car\", \"value\": \"$(arg is_use_gpu)\"})\n\n\n launch.append(Comment(\"range_fusion\"))\n range_fusion = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find cv_tracker)/launch/ranging.launch\",\n }\n )\n SubElement(range_fusion, \"arg\", {\"name\": \"car\", \"value\": \"$(arg car_detection)\"})\n SubElement(range_fusion, \"pedestrian\", {\"name\": \"car\", \"value\": \"$(arg pedestrian_detection)\"})\n\n\n launch.append(Comment(\"XXX_track\"))\n xxx_track = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find cv_tracker)/launch/klt_tracking.launch\",\n }\n )\n SubElement(xxx_track, \"arg\", {\"name\": \"car\", \"value\": \"$(arg car_detection)\"})\n SubElement(xxx_track, \"pedestrian\", {\"name\": \"car\", \"value\": \"$(arg pedestrian_detection)\"})\n\n\n launch.append(Comment(\"obj_reproj\"))\n obj_reproj = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find cv_tracker)/launch/reprojection.launch\",\n }\n )\n SubElement(obj_reproj, \"arg\", {\"name\": \"car\", \"value\": \"$(arg car_detection)\"})\n SubElement(obj_reproj, \"pedestrian\", {\"name\": \"car\", \"value\": \"$(arg pedestrian_detection)\"})\n\n\n launch.append(Comment(\"euclidean_cluster\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find lidar_tracker)/launch/euclidean_clustering.launch\",\n }\n )\n\n\n launch.append(Comment(\"obj_fusion\"))\n obj_fusion = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find lidar_tracker)/launch/obj_fusion.launch\",\n }\n )\n SubElement(obj_fusion, \"arg\", {\"name\": \"car\", \"value\": \"$(arg car_detection)\"})\n SubElement(obj_fusion, \"pedestrian\", {\"name\": \"car\", \"value\": \"$(arg pedestrian_detection)\"})\n\n\n # launch.append(Comment(\"traffic light recognition\"))\n # launch.append(Comment(\"feat_proj\"))\n # SubElement(\n # launch,\n # \"node\",\n # {\n # \"pkg\": \"road_wizard\",\n # \"type\": \"feat_proj\",\n # \"name\": \"feat_proj\"\n # }\n # )\n\n\n # launch.append(Comment(\"region_tlr\"))\n # SubElement(\n # launch,\n # \"include\",\n # {\n # \"file\": \"$(find road_wizard)/launch/traffic_light_recognition.launch\",\n # }\n # )\n\n with open(\"./res/detection/detection.launch\", \"w\") as f:\n f.write(prettify(launch))\n return True\n\n\ndef create_mission_launch_file():\n launch = Element(\"launch\")\n\n\n launch.append(Comment(\"setting path parameter\"))\n SubElement(launch, \"arg\", {\"name\": \"multi_lane_csv\", \"default\": realpath(\"./res/mission/waypoints.csv\")})\n SubElement(launch, \"arg\", {\"name\": \"topic_pose_stamped\", \"default\": \"/ndt_pose\"})\n SubElement(launch, \"arg\", {\"name\": \"topic_twist_stamped\", \"default\": \"/estimate_twist\"})\n\n \"\"\" \n launch.append(Comment(\"Tablet UI\"))\n \n \"\"\"\n\n launch.append(Comment(\"vel_pose_mux\"))\n vel_pose_mux = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find autoware_connector)/launch/vel_pose_connect.launch\",\n }\n )\n SubElement(vel_pose_mux, \"arg\", {\"name\": 
\"topic_pose_stamped\", \"value\": \"$(arg topic_pose_stamped)\"})\n SubElement(vel_pose_mux, \"arg\", {\"name\": \"topic_twist_stamped\", \"value\": \"$(arg topic_twist_stamped)\"})\n\n\n launch.append(Comment(\"waypoint_loader\"))\n waypoint_loader = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find waypoint_maker)/launch/waypoint_loader.launch\",\n }\n )\n SubElement(waypoint_loader, \"arg\", {\"name\": \"multi_lane_csv\", \"value\": \"$(arg multi_lane_csv)\"})\n\n\n \"\"\"\n launch.append(Comment(\"lane_navi\"))\n \n \"\"\"\n\n launch.append(Comment(\"lane_rule\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"lane_planner\",\n \"type\": \"lane_rule\",\n \"name\": \"lane_rule\"\n }\n )\n\n\n launch.append(Comment(\"lane_stop\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"lane_planner\",\n \"type\": \"lane_stop\",\n \"name\": \"lane_stop\"\n }\n )\n\n\n launch.append(Comment(\"lane_select\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"lane_planner\",\n \"type\": \"lane_select\",\n \"name\": \"lane_select\"\n }\n )\n\n\n launch.append(Comment(\"velocity_set\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find astar_planner)/launch/velocity_set.launch\",\n }\n )\n\n\n launch.append(Comment(\"obstacle_avoid\"))\n obstacleAvoid = SubElement(launch, \"include\", {\"file\": \"$(find astar_planner)/launch/obstacle_avoid.launch\"})\n SubElement(obstacleAvoid, \"arg\", {\"name\": \"avoidance\", \"value\": \"false\"})\n SubElement(obstacleAvoid, \"arg\", {\"name\": \"avoid_distance\", \"value\": \"13\"})\n SubElement(obstacleAvoid, \"arg\", {\"name\": \"avoid_velocity_limit_mps\", \"value\": \"4\"})\n\n\n with open(\"./res/mission/mission.launch\", \"w\") as f:\n f.write(prettify(launch))\n return True\n\n\ndef create_motion_launch_file():\n launch = Element(\"launch\")\n\n\n launch.append(Comment(\"Vehicle Contorl\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find runtime_manager)/scripts/vehicle_socket.launch\",\n }\n )\n\n\n launch.append(Comment(\"path_select\"))\n SubElement(\n launch,\n \"node\",\n {\n \"pkg\": \"lattice_planner\",\n \"type\": \"path_select\",\n \"name\": \"path_select\"\n }\n )\n\n\n launch.append(Comment(\"pure_pursuit\"))\n params = \"{header: {seq: 0, stamp: {secs: 0, nsecs: 0}, frame_id: ''}, param_flag: 0, velocity: 5.0, lookahead_distance: 4.0, lookahead_ratio: 2.0, minimum_lookahead_distance: 6.0, displacement_threshold: 0.0, relative_angle_threshold: 0}\"\n SubElement(\n launch, \"node\", {\n \"pkg\": \"rostopic\", \"type\": \"rostopic\", \"name\": \"rostopic\",\n \"args\": \"pub /config/waypoint_follower autoware_msgs/ConfigWaypointFollower '\" + params + \"'\"\n }\n )\n purePersuit = SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find waypoint_follower)/launch/pure_pursuit.launch\",\n }\n )\n SubElement(purePersuit, \"arg\", {\"name\": \"is_linear_interpolation\", \"value\": \"true\"})\n SubElement(purePersuit, \"arg\", {\"name\": \"publishes_for_steering_robot\", \"value\": \"true\"})\n\n\n launch.append(Comment(\"twist_filter\"))\n SubElement(\n launch,\n \"include\",\n {\n \"file\": \"$(find waypoint_follower)/launch/twist_filter.launch\",\n }\n )\n\n launch.append(Comment(\"marker downsampler\"))\n SubElement(launch, \"node\", {\"pkg\": \"marker_downsampler\", \"type\": \"app.py\", \"name\": \"marker_downsampler\"})\n\n with open(\"./res/motion/motion.launch\", \"w\") as f:\n f.write(prettify(launch))\n return True\n\n\ndef create_rosbag_launch_file():\n launch = 
Element(\"launch\")\n\n SubElement(launch, \"node\", {\n \"pkg\": \"rosbag\", \"type\": \"play\", \"name\": \"rosbag_play\",\n \"args\": \"{} --clock --pause\".format(\n get_file_paths(\"./res/rosbag/bagfile/\", \"bag\")[0])})\n\n with open(\"./res/rosbag/rosbag.launch\", \"w\") as f:\n f.write(prettify(launch))\n return True\n\n\ndef create_rosbag_pause_launch_files():\n launch = Element(\"launch\")\n\n SubElement(launch, \"service\", {\n \"pkg\": \"rosbag\", \"type\": \"play\", \"name\": \"rosbag_play\",\n \"args\": \"{} --clock --pause\".format(\n get_file_paths(\"./res/rosbag/bagfile/\")[0])})\n\n with open(\"./res/rosbag/rosbag.launch\", \"w\") as f:\n f.write(prettify(launch))\n return True\n\n\nif __name__ == '__main__':\n create_initialization_launch_file()\n create_map_launch_file()\n create_localization_launch_file()\n create_detection_launch_file()\n create_sensing_launch_file()\n create_mission_launch_file()\n create_motion_launch_file()\n create_rosbag_launch_file()\n","sub_path":"ui/web/app/controllers/make_launch_files.py","file_name":"make_launch_files.py","file_ext":"py","file_size_in_byte":17432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"314197340","text":"\"\"\"\n[Problem 13]\n아래에 50자리 숫자가 100개 있습니다. 이것을 모두 더한 값의 첫 10자리는 얼마입니까?\n\nauthor : GeonWoo Kim \ndate : 2019. 03. 23\n\"\"\"\nimport numpy as np\nfrom numba import jit\n\nfrom dest.timer import Timer\nfrom dest.timer import showTimeData\n\nDATAFILE = \"data/13_data.txt\"\n\n\"\"\"\nNOTE:\n자리수끼리 더하면서 배열에 지속적으로 자리수를 업데이트한다.\n => 자리수 올림 로직을 수정해야 한다.\n => 초기화 오류를 조심하고, 디버거를 꼭 사용하자!\n\"\"\"\n\ndef getDataByString() :\n data = []\n with open(DATAFILE, \"r\") as file: \n line = file.readline().strip(\"\\n\") \n while line :\n data.append(line)\n line = file.readline().strip(\"\\n\") \n return data\n\n@jit\ndef getSignNumbers(sum) :\n return list(map(int, list(str(sum))))\n\n@jit\ndef roundElements(signNumbers) :\n for index in range(len(signNumbers) - 1) :\n if signNumbers[index] >= 10 :\n signNumbers[index + 1] += int(signNumbers[index] / 10)\n signNumbers[index] -= (signNumbers[index] // 10) * 10\n return signNumbers\n\ndef solution1() :\n signNumbers = []\n numberData = getDataByString()\n\n currentSum = 0\n currentSignIndex = 49\n currentNumberIndex = 0\n\n while currentSignIndex >= 0 :\n\n while currentNumberIndex <= 99 :\n currentSum += int(numberData[currentNumberIndex][currentSignIndex])\n currentNumberIndex += 1\n\n signs = getSignNumbers(currentSum)\n signsIndex = len(signs) - 1\n tempSignIndex = 49 - currentSignIndex\n\n while signsIndex >= 0 :\n try :\n signNumbers[tempSignIndex] += signs[signsIndex]\n except :\n signNumbers.append(signs[signsIndex])\n signsIndex -= 1\n tempSignIndex += 1\n\n currentSum = 0\n currentSignIndex -= 1\n currentNumberIndex = 0\n\n signNumbers = roundElements(signNumbers)\n\n return signNumbers\n\nif __name__ == \"__main__\" :\n print(solution1())","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"244423258","text":"import logger_module\nimport os\nimport json\n\n\nlogger = logger_module.setup_logger(\"utils\")\n\n\ndef check_config():\n try:\n logger.debug(\"check_config invoked\")\n if os.path.isfile('./config.json'):\n return True\n else:\n return False\n except IOError:\n logger.error('An error occured trying to read the key.')\n except Exception as error:\n logger.error(error)\n\n\ndef 
get_config():\n try:\n logger.debug(\"get_config invoked\")\n with open('./config.json') as json_file:\n data = json.load(json_file)\n except IOError:\n logger.error('An error occured trying to read the key.')\n except Exception as error:\n logger.error(error)\n return data\n","sub_path":"Secret_Sasha/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499487696","text":"import socket\r\nfrom _thread import *\r\nimport sys\r\n\r\nserver = \"10.0.0.21\"\r\nport = 5556\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #creates socket, AF INET is IPv4\r\n\r\ntry:\r\n\r\n s.bind((server,port))\r\n\r\nexcept socket.error as e:\r\n str(e)\r\n print(\"Error:\", e)\r\n\r\ns.listen(2) #opens the port, specifies number.\r\n\r\nprint(\"Waiting fotr connection server started\")\r\n\r\n\r\ndef read_pos(str):\r\n str = str.split(\",\")\r\n return int(str[0]), int(str[1])\r\n\r\ndef make_pos(tup):\r\n return str(tup[0]) + \",\" + str(tup[1])\r\n\r\npos = [(0,0),(100,100)]\r\n\r\n\r\n\r\n\r\ndef threaded_client(conn, player):\r\n conn.send(str.encode(make_pos(pos[player])))\r\n reply = \"\"\r\n while True:\r\n try:\r\n data = read_pos(conn.recv(2048).decode())\r\n\r\n pos[player] = data\r\n\r\n\r\n #reply = data.decode(\"utf-8\")\r\n\r\n if not data:\r\n print(\"Disconnect\")\r\n break\r\n\r\n\r\n else:\r\n\r\n if player == 1:\r\n reply = pos[0]\r\n else:\r\n reply = pos[1]\r\n\r\n\r\n print(\"Received: \", data)\r\n print(\"Sending: \", reply)\r\n\r\n conn.sendall(str.encode(make_pos(reply)))\r\n\r\n except:\r\n break\r\n print(\"Lost connection\")\r\n conn.close\r\n\r\n\r\n\r\ncurrentPlayer = 0 #every time we get a new connetion, we add a new one\r\n\r\nwhile True:\r\n\r\n conn, addr = s.accept() #conn is what's connected, addr is IP address\r\n print(\"Connected to:\", addr)\r\n\r\n start_new_thread(threaded_client, (conn,currentPlayer))\r\n currentPlayer +=1\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591737882","text":"from collections import Counter\nimport tensorflow.contrib.keras as kr\nimport numpy as np\nimport os\n\ndef open_file(filename, mode = 'r'):\n \"\"\"\n commonly used file reader, change this to switch between\n :param filename:\n :param mode:\n :return:\n \"\"\"\n return open(filename, mode, encoding='latin-1',errors='ignore')\n\ndef read_file(filename):\n \"\"\"read date from file\"\"\"\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels\n\n\ndef build_vocab(train_dir, vocab_dir, vocab_size=5000):\n \"\"\"build dictionary by train set, then save\"\"\"\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')\n\ndef read_vocab(voca_dir):\n \"\"\"read dictionary\"\"\"\n words = open_file(voca_dir).read().strip().split('/n')\n word_to_id = dict(zip(words,range(words)))\n\n return words,word_to_id\n\ndef read_category():\n \"\"\"read labels\"\"\"\n categories = ['HillaryClition','DonaldTrump']\n 
cat_to_id = dict(zip(categories,range(len(categories))))\n\n return categories,cat_to_id\n\ndef to_words(content,words):\n \"\"\"transfer the content of id into words\"\"\"\n return ''.join(words[x] for x in content)\n\ndef process_file(filename, word_to_id, cat_to_id, max_length=600):\n \"\"\"transfer the document into id\"\"\"\n contents, labels = read_file(filename)\n data_id, label_id = [],[]\n for i in range(len(contents)):\n data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])\n label_id.append(cat_to_id[labels[i]])\n\n # 使用keras提供的pad_sequences来将文本pad为固定长度\n x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)\n y_pad = kr.utils.to_categorical(label_id) # 将标签转换为one-hot表示\n return x_pad, y_pad\n\ndef batch_iter(x, y, batch_size=64):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = x[indices]\n y_shuffle = y[indices]\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\n\n\n\n","sub_path":"homework5/cnn_loader.py","file_name":"cnn_loader.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"426591982","text":"# -*- coding: utf-8 -*-\n\nfrom .compat import ImproperlyConfigured, get_installed_pools, import_string\nfrom .exceptions import InvalidBackendError\n\n\nclass BackendPool(object):\n \"\"\"\n BackendPool is an interface to get instances of backend types\n \"\"\"\n\n backend_type = None\n\n @classmethod\n def get(cls, backend_id):\n \"\"\"\n Return an instance of backend type\n \"\"\"\n\n for backend_class in cls._get_backends_classes():\n if backend_class.id == backend_id:\n return backend_class.create()\n\n raise InvalidBackendError(\n cls.backend_type,\n backend_id,\n get_installed_pools()[cls.backend_type]\n )\n\n @classmethod\n def all(cls):\n \"\"\"\n Return a list of instances of backend type\n \"\"\"\n\n return [\n backend_class.create()\n for backend_class in cls._get_backends_classes()\n ]\n\n @classmethod\n def _get_backends_classes(cls):\n try:\n backend_list = get_installed_pools()[cls.backend_type]\n except KeyError:\n raise ImproperlyConfigured(\n u'Backend type \"{}\" config not found'.format(cls.backend_type)\n )\n\n return [\n cls._get_backend_class(backend_path)\n for backend_path in backend_list\n ]\n\n @classmethod\n def _get_backend_class(cls, backend_path):\n backend_class = import_string(backend_path)\n\n return backend_class\n","sub_path":"ramos/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"358051730","text":"\"\"\"\nBased on unitests in https://github.com/wndhydrnt/python-oauth2/tree/master/oauth2/test\n\"\"\"\n\nimport pytest\nimport unittest\nimport mock\n\nfrom twitcher.datatype import AccessToken\nfrom twitcher.utils import expires_at\nfrom twitcher.store.mongodb import MongodbTokenStore\n\nfrom twitcher.datatype import Service\nfrom twitcher.store.mongodb import MongodbServiceStore\n\n\nclass MongodbTokenStoreTestCase(unittest.TestCase):\n def setUp(self):\n self.access_token = AccessToken(token=\"abcdef\", expires_at=expires_at(hours=1))\n\n def test_fetch_by_token(self):\n collection_mock = mock.Mock(spec=[\"find_one\"])\n collection_mock.find_one.return_value = 
self.access_token\n\n store = MongodbTokenStore(collection=collection_mock)\n access_token = store.fetch_by_token(token=self.access_token.token)\n\n collection_mock.find_one.assert_called_with({\"token\": self.access_token.token})\n assert isinstance(access_token, AccessToken)\n\n def test_save_token(self):\n collection_mock = mock.Mock(spec=[\"insert_one\"])\n\n store = MongodbTokenStore(collection=collection_mock)\n store.save_token(self.access_token)\n\n collection_mock.insert_one.assert_called_with(self.access_token)\n\n\nclass MongodbServiceStoreTestCase(unittest.TestCase):\n def setUp(self):\n self.service = dict(name=\"loving_flamingo\", url=\"http://somewhere.over.the/ocean\", type=\"wps\",\n public=False, auth='token', verify=True, purl=\"http://purl/wps\")\n self.service_public = dict(name=\"open_pingu\", url=\"http://somewhere.in.the/deep_ocean\", type=\"wps\",\n public=True, auth='token', verify=True, purl=\"http://purl/wps\")\n self.service_special = dict(url=\"http://wonderload\", name=\"A special Name\", type='wps',\n auth='token', verify=False, purl=\"http://purl/wps\")\n\n def test_fetch_by_name(self):\n collection_mock = mock.Mock(spec=[\"find_one\"])\n collection_mock.find_one.return_value = self.service\n\n store = MongodbServiceStore(collection=collection_mock)\n service = store.fetch_by_name(name=self.service['name'])\n\n collection_mock.find_one.assert_called_with({\"name\": self.service['name']})\n assert isinstance(service, dict)\n\n def test_save_service_default(self):\n collection_mock = mock.Mock(spec=[\"insert_one\", \"find_one\", \"count_documents\"])\n collection_mock.count_documents.return_value = 0\n collection_mock.find_one.return_value = self.service\n\n store = MongodbServiceStore(collection=collection_mock)\n store.save_service(Service(self.service))\n\n collection_mock.insert_one.assert_called_with(self.service)\n\n def test_save_service_with_special_name(self):\n collection_mock = mock.Mock(spec=[\"insert_one\", \"find_one\", \"count_documents\"])\n collection_mock.count_documents.return_value = 0\n collection_mock.find_one.return_value = self.service_special\n\n store = MongodbServiceStore(collection=collection_mock)\n store.save_service(Service(self.service_special))\n\n collection_mock.insert_one.assert_called_with({\n 'url': 'http://wonderload', 'type': 'wps', 'name': 'a_special_name', 'public': False, 'auth': 'token',\n 'verify': False, 'purl': \"http://purl/wps\"})\n\n def test_save_service_public(self):\n collection_mock = mock.Mock(spec=[\"insert_one\", \"find_one\", \"count_documents\"])\n collection_mock.count_documents.return_value = 0\n collection_mock.find_one.return_value = self.service_public\n\n store = MongodbServiceStore(collection=collection_mock)\n store.save_service(Service(self.service_public))\n\n collection_mock.insert_one.assert_called_with(self.service_public)\n","sub_path":"tests/store/test_mongodb.py","file_name":"test_mongodb.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432412696","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nclass ValidatingEntry(ttk.Entry):\n\n \"\"\"Entry Widget serving as base class for datatype specific Entry Widgets. 
\"\"\"\n\n# base class for validating entry widgets\n def __init__(self, parent, value=\"\", **kwargs):\n ttk.Entry.__init__(self, parent, **kwargs)\n self.__value = value\n self.__variable = tk.StringVar()\n self.__variable.set(value)\n self.__variable.trace(\"w\", self.__callback)\n self.config(textvariable=self.__variable)\n\n def __callback(self, *dummy):\n value = self.__variable.get()\n newvalue = self.validate(value)\n if newvalue is None:\n self.__variable.set(self.__value)\n elif newvalue != value:\n self.__value = newvalue\n self.__variable.set(self.newvalue)\n else:\n self.__value = value\n\n def validate(self, value):\n # override: return value, new value, or None if invalid\n return value\n\n\nclass IntegerEntry(ValidatingEntry):\n def validate(self, value):\n try:\n if value:\n v = int(value)\n return value\n except ValueError:\n return None\n\n\nclass FloatEntry(ValidatingEntry):\n def validate(self, value):\n try:\n if value:\n v = float(value)\n return value\n except ValueError:\n return None\n\n\nclass ArrayEntry(ValidatingEntry):\n\n def validate(self, value):\n try:\n if value:\n v = list(map(lambda x: int(x), value.split()))\n return value\n except ValueError:\n return None\n\n\nclass StringEntry(ValidatingEntry):\n def validate(self, value):\n try:\n if value:\n if '\"' in value:\n raise ValueError\n return value\n except ValueError:\n return None\n","sub_path":"validating_entry.py","file_name":"validating_entry.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"445770784","text":"import os\nimport requests\nfrom datetime import datetime\nfrom openpyxl import load_workbook\nimport pandas as pd\nimport shutil\n\n\ndef reformat_sheet(path, filename):\n file_path = os.path.join(path, filename)\n wb = load_workbook(filename=file_path)\n ws = wb.active\n # iterate thru all cells and if hyperlink found attempt modification of cell\n for row in ws.rows:\n for cell in row:\n try:\n if len(cell.hyperlink.target) > 0:\n cell.value = \"\".join([cell.value, \"|\", cell.hyperlink.target])\n # Join cell.value and hyperlink target into string (optionally just assign the hyperlink.target to the cell.value\n except:\n pass\n temp_filename = os.path.join(path, \"temp\" + filename)\n wb.save(temp_filename)\n\n # read with pandas\n data = pd.read_excel(temp_filename)\n # take DataSeries and rsplit by \"|\" and expand to 2 columns\n hyper1 = (data.sfz_photo_1.str.rsplit(\"|\", expand=True))\n hyper1.columns = [\"sfz_photo_1_name\", \"sfz_photo_1_link\"]\n hyper2 = (data.sfz_photo_2.str.rsplit(\"|\", expand=True))\n hyper2.columns = [\"sfz_photo_2_name\", \"sfz_photo_2_link\"]\n # join them back to dataframe on index\n data_new = data[[\"uploader_id\", \"name\", \"sfz_number\"]]\n data_new = data_new.join(hyper1, how=\"left\").join(hyper2, how=\"left\")\n new_filename = os.path.join(path, \"new_\" + filename)\n data_new.to_excel(new_filename)\n print(\"Successfully saved \" + new_filename)\n return new_filename\n\n\ndef save_imgs(path, filename):\n file_path = os.path.join(path, filename)\n df = pd.read_excel(file_path)\n root_path = os.path.join(path, \"sfz_package\")\n if not os.path.exists(root_path):\n os.mkdir(root_path)\n for index, row in df.iterrows():\n new_path = os.path.join(root_path, str(row[\"name\"])+\"_\"+str(row[\"sfz_number\"]))\n if not os.path.exists(new_path):\n os.mkdir(new_path)\n img_1_filepath = os.path.join(new_path, \"正面.jpeg\")\n with open(img_1_filepath, 'wb') as 
handle:\n response = requests.get(row[\"sfz_photo_1_link\"], stream=True)\n if not response.ok:\n print(response)\n for block in response.iter_content(1024):\n if not block:\n break\n handle.write(block)\n img_2_filepath = os.path.join(new_path, \"反面.jpeg\")\n with open(img_2_filepath, 'wb') as handle:\n response = requests.get(row[\"sfz_photo_2_link\"], stream=True)\n if not response.ok:\n print(response)\n for block in response.iter_content(1024):\n if not block:\n break\n handle.write(block)\n now = datetime.now()\n dt_string = now.strftime(\"%Y%m%d_%H%M%S\")\n new_filename = os.path.join(path, 'sfz_package_'+dt_string)\n shutil.make_archive(new_filename, 'zip', root_path)\n print(\"Successfully saved \" + new_filename)\n return new_filename\n\n\ndef extract_and_save(path, filename):\n new_sheet = reformat_sheet(path, filename)\n zipped_file = save_imgs(path, new_sheet)\n print(\"Successfully saved \" + zipped_file)\n return 0\n\n\nif __name__ == \"__main__\":\n dir_path = input(\"Path: \")\n file = input(\"Filename: \")\n extract_and_save(dir_path, file)\n","sub_path":"extract_hyperlink.py","file_name":"extract_hyperlink.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"502376250","text":"\"\"\"\nThe Real Script utils.\n\ncreated: 2021-09-04\nauthor: Roberto Nogueras Zondag\nemail: rnogueras@protonmail.com\n\"\"\"\n\nfrom typing import Optional, Union, Tuple, List, Sequence\n\nimport numpy as np\n\nfrom constants import NOTES, INTERVALS, PITCHSET_NAMES, C_BASE_SCALES, DEGREES\n\n\ndef invert(values: np.array, inversion: int) -> np.array:\n \"\"\"Return the specified musical inversion of the values.\"\"\"\n if np.abs(inversion) > (len(values) - 1):\n raise ValueError(\"Inversion out of range\")\n \n return np.hstack([values[inversion:], values[:inversion]]).astype(int)\n\n\ndef calculate_intervals(values: np.array) -> Tuple[int]:\n \"\"\"Calculate intervals between the provided note values.\"\"\"\n return tuple(\n [(next_note - note) % 12 for note, next_note in zip(values, values[1:])]\n )\n\n# TODO: init family (major, minor, dominant)\n# TODO: init type (chord, scale, other)\nclass PitchSet:\n \"\"\"\n A PitchSet is a collection of notes. Chords, scales and \n melodies can be instanciated as PitchSets. 
Simple arithmetical \n operations can be performed with them.\n \"\"\"\n\n def __init__(self, values: Sequence[int]) -> None:\n \"\"\"Class instance constructor.\"\"\"\n \n valid_builder_types = (int, tuple, list, np.ndarray, PitchSet)\n \n if not isinstance(values, valid_builder_types):\n raise TypeError(\n f\"Valid object types to build a PitchSet are: {valid_builder_types}\"\n )\n \n if isinstance(values, int):\n values = [values]\n if isinstance(values, PitchSet):\n values = values.values\n \n valid_builder_data_types = (int, np.int_)\n are_valid = [isinstance(value, valid_builder_data_types) for value in values]\n \n if not all(are_valid):\n raise TypeError(\n f\"A PitchSet can only be constructed from integers.\"\n )\n\n # Value attributes\n self.values = np.array(values) % 12\n self.interval_values = calculate_intervals(self.values)\n self.structure_values = np.hstack([0, np.cumsum(self.interval_values)])\n self.relation_values = self.init_relation_values()\n\n # Name attributes\n self.tonic = NOTES[self.values[0]]\n self.notes = [NOTES[value] for value in self.values]\n self.name = self.init_name()\n self.intervals = [INTERVALS[interval] for interval in self.interval_values]\n self.structure = [INTERVALS[interval] for interval in self.structure_values]\n self.third = self.init_third()\n self.relations = [INTERVALS[interval] for interval in self.relation_values]\n\n def init_name(self) -> str:\n \"\"\"Initialize set name.\"\"\"\n try:\n return self.tonic + PITCHSET_NAMES[self.interval_values]\n except KeyError:\n return f\"Unknown set: {self.notes}\"\n \n def init_third(self) -> str:\n \"\"\"Initialize third\"\"\" \n if 3 in self.structure_values:\n return \"minor\"\n elif 4 in self.structure_values:\n return \"major\"\n else:\n return \"suspended\"\n \n def init_relation_values(self) -> Sequence[int]:\n \"\"\"Initialize relation values.\"\"\"\n reversed_structure_values = self.structure_values[::-1]\n return [\n minuend - subtrahend \n for index, minuend in enumerate(reversed_structure_values) \n for subtrahend in reversed_structure_values[index + 1:]\n ]\n\n def __iter__(self) -> str:\n \"\"\"Make class iterable.\"\"\"\n for notes in self.notes:\n yield notes\n\n def __getitem__(self, index) -> str:\n \"\"\"Make class indexable.\"\"\"\n return self.notes[index]\n\n def __add__(self, summand) -> \"PitchSet\":\n \"\"\"Make class summable.\"\"\"\n return PitchSet(self.values + PitchSet(summand).values)\n\n def __radd__(self, summand) -> \"PitchSet\":\n \"\"\"Reverse sum.\"\"\"\n return self.__add__(summand)\n\n def __sub__(self, subtrahend) -> \"PitchSet\":\n \"\"\"Make class subtractable.\"\"\"\n return PitchSet(self.values - PitchSet(subtrahend).values)\n \n def __rsub__(self, minuend) -> \"PitchSet\":\n \"\"\"Reverse subtraction.\"\"\"\n return PitchSet(minuend) - self\n \n def __repr__(self) -> str:\n \"\"\"Return name when print is called.\"\"\"\n return self.name\n \n def invert(self, inversion: int) -> \"PitchSet\":\n \"\"\"Return specified inversion of the set.\"\"\"\n return PitchSet(invert(self.values, inversion=inversion))\n\n\nclass Tonality:\n \"\"\"Tonality class.\"\"\"\n\n def __init__(\n self,\n tonic: str,\n scale_type: Optional[str] = \"diatonic\",\n mode: Optional[str] = \"I\",\n ) -> None:\n \"\"\"Class instance constructor.\"\"\"\n self.tonic = tonic\n self.scale_type = scale_type\n self.mode = mode\n self.cromatic = PitchSet(invert(C_BASE_SCALES[\"cromatic\"], NOTES.index(tonic)))\n self.scale = self.init_scale()\n \n def __repr__(self):\n \"\"\"Return scale 
when print is called.\"\"\"\n return f\"{self.scale}\"\n\n def init_scale(self) -> PitchSet:\n \"\"\"Return main scale of the tonality.\"\"\"\n tonic_value = NOTES.index(self.tonic)\n inversion_values = invert(C_BASE_SCALES[self.scale_type], DEGREES.index(self.mode))\n interval_values = calculate_intervals(inversion_values)\n modal_values = np.hstack([tonic_value, (tonic_value + np.cumsum(interval_values))])\n return PitchSet(modal_values)\n\n def chords(self, degrees: Sequence[Union[str, int]], size: int = 4) -> List[PitchSet]:\n \"\"\"Return list of chords from the chosen degrees with the specified number of notes.\"\"\"\n \n if isinstance(degrees, (str, int)):\n degrees = [degrees]\n \n chords = []\n for degree in degrees:\n \n if isinstance(degree, int):\n degree_value = degree - 1\n if isinstance(degree, str):\n degree_value = DEGREES.index(degree)\n \n if degree_value not in range(0, 7):\n raise ValueError(f\"Invalid degree: {degree}\")\n \n three_octaves_scale = list(self.scale.values) * 3\n seven_note_chord = three_octaves_scale[degree_value : degree_value + 14 : 2]\n cropped_chord = seven_note_chord[0:size]\n chords.append(PitchSet(cropped_chord))\n \n if len(chords) == 1:\n chords = chords[0]\n\n return chords\n","sub_path":"the_real_script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"264786669","text":"\"\"\"\nFile I/O\n'w' -> Write-Only Mode\n'r' -> Read-only Mode\n'r+' -> Read And Write Mode\n'a' -> Append Mode\n\"\"\"\n\nmy_list = [1, 2, 3]\n\ntry:\n\n my_file = open(\"firstfile.txt\", \"w\")\n\n for item in my_list:\n my_file.write(str(item) + \"\\n\") # write function takes a string argument\n\n my_file.close()\n\nexcept:\n print(\"Something went wrong...\")\nfinally:\n print(\"Process finished...\")\n","sub_path":"Working with Files/filedemo1.py","file_name":"filedemo1.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247687181","text":"from socket import *\r\n\r\n\r\nport = 8080\r\n\r\nclientSock = socket(AF_INET, SOCK_STREAM)\r\nclientSock.connect(('127.0.0.1', port))\r\n\r\nprint('접속 완료')\r\n\r\nwhile True:\r\n recvData = clientSock.recv(1024)\r\n print('상대방 :', recvData.decode('utf-8'))\r\n","sub_path":"other/인준_정리/donut_final/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"592621493","text":"# _*_ coding:utf-8 _*_\nfrom organization.views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescriptionView, OrgTeacherView, \\\n AddFavView, TeacherListView, TeacherDetailView\n\n__author__ = 'supan'\n__date__ = '2017/2/16 22:10'\n\nfrom django.conf.urls import url, include\n\n\nurlpatterns = [\n # 课程机构列表页\n url(r'^list/$', OrgView.as_view(), name=\"org_list\"),\n url(r'^add_ask/$', AddUserAskView.as_view(), name=\"add_ask\"),\n url(r'^home/(?P\\d+)/$', OrgHomeView.as_view(), name=\"org_home\"),\n url(r'^course/(?P\\d+)/$', OrgCourseView.as_view(), name=\"org_course\"),\n url(r'^desc/(?P\\d+)/$', OrgDescriptionView.as_view(), name=\"org_desc\"),\n url(r'^org_teacher/(?P\\d+)/$', OrgTeacherView.as_view(), name=\"org_teacher\"),\n #机构收藏\n url(r'^add_fav/$', AddFavView.as_view(), name=\"add_fav\"),\n #机构教师\n url(r'^teacher/list/$', TeacherListView.as_view(), name=\"teacher_list\"),\n #机构教师详情\n 
url(r'^teacher/detail/(?P\\d+)/$', TeacherDetailView.as_view(), name=\"teacher_detail\"),\n\n]\n\n\n","sub_path":"apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"376878457","text":"import webapp2\r\nfrom google.appengine.ext import db\r\nfrom webapp2_extras import sessions\r\n\r\nclass Movies(db.Model):\r\n name = db.StringProperty()\r\n release = db.DateProperty()\r\n desc = db.TextProperty()\r\n rating = db.FloatProperty()\r\n\r\nclass Users2(db.Model):\r\n username = db.StringProperty()\r\n email = db.StringProperty()\r\n password = db.StringProperty()\r\n\r\nclass Sverify(webapp2.RequestHandler):\r\n def get(self):\r\n usr = self.request.get('username')\r\n if len(usr)>0:\r\n ch = db.GqlQuery(\"SELECT * FROM Users2 where username = :user\",user = usr)\r\n if ch.count()>0:\r\n self.response.write(\"Sorry Username already Exists\")\r\n else:\r\n self.response.write(\"Username is available\")\r\n else:\r\n self.response.write(\"Please provide Username\")\r\napp = webapp2.WSGIApplication([\r\n ('/sverify', Sverify)\r\n], debug=True)\r\n","sub_path":"sverify.py","file_name":"sverify.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"565073996","text":"#1. (3-12-2021)\n# Given a list of integers, and an integer k, find if any 2 values in the list add up to k\ndef equal(lst, k):\n\n if (len(lst) == 0) or (type(k) != int):\n return False\n\n lst = list(set(lst))\n dict = {val:lst.count(k-val) for val in lst}\n\n if 1 in list(dict.values()):\n return True\n else:\n return False\n\n#2. (3-13-2021)\n# Given an array of integers,\n# return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i\n# (NO DIVISION ALLOWED!)\ndef multiplyList(lst):\n\n if len(lst) == 0:\n return 0\n\n def recursiveMultiply(lst):\n if len(lst) == 1:\n return lst[0]\n\n firstTerm = lst[0]\n rest = lst[1:]\n\n return firstTerm * recursiveMultiply(rest)\n\n totalProduct = recursiveMultiply(lst)\n answer = [int(totalProduct * (term ** -1)) for term in lst]\n\n return answer\n\n#3. (3-14-2021)\n# Given an input string s, reverse the order of the words. (Remove extra spaces!)\ndef reverseString(input):\n\n if type(input) != str:\n return None\n\n if len(input) == 1:\n return input\n\n allWords = input.split()\n allWords.reverse()\n allWords = \" \".join(allWords)\n\n return allWords\n\n#4. (3-15-2021)\n# Given an array of integers, find the first missing positive integer in linear time and constant space.\n# In other words, find the lowest positive integer that does not exist in the array.\n# The array can contain duplicates and negative numbers as well.\ndef findMissing(arr):\n\n if len(arr) == 0:\n return None\n\n arr = [val for val in arr if val >= 0]\n arr.sort()\n\n if len(arr) == 0:\n return 0\n\n for val in arr:\n if (val + 1) not in arr:\n return val + 1\n\n#5. 
(3-16-2021)\n# cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns the first and last element of that pair.\n# For example, car(cons(3, 4)) returns 3, and cdr(cons(3, 4)) returns 4.\n# Given an implementation of cons, find cdr and car\ndef cons(a, b):\n def pair(f):\n return f(a, b)\n return pair\n\n #Create a function that will be passed as \"f\" in the cons function\ndef car(input):\n def findFirst(a, b):\n return a\n return input(findFirst)\n\ndef cdr(input):\n def findLast(a, b):\n return b\n return input(findLast)\n\n#6. (3-17-2021)\n# An XOR linked list is a more memory efficient doubly linked list.\n# Instead of each node holding next and prev fields, it holds a field named both, which is an XOR of the next node and the previous node.\n# Implement an XOR linked list; it has an add(element) which adds the element to the end, and a get(index) which returns the node at index.\nclass Node:\n def __init__(self, element):\n self.element = element\n self.both = None\n\nclass XORList:\n def __init__(self):\n self.head = None\n\n #def calculateXOR(self, a, b):\n # self.xor = a\n\n def add(element):\n toAdd = Node(element)\n #toAdd. = toAdd.calculateXOR(element, b)\n\n# HMM this is hard\n\n#7. (3-18-2021)\n# Given the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the number of ways it can be decoded.\n# For example, the message '111' would give 3, since it could be decoded as 'aaa', 'ka', and 'ak'.\n\nmapping = {}\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\nfor i in range(len(alphabet)):\n mapping[alphabet[i]] = i + 1\n\ndef decode(input):\n\n n = len(input)\n\n if n <= 1:\n return 1\n\n currentCount = 0\n if input[n-1] > \"0\":\n currentCount = decode(input[0:n-1])\n if input[n-2] + input[n-1] < \"26\":\n currentCount += decode(input[0:n-2])\n\n return currentCount\n\n#8. 
(3-23-2021)\n# Implement an autocomplete system.\n# That is, given a query string s and a set of all possible query strings, return all strings in the set that have s as a prefix.\ndef autocomplete(query, possible):\n\n prefixLength = len(query)\n results = []\n\n for string in possible:\n\n if query == string[0:prefixLength]:\n results.append(string)\n\n return results\n","sub_path":"dailyCodingProblems.py","file_name":"dailyCodingProblems.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"227150784","text":"from flask import g, url_for, current_app\n\n\nclass ReviewRouter(object):\n\n @staticmethod\n def get_redirect_url(default_route):\n \"\"\"Return the url for redirect_route in session if it has been set, or the url for the default_route otherwise.\n\n\n - default_route: The route to return the URL of if the redirect_url is not set.\n :return: The url of session.redirect_route if set, or the url of the given route otherwise.\n \"\"\"\n redirect_route = g.session.redirect_route\n redirect_url = url_for(default_route)\n if redirect_route:\n redirect_url = url_for(redirect_route)\n\n current_app.logger.info('Returning redirect URL: {}'.format(redirect_url))\n return redirect_url\n\n @staticmethod\n def update_edited_field(field, value):\n \"\"\"Adds the field to edited_fields if the value in session and the given values are different.\n\n\n - field: A field to look up in the add charge state.\n - value: The value to test the field against.\n \"\"\"\n if g.session.redirect_route and value != getattr(g.session.add_lon_charge_state, field):\n current_app.logger.info('Adding {} field to edited_fields'.format(field))\n g.session.edited_fields[field] = field\n g.session.commit()\n\n @staticmethod\n def update_edited_dominant_address(field, value):\n \"\"\"Adds the field to edited_fields if the value in session and the given values are different.\n\n Checks charge_address and charge_geographic_description as only one can be entered, so removes\n\n the other value from edited fields list.\n\n\n - field: A field to look up in the add charge state.\n - value: The value to test the field against.\n \"\"\"\n if g.session.redirect_route and value != getattr(g.session.add_lon_charge_state, field):\n if field == 'charge_geographic_description' and 'charge_address' in g.session.edited_fields:\n del g.session.edited_fields['charge_address']\n elif field == 'charge_address' and 'charge_geographic_description' in g.session.edited_fields:\n del g.session.edited_fields['charge_geographic_description']\n\n current_app.logger.info('Adding {} field to edited_fields'.format(field))\n g.session.edited_fields[field] = field\n g.session.commit()\n\n @staticmethod\n def update_edited_height_or_position(value):\n \"\"\"Adds the height or position to edited_fields if the value in session and the given values are different.\n\n\n - value: The updated position_and_dimension dict to compare against session\n \"\"\"\n if g.session.redirect_route:\n sess_pos = getattr(g.session.add_lon_charge_state, 'structure_position_and_dimension')\n\n if value['height'] != sess_pos['height'] or \\\n ('units' in value and 'units' in sess_pos and value['units'] != sess_pos['units']):\n current_app.logger.info('Adding servient_height field to edited_fields')\n g.session.edited_fields['servient_height'] = 'servient_height'\n g.session.commit()\n\n if value['extent-covered'] != sess_pos['extent-covered'] or \\\n ('part-explanatory-text' in 
value and 'part-explanatory-text' in sess_pos and\n value['part-explanatory-text'] != sess_pos['part-explanatory-text']):\n current_app.logger.info('Adding servient_position field to edited_fields')\n g.session.edited_fields['servient_position'] = 'servient_position'\n g.session.commit()\n\n @staticmethod\n def update_edited_filename_field(value):\n \"\"\"Adds the individual filename to edited_fields if the value in session and the given values are different.\n\n\n - value: The value to test the field against.\n \"\"\"\n if g.session.redirect_route:\n if value['form_a'] != getattr(g.session, 'filenames')['form_a']:\n current_app.logger.info('Adding form_a_file field to edited_fields')\n g.session.edited_fields['form_a_file'] = 'form_a_file'\n g.session.commit()\n if value['temporary_lon_cert'] != getattr(g.session, 'filenames')['temporary_lon_cert']:\n if value['temporary_lon_cert'] == '':\n ReviewRouter.remove_from_edited_fields('temporary_lon_file')\n else:\n current_app.logger.info('Adding temporary_lon_file field to edited_fields')\n g.session.edited_fields['temporary_lon_file'] = 'temporary_lon_file'\n g.session.commit()\n if value['definitive_lon_cert'] != getattr(g.session, 'filenames')['definitive_lon_cert']:\n if value['definitive_lon_cert'] == '':\n ReviewRouter.remove_from_edited_fields('definitive_lon_file')\n else:\n current_app.logger.info('Adding definitive_lon_file field to edited_fields')\n g.session.edited_fields['definitive_lon_file'] = 'definitive_lon_file'\n g.session.commit()\n\n @staticmethod\n def remove_from_edited_fields(field):\n if field in g.session.edited_fields:\n current_app.logger.info('Removing {} field from edited_fields'.format(field))\n del g.session.edited_fields[field]\n","sub_path":"maintain_frontend/add_lon/routing/review_router.py","file_name":"review_router.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"491711322","text":"from scrapy.contrib.spiders.init import InitSpider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.contrib.spiders import Rule\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.selector import Selector\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom scrapy.crawler import CrawlerProcess\nimport operator\nimport collections\nfrom scrapy.selector import Selector\nfrom scrapy.conf import settings\nimport logging\nlogger = logging.getLogger()\n\n\nclass MassScrapSpiderLogin(CrawlSpider):\n name = 'mass_scrap_spider_login'\n \n handle_httpstatus_list = [302]\n\n rules = (\n Rule(LinkExtractor(\n allow=['.*'],\n deny=['\\.jpg','\\.png','\\.jpeg' ]\n ),\n follow=True,\n callback='parse_package',\n ),\n\n )\n\n def __init__(self, start_urls=None,crawl_url=None, *args, **kwargs):\n \n \n self.username = \"bdqnghi\"\n self.password = \"123456\"\n self.login_url = \"https://theshopmill.com/login\"\n self.signin_form = \"signinForm\"\n self.allowed_domains = [\"theshopmill.com\"]\n self.start_urls = [\"https://theshopmill.com/home/products\"]\n self.crawl_url = \"https://theshopmill.com/home/products\"\n\n super(MassScrapSpiderLogin, self).__init__(*args, **kwargs)\n\n def start_requests(self):\n yield Request(\n url=self.login_url,\n callback=self.login,\n dont_filter=True\n )\n\n def login(self, response):\n print(\"FUCK DOG\")\n #\"\"\"Generate a login request.\"\"\"\n return 
FormRequest.from_response(response,formid=self.signin_form,formdata={'username': self.username, 'password': self.password },callback=self.check_login_response)\n\n def check_login_response(self, response):\n #\"\"\"Check the response returned by a login request to see if we aresuccessfully logged in.\"\"\"\n \n yield Request(url=self.crawl_url,dont_filter=True)\n\n\n def parse_package(self, response):\n \n print(\"\\n\\nWe got data\\n\\n\")\n # Do what ever you want to do the reponse\n # print(response.body)\n","sub_path":"mass_scrap_login/spiders/login_spider.py","file_name":"login_spider.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"299720814","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport cv2\n\nclass Images(Dataset):\n def __init__(self, path):\n f = open(path, \"r\")\n self.imgs = []\n for line in f.readlines():\n img_path = line.strip(\"\\n\").split(\" \")[0]\n img_label = line.strip(\"\\n\").split(\" \")[1]\n self.imgs.append((img_path, img_label))\n \n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n tup = self.imgs[index]\n img = cv2.imread(tup[0])\n img = cv2.resize(img, (224, 224)).T\n img = img / 255\n label = int(tup[1])\n return img, label","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"281834915","text":"#from logging import raiseExceptions\nimport streamlit as st\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom rdkit import Chem\n#from mordred import Calculator, descriptors\nfrom rdkit.Chem import PandasTools\n#import json\nimport joblib\n\nimport os\nfrom descriptastorus.descriptors.DescriptorGenerator import MakeGenerator\n#import pybel\n#from molmod import *\n#from itertools import cycle\n#from collections import namedtuple\n\n\n###################\n#FONCTIONS UTILES##\n###################\n\n# Compute descriptors\n@st.cache\ndef mordred_calculator(esol_data):\n mordred_calc = Calculator(descriptors, ignore_3D=True) # can't do 3D without sdf or mol filz\n mordred = mordred_calc.pandas([mol for mol in esol_data['ROMol']])\n return mordred\n\n# Création d'un fonction permettant de lire XYZ et convertir au format SMILES\n@st.cache\ndef xyz_to_smiles(fname: str) -> str:\n mol = next(pybel.readfile(\"xyz\", fname))\n smi = mol.write(format=\"smi\")\n return smi.split()[0].strip()\n\n\ndef file_selector(folder_path='.'):\n filenames = os.listdir(folder_path)\n selected_filename = st.selectbox('Sélectionnez un fichier au format .xyz', filenames)\n return os.path.join(folder_path, selected_filename)\n\n\n\n#########################\n#MISE EN PAGE STREAMLIT##\n#########################\n\ndef main():\n\n menu = ['Home',\n 'Smiles conversion',\n 'Database information',\n 'Model performance information',\n \"Prediction\"]\n\n choice = st.sidebar.selectbox('Menu',menu)\n\n if choice == 'Home':\n #st.subheader('Acceuil')\n image = Image.open('img/VolatilityPredictorApp.png')\n st.image(image, use_column_width=True)\n st.write(\"\"\"\n# Volatility Predictor Web App\nThis application predicts **volatility ** values for organometallic compounds (boiling temperature and enthalpy of vaporization). 
\nThe data to create the models were collected through different sources presented below: \n* [Chemsrc](https://www.chemsrc.com/en/Catg/134.html) \n* [Strem](https://www.strem.com/catalog/family/CVD+%26+ALD+Precursors/)\n* [Nist] (https://www.nist.gov/)\n* Chickos J.S., Acree W.E. ***Enthalpies of vaporization of organic and organometallic compounds *** , 1880–2002 // J. Phys.Chem. Ref. Data. – 2003. – Vol. 32. – Article No. 519.\n\"\"\") \n #Coordonnées\n expander_bar = st.beta_expander(\"My contact information\")\n expander_bar.markdown(\"\"\"\n * **Email:** juliensade75@gmail.com\n * **Linkedin:** [Profil] (https://www.linkedin.com/in/julien-sade-635b711ba/)\n * **Rapport:** Mettre le lien plus tard du rapport fini\n \"\"\")\n\n\n elif choice == \"Smiles conversion\":\n st.title('Smiles conversion')\n #st.subheader(\"Importez votre fichier\")\n #xyz_file = st.file_uploader(\"Télécharger votre fichier xyz ou txt\",\n #type =['txt',\"xyz\"])\n st.subheader('Convert xyz to SMILES')\n filename = st.text_input('Enter the path of your xyz/txt file:')\n if filename != '':\n \tsmi = xyz_to_smiles(filename)\n \tst.write('The SMILES of your compound **%s**' % smi)\n\n st.subheader(\"Draw your molecule\")\n st.write(\"\"\" You can draw your molecule on [Pubchem](https://pubchem.ncbi.nlm.nih.gov//edit3/index.html) to get the SMILES of your molecule\n\"\"\")\n\n #filename = st.text_input('Enter a file path:')\n #smi = xyz_to_smiles(filename)\n #smi\n #st.title(\"Conversion\")\n #st.subheader('Convertissez xyz en SMILES')\n #filename = file_selector()\n #st.write('Vous avez sélectionné `%s`' % filename)\n #smi = xyz_to_smiles(filename)\n #st.write('Le SMILES de votre composé **%s**' % smi)\n\n #if st.button('Convertir'):\n #if xyz_file is not None:\n # if xyz_file.type == \"application/octet-stream\" or \"text/plain\":\n #Read docs\n #raw_text = xyz_file.read()\n #st.write(raw_text)\n #Read as string\n #raw_text = str(xyz_file.read(),\"utf-8\")\n\n\n\n\n\n\n\n\n elif choice == \"Database information\":\n st.header('Database information') \n st.subheader(\"Number of occurrences for each metal\")\n image = Image.open('img/metal_occurences.PNG')\n st.image(image, use_column_width=True)\n st.subheader(\"Number of occurrences for each type of ligand\")\n image = Image.open('img/ligandtype_occurences.PNG')\n st.image(image, use_column_width=True)\n st.subheader(\"Histogram and density function of the predictor variable (Boiling temperature)\")\n image = Image.open('img/densityV2.png')\n st.image(image, use_column_width=True)\n st.subheader(\"Whisker box of the predictor variable (Boiling temperature)\")\n image = Image.open('img/boxplot.png')\n st.image(image, use_column_width=True)\n\n\n elif choice == \"Model performance information\":\n st.header('Model performance information') \n st.subheader('Model performance')\n st.write('After comparing different models with statistical metrics, the model with the best performance after optimization is the XGBoost') \n image = Image.open('img/PerformanceApresTuningEnSurlignage.PNG')\n st.image(image, use_column_width=True)\n st.subheader(\"Model learning curve\")\n image = Image.open('img/LearningCurveV2.png')\n st.image(image, use_column_width=True)\n st.subheader(\"Prediction on the validation dataset\")\n image = Image.open('img/PredictionAfterTuned.png')\n st.image(image, use_column_width=True)\n st.subheader(\"The most important variables of the model\")\n image = Image.open('img/FeatureImportance.png')\n st.image(image, use_column_width=True)\n st.write('On 
the documentation of the [MORDRED] library (https://mordred-descriptor.github.io/documentation/master/descriptors.html), the different descriptors are described') \n\n\n\n\n \n elif choice == \"Prediction\":\n st.title(\"Prediction\")\n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n ## Read SMILES input\n ## Read SMILES input\n SMILES_input = \"[Li+].CCC[CH2-]\\nC=CC[Sn](CC=C)(CC=C)CC=C\"\n\n SMILES = st.sidebar.text_area(\"SMILES input\", SMILES_input)\n SMILES = \"C\\n\" + SMILES #Adds C as a dummy, first item\n SMILES = SMILES.split('\\n')\n st.header('SMILES')\n\n SMILES_LIST = []\n for i in SMILES:\n SMILES_LIST.append(i)\n \n del SMILES_LIST[0]\n\n esol_data = pd.DataFrame(SMILES_LIST,columns=['smiles'])\n for index, row in esol_data.iterrows():\n \tif row['smiles'] == \"\":\n \t\tesol_data.drop(index, inplace=True)\n st.dataframe(esol_data)\n PandasTools.AddMoleculeColumnToFrame(esol_data, smilesCol='smiles')\t\n for index, row in esol_data.iterrows():\n \tif row['ROMol'] is None:\n \t\tst.write(f\"SMILES **{row[0]}** inserted is not recognized\")\n \t\tesol_data.drop(index, inplace=True)\n \n generator = MakeGenerator((\"RDKit2D\",))\n rdkit2d = [generator.process(x)[1:] for x in esol_data['smiles']]\n rdkit2d_name = []\n for name, numpy_type in generator.GetColumns():\n rdkit2d_name.append(name)\n df_rdkit2d = pd.DataFrame(rdkit2d,columns=rdkit2d_name[1:])\n\n\n\n\n for index, row in esol_data.iterrows():\n if row['ROMol'] is not None:\n generator = MakeGenerator((\"RDKit2D\",))\n rdkit2d = [generator.process(x)[1:] for x in esol_data['smiles']]\n rdkit2d_name = []\n for name, numpy_type in generator.GetColumns():\n rdkit2d_name.append(name)\n df_rdkit2d = pd.DataFrame(rdkit2d,columns=rdkit2d_name[1:])\n model_rdkit = joblib.load('RDKIT_meilleur_model.pkl')\n prediction_rdkit = model_rdkit.predict(df_rdkit2d)\n prediction_rdkit = pd.DataFrame(prediction_rdkit)\n #esol_data[\"prediction_rdkit\"] = prediction_rdkit\n st.header('Prediction')\n esol_data = esol_data.drop(\"ROMol\", axis=1)\n esol_data[\"prediction_boiling_point \"] = prediction_rdkit\n st.dataframe(esol_data)\n \n\n\nif __name__ == '__main__':\n main()","sub_path":"appV2.py","file_name":"appV2.py","file_ext":"py","file_size_in_byte":8297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"519514007","text":"from orator.migrations import Migration\n\n\nclass CreateLibroPrestamoTable(Migration):\n\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create('Prestamo') as table:\n table.increments('idPrestamo')\n table.integer('idLector').unsigned()\n table.foreign('idLector').references('idLector').on('Lector') \n table.integer('idDetalle_Libro_Biblioteca').unsigned()\n table.foreign('idDetalle_Libro_Biblioteca').references('idDetalle_Libro_Biblioteca').on('Detalle_Libro_Biblioteca')\n table.string('Estado', 5).default('1')\n table.timestamps()\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n self.schema.drop('Prestamo')\n","sub_path":"Semana8Hackaton/Bryan Arias/App/migrations/2020_07_18_211125_create_libro_prestamo_table.py","file_name":"2020_07_18_211125_create_libro_prestamo_table.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"374207999","text":"# 集合的推导式与列表的非常相似,唯一区别在于用{}代替[]。而且set集合还有去重的功能\n\n# 集合推导式语法:\n# 变量名={表达式 for 变量 in 可迭代对象} 或者 变量名 = {表达式 for 变量 in 可迭代对象 for 变量 in ......}\n# 变量名 = {表达式 for 变量 in 可迭代对象 if 条件}\n\n# 
Problem 1: create a new set from other sequence types\n# Set\nset1 = {1, 1, 2, 2, 2, 3, 4, 5, 5, 6, 7}\nset2 = {i for i in set1}\nprint(set2)\n\n# String\nstr1 = \"42314124312\"\nset3 = {i for i in str1}\nprint(set3)\n\n# Tuple\ntup1 = (1, 3, 4, 4, 5, 6, 6, 7)\nset4 = {i for i in tup1}\nprint(set4)\n\n# List\nlist1 = [0, 1, 2, 3, 4, 5, 6, 7]\nset5 = {i for i in list1}\nprint(set5)\n\n# Dict\ndict01 = {\"k1\": \"v1\", \"k2\": \"v2\", \"k3\": \"v3\"}\nset6 = {i for i in dict01}\nprint(set6)\n# Note: when a dict is converted to a new set with a comprehension, only the dict's keys are carried over","sub_path":"推导式/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"18959247","text":"from scipy.integrate import quad\nfrom matplotlib import pyplot\n\n\ndef calculate_midpoint_x_values(partitions, start, end):\n    \"\"\"\n    Calculates the x values for the midpoint and trapezoidal sums\n    :param partitions: The number of partitions of the function\n    :param start: The starting value of the integral\n    :param end: The end value of the integral\n    :return: a list of the x values of the midpoint sum\n    \"\"\"\n    midpointx=[]\n    for i in range(partitions):\n        midpointx.append((end-start)/(2*partitions)+((end-start)/partitions)*i)\n    return midpointx\n\ndef calculate_trapezoid_x_values(partitions, start, end):\n    \"\"\"\n    Calculates the x values for the midpoint and trapezoidal sums\n    :param partitions: The number of partitions of the function\n    :param start: The starting value of the integral\n    :param end: The end value of the integral\n    :return: a list of the x values of the trapezoidal sum\n    \"\"\"\n    trapazoidx=[]\n    for i in range(2*partitions):\n        trapazoidx.append(((end-start)/(2*partitions))*i)\n    trapazoidx.append(end)\n    return trapazoidx\n\ndef calculate_y_values(power, xvalues):\n    \"\"\"\n    Calculates the y values of the function\n    :param power: The power x is being raised to\n    :param xvalues: the list of x values\n    :return: a list of y values of the function\n    \"\"\"\n    values=[]\n    for x in xvalues:\n        values.append(x**power)\n    return values\n\ndef midpoint_sum(yvalues, partition):\n    \"\"\"\n    Calculates the midpoint approximation of the function\n    :param yvalues: A list of the midpoint y values\n    :param partition: the length of the partitions used\n    :return: the midpoint approximation of the function\n    \"\"\"\n    tally=sum(yvalues)\n    return tally*partition\n\ndef trapazoid_sum(yvalues, partition, start, end):\n    \"\"\"\n    Calculates the trapezoidal approximation of the function\n    :param yvalues: List of the y values\n    :param partition: the length of the partitions in the approximation\n    :param start: the start value of the integral\n    :param end: the end value of the integral\n    :return: the trapezoidal approximation\n    \"\"\"\n    end_index=len(yvalues) - 1\n    tally=sum(yvalues[1:end_index])\n    tally=(tally*2)+yvalues[end_index]\n    return tally*((end-start)/(partition*2))\n\ndef integral(start, end, power):\n    \"\"\"\n    Calculates the integral of the function\n    :param start: the start value of the integral\n    :param end: the end value of the integral\n    :param power:\n    :return: the value of the integral from start to end\n    \"\"\"\n    xprime=lambda x: x**power\n    answer, error=quad(xprime, start, end)\n    return answer\n\ndef ratio(midpoint, trapezoid, actual):\n    \"\"\"\n    Calculates the ratio of midpoint to trapezoid approximation to get the actual value of the integral\n    :param midpoint: the midpoint approximation\n    :param trapezoid: the trapezoidal approximation\n    :param actual: the actual integral value\n    :return: the ratios of the 
midpoint and trapezoidal sums needed to get the actual integral value\n    \"\"\"\n    midpoint_error=actual-midpoint\n    trapezoid_error=trapezoid-midpoint\n    MvT_ratio=midpoint_error/trapezoid_error\n    return MvT_ratio, 1-MvT_ratio\n\ndef make_graph(xaxis, yaxis):\n    \"\"\"\n    Creates a graph of the data\n    :param xaxis: The values for the x axis\n    :param yaxis: The values for the y axis\n    :return: NULL\n    \"\"\"\n    pyplot.plot(xaxis, yaxis)\n    pyplot.title('Ratio Graph')\n    pyplot.ylabel('Midpoint Ratio')\n    pyplot.xlabel('Power of X')\n    pyplot.savefig(\"CalcGraph1.png\")\n\ndef main():\n    \"\"\"\n    Takes in user input for the values and loops over all the specified functions then graphs the data\n    :return: NULL\n    \"\"\"\n    partitions=int(input(\"Enter the number of partitions: \"))\n    start=int(input(\"Enter the start value of the integral: \"))\n    end=int(input(\"Enter the end value of the integral: \"))\n    power=int(input(\"Enter the power of x to calculate to: \"))\n\n    power_x_axis=[]\n    Midpoint_ratio_y_axis=[]\n    midpointXs=calculate_midpoint_x_values(partitions, start, end)\n    trapezoidXs=calculate_trapezoid_x_values(partitions, start, end)\n\n    for exponent in range(2, power+1):\n        print(\"power: x^\", exponent,sep=\"\")\n        midpointYs=calculate_y_values(exponent, midpointXs)\n        trapezoidYs=calculate_y_values(exponent, trapezoidXs)\n        print(\"midpoint Xs:\", midpointXs,\"\\nmidpoint Ys:\", midpointYs,\"\\ntrapezoid Xs:\", trapezoidXs,\n              \"\\ntrapezoid Ys:\", trapezoidYs)\n        midpoint_appox=midpoint_sum(midpointYs, (end - start) / partitions)\n        trapezoid_approx=trapazoid_sum(trapezoidYs, 2 * partitions, start, end)\n        print(\"midpoint approximation:\", midpoint_appox,\"\\nTrapezoid approximation:\", trapezoid_approx)\n        actual=integral(start, end, exponent)\n        print(\"actual: \", actual)\n        trapezoid_ratio, midpoint_ratio=ratio(midpoint_appox, trapezoid_approx, actual)\n        power_x_axis.append(exponent)\n        Midpoint_ratio_y_axis.append(midpoint_ratio)\n        print(actual,\"=\", midpoint_ratio, \"Midpoint\", \"+\", trapezoid_ratio, \"Trapezoid\")\n\n    make_graph(power_x_axis, Midpoint_ratio_y_axis)\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"Math project.py","file_name":"Math project.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"578903738","text":"from termcolor import colored\r\n\r\nfrom lib import loop\r\n\r\n\r\n# def writeMatrix(x):\r\n#     for i in range(len(x)):\r\n#         print(x[i])\r\n\r\n\r\n# c = [\r\n#     [0, 0, 10, 0],\r\n#     [0, 0, 2, 0],\r\n#     [0, 0, 50, 50],\r\n#     [80, 120, 88, 0],\r\n# ]\r\n#\r\n# cost = [\r\n#     [5, 4, 8, 3],\r\n#     [4, 7, 4, 7],\r\n#     [5, 3, 6, 1],\r\n#     [0, 0, 0, 0],\r\n# ]\r\n\r\n# c = [\r\n#     [0, 0, 0, 0, 43.5],\r\n#     [0, 15.5, 0, 10.75, 23.25],\r\n#     [0, 0, 9000, 0, 39.5],\r\n#     [9.75, 0, 0, 18.75, 0],\r\n#     [0, 0, 25.75, 8.0, 0],\r\n# ]\r\n#\r\n# cost = [\r\n#     [45.5, 24.5, 49.0, 12.5, 0],\r\n#     [46.5, 9.5, 12.5, 25.5, 0],\r\n#     [27.0, 43.5, 37.5, 13.75, 0],\r\n#     [17.0, 15.75, 20.5, 3.0, 0],\r\n#     [45.5, 11.75, 2.5, 1.75, 0],\r\n# ]\r\n\r\n\r\ndef find_potencials(res, cost):\r\n    u = [None for i in range(len(res))]\r\n    v = [None for j in range(len(res[0]))]\r\n\r\n    # v[len(v)-1] = 0\r\n    u[0] = 0\r\n\r\n    while None in u or None in v:\r\n        for i in range(len(res)):\r\n            for j in 
range(len(res[0])):\n if res[i][j] != 0:\n k += 1\n if k < (len(res) + len(res[0]) - 1):\n for i in range(len(res)):\n if stop:\n break\n for j in range(len(res[0])):\n if res[i][j] == 0:\n res[i][j] = 0.001\n stop = True\n break\n return res\n else:\n return res\n# u, v = find_potencials(c, cost)\n#\n# print('U = {u}'.format(u=u))\n# print('V = {v}'.format(v=v))\n# print()\n\n\ndef find_delta(res, cost, u, v):\n delta_matrix = [[0] * len(cost[0]) for i in range(len(cost))]\n delta_max = 0\n delta_index_I = 0\n delta_index_J = 0\n\n for i in range(len(cost)):\n for j in range(len(cost[0])):\n if res[i][j] == 0:\n # delta_matrix[i][j] = u[i] + v[j] - cost[i][j]\n delta_matrix[i][j] = cost[i][j] - u[i] - v[j]\n\n for i in range(len(cost)):\n for j in range(len(cost[0])):\n if delta_max > delta_matrix[i][j]:\n delta_max = delta_matrix[i][j]\n delta_index_I = i\n delta_index_J = j\n\n if delta_max == 0:\n return 0\n return delta_matrix, delta_max, delta_index_I, delta_index_J\n\n\ndef build_delta(res, cost, u, v):\n delta_matrix = [[0] * len(cost[0]) for i in range(len(cost))]\n\n for i in range(len(cost)):\n for j in range(len(cost[0])):\n if res[i][j] == 0:\n # delta_matrix[i][j] = u[i] + v[j] - cost[i][j]\n delta_matrix[i][j] = cost[i][j] - u[i] - v[j]\n return delta_matrix\n#\n#\n# delta_matrix, delta_max, delta_index_I, delta_index_J = find_delta(c, cost, u,\n# v)\n#\n# print(colored('DELTA матрица:', 'blue'))\n# writeMatrix(delta_matrix)\n# print()\n# print(colored(\n# 'Максимальная DELTA в матрице: {d}, номер строки: {i}, номер столбца: {j}'.format(\n# d=delta_max, i=delta_index_I, j=delta_index_J), 'blue'))\n#\n# writeMatrix(c)\n# print()\n# loops = loop.FindPath(delta_index_I, delta_index_J, c, len(c), len(c[0]))\n# print(loops)\n# print()\n# better = loop.BetterOptimal(c, delta_index_I, delta_index_J, len(c), len(c[0]))\n# writeMatrix(better)\n#\n# u, v = find_potencials(better, cost)\n#\n# print('U = {u}'.format(u=u))\n# print('V = {v}'.format(v=v))\n# print()\n#\n# if not find_delta(better, cost, u, v):\n# print('Решение окончено')\n# else:\n# delta_matrix, delta_max, delta_index_I, delta_index_J = find_delta(better,\n# cost, u,\n# v)\n# print(colored('DELTA матрица:', 'blue'))\n# writeMatrix(delta_matrix)\n# print()\n# print(colored(\n# 'Максимальная DELTA в матрице: {d}, номер строки: {i}, номер столбца: {j}'.format(\n# d=delta_max, i=delta_index_I, j=delta_index_J), 'blue'))\n","sub_path":"lib/potencial.py","file_name":"potencial.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"522944372","text":"# -*- coding:utf-8 -*-\r\nfrom enthought.traits.api import *\r\nfrom enthought.traits.ui.api import *\r\nfrom enthought.traits.ui.menu import *\r\n\r\n\r\nclass Personal(HasTraits):\r\n first_name = Str()\r\n last_name = Str()\r\n year = Int()\r\n\r\n def _first_name_changed(self):\r\n self.last_name = self.first_name\r\n\r\n\r\n view1 = View(Group(Item('first_name'),\r\n Item('last_name'),\r\n label='Internal view',\r\n show_border=True))\r\n\r\n view2 = View(Group(Item('first_name', style='custom'),\r\n Item('last_name', style='text'),\r\n orientation='horizontal', layout='split',\r\n label='Horizotal View',\r\n show_border=True),\r\n kind='live',\r\n buttons=LiveButtons\r\n )\r\n\r\n view3 = View(Group(Item('first_name', style='custom'),\r\n Item('last_name', style='text'),\r\n orientation='vertical',\r\n label='Vertical View',\r\n show_border=True),\r\n buttons=ModalButtons\r\n 
)\r\n\r\n\r\nray = Personal()\r\nray.configure_traits(view='view1')\r\n\r\nalex = Personal()\r\nalex.configure_traits(view='view2')\r\n\r\njohn = Personal()\r\njohn.configure_traits(view='view3')\r\n","sub_path":"GUI/Traits/InteralView_trait.py","file_name":"InteralView_trait.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"527431631","text":"import os\nimport common.ezlog as ezlog\n\nlogger = ezlog.get_logger()\n\nrd = input(\"LaTeX project root directory: \")\n\nlogger.info('removing aux files')\nfor dirpath, dirnames, filenames in os.walk(rd):\n for filename in [f for f in filenames if f.endswith(\".aux\")]:\n os.remove(os.path.join(dirpath, filename))\nlogger.info('aux files removed')\n","sub_path":"latex/remove_aux.py","file_name":"remove_aux.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"255915191","text":"from Processor import Processor\nfrom collections import deque\n\nclass EMFACSPeakProcessor(Processor):\n\n \n def __init__(self):\n Processor.__init__(self,\"EMFACSPeakProcessor\", [(\"happy\",\"happy\"),(\"angry\",\"angry\"),(\"fear\",\"fear\"),(\"surprise\",\"surprise\"),(\"sad\",\"sad\"),(\"nausea\",\"nausea\"),(\"neutral\",\"neutral\")], [(\"happy\",\"happy\"),(\"angry\",\"angry\"),(\"fear\",\"fear\"),(\"surprise\",\"surprise\"),(\"sad\",\"sad\"),(\"nausea\",\"nausea\"),(\"neutral\",\"neutral\")],[])\n self.dominantQ = None\n self.vals = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n self.run()\n \n def clean(self,value):\n if \"%HIGHLIGHT%\" in str(value):\n nvalue = float(value[:-11])\n return nvalue\n return value\n \n def trigger(self):\n peak = 0\n peakVal = 0\n # note hack for ignoring final values\n for i in range(0,len(self.vals)-1):\n self.vals[i] = self.clean(self.vals[i])\n if self.vals[i] > peakVal:\n peakVal = self.vals[i]\n peak = i\n\n self.vals[peak] = str(self.vals[peak])+\"%HIGHLIGHT%\"\n self.addProcessedValues(self.vals)\n \n # main data processing function\n def process(self,timeStamp,values,queueNo):\n \n self.vals[queueNo] = float(values[queueNo])\n \n # if self.dominantQ == None:\n # self.dominantQ = queueNo\n # elif self.dominantQ == queueNo:\n self.trigger()\n \n\nif __name__ == '__main__': EMFACSPeakProcessor()\n","sub_path":"legacy/legacy-processors/EMFACSPeakProcessor.py","file_name":"EMFACSPeakProcessor.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"66917348","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n\n even = ListNode(head.val)\n odd = ListNode(head.next.val)\n even_head = even\n odd_head = odd\n head = head.next.next\n\n i = 0\n while head:\n if i % 2 == 0:\n even.next = ListNode(head.val)\n even = even.next\n else:\n odd.next = ListNode(head.val)\n odd = odd.next\n\n head = head.next\n i += 1\n\n even.next = odd_head\n return even_head\n","sub_path":"may_challenge/16_odd_even_linked_list.py","file_name":"16_odd_even_linked_list.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"273670928","text":"from keras.layers import Convolution2D\nfrom keras.layers import 
MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.models import Sequential \nfrom keras.optimizers import Adam\nfrom keras_preprocessing.image import ImageDataGenerator\nimport os.path\n\n\n#convolution layer 2 variables\nfilters2=32\n#fully connected layer 1 variables\nunits1=256\n#Activation function\nactivationconvo='relu'\nactiavtiondense='relu'\n# fit variables\nsteps_per_epoch1=1284\nvalidation_steps1=4297\nepochs1=3\n\nmodel = Sequential() \n#Convolution Layer-1\nmodel.add(Convolution2D(filters=16, kernel_size=(3,3),activation='relu',input_shape=(100, 100, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2),))\nmodel.summary()\n\n#Convolution layer-2\nmodel.add(Convolution2D(filters=filters2,kernel_size=(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.summary()\n\nmodel.add(Convolution2D(filters=64,kernel_size=(3,3),activation=activationconvo))\n\n\n\n\n\n#Flattening layer\nmodel.add(Flatten())\n\n#Fully connected layer 1\n\nmodel.add(Dense(units=units1,activation='relu'))\nmodel.add(Dense(units=128,activation=actiavtiondense))\n\n\n\n\n\n\n\n#Output layer\nmodel.add(Dense(units=26,activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\ntest_datagen = ImageDataGenerator(rescale=1./255)\ntraining_set = train_datagen.flow_from_directory(\n '/cnn/furit2/Traning',\n target_size=(100,100),\n batch_size=32,\n class_mode='categorical')\ntest_set = test_datagen.flow_from_directory(\n '/cnn/furit2/Test',\n target_size=(100, 100),\n batch_size=32,\n class_mode='categorical')\nhistory = model.fit(\n training_set,\n steps_per_epoch=steps_per_epoch1,\n epochs=epochs1,\n validation_data=test_set,\n validation_steps=validation_steps1)\n\na = history.history['accuracy'][2]\n\nsave_to_path = '/cnn'\nname_of_file = \"AccuracyCNN\"\ncomplete_name = os.path.join(save_to_path, name_of_file+\".txt\")\nprint(a, file = open(complete_name,\"a\"))\n\n\n","sub_path":"Aftertweaking___cnn2.py","file_name":"Aftertweaking___cnn2.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56978780","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy2postgre.items import Biaoxun\n\nclass ZycgSpider(scrapy.Spider):\n name = 'zycg'\n allowed_domains = ['zycg.gov.cn']\n start_urls = [\n 'http://www.zycg.gov.cn/home/jqkbxm?catalog=StockAffiche',\n 'http://www.zycg.gov.cn/article/llist?catalog=StockAffiche',\n 'http://www.zycg.gov.cn/article/llist?catalog=ZhongBiao',\n 'http://www.zycg.gov.cn/article/llist?catalog=bggg',\n 'http://www.zycg.gov.cn/article/llist?catalog=fbgg'\n ]\n\n def parse(self, response):\n\n if response.url.find('http://www.zycg.gov.cn/home/jqkbxm?catalog=StockAffiche') != -1:\n type = '近期开标项目'\n elif response.url.find('http://www.zycg.gov.cn/article/llist?catalog=StockAffiche') != -1:\n type = '招标公告'\n elif response.url.find('http://www.zycg.gov.cn/article/llist?catalog=ZhongBiao') != -1:\n type = '中标公告'\n elif response.url.find('http://www.zycg.gov.cn/article/llist?catalog=bggg') != -1:\n type = '变更公告'\n elif response.url.find('http://www.zycg.gov.cn/article/llist?catalog=fbgg') != -1:\n type = '废标公告'\n else:\n type = '无'\n page_num = response.css('ul.lby-list a::text').extract()\n max_num = int(page_num[len(page_num) - 2])\n for i in 
range(max_num):\n url = response.url + '&page=' + str(i + 1)\n yield response.follow(url, self.parse_list, meta={'type': type})\n\n def parse_list(self, response):\n type = response.meta['type']\n title = response.css('ul.lby-list a::attr(\"title\")').extract()\n pub_time = response.css('ul.lby-list span::text').extract()\n artical_url = response.css('ul.lby-list a::attr(\"href\")').extract()\n\n for i in range(len(title)):\n a_title = title[i]\n a_pub_time = pub_time[i].splitlines()[1].lstrip().lstrip('[').rstrip(']')\n a_url = 'http://zycg.gov.cn' + artical_url[i]\n yield response.follow(a_url, self.parse_artical, meta={'type':type, 'title':a_title, 'arctical_url':a_url, 'a_pub_time':a_pub_time})\n\n\n def parse_artical(self, response):\n biaoxun = Biaoxun()\n biaoxun[\"a_type\"] = response.meta['type']\n biaoxun[\"a_title\"] = response.meta['title']\n biaoxun[\"a_url\"] = response.meta['arctical_url']\n biaoxun[\"a_pub_time\"] = response.meta['a_pub_time']\n biaoxun[\"a_content\"] = response.css('div.pages_content script::text').extract()\n yield biaoxun\n\n\n\n","sub_path":"scrapy2postgre/spiders/biaoxun.py","file_name":"biaoxun.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"636642038","text":"import random\r\nimport datetime\r\n\r\n#A[m][k]\r\nm = 2**7 #in order to test example 6, the matrix sizes have to be powers of 2, all other examples run with arbitrary sizes\r\nk = 2**7\r\n#B[k][n]\r\nn = 2**7\r\n#C[m][n]\r\nsparcity = 0.7\r\n\r\nprint(m,k,n,sparcity)\r\n\r\nprint('generating matrix \"A\"')\r\nA = [[(round(10*random.random()) if (random.random() < sparcity) else 0) for e in range(k)] for e in range(m)]\r\nprint('generating matrix \"B\"')\r\nB = [[(round(10*random.random()) if (random.random() < sparcity) else 0) for e in range(n)] for e in range(k)]\r\n\r\nC1 = [[0 for e in range(n)] for e in range(m)]\r\nC2 = [[0 for e in range(n)] for e in range(m)]\r\nC3 = [[0 for e in range(n)] for e in range(m)]\r\nC4 = [[0 for e in range(n)] for e in range(m)]\r\nC5 = [[0 for e in range(n)] for e in range(m)]\r\nC6 = [[0 for e in range(n)] for e in range(m)]\r\nBt = [[0 for e in range(k)] for e in range(n)]\r\n\r\ndef matrix_mult_1(A, B, C): # (TRIVIAL)\r\n for i in range(len(A)):\r\n for j in range(len(B[0])):\r\n for k in range(len(B)):\r\n C[i][j] += A[i][k] * B[k][j]\r\n\r\ndef matrix_mult_2(A, B, C): # (DSP)\r\n for i in range(len(A)):\r\n for j in range(len(B[0])):\r\n tmp = 0\r\n for k in range(len(B)):\r\n tmp += A[i][k] * B[k][j] #this is the exact equation of multiply accumlate, and it translates well too\r\n C[i][j] = tmp\r\n\r\ndef matrix_mult_3(A, B, C): # (RAM)\r\n for i in range(len(A)):\r\n for k in range(len(B)): #switching lines of indexes \"k\" and \"j\" results in both matrices being read sequentially from DRAM \r\n for j in range(len(B[0])):\r\n C[i][j] += A[i][k] * B[k][j]\r\n\r\ndef matrix_mult_4(A, B, C): # (DONKEY)\r\n for i in range(len(A)):\r\n tmp = [0 for e in range(len(B[0]))] #with no transposition, the output values of consecutive reads do not map to a single element of matrix \"C\"\r\n for k in range(len(B)):\r\n for j in range(len(B[0])):\r\n tmp[j] += A[i][k] * B[k][j]\r\n C[i] = tmp\r\n\r\ndef matrix_mult_5(A, Bt, C): # (DSP AND RAM WITH TRANSPOSED B)\r\n for i in range(len(A)):\r\n for j in range(len(Bt)): #index value loops through the first dimension of the array, like in the 3rd function\r\n tmp = 0 #yet the lines read from memory map to a single 
output value\r\n for k in range(len(Bt[0])): #which just needs to be summed up\r\n tmp += A[i][k] * Bt[j][k] #in a multiply accumlate unit\r\n C[i][j] = tmp\r\n\r\ndef matrix_mult_6(A, Bt, C, CLS): # (DSP, RAM, CACHED READ)\r\n for i in range(len(A)):\r\n for j in range(len(Bt)):\r\n for k in range(len(Bt[0])//CLS):\r\n A_tmp = A[i][k*CLS:(k+1)*CLS]\r\n B_tmp = Bt[j][k*CLS:(k+1)*CLS]\r\n tmp = 0\r\n for cache_index in range(CLS):\r\n tmp += A_tmp[cache_index] * B_tmp[cache_index]\r\n C[i][j] += tmp\r\n\r\nprint(datetime.datetime.now(), '1st multiplication')\r\nmatrix_mult_1(A, B, C1)\r\n\r\nprint(datetime.datetime.now(), '2nd multiplication')\r\nmatrix_mult_2(A, B, C2)\r\n\r\nprint(datetime.datetime.now(), '3. multiplication')\r\nmatrix_mult_3(A, B, C3)\r\n\r\nprint(datetime.datetime.now(), '4. multiplication')\r\nmatrix_mult_4(A, B, C4)\r\n\r\nprint(datetime.datetime.now(), 'transposition')\r\nfor i in range(len(B)):\r\n for j in range(len(B[0])):\r\n Bt[j][i] = B[i][j]\r\n\r\nprint(datetime.datetime.now(), '5. multiplication')\r\nmatrix_mult_5(A, Bt, C5)\r\n\r\n#for p in range(5):\r\nCLS = 2**5\r\nprint(datetime.datetime.now(), '6. multiplication')\r\nmatrix_mult_6(A, Bt, C6, CLS)\r\n\r\nprint(datetime.datetime.now(), 'comparing outputs')\r\nprint(C1 == C2, 'C2')\r\nprint(C1 == C3, 'C3')\r\nprint(C1 == C4, 'C4')\r\nprint(C1 == C5, 'C5')\r\nprint(C1 == C6, 'C6')\r\n\r\nprint(datetime.datetime.now(), 'The End')\r\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"614146173","text":"\"\"\" Implementation of the Longitudinal Dynamics Model for work done by\n the bus engine along route.\n\n From:\n Asamer J, Graser A, Heilmann B, Ruthmair M. Sensitivity\n analysis for energy demand estimation of electric vehicles.\n Transportation Research Part D: Transport and Environment.\n 2016 Jul 1;46:182-99.\n\n This file contains one main class, which constructs the route DataFrame\n consisting of rows corresponding to points along the route\n \"\"\"\n\n# from ..route_elevation import single_route as rsr\nfrom ..route_elevation import base as re_base\nfrom . import knn\nfrom . import constant_a as ca\n\nimport numpy as np\nimport geopandas as gpd\n\n\nclass IllegalArgumentError(ValueError):\n \"\"\" \"\"\"\n pass\n\n\nclass PlottingTools(object):\n \"\"\" Place holder for now, but eventually this will wrap up the\n plotting tools written by last quarter's RouteDynamics team.\n \"\"\"\n\n def __init__(self):\n pass\n\n\n# Thinking this is not the best implementation since I don't actually\n# know how to make objects print like pandas DataFrames.\nclass RouteTrajectory(PlottingTools):\n \"\"\" Takes 2d route coordinates extracted from shapefile and\n combines the information with elevation to create a route\n trajectory dataframe.\n \"\"\"\n\n def __init__(self,\n route_num,\n shp_filename,\n elv_raster_filename,\n bus_speed_model='stopped_at_stops__15mph_between',\n stop_coords=None,\n mass_array=None,\n unloaded_bus_mass=12927,\n charging_power_max=0., # should be kW\n # charging_power_max=50000 # should be kW\n a_m=1.0,\n v_lim=15.0,\n ):\n \"\"\" Build DataFrame with bus trajectory and shapely connections\n for plotting. 
This object is mostly a wrapper object to\n build and return the Route DataFrame, but will later\n contain plotting methods as well.\n\n\n Args:\n\n route_num: needs to be one that Erica made work.\n\n bus_speed_model: has options;\n - 'stopped_at_stops__15mph_between'\n - 'constant_15mph'\n - 'const_accel_between_stops_and_speed_lim'\n\n Methods:\n\n ...\n\n \"\"\"\n\n self._initialize_instance_args(\n bus_speed_model,\n a_m,\n v_lim,\n stop_coords,\n mass_array,\n unloaded_bus_mass,\n charging_power_max,\n )\n\n # Build Route DataFrame, starting with columns:\n # - 'elevation'\n # - 'cum_distance'\n # - 'is_bus_stop\n self.route_df = self.build_route_coordinate_df(\n route_num = route_num,\n shp_filename = shp_filename,\n elv_raster_filename = elv_raster_filename,\n )\n\n self.route_df = self._add_dynamics_to_df(\n route_df=self.route_df,\n stop_coords=stop_coords,\n bus_speed_model=self.bus_speed_model,\n )\n\n\n def _initialize_instance_args(self,\n bus_speed_model,\n a_m,\n v_lim,\n stop_coords,\n mass_array,\n unloaded_bus_mass,\n charging_power_max,\n ):\n\n # Store algorithm name for future reference.\n self.bus_speed_model = bus_speed_model\n\n # default speed limit and acceleration constant\n self.a_m = a_m\n self.v_lim = v_lim\n\n\n self.stop_coords = stop_coords\n\n # Mass stuff\n self.mass_array = mass_array\n self.unloaded_bus_mass = unloaded_bus_mass\n\n # Boolean check for instance argument 'mass_array'\n self.mass_arg_is_list = (\n type(self.mass_array) is list\n or\n type(self.mass_array) is np.ndarray\n )\n ####\n\n # Store chargeing ability as instance attribute\n self.charging_power_max = charging_power_max\n\n\n def _add_dynamics_to_df(self,\n route_df,\n stop_coords,\n bus_speed_model,\n ):\n\n # Try to determine bus stops from list of coordinates\n route_df = self._add_stops_to_df(stop_coords, route_df)\n\n # Depending on the method of bus speed estimation, the next\n # block of code will exicute in different orders\n if bus_speed_model in [\n 'constant_15mph',\n 'stopped_at_stops__15mph_between'\n ]:\n # Add 'velocity' column to route_df first\n # This will also involve calulating the velocity.\n route_df = self._add_velocities_to_df(\n route_df,\n bus_speed_model=bus_speed_model,\n )\n\n route_df = self._add_delta_times_to_df(route_df)\n\n # Add 'acceleration' column to route_df, calculated as\n # finite difference from velocities\n route_df = self._add_accelerations_to_df(\n route_df,\n alg='finite_diff',\n )\n\n\n elif bus_speed_model in [\n 'const_accel_between_stops_and_speed_lim'\n ]:\n\n # Add 'acceleration' column to route_df\n route_df = self._add_accelerations_to_df(\n route_df,\n alg='const_accel_between_stops_and_speed_lim',\n )\n\n route_df = self._add_velocities_to_df(\n route_df,\n bus_speed_model='const_accel_between_stops_and_speed_lim',\n )\n\n route_df = self._add_delta_times_to_df(route_df, 'model')\n\n # Add passenger mass column to route_df\n route_df = self._add_mass_to_df(route_df)\n\n # Add force columns to route_df:\n # - 'grav_force' : gravitation force determined by road grade\n # - 'roll_fric' : rolling friction\n # - 'aero_drag' : areodynamic drag\n # - 'inertia' : inertial force, F = ma. 
Changes with passenger load\n # on bus.\n route_df = self._add_forces_to_df(route_df)\n\n # Add column to route_df containing instantaneous power experted by\n # bus at each point along route.\n route_df = self._add_power_to_df(route_df)\n\n return route_df\n\n\n def build_route_coordinate_df(self,\n route_num,\n shp_filename,\n elv_raster_filename,\n ):\n \"\"\" Builds GeoDataFrame with rows cooresponding to points on\n route with columns corresponding to elevation, elevation\n gradiant, and connecting line segments between points in\n the form of Shapely Linstring objects.\n\n Also adds bus stop column and assigns bus stops based on\n 'stop_coords' argument\n\n Args:\n 'stop_coords': list of coordinates of bus stops. Will\n assign points along bus route based on these values\n .\n\n \"\"\"\n\n # Build the df of 2D route coordinates and\n route_shp = re_base.read_shape(shp_filename, route_num)\n\n # print(f'route_shp: {route_shp}\\n')\n\n route_2Dcoord_df = re_base.extract_point_df(route_shp)\n\n # print(f'elv_raster_filename: {elv_raster_filename}\\n')\n\n (\n elevation,\n elevation_gradient,\n route_cum_distance,\n back_diff_distance\n ) = re_base.gradient(route_shp, elv_raster_filename)\n\n route_df = re_base.make_multi_lines(\n route_2Dcoord_df,\n elevation_gradient\n )\n\n\n route_df = self._add_distance_to_df(back_diff_distance, route_df)\n\n route_df = self._add_elevation_to_df(elevation, route_df)\n\n route_df = self._add_cum_dist_to_df(route_cum_distance, route_df)\n\n return route_df\n\n\n def _add_distance_to_df(self, back_diff_distance, route_df):\n\n distance_array = np.append(np.nan,back_diff_distance)\n\n rdf = route_df.assign(\n distance_from_last_point=distance_array\n )\n return rdf\n\n def _add_stops_to_df(self, stop_coords, route_df):\n \"\"\" Find rows in route_df matching the stop_coordinates and\n mark as bus stop under new column.\n \"\"\"\n\n # By default, 'stop_coords' is set to 'None', if this is true,\n # then 10 bus stops will be assigned randomly\n if stop_coords is 'random':\n # Randomly select certain route coordinates to be marked as\n # a stop with 5% probability.\n # Fix seed for reproducability\n np.random.seed(5615423)\n # Return binary array with value 'True' 5% of time\n is_stop__truth_array = (\n np.random.random(len(route_df.index)) < .15\n )\n\n route_df = route_df.assign(\n is_bus_stop = is_stop__truth_array\n )\n\n elif stop_coords is None:\n # Mark no stops\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n elif (type(stop_coords) is list) or (type(stop_coords) is np.ndarray):\n\n # Calculate indicies of 'stop_coords' that match bus_stops\n self.stop_nn_indicies, self.stop_coord_nn = knn.find_knn(\n 1,\n route_df.coordinates.values,\n stop_coords\n )\n # the 'jth' element of stop_nn_indicies also selects the\n\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n for i in self.stop_nn_indicies.ravel():\n route_df.at[i, 'is_bus_stop'] = True\n\n\n else:\n raise IllegalArgumentError(\n \"'stop_coords' must be 'random', None, \"\n \"or type(list)/type(ndarray)\"\n )\n\n # route_df.at[0, 'is_bus_stop'] = True\n # route_df.at[-1, 'is_bus_stop'] = True\n\n return route_df\n\n\n def _add_elevation_to_df(self, elevation, route_df):\n\n # print(len(elevation), len(route_df.index))\n # print('elevation', elevation)\n\n rdf = route_df.assign(\n elevation=elevation.ravel()\n )\n\n\n\n return rdf\n\n\n def _add_cum_dist_to_df(self, cum_distance, route_df):\n\n rdf = route_df.assign(\n 
cum_distance=cum_distance\n )\n\n return rdf\n\n\n def _add_velocities_to_df(self, route_df, bus_speed_model):\n \"\"\" For now just adds a constant velocity as a placeholder.\n \"\"\"\n\n lazy_choise_for_speed = 6.7056 # 6.7056 m/s (= 15 mph)\n\n # 'test' algorithm set by default for now.\n if bus_speed_model == 'constant_15mph':\n # Assign constant velocity\n bus_speed_array = (\n lazy_choise_for_speed * np.ones(len(route_df.index))\n )\n\n elif bus_speed_model == 'stopped_at_stops__15mph_between':\n # Really I want something here to use the stop array to calcularte bus speed.\n # Step !: Calculate distance to next stop, which should determine the strajectory (speed at point)\n # can use difference of 'cum_dist's\n # 2) Assign trajectory as function of distance\n # 3) plug in each route point between stops intor trajectory function.\n # ... This is all UNDER CONSTRUCTION ...\n\n # Right now, this will just make stop points have zero velocity.\n zero_if_stop__one_if_not = (\n np.logical_not(route_df.is_bus_stop.values)*1\n )\n\n # Mark endpoints of route as well\n zero_if_stop_start_end__one_if_not = zero_if_stop__one_if_not\n zero_if_stop_start_end__one_if_not[0] = 0\n zero_if_stop_start_end__one_if_not[-1] = 0\n\n # if not stop, set velocity to 15 mph\n bus_speed_array = zero_if_stop__one_if_not * lazy_choise_for_speed\n\n\n elif bus_speed_model is 'const_accel_between_stops_and_speed_lim':\n bus_speed_array = self.const_a_velocities\n\n rdf = route_df.assign(\n velocity=bus_speed_array\n )\n\n return rdf\n\n\n def _add_delta_times_to_df(self, route_df, alg='finite_diff'):\n \"\"\" Add delta_times for finite_difference calculation of acceleration \"\"\"\n\n if alg is 'finite_diff':\n delta_times = self._calculate_delta_times_on_linestring_distance(\n route_df)\n elif alg is 'model':\n delta_times = np.append(\n 0,\n np.diff(self.route_time)\n )\n\n rdf = route_df.assign(\n delta_time=delta_times\n )\n\n return rdf\n\n\n def _calculate_delta_times_on_linestring_distance(self,\n route_df,\n alg='finite_diff',\n ):\n\n back_diff_delta_x = route_df.distance_from_last_point.values\n\n try:\n velocities = route_df.velocity.values\n except AttributeError:\n print(\"Does 'route_df' have 'velocity' column? 
\")\n\n if alg is 'finite_diff':\n # Calcule average velocities along segment but backward difference\n segment_avg_velocities = (\n velocities\n +\n np.append(0,velocities[:-1])\n )/2\n\n self.delta_times = back_diff_delta_x * segment_avg_velocities\n\n else:\n raise IllegalArgumentError(\"time calculation only equiped to \"\n \"implement finite difference.\")\n\n\n self.time_on_route = np.append(\n 0,\n np.cumsum(self.delta_times[1:])\n )\n\n return self.delta_times\n\n\n def _add_accelerations_to_df(self, route_df, alg='finite_diff'):\n \"\"\" For now just adds a acceleration velocity as a placeholder.\n \"\"\"\n # print(route_df.head())\n accelerations = self._calculate_acceleration(route_df, alg)\n\n #Assign acceleration values to new row in route DataFrame.\n rdf = route_df.assign(\n acceleration=accelerations\n )\n\n return rdf\n\n\n def _calculate_acceleration(self,\n route_df,\n alg='finite_diff',\n a_m=None,\n v_lim=None,\n ):\n\n # Calculate acceleration\n if alg=='finite_diff':\n # Use finite difference of velocities to calculate accelerations\n velocity_array = route_df.velocity.values\n\n delta_distance_array = route_df.distance_from_last_point.values\n\n # assert (np.shape(np.diff(velocity_array))==np.shape(delta_distance_array)), (\n # \"np.shape(np.diff(velocity_array) = {}\\n\"\n # \"np.shape(delta_distance_array) = {}\\n\".format(\n # np.shape(np.diff(velocity_array)),\n # np.shape(delta_distance_array)\n # )\n # )\n\n # Calculate acceleraetion by central difference\n\n zero_in_a_list = np.array([0])\n\n back_diff_velocity_array = np.append(\n zero_in_a_list,\n np.diff(velocity_array)\n )\n\n # Assign backward diff velocities as instance attribute\n self.delta_v = back_diff_velocity_array\n\n # forward_diff_velocity_array = np.append(\n # np.diff(velocity_array),\n # zero_in_a_list\n # )\n\n # central_diff_velocity_array = (\n # back_diff_velocity_array\n # +\n # forward_diff_velocity_array\n # )/2.\n\n # But average acceleration cooresponding to the linestring\n # distance will be the backward difference in velovity...\n # divided by time and not distance...\n\n dt = route_df.delta_time.values\n\n accelerations = np.append(\n np.nan,\n self.delta_v[1:] / dt[1:]\n )\n\n elif alg=='const_accel_between_stops_and_speed_lim':\n\n if v_lim is None: v_lim=self.v_lim\n if a_m is None: a_m=self.a_m\n\n (\n accelerations,\n self.const_a_velocities,\n self.x_ls,\n self.x_ns,\n self.route_time\n ) = ca.const_a_dynamics(\n route_df,\n a_m,\n v_lim,\n )\n\n else:\n raise IllegalArgumentError((\n \"'alg' keywarg must be implemented algorithm. \"\n \"Currently supported are; \\n\"\n \" - 'finite_diff' : calculates finite difference in\"\n \" velocities and distances and takes the ratio.\\n\"\n \"and nothing else... 
maybe one day it will have an analytic\"\n \" option.\"\n ))\n\n return accelerations\n\n\n def _add_mass_to_df(self,\n route_df,\n ):\n \"\"\" Compute number of passengers along the route.\n\n Eventually this will use Ryan's ridership module, which\n determines the ridership at each bus stop.\n \"\"\"\n if self.mass_arg_is_list:\n\n lengths_equiv = len(self.mass_array)==len(\n self.stop_coords)\n # Does mass array check out for calculation?\n mass_array_correct_length = (\n lengths_equiv and self.mass_arg_is_list\n )\n\n full_mass_column = self.calculate_mass(\n alg='list_per_stop',\n len_check=mass_array_correct_length\n )\n\n else: # Add default mass to every row\n full_mass_column = self.unloaded_bus_mass*np.ones(\n len(route_df.index))\n\n\n route_df = route_df.assign(\n mass = full_mass_column\n )\n\n return route_df\n\n\n def calculate_mass(self,\n alg='list_per_stop',\n len_check=None,\n ):\n \"\"\" Take mass array that is length of bus stop array and store\n as df column with interpolated values in between stops\n (value from last stop). If no mass array was input as class\n arg, then default bus mass is stored in every df row.\n \"\"\"\n\n\n if alg=='list_per_stop' and len_check:\n\n if not hasattr(self, 'stop_nn_indicies'):\n raise AttributeError('Cant calculate from list')\n\n\n # Initialize array of Nan's for mass column of rdf\n full_mass_column = np.zeros(len(self.route_df.index))\n full_mass_column[:] = np.nan\n\n # Iterate through the length of the given mass_array\n # (already determined equal length to 'stop_coords').\n for i in range(len(self.mass_array)):\n # Set values of mass at bus_stops\n full_mass_column[\n self.stop_nn_indicies[i]\n ] = self.mass_array[i]\n\n # Set initial and value to unloaded bus mass.\n full_mass_column[0] = self.unloaded_bus_mass\n full_mass_column[-1] = self.unloaded_bus_mass\n\n # Iterate through the half constructed rdf mass column\n # ('full_mass_column') and fill in sapce between stops with previous value\n for i in range(len(full_mass_column)-1):\n j = 1\n try:\n while np.isnan(full_mass_column[i+j]):\n full_mass_column[i+j] = full_mass_column[i]\n # print(full_mass_column[i+j] )\n j+=1\n except: IndexError\n\n if np.any(full_mass_column < self.unloaded_bus_mass):\n raise IllegalArgumentError(\"Class arg 'unloaded_bus_mass' \"\n \"is heavier than values in arg 'mass_array'\")\n\n elif alg=='list_per_stop' and (\n self.mass_arg_is_list and not len_check\n ):\n raise IllegalArgumentError(\n \"'stop_coords' and 'mass_array' must be same length\"\n )\n\n else:\n raise IllegalArgumentError(\n \"Algorithm for mass calculation must be 'list_per_stop'\"\n )\n\n\n return full_mass_column\n\n\n def _add_forces_to_df(self, route_df):\n \"\"\" Calculate forces on bus relevant to the Longitudinate\n dynamics model.\n \"\"\"\n\n (\n grav_force,\n roll_fric,\n aero_drag,\n inertia\n ) = self.calculate_forces(route_df)\n\n route_df = route_df.assign(\n grav_force = grav_force,\n roll_fric = roll_fric,\n aero_drag = aero_drag,\n inertia = inertia,\n )\n\n return route_df\n\n\n def calculate_forces(self, rdf):\n \"\"\" Requires GeoDataFrame input with mass column \"\"\"\n\n vels = rdf.velocity.values\n acce = rdf.acceleration.values\n grad = rdf.gradient.values\n grad_angle = np.arctan(grad)\n\n\n # Physical parameters\n gravi_accel = 9.81\n air_density = 1.225 # air density in kg/m3; consant for now,\n # eventaully input from weather API\n v_wind = 0.0 # wind speed in km per hour; figure out component,\n # and also will come from weather API\n 
fric_coeff = 0.01\n\n # List of Bus Parameters for 40 foot bus\n if self.mass_array is None:\n loaded_bus_mass = self.unloaded_bus_mass # Mass of bus in kg\n else:\n loaded_bus_mass = rdf.mass.values\n\n width = 2.6 # in m\n height = 3.3 # in m\n bus_front_area = width * height\n drag_coeff = 0.34 # drag coefficient estimate from paper (???)\n rw = 0.28575 # radius of wheel in m\n\n\n # Calculate the gravitational force\n grav_force = -(\n loaded_bus_mass * gravi_accel * np.sin(grad_angle)\n )\n\n # Calculate the rolling friction\n roll_fric = -(\n fric_coeff * loaded_bus_mass * gravi_accel * np.cos(grad_angle)\n )\n\n # Calculate the aerodynamic drag\n aero_drag = -(\n drag_coeff\n *\n bus_front_area\n *\n (air_density/2)\n *\n (vels-v_wind)\n )\n\n # Calculate the inertial force\n inertia = loaded_bus_mass * acce\n\n return (grav_force, roll_fric, aero_drag, inertia)\n\n\n def _calculate_batt_power_exert(self, rdf):\n\n f_resist = (\n rdf.grav_force.values\n +\n rdf.roll_fric.values\n +\n rdf.aero_drag.values\n )\n\n f_traction = rdf.inertia.values - f_resist\n\n velocity = rdf.velocity.values\n\n # calculate raw power before capping charging ability of bus\n batt_power_exert = f_traction * velocity\n self.raw_batt_power_exert = np.copy(batt_power_exert)\n\n for i in range(len(batt_power_exert)):\n if batt_power_exert[i] < -self.charging_power_max:\n batt_power_exert[i] = -self.charging_power_max\n\n return batt_power_exert\n\n\n def _add_power_to_df(self, rdf):\n\n batt_power_exert = self._calculate_batt_power_exert(rdf)\n\n new_df = rdf.assign(\n power_output = batt_power_exert\n )\n\n return new_df\n\n\n def energy_from_route(self):\n\n rdf = self.route_df\n\n delta_t = rdf.delta_time.values[1:]\n\n power = rdf.power_output.values[1:]\n\n energy = np.sum(power * delta_t)\n\n return energy\n","sub_path":"route_dynamics/route_energy/longi_dynam_model.py","file_name":"longi_dynam_model.py","file_ext":"py","file_size_in_byte":23523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"251643690","text":"from inverted_index import Entry\r\nfrom inverted_index import Posting\r\nfrom utils import get_term_freq_dict\r\nfrom utils import get_terms\r\n\r\nimport getopt\r\nimport numpy as np\r\nimport os\r\nimport sys\r\ntry:\r\n import cPickle as pickle\r\nexcept:\r\n import pickle\r\n\r\ndef usage():\r\n print(\"usage: \" + sys.argv[0] + \" -i directory-of-documents -d dictionary-file -p postings-file\")\r\n\r\n# generate the dict of (term, tf) \r\ndef analyze_doc(filename):\r\n f = open(input_directory + str(filename))\r\n terms = []\r\n for line in f:\r\n terms = terms + get_terms(line)\r\n f.close()\r\n return get_term_freq_dict(terms)\r\n\r\n# calculate the doclen for each document based on the pairs (term, tf)\r\ndef calc_len(values):\r\n return np.linalg.norm(1 + np.log10(values))\r\n\r\n# add all postings for a certain doc\r\ndef add_postings(entries, docid, term_freq_dict, dict_terms):\r\n for term in term_freq_dict:\r\n freq = term_freq_dict[term]\r\n idx = dict_terms[term]\r\n entries[idx].update(Posting(docid, freq))\r\n\r\n# add term into set\r\ndef create_set(set_terms, keys):\r\n for key in keys:\r\n if key not in set_terms:\r\n set_terms.add(key)\r\n\r\n# convert the set of terms to \r\n# a mapping from sorted terms to increasing docids\r\ndef create_dict(set_terms, entries):\r\n dict_terms = dict()\r\n cnt = 0\r\n for term in set_terms:\r\n dict_terms[term] = cnt\r\n cnt = cnt + 1\r\n entries.append(Entry(term))\r\n return 
dict_terms\r\n\r\ndef output_entries(entries):\r\n for entry in entries:\r\n entry.output()\r\n\r\ndef output_to_file(files, doc_len, entries):\r\n f_dictionary = open(output_file_dictionary, \"w\")\r\n f_postings = open(output_file_postings, \"wb\")\r\n\r\n # write total number of docs into the dictionary\r\n f_dictionary.write(str(batch_size) + '\\n')\r\n \r\n for i in range(batch_size):\r\n # write the filename and doclen into the dictionary\r\n f_dictionary.write(str(files[i]) + ' ' + str(doc_len[i]) + '\\n')\r\n\r\n for entry in entries:\r\n # write the postings list into postings file\r\n offset = f_postings.tell()\r\n # write (term, document_frequency, offset) into dictionary\r\n pickle.dump(entry.head, f_postings)\r\n f_dictionary.write(entry.term + ' ' + str(entry.cnt) + ' ' + str(offset) + '\\n')\r\n\r\n f_dictionary.close()\r\n f_postings.close()\r\n\r\ninput_directory = output_file_dictionary = output_file_postings = None\r\n\r\ntry:\r\n opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')\r\nexcept getopt.GetoptError as err:\r\n usage()\r\n sys.exit(2)\r\n \r\nfor o, a in opts:\r\n if o == '-i': # input directory\r\n input_directory = a\r\n elif o == '-d': # dictionary file\r\n output_file_dictionary = a\r\n elif o == '-p': # postings file\r\n output_file_postings = a\r\n else:\r\n assert False, \"unhandled option\"\r\n \r\nif input_directory == None or output_file_postings == None or output_file_dictionary == None:\r\n usage()\r\n sys.exit(2)\r\n\r\nsys.setrecursionlimit(50000)\r\n\r\n# get the list of all files in the given directory\r\nfiles = os.listdir(input_directory)\r\n\r\n# assign each file a docid same as the filename\r\nfor i in range(len(files)):\r\n files[i] = int(files[i])\r\nfiles.sort()\r\n\r\nterm_freq_dicts = []\r\ndoc_len = []\r\nbatch_size = len(files)\r\n\r\nset_terms = set()\r\nfor i in range(batch_size):\r\n # get a dict of (term, tf) pairs for each document\r\n term_freq_dict = analyze_doc(files[i])\r\n term_freq_dicts.append(term_freq_dict)\r\n # calculate the doclen using the (term, tf) dict\r\n doc_len.append(calc_len(list(term_freq_dict.values())))\r\n # add the newly appeared term into dictionary (set of terms)\r\n create_set(set_terms, list(term_freq_dict.keys()))\r\n\r\nentries = []\r\n# generate a mapping from sorted terms to increasing numbers\r\ndict_terms = create_dict(sorted(list(set_terms)), entries)\r\n\r\nfor i in range(batch_size):\r\n # create postings list and append postings file by file\r\n add_postings(entries, files[i], term_freq_dicts[i], dict_terms)\r\n\r\n# output the information into dictionary and postings file\r\noutput_to_file(files, doc_len, entries)","sub_path":"proj-3/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"244943328","text":"import logging\nfrom uuid import uuid4\n\nfrom kafka import KafkaProducer, KafkaConsumer, TopicPartition\nfrom lazy_object_proxy.simple import Proxy\n\n\nfrom championship_service.utils.serializer import dumps, loads\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChampionshipKafkaProducer(KafkaProducer):\n def __init__(self):\n super().__init__(\n value_serializer=lambda m: dumps(m).encode('utf-8'),\n bootstrap_servers='robot-championship-kafka:9092'\n )\n\n def send(self, value):\n return super().send('robot-championship.championships', value)\n\n\nclass ChampionshipKafkaConsumer(KafkaConsumer):\n def __init__(self):\n super().__init__(\n 
value_deserializer=lambda m: loads(m.decode('utf-8')),\n bootstrap_servers='robot-championship-kafka:9092',\n auto_offset_reset='earliest',\n group_id=str(uuid4()),\n )\n\n self.my_partition = TopicPartition('robot-championship.championships', 0)\n self.assigned_topic = [self.my_partition]\n self.assign(self.assigned_topic)\n\n def get_all_events(self):\n events = []\n self.seek_to_beginning(self.my_partition)\n for _ in range(10):\n msg_pack = self.poll(timeout_ms=0.05)\n for tp, messages in msg_pack.items():\n for message in messages:\n events.append(message.value)\n return events\n \n \nchampionship_message_sender = Proxy(ChampionshipKafkaProducer)\n\n\ndef championship_event_one_time_consumer_factory():\n return ChampionshipKafkaConsumer()\n\n\ndef init_championship_event_listening(app):\n from championship_service.messaging.championship_event_handler import ChampionshipKafkaListener\n logger.info('Initializing message listening')\n championship_event_consumer = ChampionshipKafkaListener(KafkaConsumer(\n 'robot-championship.championships',\n value_deserializer=lambda m: loads(m.decode('utf-8')),\n bootstrap_servers='robot-championship-kafka:9092',\n group_id='championship-service'\n ), championship_message_sender, app)\n championship_event_consumer.start()\n","sub_path":"championship-service/championship_service/infra/kafka.py","file_name":"kafka.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"362542670","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\n\napp = Flask(__name__)\n\n\n\nmongo = PyMongo(app, uri=\"mongodb://localhost:27017/mars_app\")\nmars_info = mongo.db.mars_info \n\n@app.route(\"/\")\ndef index():\n \n mars_mission_data=mongo.db.mars_info.find_one()\n \n return render_template(\"index.html\", mars=mars_mission_data)\n\n@app.route(\"/scrape\")\ndef scrape(): \n \n \n results = scrape_mars.scrape()\n mars_info.update({},results,upsert=True)\n return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"351161482","text":"# TG-UserBot - A modular Telegram UserBot script for Python.\n# Copyright (C) 2019 Kandarp \n#\n# TG-UserBot is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# TG-UserBot is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with TG-UserBot. 
If not, see .\n\n# Source/Credit: https://github.com/penn5/meval/\n\nimport ast\n\n\n# We dont modify locals VVVV ; this lets us keep the message available to the\n# user-provided function\nasync def meval(code, globs, **kwargs):\n # Note to self: please don't set globals here as they will be lost.\n # Don't clutter locals\n locs = {}\n # Restore globals later\n globs = globs.copy()\n # This code saves __name__ and __package into a kwarg passed to the func.\n # It is set before the users code runs to make sure relative imports work\n global_args = \"_globs\"\n while global_args in globs.keys():\n # Make sure there's no name collision, just keep prepending _s\n global_args = \"_\" + global_args\n kwargs[global_args] = {}\n for glob in [\"__name__\", \"__package__\"]:\n # Copy data to args we are sending\n kwargs[global_args][glob] = globs[glob]\n\n root = ast.parse(code, \"exec\")\n code = root.body\n\n ret_name = \"_ret\"\n ok = False\n while True:\n if ret_name in globs.keys():\n ret_name = \"_\" + ret_name\n continue\n for node in ast.walk(root):\n if isinstance(node, ast.Name) and node.id == ret_name:\n ret_name = \"_\" + ret_name\n break\n ok = True\n if ok:\n break\n\n if not code:\n return None\n\n if not any(isinstance(node, ast.Return) for node in code):\n for i in range(len(code)):\n if isinstance(code[i], ast.Expr):\n if (i == len(code) - 1\n or not isinstance(code[i].value, ast.Call)):\n code[i] = ast.copy_location(\n ast.Expr(\n ast.Call(func=ast.Attribute(value=ast.Name(\n id=ret_name, ctx=ast.Load()),\n attr=\"append\",\n ctx=ast.Load()),\n args=[code[i].value],\n keywords=[])), code[-1])\n else:\n for node in code:\n if isinstance(node, ast.Return):\n node.value = ast.List(elts=[node.value], ctx=ast.Load())\n\n code.append(\n ast.copy_location(\n ast.Return(value=ast.Name(id=ret_name, ctx=ast.Load())), code[-1]))\n\n # globals().update(**)\n glob_copy = ast.Expr(\n ast.Call(\n func=ast.Attribute(value=ast.Call(func=ast.Name(id=\"globals\",\n ctx=ast.Load()),\n args=[],\n keywords=[]),\n attr=\"update\",\n ctx=ast.Load()),\n args=[],\n keywords=[\n ast.keyword(arg=None,\n value=ast.Name(id=global_args, ctx=ast.Load()))\n ]))\n ast.fix_missing_locations(glob_copy)\n code.insert(0, glob_copy)\n ret_decl = ast.Assign(targets=[ast.Name(id=ret_name, ctx=ast.Store())],\n value=ast.List(elts=[], ctx=ast.Load()))\n ast.fix_missing_locations(ret_decl)\n code.insert(1, ret_decl)\n args = []\n for a in list(map(lambda x: ast.arg(x, None), kwargs.keys())):\n ast.fix_missing_locations(a)\n args += [a]\n args = ast.arguments(args=[],\n vararg=None,\n kwonlyargs=args,\n kwarg=None,\n defaults=[],\n kw_defaults=[None for i in range(len(args))])\n args.posonlyargs = []\n fun = ast.AsyncFunctionDef(name=\"tmp\",\n args=args,\n body=code,\n decorator_list=[],\n returns=None)\n ast.fix_missing_locations(fun)\n mod = ast.parse(\"\")\n mod.body = [fun]\n comp = compile(mod, \"\", \"exec\")\n\n exec(comp, {}, locs)\n\n r = await locs[\"tmp\"](**kwargs)\n for i in range(len(r)):\n if hasattr(r[i], \"__await__\"):\n r[i] = await r[i] # workaround for 3.5\n i = 0\n while i < len(r) - 1:\n if r[i] is None:\n del r[i]\n else:\n i += 1\n if len(r) == 1:\n [r] = r\n elif not r:\n r = None\n return r\n","sub_path":"userbot/utils/meval.py","file_name":"meval.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"344598494","text":"# Copyright 2018 Cloudbase Solutions Srl\n#\n# All Rights Reserved.\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom nova import exception\n\nfrom compute_hyperv.nova.cluster import volumeops\nfrom compute_hyperv.nova import constants\nfrom compute_hyperv.nova import volumeops as base_volumeops\nfrom compute_hyperv.tests.unit import test_base\n\n\nclass ClusterVolumeOpsTestCase(test_base.HyperVBaseTestCase):\n _autospec_classes = [\n base_volumeops.cinder.API,\n ]\n\n def setUp(self):\n super(ClusterVolumeOpsTestCase, self).setUp()\n self._volumeops = volumeops.ClusterVolumeOps()\n\n def test_loaded_volume_drivers(self):\n self.assertEqual(set([constants.STORAGE_PROTOCOL_SMBFS]),\n set(self._volumeops.volume_drivers.keys()))\n\n def test_get_blacklisted_volume_driver(self):\n conn_info = dict(driver_volume_type=constants.STORAGE_PROTOCOL_ISCSI)\n\n self.assertRaises(\n exception.VolumeDriverNotFound,\n self._volumeops._get_volume_driver,\n conn_info)\n\n def test_get_supported_volume_driver(self):\n conn_info = dict(driver_volume_type=constants.STORAGE_PROTOCOL_SMBFS)\n drv = self._volumeops._get_volume_driver(conn_info)\n\n self.assertIsInstance(drv, base_volumeops.SMBFSVolumeDriver)\n","sub_path":"compute_hyperv/tests/unit/cluster/test_volumeops.py","file_name":"test_volumeops.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"367539545","text":"from project.code import general_functions as fun\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom sklearn.manifold import MDS\nimport project.code.quality_assesment.reconstruction_error as er\nclass Kmeans:\n\n def __init__(self):\n self.path_to_file = 'project/resources/'\n self.path_to_results = 'project/results/'\n self.currentFile = 'DM - D_PP - p_min 3 - delta 0.5 - q1 -5 - q2 -0.5.csv'\n self.clusters = 7\n\n def run(self):\n mat, labels = fun.readMatrix(self.path_to_file + self.currentFile)\n mat = np.array(mat, dtype=np.float64)\n\n kmeans = KMeans(n_clusters=self.clusters).fit(mat)\n centroids = kmeans.cluster_centers_\n\n d = {}\n for i in range(self.clusters):\n d[i] = 0\n for k in kmeans.labels_:\n d[k] += 1\n\n seed = np.random.RandomState(seed=3)\n embedding = MDS(n_components=2, dissimilarity='precomputed', random_state=seed)\n\n X_transformed = embedding.fit_transform(mat)\n\n plt = fun.plot2(kmeans.labels_, X_transformed)\n plt.savefig(self.path_to_results + 'mds_kmeans.png')\n print('Error: ', str(er.error(mat, X_transformed)) + '%')\n\n\ndef main():\n kmeans = Kmeans()\n kmeans.run()","sub_path":"project/code/clustering/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"620698206","text":"\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Numeric,Boolean\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport pymysql\nfrom decimal import 
Decimal\n\n\npymysql.install_as_MySQLdb()\nengine = create_engine('sqlite:///transction_record.db')\nSession = sessionmaker(bind=engine)\nBase = declarative_base()\nsession=Session()\n\nclass TX_RECORD(Base):\n __tablename__ = 'tx_record'\n id = Column(Integer, primary_key=True)\n tx_id = Column(String(256))\n state = Column(Boolean)\n asset_type = Column(String(64))\n address_from = Column(String(64))\n address_to = Column(String(64))\n value = Column(Numeric(16,8))\n\n\n @staticmethod\n def save(tx_id,asset_type,address_from,address_to,value,state):\n new_instance = TX_RECORD(tx_id=tx_id,state=state,\n asset_type=asset_type,address_from=address_from,\n address_to=address_to,value=Decimal(value))\n session.add(new_instance)\n session.commit()\n\n @staticmethod\n def query(address):\n res=session.query(TX_RECORD).filter(TX_RECORD.address_from == address or TX_RECORD.address_from == address).all()\n return res\n\n def to_json(self):\n return {\n \"tx_id\":self.tx_id,\n \"asset_type\":self.asset_type,\n \"address_from\":self.address_from,\n \"address_to\":self.address_to,\n \"value\":self.value,\n \"state\":self.state,\n }\n\n\nBase.metadata.create_all(engine)","sub_path":"lightwallet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"348923589","text":"#coding=utf-8\n\"\"\"\n显示消息 feed\n\"\"\"\n__author__ = \"Hick\"\n__license__ = 'MIT'\n__email__ = 'hickwu@gmail.com'\n\nFILTER_PRIVATE = True\nALREADY_READ = set()\nDELETED = set()\nSEARCH_TAGS = set()\nREADING = False\nTIME_FMT = '%A %b-%d, %Y at %r'\n\nfrom x84.bbs import DBProxy, echo, getterminal, getsession\n\ndef quote_body(msg, width=79, quote_txt=u'> ', hardwrap=u'\\r\\n'):\n \"\"\"\n Given a message, return new string suitable for quoting it.\n \"\"\"\n from x84.bbs import getterminal\n import dateutil.tz\n term = getterminal()\n ucs = u''\n for line in msg.body.splitlines():\n ucs += u''.join((\n quote_txt,\n u'\\r\\n'.join(term.wrap(line, width - len(quote_txt), subsequent_indent=quote_txt)),\n hardwrap,))\n return u''.join((\n 'On ',\n msg.stime.replace(tzinfo=dateutil.tz.tzlocal()).astimezone(dateutil.tz.tzutc()).strftime(TIME_FMT), u' UTC ',\n msg.author, u' wrote:',\n hardwrap, ucs, hardwrap))\n\n\ndef allow_tag(idx):\n \"\"\"\n Returns true if user is allowed to 't'ag message at idx:\n * sysop and moderator\n * author or recipient\n * a member of any message tag matching user group\n \"\"\"\n from x84.bbs import getsession, get_msg\n session = getsession()\n if ('sysop' in session.user.groups\n or 'moderator' in session.user.groups):\n return True\n msg = get_msg(idx)\n if session.user.handle in (msg.recipient, msg.author):\n return True\n for tag in msg.tags:\n if tag in session.user.groups:\n return True\n return False\n\n\ndef mark_undelete(idx):\n \"\"\" Mark message ``idx`` as deleted. \"\"\"\n from x84.bbs import getsession\n session = getsession()\n # pylint: disable=W0603\n # Using the global statement\n global DELETED\n DELETED = session.user.get('trash', set())\n if idx in DELETED:\n DELETED.remove(idx)\n session.user['trash'] = DELETED\n return True\n\n\ndef mark_delete(idx):\n \"\"\" Mark message ``idx`` as deleted. 
\"\"\"\n from x84.bbs import getsession\n session = getsession()\n # pylint: disable=W0603\n # Using the global statement\n global DELETED\n DELETED = session.user.get('trash', set())\n if not idx in DELETED:\n DELETED.add(idx)\n session.user['trash'] = DELETED\n return True\n\n\ndef mark_read(idx):\n \"\"\" Mark message ``idx`` as read. \"\"\"\n from x84.bbs import getsession\n session = getsession()\n # pylint: disable=W0603\n # Using the global statement\n global ALREADY_READ\n ALREADY_READ = session.user.get('readmsgs', set())\n if idx not in ALREADY_READ:\n ALREADY_READ.add(idx)\n session.user['readmsgs'] = ALREADY_READ\n return True\n\n\ndef read_messages(msgs, new):\n \"\"\"\n Provide message reader UI given message list ``msgs``,\n with new messages in list ``new``.\n \"\"\"\n # pylint: disable=R0914,R0912,R0915\n # Too many local variables\n # Too many branches\n # Too many statements\n from x84.bbs import timeago, get_msg, getterminal, echo, gosub\n from x84.bbs import ini, Pager, getsession, getch, Msg\n import x84.default.writemsg\n session, term = getsession(), getterminal()\n\n session.activity = 'reading msgs'\n # build header\n len_idx = max([len('%d' % (_idx,)) for _idx in msgs])\n len_author = ini.CFG.getint('nua', 'max_user')\n len_ago = 9\n len_subject = ini.CFG.getint('msg', 'max_subject')\n len_preview = min(len_idx + len_author + len_ago + len_subject + -1, term.width - 2)\n reply_depth = ini.CFG.getint('msg', 'max_depth')\n indent_start, indent, indent_end = u'\\\\', u'-', u'> '\n\n def get_header(msgs_idx):\n \"\"\"\n Return list of tuples, (idx, unicodestring), suitable for Lightbar.\n \"\"\"\n import datetime\n msg_list = list()\n thread_indent = lambda depth: (term.red(\n (indent_start + (indent * depth) + indent_end))\n if depth else u'')\n\n def head(msg, depth=0, maxdepth=reply_depth):\n \"\"\" This recursive routine finds the 'head' message\n of any relationship, up to maxdepth.\n \"\"\"\n if (depth <= maxdepth\n and hasattr(msg, 'parent')\n and msg.parent is not None):\n return head(get_msg(msg.parent), depth + 1, maxdepth)\n return msg.idx, depth\n\n for idx in msgs_idx:\n msg = get_msg(idx)\n author, subj = msg.author, msg.subject\n tm_ago = (datetime.datetime.now() - msg.stime).total_seconds()\n # pylint: disable=W0631\n # Using possibly undefined loop variable 'idx'\n attr = lambda arg: (\n term.bold_green(arg) if (\n not idx in ALREADY_READ\n and msg.recipient == session.user.handle) else\n term.red(arg) if not idx in ALREADY_READ\n else term.yellow(arg))\n status = [u'U' if not idx in ALREADY_READ else u' ',\n u'D' if idx in DELETED else u' ', ]\n row_txt = u'%s %s %s %s %s%s ' % (\n u''.join(status),\n attr(str(idx).rjust(len_idx)),\n attr(author.ljust(len_author)),\n (timeago(tm_ago)).rjust(len_ago),\n attr(u'ago'),\n term.bold_black(':'),)\n msg_list.append((head(msg), idx, row_txt, subj))\n msg_list.sort(reverse=True)\n return [(idx, row_txt + thread_indent(depth) + subj)\n for (_threadid, depth), idx, row_txt, subj in msg_list]\n\n def get_selector(mailbox, prev_sel=None):\n \"\"\"\n Provide Lightbar UI element given message mailbox returned from\n function get_header, and prev_sel as previously instantiated Lightbar.\n \"\"\"\n from x84.bbs import Lightbar\n pos = prev_sel.position if prev_sel is not None else (0, 0)\n sel = Lightbar(\n height=(term.height / 3\n if term.width < 140 else term.height - 3),\n width=len_preview,\n yloc=2, xloc=0)\n sel.glyphs['top-horiz'] = u''\n sel.glyphs['left-vert'] = u''\n sel.colors['highlight'] = 
term.yellow_reverse\n sel.update(mailbox)\n sel.position = pos\n return sel\n\n def get_reader():\n \"\"\"\n Provide Pager UI element for message reading.\n \"\"\"\n reader_height = (term.height - (term.height / 3) - 2)\n reader_indent = 2\n reader_width = min(term.width - 1, min(term.width - reader_indent, 80))\n reader_ypos = ((term.height - 1 ) - reader_height if\n (term.width - reader_width) < len_preview else 2)\n reader_height = term.height - reader_ypos - 1\n msg_reader = Pager(\n height=reader_height,\n width=reader_width,\n yloc=reader_ypos,\n xloc=min(len_preview + 2, term.width - reader_width))\n msg_reader.glyphs['top-horiz'] = u''\n msg_reader.glyphs['right-vert'] = u''\n return msg_reader\n\n def format_msg(reader, idx):\n \"\"\" Format message of index ``idx`` into Pager instance ``reader``. \"\"\"\n msg = get_msg(idx)\n sent = msg.stime.strftime(TIME_FMT)\n to_attr = term.bold_green if (\n msg.recipient == session.user.handle) else term.underline\n ucs = u'\\r\\n'.join((\n (u''.join((\n term.yellow('fROM: '),\n (u'%s' % term.bold(msg.author,)).rjust(len_author),\n u' ' * (reader.visible_width - (len_author + len(sent))),\n sent,))),\n u''.join((\n term.yellow('tO: '),\n to_attr((u'%s' % to_attr(msg.recipient,)).rjust(len_author)\n if msg.recipient is not None else u'All'),)),\n (u'\\r\\n'.join((term.wrap(\n term.yellow('tAGS: ')\n + (u'%s ' % (term.bold(','),)).join((\n [term.bold_red(_tag)\n if _tag in SEARCH_TAGS\n else term.yellow(_tag)\n for _tag in msg.tags])),\n reader.visible_width,\n subsequent_indent=u' ' * 6)))),\n (term.yellow_underline(\n (u'SUbj: %s' % (msg.subject,)).ljust(reader.visible_width)\n )),\n u'', (msg.body),))\n return ucs\n\n def get_selector_title(mbox, new):\n \"\"\"\n Returns unicode string suitable for displaying as title of mailbox.\n \"\"\"\n newmsg = (term.yellow(u' ]-[ ') +\n term.yellow_reverse(str(len(new))) +\n term.bold_underline(u' NEW')) if len(new) else u''\n return u''.join((term.yellow(u'[ '),\n term.bold_yellow(str(len(mbox))),\n term.bold(\n u' MSG%s' % (u's' if 1 != len(mbox) else u'',)),\n newmsg, term.yellow(u' ]'),))\n\n dispcmd_mark = lambda idx: (\n (term.yellow_underline(u' ') + u':mark' + u' ')\n if idx not in ALREADY_READ else u'')\n dispcmd_delete = lambda idx: (\n (term.yellow_underline(u'D') + u':elete' + u' ')\n if idx not in DELETED else u'')\n dispcmd_tag = lambda idx: (\n (term.yellow_underline(u't') + u':ag' + u' ')\n if allow_tag(idx) else u'')\n\n def get_selector_footer(idx):\n \"\"\"\n Returns unicode string suitable for displaying\n as footer of mailbox when window is active.\n \"\"\"\n return u''.join((\n term.yellow(u'- '),\n u''.join((\n term.yellow_underline(u'>') + u':read ',\n term.yellow_underline(u'r') + u':eply ',\n dispcmd_mark(idx),\n dispcmd_delete(idx),\n dispcmd_tag(idx),\n term.yellow_underline(u'q') + u':uit',)),\n term.yellow(u' -'),))\n\n def get_reader_footer(idx):\n \"\"\"\n Returns unicode string suitable for displaying\n as footer of reader when window is active\n \"\"\"\n\n return u''.join((\n term.yellow(u'- '),\n u' '.join((\n term.yellow_underline(u'<') + u':back ',\n term.yellow_underline(u'r') + u':eply ',\n dispcmd_delete(idx),\n dispcmd_tag(idx),\n term.yellow_underline(u'q') + u':uit',)),\n term.yellow(u' -'),))\n\n def refresh(reader, selector, mbox, new):\n \"\"\"\n Returns unicode string suitable for refreshing the screen.\n \"\"\"\n from x84.bbs import getsession\n session = getsession()\n\n if READING:\n reader.colors['border'] = term.bold_yellow\n 
selector.colors['border'] = term.bold_black\n else:\n reader.colors['border'] = term.bold_black\n selector.colors['border'] = term.bold_yellow\n title = get_selector_title(mbox, new)\n padd_attr = (term.bold_yellow if not READING\n else term.bold_black)\n sel_padd_right = padd_attr(\n u'-'\n + selector.glyphs['bot-horiz'] * (\n selector.visible_width - term.length(title) - 7)\n + u'-\\u25a0-' if READING else u'- -')\n sel_padd_left = padd_attr(\n selector.glyphs['bot-horiz'] * 3)\n idx = selector.selection[0]\n return u''.join((term.move(0, 0), term.clear, u'\\r\\n',\n u'// REAdiNG MSGS ..'.center(term.width).rstrip(),\n selector.refresh(),\n selector.border() if READING else reader.border(),\n reader.border() if READING else selector.border(),\n selector.title(\n sel_padd_left + title + sel_padd_right),\n selector.footer(get_selector_footer(idx)\n ) if not READING else u'',\n reader.footer(get_reader_footer(idx)\n ) if READING else u'',\n reader.refresh(),\n ))\n\n echo((u'\\r\\n' + term.clear_eol) * (term.height - 1))\n dirty = 2\n msg_selector = None\n msg_reader = None\n idx = None\n # pylint: disable=W0603\n # Using the global statement\n global READING\n while (msg_selector is None and msg_reader is None\n ) or not (msg_selector.quit or msg_reader.quit):\n if session.poll_event('refresh'):\n dirty = 2\n if dirty:\n if dirty == 2:\n mailbox = get_header(msgs)\n msg_selector = get_selector(mailbox, msg_selector)\n idx = msg_selector.selection[0]\n msg_reader = get_reader()\n msg_reader.update(format_msg(msg_reader, idx))\n echo(refresh(msg_reader, msg_selector, msgs, new))\n dirty = 0\n inp = getch(1)\n if inp in (u'r', u'R'):\n reply_to = get_msg(idx)\n reply_msg = Msg()\n reply_msg.recipient = reply_to.author\n reply_msg.tags = reply_to.tags\n reply_msg.subject = reply_to.subject\n reply_msg.parent = reply_to.idx\n # quote between 30 and 79, 'screen width - 4' as variable dist.\n reply_msg.body = quote_body(reply_to,\n max(30, min(79, term.width - 4)))\n echo(term.move(term.height, 0) + u'\\r\\n')\n if gosub('writemsg', reply_msg):\n reply_msg.save()\n dirty = 2\n READING = False\n else:\n dirty = 1\n mark_read(idx) # also mark as read\n\n # 't' uses writemsg.prompt_tags() routine, how confusing ..\n elif inp in (u't',) and allow_tag(idx):\n echo(term.move(term.height, 0))\n msg = get_msg(idx)\n if x84.default.writemsg.prompt_tags(msg):\n msg.save()\n session.user['msgs_sent'] = session.user.get('msgs_sent', 0) + 1\n dirty = 2\n\n # spacebar marks as read, goes to next message\n elif inp in (u' ',):\n dirty = 2 if mark_read(idx) else 1\n msg_selector.move_down()\n idx = msg_selector.selection[0]\n READING = False\n\n # D marks as deleted, goes to next message\n elif inp in (u'D',):\n dirty = 2 if mark_delete(idx) else 1\n msg_selector.move_down()\n idx = msg_selector.selection[0]\n READING = False\n\n # U undeletes, does not move.\n elif inp in (u'U',):\n dirty = 2 if mark_undelete(idx) else 1\n msg_selector.move_down()\n idx = msg_selector.selection[0]\n READING = False\n\n if READING:\n echo(msg_reader.process_keystroke(inp))\n # left, <, or backspace moves UI\n if inp in (term.KEY_LEFT, u'<', u'h',\n '\\b', term.KEY_BACKSPACE):\n READING = False\n dirty = 1\n else:\n echo(msg_selector.process_keystroke(inp))\n idx = msg_selector.selection[0]\n # right, >, or enter marks message read, moves UI\n if inp in (u'\\r', term.KEY_ENTER, u'>',\n u'l', 'L', term.KEY_RIGHT):\n dirty = 2 if mark_read(idx) else 1\n READING = True\n elif msg_selector.moved:\n dirty = 1\n 
echo(term.move(term.height, 0) + u'\\r\\n')\n return\n\n\n\ndef main(autoscan_tags=None):\n \"\"\" Main procedure. \"\"\"\n # pylint: disable=W0603,R0912\n # Using the global statement\n # Too many branches\n from x84.bbs import getsession, getterminal, echo, getch\n from x84.bbs import list_msgs\n session, term = getsession(), getterminal()\n session.activity = 'autoscan msgs'\n\n ### 尝试\n session.log.info(\"Hick3\")\n ### 获取所有 tag\n tagdb = DBProxy('tags')\n all_tags = sorted(tagdb.items())\n session.log.info(all_tags)\n ### 尝试直接调出显示的 message ,第二个参数应该是标识为未读的\n msg = new = [1, 2, 3, 4,5 ,6, 7, 8,9]\n read_messages(msg, new)\n return\n\n ### 首先是显示提示输入的 tag 的标签\n echo(banner())\n global ALREADY_READ, SEARCH_TAGS, DELETED\n if autoscan_tags is not None:\n SEARCH_TAGS = autoscan_tags\n echo(u''.join((\n term.bold_black('[ '),\n term.yellow('AUtOSCAN'),\n term.bold_black(' ]'), u'\\r\\n')))\n ### 默认就往这里了, 提示输入 tag\n else:\n # 默认 tag 为 public\n SEARCH_TAGS = set(['hick3'])\n # also throw in user groups, maybe the top 3 .. ?\n SEARCH_TAGS.update(session.user.groups)\n SEARCH_TAGS = prompt_tags(SEARCH_TAGS)\n # user escape\n if SEARCH_TAGS is None:\n return\n\n echo(u'\\r\\n\\r\\n%s%s ' % (\n term.bold_yellow('SCANNiNG'),\n term.bold_black(':'),))\n echo(u','.join([term.red(tag) for tag in SEARCH_TAGS]\n if 0 != len(SEARCH_TAGS) else ['', ]))\n\n ### 直到有选择 tags , 保存到 session.user 中\n if (SEARCH_TAGS != session.user.get('autoscan', None)):\n echo(u'\\r\\n\\r\\nSave tag list as autoscan on login [yn] ?\\b\\b')\n while True:\n inp = getch()\n if inp in (u'q', 'Q', unichr(27), u'n', u'N'):\n break\n elif inp in (u'y', u'Y'):\n session.user['autoscan'] = SEARCH_TAGS\n break\n\n # retrieve all matching messages,: list_msgs 根据 tags 获得所有记录,看情形这个信息量大了有问题哈\n all_msgs = list_msgs(SEARCH_TAGS)\n echo(u'\\r\\n\\r\\n%s messages.' % (term.yellow_reverse(str(len(all_msgs),))))\n if 0 == len(all_msgs):\n getch(0.5)\n return\n\n # filter messages public/private/group-tag/new\n ### 分 tag 和是否未读,删除等统计\n ALREADY_READ = session.user.get('readmsgs', set())\n DELETED = session.user.get('trash', set())\n msgs, new = msg_filter(all_msgs)\n if 0 == len(msgs) and 0 == len(new):\n getch(0.5)\n return\n\n # prompt read 'a'll, 'n'ew, or 'q'uit\n echo(u'\\r\\n REAd [%s]ll %d%s message%s [qa%s] ?\\b\\b' % (\n term.yellow_underline(u'a'),\n len(msgs), (\n u' or %d [%s]EW ' % (\n len(new), term.yellow_underline(u'n'),)\n if new else u''),\n u's' if 1 != len(msgs) else u'',\n u'n' if new else u'',))\n while True:\n inp = getch()\n if inp in (u'q', 'Q', unichr(27)):\n return\n elif inp in (u'n', u'N') and len(new):\n # read only new messages\n msgs = new\n break\n elif inp in (u'a', u'A'):\n break\n\n # 根君上面的用户选择,读取消息, 某次记录 log msgs 和 new 都是帖子 id 的 set \n # read target messages\n # session.log.info(msgs)\n # session.log.info(new)\n read_messages(msgs, new)\n","sub_path":"x84/default/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":19133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"458235698","text":"#! /usr/bin/env python3\n\n\"\"\"\nProof of concept for simple pubsub system with Redis bridge. 
this will be the messaging\nbackbone of the next remote control system\n\"\"\"\n\nimport os\nimport re\nimport time\nimport logging\nimport asyncio\nimport aioredis\nfrom collections import namedtuple\nfrom sshtunnel import SSHTunnelForwarder\n\nfrom aiotools import patch, wait_gracefully\nfrom yamaha import Yamaha\n\nMessage = namedtuple(\"Message\", \"source, key, value\")\n\nlogger = logging.getLogger(__name__)\n\n\ndef ts():\n return time.time()\n\n\nclass RegexPattern:\n \"\"\" regex patterns \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n self.regex = re.compile(pattern)\n\n def match(self, subject):\n return self.regex.fullmatch(subject)\n\n def __str__(self):\n return self.pattern\n\n\nclass DotPattern:\n \"\"\" dot-separated path-style patterns \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n\n def match(self, subject):\n if self.pattern.upper() == subject.upper() or self.pattern in (\"\", \".\"):\n return True\n\n prefix = self.pattern + \"\" if self.pattern[-1] == \".\" else \".\"\n return subject.upper().startswith(prefix.upper())\n\n def __str__(self):\n return self.pattern\n\n\nclass MessageBus:\n \"\"\" MessageBus interface \"\"\"\n\n async def send(self, message):\n pass\n\n async def listen(self, pattern):\n pass\n\n async def close(self):\n pass\n\n\nclass BasicMessageBus(MessageBus):\n \"\"\" Basic MessageBus implementation \"\"\"\n\n def __init__(self):\n super().__init__()\n self.conn = None\n self._channels = {}\n self.listeners = set()\n\n def set_channel(self, key, value):\n self._channels[key] = value\n\n def get_channels(self):\n return self._channels.items()\n\n async def connect(self, address=None):\n self.conn = self\n logger.info(f\"bus: connected ({address})\")\n\n async def send(self, message):\n if not self.conn:\n raise RuntimeError(\"bus not connected\")\n\n if message.key.endswith(\".\"):\n raise ValueError(\"trailing '.' in key\")\n\n self.set_channel(message.key, message.value)\n for pattern, q in self.listeners:\n if pattern.match(message.key):\n await q.put(message)\n\n async def listen(self, pattern):\n if not self.conn:\n raise RuntimeError(\"bus not connected\")\n\n p = DotPattern(pattern)\n q = asyncio.Queue()\n\n try:\n self.listeners.add((p, q))\n\n # yield current values\n for k, v in self.get_channels():\n if p.match(k):\n yield Message(\"local\", k, v)\n\n # yield the messages as they come through\n while True:\n msg = await q.get()\n if not msg:\n logger.info(f\"listener {pattern}: null message received. done.\")\n break\n yield msg\n except asyncio.CancelledError:\n logger.info(f\"listener {pattern}: cancelled\")\n finally:\n self.listeners.remove((p, q))\n\n async def status(self):\n if self.conn:\n return {\n \"status\": \"connected\",\n \"listeners\": [str(p) for p, _ in self.listeners],\n \"channels\": list(self.get_channels()),\n \"timestamp\": ts(),\n }\n else:\n return {\"status\": \"disconnected\"}\n\n async def close(self):\n \"\"\" send all listeners a null message and close the bus \"\"\"\n\n for p, q in self.listeners:\n await q.put(None)\n self.conn = None\n logger.info(f\"bus: connection closed\")\n\n\n# todo: should this be initialized with a mask to restrict the namespace?\n# todo: using patterns makes many of the status metrics unavailable. 
Should there just be a single channel?\nclass RedisMessageBus(MessageBus):\n \"\"\" A MessageBus implemented as a Redis pubsub channel \"\"\"\n\n def __init__(self, pattern):\n super().__init__()\n self.tunnel = None\n self.aredis = None\n self.pattern = pattern\n\n async def connect(self, tunnel_config):\n def create_tunnel():\n self.tunnel = SSHTunnelForwarder(**tunnel_config)\n self.tunnel.start()\n\n await asyncio.get_event_loop().run_in_executor(None, create_tunnel)\n\n address = self.tunnel.local_bind_address\n self.aredis = await aioredis.create_redis_pool(address, encoding=\"utf-8\")\n logger.info(f\"Redis connected: {self.aredis.address}\")\n\n def redis_pattern(self):\n return self.pattern + \"*\" if self.pattern.endswith('.') else self.pattern\n\n async def send(self, message):\n if not self.aredis:\n raise RuntimeError(\"Redis not connected\")\n\n if message.key.endswith(\".\"):\n raise ValueError(\"trailing '.' in key\")\n\n logger.info(f\"Redis send: {message}:{message.value}\")\n await self.aredis.publish(message.key, message.value)\n\n async def listen(self, pattern='*'):\n if not self.aredis:\n raise RuntimeError(\"Redis not connected\")\n\n try:\n chan, = await self.aredis.psubscribe(self.redis_pattern())\n while await chan.wait_message():\n k, v = await chan.get(encoding=\"utf-8\")\n yield Message(\"redis\", k.decode(), v)\n except Exception:\n raise\n\n async def status(self):\n if self.aredis:\n\n return {\n \"status\": \"connected\",\n \"patterns\": list(self.aredis.patterns.keys()),\n \"timestamp\": ts(),\n }\n else:\n return {\"status\": \"disconnected\"}\n\n async def close(self):\n self.aredis.close()\n await self.aredis.wait_closed()\n self.aredis = None\n\n # this is slow so run in a thread pool\n await asyncio.get_event_loop().run_in_executor(None, self.tunnel.stop)\n self.tunnel = None\n logger.info(f\"Redis connection closed\")\n\n\nclass RedisMessageBridge:\n \"\"\" bridge a local MessageBus with an external Redis pubsub channel \"\"\"\n\n def __init__(self, pattern, tunnel_config, bus):\n self.bus = bus\n self.aredis = None\n self.pattern = pattern\n self.tunnel_config = tunnel_config\n\n async def receiver(self):\n redis_pattern = self.pattern\n if redis_pattern.endswith('.'):\n redis_pattern += '*'\n\n try:\n chan, = await self.aredis.psubscribe(redis_pattern)\n while await chan.wait_message():\n k, v = await chan.get(encoding=\"utf-8\")\n logger.info(f\"bridge in {self.pattern}: message {k}: {v}\")\n await self.bus.send(Message(\"redis\", k.decode(), v))\n except asyncio.CancelledError:\n logger.info(f\"bridge in {self.pattern}: cancelled\")\n finally:\n await self.aredis.punsubscribe(redis_pattern)\n\n async def sender(self):\n \"\"\" route local messages to redis \"\"\"\n\n try:\n async for message in self.bus.listen(self.pattern):\n if message.source != \"redis\":\n logger.info(f\"bridge out {self.pattern}: {message}\")\n await self.aredis.publish(message.key, message.value)\n except asyncio.CancelledError:\n logger.info(f\"bridge out {self.pattern}: cancelled\")\n\n async def start(self):\n \"\"\" open tunnel to redis server and start sender/receiver tasks \"\"\"\n\n with SSHTunnelForwarder(**self.tunnel_config) as tunnel:\n address = tunnel.local_bind_address\n self.aredis = await aioredis.create_redis_pool(address, encoding=\"utf-8\")\n logger.info(f\"bridge connected: {self.aredis.address}\")\n\n try:\n await asyncio.gather(\n self.receiver(),\n self.sender(),\n )\n except asyncio.CancelledError:\n logger.info(f\"bridge start {self.pattern}: 
cancelled\")\n except Exception as e:\n logger.info(f'bridge start {self.pattern}: exception {e} {type(e)}')\n\n self.aredis.close()\n await self.aredis.wait_closed()\n\n\nclass YamahaComponent:\n \"\"\" bridge Yamaha YNCA component onto MessageBus \"\"\"\n\n def __init__(self, ynca_hostname, bus):\n self.bus = bus\n self.yamaha = Yamaha(ynca_hostname)\n\n # start up the listener\n self.listen_task = asyncio.create_task(self.listen())\n\n async def listen(self):\n \"\"\" listen for commands and relay them to the component \"\"\"\n try:\n async for message in self.bus.listen():\n print(f\"yamaha:\", message)\n except asyncio.CancelledError:\n pass\n\n async def close(self):\n \"\"\" Shut down the listener \"\"\"\n self.listen_task.cancel()\n self.listen_task = None\n\n logger.info(f\"yamaha: closed\")\n\n\nasync def main():\n \"\"\" main synchronous entry point \"\"\"\n\n async def talk(bus, keys):\n \"\"\" generate some test messages \"\"\"\n\n for v in range(5):\n for k in keys:\n await asyncio.sleep(0.35)\n await bus.send(Message(\"local\", k, v))\n\n async def listen(bus, pattern):\n await asyncio.sleep(1.5)\n try:\n async for message in bus.listen(pattern):\n print(f\"listen({pattern}):\", message)\n except asyncio.CancelledError:\n pass\n\n async def monitor():\n \"\"\" echo bus status every 2 sec \"\"\"\n\n count = 0\n try:\n while True:\n await asyncio.sleep(1)\n count += 1\n print(f\"monitor status: 0:{count:02}\", await ps.status())\n except asyncio.CancelledError:\n pass\n\n tunnel_config = {\n \"ssh_address_or_host\": (\"robnee.com\", 22),\n \"remote_bind_address\": (\"127.0.0.1\", 6379),\n \"local_bind_address\": (\"127.0.0.1\",),\n \"ssh_username\": \"rnee\",\n \"ssh_pkey\": os.path.expanduser(r\"~/.ssh/id_rsa\"),\n }\n\n ps = RedisMessageBus(\"cat.\")\n await ps.connect(tunnel_config)\n\n yam = YamahaComponent('CL-6EA47', ps)\n\n tasks = [asyncio.create_task(c) for c in (\n talk(ps, (\"cat.dog\", \"cat.pig\", \"cow.emu\")),\n listen(ps, \".\"),\n listen(ps, \"cat.\"),\n listen(ps, \"cat.pig\"),\n monitor(),\n )]\n\n await wait_gracefully(tasks, timeout=12)\n await yam.close()\n await ps.close()\n print(\"main: done\")\n\n\nif __name__ == \"__main__\":\n print(\"all: start\")\n patch()\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler()\n try:\n logger.addHandler(handler)\n asyncio.run(main(), debug=True)\n finally:\n logger.removeHandler(handler)\n print(\"all: done\")\n","sub_path":"poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":10829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"448892631","text":"# --------------\n# Importing header files\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\n#Reading file\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\n#Code starts here\r\n#print(\"\\nPath: \\n\\n\", path)\r\n#print(\"\\nData: \\n\\n\", data)\r\n#print(\"\\nType of data: \\n\\n\", type(data))\r\n\r\ncensus = np.concatenate((data, new_record))\r\nprint(\"\\nArray Size : \\n\\n\", len(census))\r\nprint(\"\\nCensus: \\n\\n\", census.shape)\r\n\r\n## Step 2 ## \r\nprint(\"\\nStep 2 starts here: \\n\\n\")\r\nage = census[:,0]\r\nprint(\"\\nAge: \\n\\n\", age)\r\nmax_age = age.max()\r\nmin_age = age.min()\r\nage_mean = round(age.mean(),2)\r\nage_std = round(age.std(),2)\r\nprint(\"\\nMax Age : \\n\\n\", max_age,\r\n \"\\nMinimum Age : \\n\\n\", min_age, \r\n \"\\nMean 
Age : \\n\\n\", age_mean,\r\n \"\\nStandard Age : \\n\\n\", age_std\r\n)\r\n\r\n## Step 3 ## \r\nprint(\"\\nStep 3 starts here: \\n\\n\")\r\n\r\nrace = census[:,2]\r\nprint(\"\\nRace : \\n\\n\", len(race))\r\n##race_0 = [ r for r in race if race[r] == 0 ] \r\n## race[race[:,0] == 0]\r\n#val0 = 0/val1 = 1/val2 = 2/val3 = 3/val4 = 4\r\n#race_x = [] \r\n#for val in range(len(race)): \r\n#race_0 = race[race[val] == val1]\r\n#race_x = np.append(race_x,race_0)\r\n\r\nrace_0 = [x for x in race if x == 0]\r\nrace_1 = [x for x in race if x == 1]\r\nrace_2 = [x for x in race if x == 2]\r\nrace_3 = [x for x in race if x == 3]\r\nrace_4 = [x for x in race if x == 4]\r\n\r\nlen_0 = len(race_0)\r\nlen_1 = len(race_1)\r\nlen_2 = len(race_2)\r\nlen_3 = len(race_3)\r\nlen_4 = len(race_4)\r\n\r\nprint(\"Length of race_0 : \", len_0, \"\\n\\n\",\r\n\"Length of race_1 : \", len_1, \"\\n\\n\",\r\n\"Length of race_2 : \", len_2, \"\\n\\n\",\r\n\"Length of race_3 : \", len_3, \"\\n\\n\",\r\n\"Length of race_4 : \", len_4\r\n)\r\n## print(\"\\race_0 : \\n\\n\", (race[val] == 0)) \r\nlength = np.array([len_0, len_1, len_2, len_3, len_4])\r\n## min_val = [x for x in length if x == np.min(length)]\r\nm_race = [x for x in range(len(length)) if length[x] == np.min(length)]\r\nminority_race = m_race[0]\r\nprint(\"Minority Race : \", minority_race)\r\n\r\n## Step 4 ## \r\n\r\nprint(\"\\nStep 4 starts here: \\n\\n\")\r\n\r\nsenior_citizens = [x for x in age if x > 60]\r\nsenior_citizens_index = [x for x in range(len(age)) if age[x] > 60]\r\n\r\nprint(\"Senior Citizens : \", senior_citizens)\r\nprint(\"Senior Citizens Index : \", senior_citizens_index)\r\nsenior_citizens_len = len(senior_citizens)\r\nprint(\"Total Senior Citizens : \", senior_citizens_len)\r\n\r\nworking_hours = census[:,6]\r\nprint(\"Working Hours : \", working_hours.dtype)\r\n\r\nprint(\"senior_citizens_index : \", working_hours[senior_citizens_index[0]])\r\n\r\n##sr_age_group = np.concatenate(age, working_hours)\r\n##print(\"sr_age_group : \", sr_age_group)\r\n\r\n## working_hours_sr1 = [x for x in senior_citizens_index if working_hours[x] != 0 ]\r\n## working_hours_sr = [x for x in working_hours if np.where() == senior_citizens_index[x] ]\r\n## print(\"working_hours lengh : \", len(working_hours_sr))\r\n## print(\"working_hours_sr : \", working_hours_sr)\r\n\r\n#working_hours_sr1 = []\r\nworking_hours_sr = []\r\n\r\nfor x in senior_citizens_index:\r\n working_hours_sr.append(working_hours[x]) \r\n\r\n# working_hours_sr1 = working_hours[senior_citizens_index[x]]\r\n# working_hours_sr = working_hours_sr.append(working_hours_sr1)\r\n\r\nprint(\"working_hours_sr : \", working_hours_sr)\r\n\r\nworking_hours_sum = np.sum(working_hours_sr)\r\nprint(\"working_hours_sum : \", working_hours_sum)\r\n\r\navg_working_hours = working_hours_sum / senior_citizens_len\r\nprint(\"avg_working_hours : \", round(avg_working_hours,2))\r\n\r\n## Step 5 ## \r\nprint(\"\\nStep 5 starts here: \\n\\n\")\r\n\r\neducation = census[:,1]\r\n\r\nhigh = [x for x in education if x > 10]\r\nhigh_index = [x for x in range(len(education)) if education[x] > 10]\r\n\r\nlow = [x for x in education if x <= 10 ]\r\nlow_index = [x for x in range(len(education)) if education[x] <= 10]\r\n\r\nincome = census[:,7]\r\n\r\nhigh_income = []\r\nlow_income = []\r\n\r\nfor x in high_index:\r\n high_income.append(income[x]) \r\n\r\nfor x in low_index:\r\n low_income.append(income[x]) \r\n\r\navg_pay_high = round(np.mean(high_income),2)\r\navg_pay_low = 
round(np.mean(low_income),2)\r\n\r\nprint(\"\\nAvg_pay_high : \", avg_pay_high)\r\nprint(\"\\nAvg_pay_low : \", avg_pay_low)\r\n\r\n\n\n\n","sub_path":"Array-Examples/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"7511236","text":"# the inclusion of the tests module is not meant to offer best practices for\n# testing in general, but rather to support the `find_packages` example in\n# setup.py that excludes installing the \"tests\" package\n\nimport unittest\n\nfrom hangman.hangman import print_hanged_man\n\n\nclass TestSimple(unittest.TestCase):\n\n\tdef test_print_hanged_man(self):\n\t\tfrom io import StringIO\n\n\t\tout = StringIO()\n\t\tprint_hanged_man(9,out)\n\t\toutput= out.getvalue()\n\t\tself.assertEqual(output, \"9 turns left\\n -------- \\n\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_simple.py","file_name":"test_simple.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"531097409","text":"import sys\n\nwith open (\"/Users/andreeagrosu/Desktop/python/anagram.txt\", \"r\") as file:\n for line in file:\n word = list(file)\n words = word.strip(\"\\n\")\n print(word)\n\nj = len(word)\n\"\"\"\nfor i in range(0,j):\n word[i]=list(word)\n print(word)\n\n\"\"\" ","sub_path":"Week1/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"632171019","text":"import pygame\nimport sys\nfrom pygame.draw import rect\nfrom pygame.locals import *\nfrom text_utils import SmartText, SmartSurface\nfrom debug_utils import draw_grid\nimport numpy as np\nimport random as rand\n\nfrom observer_system import Observer, Observable\nfrom game_systems import *\nfrom game_base import *\n\npygame.init()\npygame.display.set_caption(\"Slayin.py\")\n \n# Color Constants\nBLUE = pygame.Color(0, 100, 255)\nGREY = pygame.Color(100, 100, 100)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nSCREEN_WIDTH = 600\nSCREEN_HEIGHT = 400\n\nclass Border(Renderable):\n\n def __init__(self, rect, color, side, **kwargs):\n super().__init__(rect, color, **kwargs)\n self.side = side\n\n# change to only collidable in the future, but for now like this for debugging purposes\nclass Hitbox(Renderable):\n\n def __init__(self, rect, hitbox_type, **kwargs):\n super().__init__(rect, color=BLACK, name=\"hitbox\")\n # hitbox is on\n self.active = True\n self.hitbox_type = hitbox_type\n \n def handle_collision(self, entity):\n print(self.hitbox_type)\n if self.hitbox_type == HitBoxType.destroy:\n self.destroy(entity)\n if self.hitbox_type == HitBoxType.hurt:\n return self.hurt(entity)\n else:\n return self.heal(entity)\n pass\n\n def destroy(self, entity):\n print(f\"damaging {entity.id}\")\n entity.health -= 1\n \n def hurt(self, entity):\n pass\n\n def heal(self, entity):\n pass\n\n def clip_to_border(self, border):\n pass\n\n\n# class HitboxAction:\n\n# def __init__(self, game_manager)\n\n# \"Actor\" could probably be extracted. 
This seems more as a Player class.\n# additionally, a base \"Actor\" class could be used by the Enemy too\nclass Player(Actor):\n\n def __init__(self, size, color, **kwargs):\n super().__init__(size, color, **kwargs)\n self.gravity = 0\n self.actions = 1\n self.health = 10\n self.sword = Hitbox(Rect(self.rect.right, self.rect.centery, 20, 8), HitBoxType.destroy)\n # self.sword.rect.left = self.rect.right\n # self.sword.rect.centery = self.rect.centery\n\n def turn_right(self):\n if self.xvel < 0:\n self.xvel *= -1\n # hacky fix for this glitch\n # self.sword.rect.left = self.rect.right\n \n def turn_left(self):\n if self.xvel > 0:\n self.xvel *= -1\n # hacky fix for this glitch\n # self.sword.rect.right = self.rect.left\n\n def move(self):\n super().move()\n self.yvel += self.gravity\n # fix this to a different class that is composed of multiple hitboxes\n if self.xvel < 0:\n self.sword.rect.right = self.rect.left\n else:\n self.sword.rect.left = self.rect.right\n self.sword.rect.centery = self.rect.centery\n \n def action(self):\n if self.actions > 0:\n # should be genericized, but this works for now\n self.yvel = -15\n self.gravity = 2\n self.actions -= 1\n\n def clip_to_border(self, border):\n super().clip_to_border(border)\n # code repetition here of the if-statement, could be a better way to do this?\n if border.side == 'bottom':\n self.reset_actions()\n\n def reset_actions(self):\n self.actions = 1\n self.__ground()\n\n def __ground(self):\n self.yvel = 0\n self.gravity = 0\n\n# Consider refactoring Actor and Enemy -- they share a lot of similarities\nclass Enemy(Renderable):\n\n # hard-coded thing for moving frames\n ACTION_LENGTH_MINIMUM = 5\n ACTION_LENGTH_MAXIMUM = 40\n\n MOVE_SPEED = 3\n \n def __init__(self, size, color, **kwargs):\n self.surface = pygame.Surface(size)\n self.health = 1\n super().__init__(self.surface.get_rect(), color, **kwargs)\n # a.i sequence -- currently just moves randomly\n self.moving_frames = Enemy.ACTION_LENGTH_MINIMUM\n self.vel = Enemy.MOVE_SPEED\n\n def __initiate_action(self):\n # initiate the settings for this movement (duration, speed)\n self.moving_frames = self.__random_duration()\n self.vel = Enemy.MOVE_SPEED * self.__random_direction()\n\n def action(self):\n # if we've exhausted the last movement duration, generate a new one\n if self.moving_frames <= 0:\n self.__initiate_action()\n # do the movement\n self.rect.x += self.vel\n self.moving_frames -= 1\n\n # done to prevent enemy from going off screen\n def clip_to_border(self, border):\n if border.side == 'left':\n self.rect.left = border.rect.right\n elif border.side == 'right':\n self.rect.right = border.rect.left\n elif border.side == 'bottom':\n self.rect.bottom = border.rect.top\n\n def handle_collision(self, enemy):\n # destroy the enemy\n # could maybe use observer pattern for this?\n # on collision, broadcast the tag of the enemy killed\n # AI System, Collission System, and Render System all subscribe to the player\n # when broadcasted, lookup entity with that tag and remove it\n pass\n\n def __random_direction(self):\n return rand.randint(-1, 1)\n \n def __random_duration(self):\n return rand.randint(Enemy.ACTION_LENGTH_MINIMUM, Enemy.ACTION_LENGTH_MAXIMUM)\n\n\n# Initialize the clock used, and set the framerate cap to 30 FPS\nclock = pygame.time.Clock()\n\n# Initialize the screen\nSCREEN_SURFACE = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n# Initialize the background surface. 
This will also be used to erase our player\nBACKGROUND = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))\nBACKGROUND = BACKGROUND.convert()\nBACKGROUND.fill(WHITE)\n\nleft_border = Border(pygame.Rect((0, 0), (50, SCREEN_HEIGHT)), BLUE, \"left\") # Border that the player will watch for\nright_border = Border(pygame.Rect((SCREEN_WIDTH - 50, 0), (50, SCREEN_HEIGHT)), BLUE, \"right\") # Border that the player will watch for\nbottom_border = Border(pygame.Rect((0, SCREEN_HEIGHT - 100), (SCREEN_WIDTH, 100)), GREY, \"bottom\") # Border that the player will watch for\n\n# have player start in the bottom middle of the screen\nplayer = Player((25, 50), RED, name='player')\n# set to bottom initially, with speed of 2 (this should be moved to an instantiator somewhere)\nplayer.rect.midbottom = (SCREEN_WIDTH // 2, SCREEN_HEIGHT-100)\nplayer.xvel = 5\n\n# enemies\nenemies = []\nfor i in range(10):\n enemy = Enemy((30, 25), GREEN, name=f'enemy{i}')\n # set test enemy to bottom initially, with no speed\n enemy.rect.midbottom = (SCREEN_WIDTH // 2, SCREEN_HEIGHT-40)\n enemies.append(enemy)\n\n# create the enemy system\nai_system = AISystem(enemies)\ncollision_system = CollisionSystem([player, player.sword, *enemies], [left_border, right_border, bottom_border])\nrender_system = RenderSystem(SCREEN_SURFACE, BACKGROUND, [player, player.sword, bottom_border, left_border, right_border, *enemies])\n\n# blit the whole background to the screen\nSCREEN_SURFACE.blit(BACKGROUND, (0, 0))\n\nwhile True:\n for event in pygame.event.get(): \n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_a or event.key == pygame.K_LEFT:\n player.turn_left()\n elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:\n player.turn_right()\n elif event.key == pygame.K_SPACE:\n player.action()\n \n # keep the FPS at 30\n clock.tick(60)\n # erase the player from the screen\n # player.erase(SCREEN_SURFACE, BACKGROUND)\n render_system.handle_erases()\n # move the player to the new position (this should be put into a system soon)\n player.move()\n # move the enemy to the new position (this should be put into a system soon)\n ai_system.perform_actions()\n # handle collisions that have occurred\n collision_system.handle_collisions()\n # render everything\n render_system.handle_renders()\n # update the screen\n pygame.display.update()","sub_path":"title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"324799550","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n# only call upon this statement if need to install gym smake and other dependencies\n# get_ipython().system('pip install gym cmake gym[atari] scipy')\n\n\n# In[2]:\n\n\nimport gym\nenv = gym.make(\"Taxi-v2\").env\nenv.render()\n\n\n# In[3]:\n\n\nprint(\"Action Space {}\".format(env.action_space))\nprint(\"State Space {}\".format(env.observation_space))\n\n\n# In[4]:\n\n\nstate = env.encode(3, 1, 2, 0) # (taxi row, taxi column, passenger index, destination index)\nprint(\"State:\", state)\nenv.s = state\nenv.render()\n\n\n# In[5]:\n\n\n# Reward Table\nenv.P[328]\n\n\n# In[6]:\n\n\nenv.s = 328 # set environment to illustration's state\nepochs = 0\npenalties, reward = 0, 0\nframes = [] # for animation\ndone = False\nwhile not done:\n action = env.action_space.sample()\n state, reward, done, info = env.step(action)\n if reward 
== -10:\n penalties += 1\n # Put each rendered frame into dict for animation\n frames.append({\n 'frame': env.render(mode='ansi'),\n 'state': state,\n 'action': action,\n 'reward': reward\n }\n )\n epochs += 1\nprint(\"Timesteps taken: {}\".format(epochs))\nprint(\"Penalties incurred: {}\".format(penalties))\n\n\n# Without Learning, just random steps\n\n# In[7]:\n\n\nfrom IPython.display import clear_output\nfrom time import sleep\ndef print_frames(frames, time):\n for i, frame in enumerate(frames):\n clear_output(wait=True)\n# print(frame['frame'].getvalue())\n print(frame['frame'])\n print(f\"Timestep: {i + 1}\")\n print(f\"State: {frame['state']}\")\n print(f\"Action: {frame['action']}\")\n print(f\"Reward: {frame['reward']}\")\n sleep(time)\n\n\n# In[8]:\n\n\nprint_frames(frames, 0.01)\n\n\n# With Q Learning\n\n# In[9]:\n\n\nimport numpy as np\nq_table = np.zeros([env.observation_space.n, env.action_space.n])\n\n\n# In[10]:\n\n\n\"\"\"Training the agent\"\"\"\n\nimport random\nfrom IPython.display import clear_output\n\n# Hyperparameters\nalpha = 0.1\ngamma = 0.6\nepsilon = 0.1\n\n# For plotting metrics\nall_epochs = []\nall_penalties = []\n\nfor i in range(1, 100001):\n state = env.reset()\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n \n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space\n else:\n action = np.argmax(q_table[state]) # Exploit learned values\n\n next_state, reward, done, info = env.step(action) \n \n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n \n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n\n state = next_state\n epochs += 1\n \n if i % 100 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\nprint(\"Training finished.\\n\")\n\n\n# In[11]:\n\n\nq_table[328]\n\n\n# In[12]:\n\n\n\"\"\"Evaluate agent's performance after Q-learning\"\"\"\n\ntotal_epochs, total_penalties = 0, 0\nepisodes = 100\n\nfor _ in range(episodes):\n state = env.reset()\n epochs, penalties, reward = 0, 0, 0\n \n done = False\n \n while not done:\n action = np.argmax(q_table[state])\n state, reward, done, info = env.step(action)\n\n if reward == -10:\n penalties += 1\n\n epochs += 1\n\n total_penalties += penalties\n total_epochs += epochs\n\nprint(f\"Results after {episodes} episodes:\")\nprint(f\"Average timesteps per episode: {total_epochs / episodes}\")\nprint(f\"Average penalties per episode: {total_penalties / episodes}\")\n\n","sub_path":"Homework2/Code/HomeWork2-Taxi.py","file_name":"HomeWork2-Taxi.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"620987512","text":"from django import forms\n\nfrom localflavor.us.forms import USZipCodeField, USStateField\nfrom localflavor.us.us_states import STATE_CHOICES\nfrom timezones.zones import PRETTY_TIMEZONE_CHOICES\n\nfrom hikers.models import Hiker, HikerAddress, HikerDiaryEntry, HikerPhoto\n\n\nclass HikerRegistrationForm(forms.Form):\n\n timezone = forms.CharField(\n widget=forms.Select(choices=PRETTY_TIMEZONE_CHOICES),\n initial='America/Los_Angeles')\n zipcode = USZipCodeField(initial='97219')\n city = forms.CharField(max_length=50, initial='Portland')\n state = USStateField(widget=forms.Select(choices=STATE_CHOICES),\n initial='OR')\n\n def signup(self, request, user):\n hiker = Hiker.objects.create(hiker=user,\n 
timezone=self.cleaned_data['timezone'])\n HikerAddress.objects.create(hiker=hiker,\n zipcode=self.cleaned_data['zipcode'],\n city=self.cleaned_data['city'],\n state=self.cleaned_data['state'])\n\n\nclass HikerBasicInfoForm(forms.ModelForm):\n first_name = forms.CharField(max_length=30, required=False)\n last_name = forms.CharField(max_length=30, required=False)\n email = forms.EmailField(required=False)\n\n class Meta:\n model = Hiker\n fields = ('first_name', 'last_name', 'email',\n 'timezone', 'profile_pic')\n\n def __init__(self, *args, **kwargs):\n super(HikerBasicInfoForm, self).__init__(*args, **kwargs)\n if not self.instance.pk:\n raise ValueError('{} should not be called from a CreateView.'\n 'New Hiker instances should only be created from'\n 'the registration.'.format(self))\n else:\n hiker = self.instance.hiker\n self.initial['first_name'] = hiker.first_name\n self.initial['last_name'] = hiker.last_name\n self.initial['email'] = hiker.email\n\n def save(self, *args, **kwargs):\n hiker = self.instance.hiker\n hiker.first_name = self.cleaned_data['first_name']\n hiker.last_name = self.cleaned_data['last_name']\n hiker.email = self.cleaned_data['email']\n hiker.save()\n return super(HikerBasicInfoForm, self).save(*args, **kwargs)\n\n\nclass HikerStatsForm(forms.ModelForm):\n\n class Meta:\n model = Hiker\n fields = ('health_level', 'avg_walking_pace')\n\n def __init__(self, *args, **kwargs):\n super(HikerStatsForm, self).__init__(*args, **kwargs)\n self.fields['avg_walking_pace'].localize = True\n\n\nclass HikerAddressForm(forms.ModelForm):\n\n class Meta:\n model = HikerAddress\n fields = ('address_line1', 'address_line2',\n 'city', 'state', 'zipcode', 'cell_number')\n\n\nclass HikerDiaryForm(forms.ModelForm):\n\n class Meta:\n model = HikerDiaryEntry\n fields = ('title', 'diary_entry', 'hike', 'make_public')\n\n\nclass HikerPhotoForm(forms.ModelForm):\n\n class Meta:\n model = HikerPhoto\n fields = ('title', 'photo', 'hike', 'diary_entry', 'make_public')\n\n def __init__(self, *args, **kwargs):\n super(HikerPhotoForm, self).__init__(*args, **kwargs)\n self.fields['photo'].required = True\n","sub_path":"hikers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"341682478","text":"## pandas.DataFrame, SeriesとPython標準のリストを相互に変換\n\n# pandas.DataFrame, pandas.SeriesとPython標準のリスト型listは相互に変換できる\n\n#1. 
リストをpandas.DataFrame, pandas.Seriesに変換\n\n# それぞれのコンストラクタpandas.DataFrame(), pandas.Series()の引数に\n# リスト型のオブジェクトを渡すと、リストを元にpandas.DataFrame, pandas.Seriesが生成される。\n\n#%%\nimport pandas as pd\n\nl_1d = [0, 1, 2]\n\ns = pd.Series(l_1d)\n\nprint(s)\n# 0 0\n# 1 1\n# 2 2\n# dtype: int64\n#%%\ns = pd.Series(l_1d, index=['row1', 'row2', 'row3'])\n\nprint(s)\n# row1 0\n# row2 1\n# row3 2\n# dtype: int64\n\n# 二次元配列からpandas.DataFrameを生成する例。引数indexで行名(行ラベル)、引数columnsで列名(列ラベル)を指定することもできる。二次元配列からpandas.DataFrameを生成する例。\n# 引数indexで行名(行ラベル)、引数columnsで列名(列ラベル)を指定することもできる。\n#%%\nl_2d = [\n [0,1,2],\n [3,4,5]\n ]\n\ndf = pd.DataFrame(l_2d,index=['row1', 'row2'], columns=['col1', 'col2', 'col3'])\ndf\n\n# col1\tcol2\tcol3\n# row1\t0\t1\t2\n# row2\t3\t4\t5\n\n# ラベルと値がペアとなったリストからpandas.Seriesを生成する場合。\n# ラベルの配列と値の配列に分解し、それをpandas.Series()の引数に渡す。\n\n#%%\nl_1d_index = [\n ['Alice', 0],\n ['Bob', 1],\n ['Charlie', 2]]\n\nindex, value = zip(*l_1d_index)\nprint(index)\n# ('Alice', 'Bob', 'Charlie')\nprint(value)\n# (0, 1, 2)\n\ns_index = pd.Series(value, index=index)\n\nprint(s_index)\n# Alice 0\n# Bob 1\n# Charlie 2\n# dtype: int64\n\n# 同様に、ラベルと複数の値からなるリストからpandas.DataFrameを生成する場合。\n# 上述のpandas.Seriesのように配列を分解してもいいが、\n# 全体を読み込んでからset_index()メソッドでindex列を指定したほうが簡単。\n\n#%%\nl_2d_index = [['Alice', 0, 0.0], ['Bob', 1, 0.1], ['Charlie', 2, 0.2]]\n\ndf_index = pd.DataFrame(l_2d_index, columns=['name', 'val1', 'val2'])\ndf_index\n# name\tval1\tval2\n# 0\tAlice\t0\t0.0\n# 1\tBob\t1\t0.1\n# 2\tCharlie\t2\t0.2\n\ndf_index_set = df_index.set_index('name')\ndf_index_set\n\n# val1\tval2\n# name\t\t\n# Alice\t0\t0.0\n# Bob\t1\t0.1\n# Charlie\t2\t0.2\n\n# 列ごとにデータ型dtypeが異なる場合も\n# それぞれの列に最適なデータ型dtypeが自動で選ばれる。\n\n#%%\nprint(df_index_set.dtypes)\n# val1 int64\n# val2 float64\n# dtype: object\n\n#2. 
pandas.DataFrame, pandas.Seriesをリストに変換\n\n# pandas.DataFrame, pandas.Seriesをリスト型に直接変換するメソッドは無いため、\n# values属性で取得できるNumPy配列ndarrayを経由して、\n# ndarrayのtolist()メソッドでリストに変換する。\n\n#%%\ns = pd.Series([0, 1, 2])\n\nprint(s)\n# 0 0\n# 1 1\n# 2 2\n# dtype: int64\n\nl_1d = s.values.tolist()\nprint(l_1d)\n# [0, 1, 2]\n\n#%%\ndf = pd.DataFrame([[0, 1, 2], [3, 4, 5]])\ndf\n\n# 0\t1\t2\n# 0\t0\t1\t2\n# 1\t3\t4\t5\n\nl_2d = df.values.tolist()\nprint(l_2d)\n# [[0, 1, 2], [3, 4, 5]]\n\n# values属性ではラベル(行名、列名)があっても無視される。\n\n#%%\ns_index = pd.Series([0, 1, 2], index=['row1', 'row2', 'row3'])\nprint(s_index)\n# row1 0\n# row2 1\n# row3 2\n# dtype: int64\n\nl_1d = s_index.values.tolist()\nprint(l_1d)\n# [0, 1, 2]\n\n#%%\ndf_index = pd.DataFrame([[0, 1, 2], [3, 4, 5]],\n index=['row1', 'row2'],\n columns=['col1', 'col2', 'col3'])\n\nprint(df_index)\n# col1 col2 col3\n# row1 0 1 2\n# row2 3 4 5\n\nl_2d = df_index.values.tolist()\n\nprint(l_2d)\n# [[0, 1, 2], [3, 4, 5]]\n\n# ラベルもリストのデータとして残したい場合は、\n# reset_index()メソッドでindex列をリセットしてデータ列にする。\n\n#%%\nl_1d_index = s_index.reset_index().values.tolist()\nprint(l_1d_index)\n# [['row1', 0], ['row2', 1], ['row3', 2]]\n\n# 列名(列ラベル)をリセットするメソッドは無いため、pandas.DataFrameで行名も列名もリストのデータとして残したい場合は、reset_index()メソッドを適用したあと.\n# Tで転置して再度reset_index()メソッドを適用し、さらに.Tで元に戻す。\n\n#%%\nl_2d_index = df_index.reset_index().T.reset_index().T.values.tolist()\nprint(l_2d_index)\n# [['index', 'col1', 'col2', 'col3'], ['row1', 0, 1, 2], ['row2', 3, 4, 5]]\n\n# まとめ\n# ①DataFrameやSeriesを1次元リストや2次元リストにする場合には、直接はできないので、\n# valuesでとりだして、リストにする\n# ②2次元リストで、列名やインデクスもリストにしたい場合はちょっとめんどくさい\n\n\n","sub_path":"1日1件/リスト/pandas.DataFrame, SeriesとPython標準のリストを相互に変換.py","file_name":"pandas.DataFrame, SeriesとPython標準のリストを相互に変換.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"549044158","text":"#!/usr/bin python3\n# -*- coding: utf-8 -*-\n\n'''批量匹配照片特征点\n建议使用python -m 执行此脚本,跳过断言,提升执行速度\n\nExamples:\n----------\n匹配参考照片: python3 matches.py -kpdir features/orb8000 -imgdir images/A \n-o matches/A --filter homography\n\n匹配测试照片: python3 matches.py -kpdir features -imgdir images -o matches/T \n--filter homography --ifile ../gist_res.csv\n\nInfo:\n----------\n__require__: Python3.7及以上版本\n__author__: devecor\n'''\n\nimport argparse\nimport os\nimport time\n\nimport sys\nsys.path.append(r'../')\nimport rename as ra\n\ndef matches(kpfile1, kpfile2, img1, img2, Filter, output):\n start = time.time()\n start_cpu = time.thread_time()\n\n os.system('python3 check_match.py --kpfile1={kp1} --kpfile2={kp2} \\\n --{filter} --save --output {output} {img1} {img2}'.format(\n kp1=kpfile1, kp2=kpfile2, img1=img1, img2=img2, \n filter=Filter, output=output\n ))\n\n escape_cpu = time.thread_time() - start_cpu\n escape = time.time() - start\n print('match {img1} and {img2} used: {t} seconds'.format(img1=img1, \n img2=img2, t=escape))\n print('match {img1} and {img2} used cpu time: {t} seconds'.format(\n img1=img1, img2=img2, t=escape_cpu))\n return True\n\ndef get_kpfiles(imglist, kplist, config):\n '''组装参考点照片和参考点偏移照片的npz文件名\n Parameters\n -----------\n imglist : list\n findfile()的返回值\n config : argparse的实例或者对象\n 必须包含key_points_dir, images_dir两个属性\n '''\n kpf1 = []\n kpf2 = []\n img1 = []\n img2 = []\n args = config\n for i in imglist:\n image1 = i\n li = i.split('/')\n temp = list(li[-1])\n temp[0] = 'B'\n temp = ''.join(temp)\n li[-1] = temp\n image2 = '/'.join(li)\n\n img1.append(image1)\n img2.append(image2)\n\n kpfile1 = args.key_points_dir + 
\\\n image1[len(args.images_dir):len(image1)-4] + \\\n '{suffix}'.format(suffix=kplist[0][-8:])\n\n kpfile2 = args.key_points_dir + \\\n image2[len(args.images_dir):len(image2)-4] + \\\n '{suffix}'.format(suffix=kplist[0][-8:])\n \n kpf1.append(kpfile1)\n kpf2.append(kpfile2)\n return kpf1,kpf2,img1,img2\n\ndef csv2list(ifile,):\n import csv\n\n with open(ifile, newline='') as csvfile: \n reader = csv.reader(csvfile)\n li = []\n for i in reader:\n li.append(i)\n \n for i,ei in enumerate(li): \n for j,ej in enumerate(ei): \n temp = ej.split(',') \n temp[0] = temp[0][1:] \n temp[-1] = temp[-1][:-1] \n li[i][j] = temp\n \n li = [li[i] for i in range(len(li)) if i > 0]\n \n # 此段代码将str列表转换为float和int列表\n # 在元素级别上使用str()\n # 通过以上两步消掉了原str列表中的空格\n for i,ei in enumerate(li):\n for j,ej in enumerate(ei):\n for k,ek in enumerate(ej):\n if j > 1:\n li[i][j][k] = str(float(ek))\n elif j >= 0 and j <= 1:\n li[i][j][k] = str(int(ek))\n else:\n print('不可能的事情发生了,这是鬼故事!')\n\n return li\n\ndef mk_dir(dir):\n path = dir.split('/')\n for i,e in enumerate(path):\n if isinstance(e, str):\n os.system('mkdir {dir}'.format(dir='/'.join(path[0:i+1])))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='参考点照片的批量匹配')\n parser.add_argument('-kpdir', '--key_points_dir', \n default='features', help='特征点文件的路径')\n parser.add_argument('-imgdir', '--images_dir', \n default='./images', help='待匹配的照片目录')\n parser.add_argument('-o', '--output', default='matches', \n help='结果保存路径')\n parser.add_argument('--filter', choices=['homography','fundamental'], \n default='homography', help='选择过滤器')\n parser.add_argument('--ifile', default=None, help='如果指定了输入文件来\\\n限定匹配范围,则必须用于匹配测试照片')\n # parser.add_argument('-q', '--query', metavar='bool', choices=[True,False], \n # default=False, help='选择是否查询定位结果')\n args = parser.parse_args()\n\n mk_dir(args.output)\n\n if args.ifile == None:\n kplist = ra.findFile(args.key_points_dir, name='A*.npz')\n imglist = ra.findFile(args.images_dir, name='A*.jpg')\n\n begin = time.time()\n begin_cpu = time.thread_time()\n \n kpfile1,kpfile2,img1,img2 = get_kpfiles(imglist, kplist, args)\n for i in range(len(kpfile1)):\n \n assert kpfile1[i] in kplist, '凉凉'\n\n matches(kpfile1[i], kpfile2[i], img1[i], img2[i], args.filter, args.output)\n\n print('all used: {t} seconds'.format(t=time.time() - begin))\n print('all used cpu time: {t} seconds'.format(\n t=time.thread_time() - begin_cpu))\n elif args.ifile[-3:] == 'csv':\n \n li = csv2list(args.ifile)\n\n begin = time.time()\n begin_cpu = time.thread_time()\n \n for i in li:\n img1 = args.images_dir + '/T/T_' + '_'.join(i[0]) + '.jpg'\n img2 = args.images_dir + '/A/A_' + '_'.join(i[1]) + '.jpg'\n\n kpfile1 = args.key_points_dir + '/orb2000' + \\\n img1[len(args.images_dir)+2:len(img1)-4] + \\\n '{suffix}'.format(suffix='-orb.npz')\n\n kpfile2 = args.key_points_dir + '/orb8000' + \\\n img2[len(args.images_dir)+2:len(img2)-4] + \\\n '{suffix}'.format(suffix='-orb.npz')\n\n npzfiles = ra.findFile(args.key_points_dir, name='*.npz')\n assert kpfile1 in npzfiles and kpfile2 in npzfiles, '文件���有错误请检查!'\n \n matches(kpfile1, kpfile2, img1, img2, args.filter, args.output)\n \n print('all used: {t} seconds'.format(t=time.time() - begin))\n print('all used cpu time: {t} seconds'.format(\n t=time.thread_time() - begin_cpu))\n\n else:\n print('输入文件: {} 
有误!'.format(args.ifile))","sub_path":"src/single/matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"300462215","text":"#Para tributar un determinado impuesto se debe ser mayor a 18 años\n#tener unos ingresos superiores a 1000 soles mensuales\n#Programa que pregunte al usuario su edad y sus ingresos mensuales\n#Muestre en pantalla si el usuario tiene que tributar\n#Declaracion\nedad_usua=0\ningresos=0.0\nimport os\n\n#Input\nedad_usua=int(os.sys.argv[1])\ningresos=float(os.sys.argv[2])\n\n#Processing\n#sino es mayor de edad y su sueldo es menor de 1000\n#entonces no tiene que tributar\nif (edad_usua>18 and ingresos>=1000):\n print(\"Tienes que cotizar\")\nelse:\n print(\"No tienes que cotizar\")\n","sub_path":"Mendoza/Doble05.py","file_name":"Doble05.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250962870","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass DeliveryRule(Model):\n \"\"\"A rule that specifies a set of actions and conditions.\n\n All required parameters must be populated in order to send to Azure.\n\n :param order: Required. The order in which the rules are applied for the\n endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will\n be applied before a rule with a greater order. Rule with order 0 is a\n special rule. It does not require any condition and actions listed in it\n will always be applied.\n :type order: int\n :param actions: Required. 
A list of actions that are executed when all the\n conditions of a rule are satisfied.\n :type actions: list[~azure.mgmt.cdn.models.DeliveryRuleAction]\n :param conditions: A list of conditions that must be matched for the\n actions to be executed\n :type conditions: list[~azure.mgmt.cdn.models.DeliveryRuleCondition]\n \"\"\"\n\n _validation = {\n 'order': {'required': True},\n 'actions': {'required': True},\n }\n\n _attribute_map = {\n 'order': {'key': 'order', 'type': 'int'},\n 'actions': {'key': 'actions', 'type': '[DeliveryRuleAction]'},\n 'conditions': {'key': 'conditions', 'type': '[DeliveryRuleCondition]'},\n }\n\n def __init__(self, *, order: int, actions, conditions=None, **kwargs) -> None:\n super(DeliveryRule, self).__init__(**kwargs)\n self.order = order\n self.actions = actions\n self.conditions = conditions\n","sub_path":"azure-mgmt-cdn/azure/mgmt/cdn/models/delivery_rule_py3.py","file_name":"delivery_rule_py3.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"597416580","text":"import argparse\nimport re\nimport socket\nimport socketserver\nimport time\nimport threading\n\ndef communicate(host, port, request):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(request.encode())\n response = s.recv(1024)\n s.close()\n\n return response.decode()\n\ndef runner_checker(host, port):\n while True:\n time.sleep(1)\n try:\n response = communicate(host, port, \"ping\")\n if response != \"pong\":\n print(\"peer is removed\")\n break;\n else:\n print(\"peer is alive continue\")\n except socket.error as e:\n print(\"err %s\"%e)\n break\n \n\nclass ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n dead = False # Indicate to other threads that we are no longer running\n\n\nclass DispatcherHandler(socketserver.BaseRequestHandler):\n \"\"\"\n The RequestHandler class for our dispatcher.\n This will dispatch test runners against the incoming commit\n and handle their requests and test results\n \"\"\"\n\n command_re = re.compile(r\"(\\w+)*\")\n BUF_SIZE = 1024\n\n def handle(self):\n # self.request is the TCP socket connected to the client\n self.data = self.request.recv(self.BUF_SIZE).strip()\n command_groups = self.command_re.match(self.data.decode())\n if not command_groups:\n self.request.sendall(\"Invalid command\")\n return\n command = command_groups.group(1)\n if command == \"ping\":\n print(\"in ping\")\n resp = 'pong'\n self.request.sendall(resp.encode())\n\ndef serve(host, port):\n # Create the server\n socketserver.TCPServer.allow_reuse_address = True\n server = ThreadingTCPServer((host, port), DispatcherHandler)\n print('serving on %s:%s'% (args.host, int(args.port)))\n try:\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl+C or Cmd+C\n server.serve_forever()\n except (KeyboardInterrupt, Exception):\n # if any exception occurs, kill the thread\n server.dead = True\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument( \"mode\", choices=['server', 'client'], help=\"work mode server | client\") \n parser.add_argument(\"--host\",\n help=\"dispatcher's host, by default it uses localhost\",\n default=\"localhost\",\n action=\"store\")\n parser.add_argument(\"--port\",\n help=\"dispatcher's port, by default it uses 8888\",\n default=8888,\n action=\"store\")\n args = parser.parse_args()\n\n\n if args.mode == \"server\":\n serve(args.host, 
int(args.port))\n else:\n runner_heartbeat = threading.Thread(target=runner_checker, args=(args.host, int(args.port)))\n try:\n runner_heartbeat.start()\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl+C or Cmd+C\n except (KeyboardInterrupt, Exception):\n # if any exception occurs, kill the thread\n runner_heartbeat.join()\n\n\n \n","sub_path":"work/python/project/unit_test/tcp_communicate.py","file_name":"tcp_communicate.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"289964676","text":"from alice_scripts import Skill, request, say, suggest\n\nskill = Skill(__name__)\n\n\n@skill.script\ndef run_script():\n yield say('Загадайте число от 1 до 100, а я его отгадаю. Готовы?')\n lo, hi = 1, 100\n while lo < hi:\n middle = (lo + hi) // 2\n yield say(f'Ваше число больше {middle}?',\n suggest('Ну да', 'Вроде нет'))\n\n while not request.has_lemmas('да', 'ага', 'нет', 'не'):\n yield say('Я вас не поняла. Скажите \"да\" или \"нет\"')\n\n if request.has_lemmas('нет', 'не'):\n hi = middle\n else:\n lo = middle + 1\n\n yield say(f'Думаю, вы загадали число {lo}!', end_session=True)\n\n\n\n\n\n\"\"\" \n---------------КАК ЗАПУСТИТЬ-----------------\n1)Запустить flask-приложение, т.е. это приложение(тк Skill - это класс, наследующий flask.Flask)\n>>> set FLASK_APP=alice1.py (Сохраняя пробелы)\n>>> set FLASK_ENV=development\n>>> flask run --with-threads\n2)Запустить alice-nearby.exe (через cmd) \n2.1) Для этого нужно найти alice-nearby.exe в GOPATH/bin (папка, где хранятся скачанные пакеты с помощью go get\n2.2) ... /alice-nearby --webhook = http://localhost:5000/post --port=3456 (именно http!!!)\n2.3) Перейти по ссылке localhost:3456\nПуть[который /post] можно поменять в исходниках Skill (как и во Flask)\nlocalhost:5000 \n3)Запускаем ngrok.exe\n3.1)Командой ngrok.exe http {название порта} создаём ссылку\n3.2)Берём ссылку с https и вставлем в webhook на яндекс диалогах с припиской /post\nDONE\n\"\"\"\n","sub_path":"alice1.py","file_name":"alice1.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"277991724","text":"\nimport os\nimport re\nimport copy\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\n\n# from spmf import Spmf\nfrom functools import reduce\nfrom itertools import groupby\nfrom nltk.corpus import stopwords\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom TRASE_v2 import *\nfrom pygapbide import *\n\ndef safe_div(a,b):\n if a == 0 or b == 0:\n return 0\n else:\n return a/b\n\n'''\n =================================================================\n Main Program\n =================================================================\n'''\n\n\narchive_url = os.getcwd() + '/components'\napps = os.listdir(archive_url)\n# apps = ['synthetic']\n\nthreshold = 0.05\nmin_sup = 0.3\n\nall_result = []\n\nfor min_len in [1,2,3,4,5]:\n for max_gap in [1,2,3,4,5]:\n\n # apps = ['chensi', 'ogden', 'abhi', 'colornote']\n apps = ['chensi', 'ogden', 'abhi']\n for app in apps:\n if app.startswith(','):\n continue\n\n app_folder = '%s/%s' % (archive_url, app)\n traces = sorted(os.listdir(app_folder))\n\n data = []\n trace_idx = 0\n unique_components = set()\n for trace in traces:\n if trace.startswith('.'):\n continue\n\n components = pickle.load(open('%s/%s' % (app_folder, trace), 
\"rb\"))\n # print('Number of Raw Preliminary Phrases: %d' % len(components))\n\n # Remove consecutive duplicate items\n components = [i[0] for i in groupby(components)]\n # print('Number of Cleaned Preliminary Phrases: %d' % len(components))\n\n for component in components:\n unique_components.add(frozenset(component))\n data.append(components)\n unique_components = [set(x) for x in unique_components]\n # unique_components = list(unique_components)\n\n # Clustering IDs\n dist_mat = compute_distance_matrix(unique_components)\n model = AgglomerativeClustering(n_clusters=None, affinity='precomputed', linkage='complete', distance_threshold=threshold)\n clustering = model.fit(dist_mat)\n (unique, counts) = np.unique(clustering.labels_, return_counts=True)\n unique = unique[counts > 1]\n counts = counts[counts > 1]\n for label in unique:\n # Find intersection within cluster\n indices = np.where(clustering.labels_ == label)[0]\n cluster_instances = [unique_components[i] for i in indices]\n cluster_head = set.intersection(*cluster_instances)\n # Update data with the intersection\n for trace in data:\n for i in range(len(trace)):\n if trace[i] in cluster_instances:\n trace[i] = cluster_head\n\n sequence_db = []\n for trace in data:\n sequence = []\n for i in range(len(trace)):\n # Convert the trace only preserve the first occuring phase\n if trace[i] not in sequence:\n sequence.append(trace[i])\n # sequence.append(trace[i])\n sequence_db.append(sequence)\n\n phase_sizes = []\n for seq in sequence_db:\n temp = []\n for phase in seq:\n temp.append(len(phase))\n phase_sizes.append(np.mean(temp))\n\n min_size = np.round(min_len * np.mean(phase_sizes))\n\n # Construct groundtruth and sequence_db\n remove_indices = []\n if app == 'chensi':\n labels = {\n 'add': [1,2,5,7],\n 'edit': [3,6],\n 'delete': [4,6,9]}\n remove_indices += [32,33]\n elif app == 'ogden':\n labels = {\n 'add': [7,22,23],\n 'edit': [8,24,25],\n 'delete': [9,26,27]}\n remove_indices += [1,2,3,4,5,6]\n elif app == 'abhi':\n labels = {\n 'add': [3,41,42],\n 'edit': [4,43,44],\n 'delete': [5,45,46]}\n remove_indices += [1,2,39,40]\n elif app == 'colornote':\n labels = {\n 'add': [0,1,2,3,4,5],\n 'edit': [3,4,5],\n 'delete': [6,7,8]}\n\n groundtruth = {}\n for label in labels:\n indices = labels[label]\n groundtruth[label] = []\n for idx in indices:\n remove_indices.append(idx)\n groundtruth[label].append(sequence_db[idx-1])\n remove_indices = sorted(remove_indices)[::-1]\n for idx in remove_indices:\n del sequence_db[idx-1]\n\n '''\n =================================================================\n Algorithm Begin\n =================================================================\n '''\n\n # starting time\n start = time.time()\n\n id_list, Z = TRASE(sequence_db, min_sup, min_size, max_gap, print_status=False)\n\n # # TEMP\n # temp = []\n # for phase in sequence_db[2]:\n # temp.append(id_list.ids.index(phase))\n # print(temp)\n # # END TEMP\n\n # end time\n end = time.time()\n TRASE_time = end - start\n\n # print('\\nRuntime of TRASE is %.2fs' % TRASE_time)\n\n # print('Number of Raw Patterns: %d' % len(Z))\n\n for i in range(len(Z) - 1, 0, -1):\n for j in range(len(Z)):\n if i == j:\n continue\n # Z[i] and Z[j] have the same support and Z[i] is a subsequence of Z[j]\n if (Z[i].support == Z[j].support) & (is_subsequence(Z[i].pattern, Z[j].pattern)):\n # print('Z[%d] is a subsequence of Z[%d]: (%s) and (%s)' % (i, j, Z[i].pattern, Z[j].pattern))\n # Delete Z[i]\n del Z[i]\n break\n\n # print('Number of Closed Patterns: %d' % len(Z))\n\n 
Z = np.array(sorted(Z, key=lambda pattern: pattern.support, reverse=True))\n\n # Prnt closed patterns\n # for pattern in Z:\n # print('%.2f\\t%.100s' % (pattern.support, pattern.pattern))\n\n # Vertex list contains the weight of the phase\n # Edge list contains relationship among phases if two phases are overlapped\n vertex_list = []\n edge_list = []\n for i in range(len(Z)):\n # Weight is defined as the number_of_method * support_of_phase\n vertex_list.append(Z[i].support * sum([len(id_list.ids[x]) for x in Z[i].pattern]))\n for j in range(i + 1, len(Z)):\n if is_intersect(Z[i], Z[j]):\n # print('%d and %d are overlapped' % (i, j))\n edge_list.append((i, j))\n edge_list.append((j, i))\n vertex_list = np.array(vertex_list)\n edge_list = np.array(edge_list)\n\n adjacency_list = [[] for __ in vertex_list]\n for edge in edge_list:\n adjacency_list[edge[0]].append(edge[1])\n adjacency_list = np.array(adjacency_list, dtype=object)\n\n subgraphs = generateSubgraphs(vertex_list, adjacency_list)\n\n solution = np.zeros(len(vertex_list), dtype=bool)\n for subgraph in subgraphs:\n vl = np.array(copy.deepcopy(vertex_list[subgraph]))\n al = np.array(copy.deepcopy(adjacency_list[subgraph]))\n for i in range(len(al)):\n for j in range(len(al[i])):\n al[i][j] = np.where(subgraph == al[i][j])[0][0]\n OPT_X = MWIS(vl, al)\n solution[subgraph] = OPT_X\n\n patterns = Z[solution]\n\n # Print Pattern Result\n # for pattern in patterns:\n # print('%.2f\\t%s' % (pattern.support, pattern.pattern))\n\n # Generate Human Readable labels for each class prediction\n\n # Read method list and group names by patterns\n methods_df = pd.read_csv('method list/%s.csv' % app, header=None, names=('index', 'method_id'))\n methods = []\n for i, row in methods_df.iterrows():\n words = []\n # Remove text in bracket\n string = re.sub(r'\\([^)]*\\)', '', row.method_id)\n # Get method name and activity/fragment name only\n terms = re.findall(r\"[\\w']+\", string)[-2:]\n for term in terms:\n words += [x.lower() for x in re.findall('.[^A-Z]*', term)]\n methods.append(words)\n\n # Build bag of word for each pattern\n pattern_methods = []\n for pattern in patterns:\n documents = []\n for pid in pattern.pattern:\n for method_ids in id_list.ids[pid]:\n documents.append(' '.join(methods[method_ids]))\n pattern_methods.append(documents)\n\n gt_dict = {}\n pt_dict = {}\n pt_methods = {}\n pt_count = {}\n\n # Construct gt_dict\n for label in labels:\n gt_patterns = []\n # Phase Level\n for gt_pattern in groundtruth[label]:\n for phase in gt_pattern:\n try:\n gt_patterns.append(id_list.ids.index(phase))\n except Exception as e:\n id_list.add_phase(phase)\n gt_patterns.append(id_list.ids.index(phase))\n gt_dict[label] = set(gt_patterns)\n # Method Level\n # for gt_pattern in groundtruth[label]:\n # gt_patterns.append(set.union(*gt_pattern))\n # gt_dict[label] = set.intersection(*gt_patterns)\n\n # Find utility methods\n common_methods = set.intersection(* [gt_dict[label] for label in gt_dict])\n for label in labels:\n gt_dict[label] = gt_dict[label].difference(common_methods)\n gt_dict['utility'] = common_methods\n labels['utility'] = []\n\n # Find utility methods among add and edit\n # common_methods = gt_dict['add'].intersection(gt_dict['edit'])\n # gt_dict['add'] = gt_dict['add'].difference(common_methods)\n # gt_dict['edit'] = gt_dict['edit'].difference(common_methods)\n # gt_dict['add_edit'] = common_methods\n # labels['add_edit'] = []\n\n for label in labels:\n pt_dict[label] = set()\n pt_methods[label] = []\n pt_count[label] = 0\n\n for i 
in range(len(patterns)):\n\n # Phase Level\n ml_pattern = set(patterns[i].pattern)\n\n # Method Level\n # ml_pattern = []\n # for pid in patterns[i].pattern:\n # ml_pattern.append(id_list.ids[pid])\n # ml_pattern = set().union(*ml_pattern)\n\n # Find the best matching groundtruth\n accuracy = dict()\n for label in labels:\n accuracy[label] = len(gt_dict[label].intersection(ml_pattern)) / len(ml_pattern)\n\n prediction = max(accuracy, key=accuracy.get)\n pt_dict[prediction] = pt_dict[prediction].union(ml_pattern)\n pt_count[prediction] += 1\n\n vectorizer = TfidfVectorizer(use_idf=True)\n tfIdf = vectorizer.fit_transform(pattern_methods[i])\n df = pd.DataFrame(tfIdf[0].T.todense(), index=vectorizer.get_feature_names(), columns=[\"TF-IDF\"])\n df = df.sort_values('TF-IDF', ascending=False)\n pt_methods[prediction].append(set(df.index[:5]))\n\n # Construct Confusion Matrix\n results = []\n for label in labels:\n\n # Phase Level\n pt_set = []\n gt_set = []\n for pid in pt_dict[label]:\n pt_set += id_list.ids[pid]\n for pid in gt_dict[label]:\n gt_set += id_list.ids[pid]\n pt_set = set(pt_set)\n gt_set = set(gt_set)\n\n # # Method Level\n # pt_set = pt_dict[label]\n # gt_set = gt_dict[label]\n\n TP = len(gt_set.intersection(pt_set))\n FP = len(pt_set.difference(gt_set))\n FN = len(gt_set.difference(pt_set))\n\n precision = safe_div(TP, (TP + FP)) * 100\n recall = safe_div(TP, (TP + FN)) * 100\n f1 = 2 * safe_div((precision * recall), (precision + recall))\n\n results.append((label, pt_count[label], precision, recall, f1))\n result_df = pd.DataFrame(results, columns=['class', 'count', 'precision', 'recall', 'f1'])\n\n # Aggregated Result\n RMSE = np.sqrt(np.sum(np.square(result_df['count'] - 1)) / len(result_df))\n avg_prec = np.mean(result_df['precision'])\n avg_recall = np.mean(result_df['recall'])\n avg_f1 = np.mean(result_df['f1'])\n print('RMSE: %.2f\\tPrec: %.2f\\tRecall: %.2f\\tF1: %.2f' % (RMSE, avg_prec, avg_recall, avg_f1))\n\n all_result.append((app, min_len, max_gap, RMSE, avg_prec, avg_recall, avg_f1))\n\n # print(result_df)\n # print(pt_methods)\n\n # # Print keywords from groundtruth\n # gt_methods = {}\n # gt_keywords = {}\n # for label in gt_dict:\n # gt_methods[label] = []\n # for mid in gt_dict[label]:\n # gt_methods[label].append(methods[mid])\n #\n # vectorizer = TfidfVectorizer(use_idf=True)\n # tfIdf = vectorizer.fit_transform(gt_methods[label])\n # df = pd.DataFrame(tfIdf[0].T.todense(), index=vectorizer.get_feature_names(), columns=[\"TF-IDF\"])\n # df = df.sort_values('TF-IDF', ascending=False)\n # gt_keywords[label].append(set(df.index[:5]))\n # print(gt_keywords)\n\n # '''\n # Testing Simple Approach With VMSP\n # '''\n # # Represent Data by ID-List IDs\n # temp_data = []\n # for trace in sequence_db:\n # temp_trace = []\n # for event in trace:\n # temp_trace.append(id_list.ids.index(event))\n # temp_data.append(temp_trace)\n #\n # # Write data to file\n # # f = open(\"%s.txt\" % app, \"w+\")\n # # for trace in temp_data:\n # # for i in range(len(trace)):\n # # f.write('%d -1 ' % trace[i])\n # # f.write('-2\\r\\n')\n # # f.close()\n #\n # start = time.time()\n #\n # # All Sequential Patterns\n # # spmf = Spmf(\"SPAM\", input_filename=\"%s.txt\" % app, output_filename=\"output.txt\", arguments=[0.6, 5, 500, max_gap, False])\n # # Closed Sequential Patterns\n # sdb = []\n # for trace in temp_data:\n # s = []\n # for i in range(len(trace)):\n # s.append(trace[i])\n # sdb.append(s)\n # gb = Gapbide(sdb, int(min_support * id_list.n_traces), 0, max_gap-1)\n # temp = 
gb.run()\n # # Maximal Sequential Patterns\n # # spmf = Spmf(\"VMSP\", input_filename=\"%s.txt\" % app, output_filename=\"output.txt\", arguments=['%d%%' % (min_support * 100), 500, max_gap, False])\n # # spmf.run()\n # # print(spmf.to_pandas_dataframe(pickle=True))\n #\n # end = time.time()\n # GB_time = end - start\n #\n # print('\\nRuntime of Gap-Bide is %.2fs' % GB_time)\n #\n # '''\n # End Testing Simple Approach With VMSP\n # '''\n\n\nall_result = pd.DataFrame(all_result, columns=['app', 'min_len', 'max_gap', 'RMSE', 'PREC', 'RECALL', 'F1'])\npd.set_option('display.max_rows', 80)\nprint(all_result.groupby(['app', 'min_len', 'max_gap']).agg(['mean']))\n\n# # Print Pattern Result\n# for p in patterns:\n# print('%.2f\\t%s' % (p[2], p[0]))\n# print('Positions:')\n# for i in range(len(p[1])):\n# print(' Trace %d: %s' % (i, p[1][i]))\n#\n# import random\n# sorted(random.sample(range(1,50), 6))\n","sub_path":"code/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":17062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"80705629","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/7/30 17:22\n# @Author : xmskf\n# @Email : 84887867@qq.com\n# @File : Iterable_test.py\n# @Software: PyCharm\n\n#迭代\n\n# 如何判断一个对象是可迭代对象呢?方法是通过collections模块的Iterable类型判断:\nfrom collections import Iterable\n\nprint(isinstance('abc', Iterable))\nprint(isinstance(['1,2,3'], Iterable))\nprint(isinstance(123, Iterable))\n\n# 如果要对list实现类似Java那样的下标循环怎么办?\n# Python内置的enumerate函数可以把一个list变成索引-元素对,\n# 这样就可以在for循环中同时迭代索引和元素本身:\nfor i, value in enumerate(['A', 'B', 'C']):\n print(i, value)\n\nprint('-------------------我是分隔符------------------------')\n\nfor x, y in [(1, 1), (2, 4), (3, 9)]:\n print(x, y)\n\n\n# 练习\n# 请使用迭代查找一个list中最小和最大值,并返回一个tuple:\ndef findMinAndMax(L):\n if (L != []):\n min = L[0]\n max = L[0]\n for x in L:\n if x < min:\n min = x\n if x > max:\n max = x\n return (min, max)\n else:\n return (None, None)\n\n\n# 测试\nif findMinAndMax([]) != (None, None):\n print('测试失败!')\nelif findMinAndMax([7]) != (7, 7):\n print('测试失败!')\nelif findMinAndMax([7, 1]) != (1, 7):\n print('测试失败!')\nelif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):\n print('测试失败!')\nelse:\n print('测试成功!')\n","sub_path":"src/Iterable_test.py","file_name":"Iterable_test.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"554060581","text":"#coding=utf-8\r\nimport xml.dom.minidom\r\nimport os\r\n\r\n# xml文档路径\r\nfile_path = os.getcwd()+'\\\\test_xml.xml'\r\n\r\n\r\nclass ReadXml():\r\n def __init__(self, file):\r\n self.dom = xml.dom.minidom.parse(file)\r\n self.root = self.dom.documentElement\r\n\r\n# -------------------- 获取属性的值 ------------------------\r\n def get_attr_value(self, t_name, index, attrs):\r\n tn = self.root.getElementsByTagName(t_name)\r\n t_ab = tn[index].getAttribute(attrs)\r\n return t_ab\r\n\r\n# ----------------- 获取标签之间的text文本 -----------------\r\n def get_tag_text(self, t_name, index):\r\n tn = self.root .getElementsByTagName(t_name)\r\n t_t = tn[index].firstChild.data\r\n return t_t\r\n\r\nif __name__ == '__main__':\r\n tt = ReadXml(file_path)\r\n a = tt.get_attr_value('login', 0, 'username')\r\n print(a)\r\n b = tt.get_tag_text('caption', 1)\r\n 
print(b)\r\n\r\n\r\n","sub_path":"shop_api/common/read_xml.py","file_name":"read_xml.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49008436","text":"# # heapq.nlargest, corrent but TLE\n# class KthLargest:\n\n# def __init__(self, k: int, nums: List[int]):\n# self.k = k\n# self.heap=nums\n# self.size=len(self.heap)\n# def add(self, val: int) -> int:\n# self.heap.append(val)\n# if self.k<=len(self.heap):\n# return heapq.nlargest(self.k,self.heap)[-1]\n\n\n# # Handmade Smallest Heap\n# class KthLargest:\n\n# def Swap(self,a,b):\n# tmp=a\n# a=b\n# b=tmp\n# return a,b\n# def shift_down(self,heap):\n# i=0\n# t=0\n# flag=True\n# while i*2+1heap[i*2+1]:\n# flag=True\n# t=i*2+1\n# if t+1heap[t+1]:\n# t=t+1\n# elif i*2+2heap[i*2+2]:\n# flag=True\n# t=i*2+2\n# if flag:\n# heap[i],heap[t]=self.Swap(heap[i],heap[t])\n# i=t\n# def shift_up(self,i,heap):\n# i-=1\n# while i!=0 and heap[i]self.nums[0]:\n# self.nums[0]=nums[i]\n# self.shift_down(self.nums)\n\n# def add(self, val: int) -> int:\n# if len(self.nums)self.heap[0]:\n heapq.heapreplace(self.heap,nums[i])\n\n def add(self, val: int) -> int:\n if len(self.heap)25000 in real sims)\nprint(f'Length of the saved trajectory: {Nframes}')\nblock = 0 # starting block \n\n# test some properties \n# assertions for easy managing code below \nassert (Nframes % restartSimulationEveryBlocks) == 0 \nassert (restartSimulationEveryBlocks % saveEveryBlocks) == 0\n\nsavesPerSim = restartSimulationEveryBlocks // saveEveryBlocks\nsimInitsTotal = (Nframes) // restartSimulationEveryBlocks\n# concatinate monomers if needed\nif len(oneChainMonomerTypes) != N:\n monomerTypes = np.tile(oneChainMonomerTypes, num_chains)\nelse:\n monomerTypes = oneChainMonomerTypes\n \nN_chain = len(oneChainMonomerTypes) \nN = len(monomerTypes)\nprint(f'N_chain: {N_chain}') # ~8000 in a real sim\nprint(f'N: {N}') # ~40000 in a real sim\nN_traj = trajectory_file.attrs[\"N\"]\nprint(f'N_traj: {N_traj}')\nassert N == trajectory_file.attrs[\"N\"]\nprint(f'Nframes: {Nframes}')\nprint(f'simInitsTotal: {simInitsTotal}')\n\n#==============================================================#\n# RUN 3D simulation using 1D LEF constraints #\n#==============================================================#\nmilker = bondUpdater(LEFpositions)\ndata = grow_cubic(N,int((N/(density*1.2))**0.333)) # starting conformation\nreporter = HDF5Reporter(folder=saveFolder, max_data_length=50)\nchains = [(N_chain*(k),N_chain*(k+1),0) for k in range(num_chains)]\n\nfor iteration in range(simInitsTotal):\n a = Simulation(N=N, \n error_tol=0.01, \n collision_rate=0.01, \n integrator =\"variableLangevin\", \n platform=\"cuda\",\n GPU = GPU_ID, \n PBCbox=False, \n reporters=[reporter],\n precision=\"mixed\") \n a.set_data(data)\n a.add_force(\n polychrom.forcekits.polymer_chains(\n a,\n chains=chains,\n nonbonded_force_func=polychrom.forces.heteropolymer_SSW,\n nonbonded_force_kwargs={\n 'repulsionEnergy': repulsionEnergy, # base repulsion energy for all monomers (function default is 3.0)\n 'attractionEnergy': 0, # base attraction energy for all monomers (function default is 3.0)\n 'attractionRadius': attraction_radius,\n 'interactionMatrix': interactionMatrix,\n 'monomerTypes': monomerTypes,\n 'extraHardParticlesIdxs': []\n },\n bond_force_kwargs={\n 'bondLength': 1,\n 'bondWiggleDistance': 0.05\n },\n angle_force_kwargs={\n 'k': 0.5 \n }\n )\n )\n a.add_force(polychrom.forces.spherical_confinement(a,density=density))\n # 
a.add_force(polychrom.forces.tether_particles(a,[0,N-1],positions=end_tethers,k=30)) # tether ends of polymer)\n \n # ------------ initializing milker; adding bonds ---------\n kbond = a.kbondScalingFactor / (smcBondWiggleDist ** 2)\n bondDist = smcBondDist * a.length_scale\n activeParams = {\"length\":bondDist,\"k\":kbond}\n inactiveParams = {\"length\":bondDist, \"k\":0}\n \n milker.setParams(activeParams, inactiveParams)\n # this step actually puts all bonds in and sets first bonds to be what they should be\n milker.setup(bondForce=a.force_dict['harmonic_bonds'],\n blocks=restartSimulationEveryBlocks)\n\n if iteration == 0:\n a.local_energy_minimization() \n else:\n a._apply_forces()\n \n for i in range(restartSimulationEveryBlocks): \n if i % saveEveryBlocks == (saveEveryBlocks - 1): \n a.do_block(steps=steps)\n else:\n a.integrator.step(steps) # do steps without getting the positions from the GPU (faster)\n if i < restartSimulationEveryBlocks - 1: \n curBonds, pastBonds = milker.step(a.context) # this updates bonds.\n data = a.get_data() # save data and step, and delete the simulation\n del a\n \n reporter.blocks_only = True # Write output hdf5-files only for blocks\n \n time.sleep(0.2) # wait 200ms for sanity (to let garbage collector do its magic)\n\nreporter.dump_data()","sub_path":"PolymerSims.py","file_name":"PolymerSims.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"552136673","text":"#!/usr/bin/python3\nfrom PyQt5 import QtWidgets, QtCore\nfrom sys import argv, exit\n\nfrom source.source import MainWindow\n\nif __name__ == \"__main__\":\n QtCore.QCoreApplication.setOrganizationName(\"ITS\")\n QtCore.QCoreApplication.setApplicationName(\"AMCS\")\n\n application = QtWidgets.QApplication(argv)\n window = MainWindow()\n window.show()\n exit(application.exec_())\n application.exit()\n","sub_path":"src/ground/laptop/AMCS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"228723475","text":"import os\nimport subprocess\nfrom collections import namedtuple\n\n\n__all__ = [\n 'abspath',\n 'update_repo',\n 'Update',\n]\n\n\nabspath = lambda *p: os.path.abspath(os.path.join(*p))\n\n\nUpdate = namedtuple('Update', 'popen cloned')\n\n\ndef update_repo(repo_path, uri, piped=False):\n if os.path.exists(repo_path):\n cmd = ['git', 'pull']\n cwd = repo_path\n cloned = False\n else:\n cmd = ['git', 'clone', uri, repo_path]\n cwd = None\n cloned = True\n kwargs = {}\n if piped:\n kwargs.update({'stderr': subprocess.PIPE, 'stdout': subprocess.PIPE})\n popen = subprocess.Popen(cmd, cwd=cwd, shell=False, **kwargs)\n popen.wait()\n return Update(popen, cloned)\n\n","sub_path":"vimper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"518929948","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nimport seaborn as sns\n\nsns.set()\n\ncolors = sns.color_palette().as_hex()\n\n\n# def my_xticks(y, pos):\n# if y <= 0:\n# return '$0$'\n# exponent = int(np.log10(y))\n# value = y / float(10 ** exponent);\n# return '${{ %1.1f \\mathrm{e} {%2d} }}$' % (value, exponent)\ndef my_xticks(x, pos):\n if x <= 0:\n return '$0$'\n value = x / int(10**6);\n return '${{ {%2d} }}$' % (value)\n\n\n# Name of output file to save 
the plot to\noutputFileName = 'response_layers_runtime_general_cache_opt.pdf'\n\n# Name of input files with performance data, etc.\ninputFileNames = [\n '../../../benchmarking_files/valid_benchmarking/2020_05_20_23_57_sebastian/compute_response_layer.csv',\n '../../../benchmarking_files/valid_benchmarking/2020_05_20_23_57_sebastian/compute_response_layers_precompute.csv',\n '../../../benchmarking_files/valid_benchmarking/2020_05_20_23_57_sebastian/compute_response_layers_blocking.csv',\n '../../../benchmarking_files/valid_benchmarking/2020_05_20_23_57_sebastian/compute_response_layers_at_once.csv',\n ]\n\n# Name of labels\nplotLabels = [\n '$\\mathtt{ base }$',\n #'$\\mathtt{ precomputations }$',\n '$\\mathtt{ general }$',\n '$\\mathtt{ blocking }$',\n '$\\mathtt{ at\\ once }$'\n]\n\nplotLabels.reverse()\n\n\n# Getting current axis\nax = plt.gca()\n\n# Initializing plot title\nplt.title('Response Layers - General and Cache Optimizations', x=-0.18, y=1.05, ha='left', fontsize=16,\n fontweight='bold')\n\n# Initializing plot axis labels\nplt.xlabel('[mio. cycles]', fontsize=13)\n#yl = plt.ylabel('Image Size 1024', fontsize=15, ha='left')\n#yl.set_rotation(0)\n#ax.yaxis.set_label_coords(-0.1, 1.01)\n\n# Initializing and setting axis ticks\nticks_y = [i for i, _ in enumerate(inputFileNames)]\n# ticks_y = [0, 25000000, 50000000, 75000000, 100000000] # laurin scale\nticks_x = [0, 50000000, 100000000, 150000000, 200000000] # sebastian scale\n\n# Setting y ticks and limits (min, max)\nplt.xticks(ticks_x)\nplt.xlim(ticks_x[0] - 1, ticks_x[len(ticks_x) - 1] + 1)\n\n# Setting axis ticks formatter\nax.xaxis.set_major_formatter(ticker.FuncFormatter(my_xticks))\n\n# Setting label size\nax.tick_params(axis='both', which='major', labelsize=12)\nbar_width = 0.6\n# Iterating through all input files and plotting each as a line\ncounter = 0\ninputFileNames.reverse()\nfor i in range(len(inputFileNames)-1, -1, -1):\n substrings = inputFileNames[i].split('.')\n\n # Getting function name from file name\n functionName = substrings[0]\n\n # Reading csv data from input file\n data = np.genfromtxt(inputFileNames[i], delimiter=',')\n\n # Getting width, height, number of interest points, average/min/max cycles, flops per cycles\n imageName = data[5, 0]\n width = data[5, 1]\n height = data[5, 2]\n num_interest_points = data[5, 3]\n num_flops = data[5, 4]\n avg_cycles = data[5, 5]\n min_cycles = data[5, 6]\n max_cycles = data[5, 7]\n flops_per_cycles = data[5, 8]\n\n print(\"Cycles:\")\n print(avg_cycles)\n\n # Plotting flops per cycles performance\n #plt.bar(i, avg_cycles, label=plotLabels[i])\n\n p0 = plt.barh(i, avg_cycles, bar_width, color=colors[counter])\n counter += 1\n\n#p0 = plt.barh(2, runtime_cycles[0], width, color=colors[0])\n#p1 = plt.barh(1, runtime_cycles[1], width, color=colors[3])\n#p1 = plt.barh(0, runtime_cycles[2], width, color=colors[2])\n\nplt.yticks(ticks_y, plotLabels)\n\n\nratio = 0.3\nxleft, xright = ax.get_xlim()\nybottom, ytop = ax.get_ylim()\nax.set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)\n\nplt.tight_layout()\n\nplt.gcf().subplots_adjust(left=0.2)\n\nplt.gcf().set_size_inches(7, 2.7)\n\n\n#plt.show()\n# Saving plot to file\nplt.savefig(outputFileName, dpi=300, figsize=(500,150))\n","sub_path":"plots/runtime plots/response layers/compute_response_layer_first_opts.py","file_name":"compute_response_layer_first_opts.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"121661822","text":"import sys\n\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk, Gio, GLib, Gdk\n\nfrom searchwindow import SearchApplicationWindow\nfrom accelerators import Accelerator\nfrom conf import conf\n\nAPP_MAIN_OPTIONS = {\"version\": (ord(\"v\"), GLib.OptionFlags.NONE, GLib.OptionArg.NONE, \"Application version\", None)}\n\nAPP_ACTION_DICT = {\"new_window\": (\"app.new_window\", (\"n\",),),\n \"quit_application\": (\"app.quit_application\", (\"q\",),),\n \"open_preferences\": (\"app.open_preferences\", (\"o\",),),\n \"about\": (\"app.about\", (\"a\",),)}\n\nclass SearchApplication(Gtk.Application):\n def __init__(self, *args, **kwargs):\n super().__init__(\n *args, \n application_id=conf.app_id, \n flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,\n **kwargs)\n\n self.new_window = False\n\n self.add_main_options()\n\n def do_startup(self):\n Gtk.Application.do_startup(self)\n\n APP_CALLBACK_DICT = {\"new_window\": self.new_window_callback,\n \"quit_application\": self.quit_callback,\n \"open_preferences\": self.open_preferences_callback,\n \"about\": self.about_callback}\n Accelerator.register_actions(self, APP_ACTION_DICT, APP_CALLBACK_DICT)\n\n def do_activate(self):\n self.new_window_callback(\"new_window\", None)\n\n def do_command_line(self, command_line):\n options = command_line.get_options_dict()\n options = options.end().unpack()\n\n if \"version\" in options:\n print(conf.app_version)\n\n self.activate()\n return 0\n\n def do_window_removed(self, widget):\n Gtk.Application.do_window_removed(self, widget)\n if not len(self.get_windows()):\n self.quit()\n\n def new_window_callback(self, action, parameter):\n window = SearchApplicationWindow(application=self)\n self.add_window(window)\n if not self.new_window:\n self.new_window = True\n window.present()\n\n def quit_callback(self, action, parameter):\n for window in self.get_windows():\n cancelled = window.emit(\n \"delete-event\", Gdk.Event.new(Gdk.EventType.DELETE))\n if cancelled:\n return\n window.destroy()\n self.quit()\n\n def open_preferences_callback(self, action, parameter):\n window = self.get_active_window()\n window.preferences_dialog.show()\n\n def about_callback(self, action, parameter):\n window = self.get_active_window()\n about_dialog = Gtk.AboutDialog(transient_for=window, \n modal=True)\n about_dialog.set_logo_icon_name(conf.logo_icon_name)\n about_dialog.set_program_name(conf.app_name)\n about_dialog.set_version(conf.app_version)\n about_dialog.set_website_label(conf.app_website_label)\n about_dialog.set_website(conf.app_website)\n about_dialog.set_copyright(conf.app_copyright)\n about_dialog.set_license_type(conf.app_license)\n about_dialog.present()\n\n def add_main_options(self):\n for key, values in APP_MAIN_OPTIONS.items():\n self.add_main_option(key, values[0], values[1], values[2], \n values[3], values[4])\n\nif __name__ == \"__main__\":\n searchapp = SearchApplication()\n searchapp.run(sys.argv)","sub_path":"source/searchapp.py","file_name":"searchapp.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"507570079","text":"from random import uniform\nfrom sys import argv\n\nMAX = 10\ninputname = \"in.csv\"\ncnnname = \"cnn.csv\"\n\nif len(argv) > 1: inputname = argv[1]\nif len(argv) > 2: cnnname = argv[2]\n\nwith open(\"input/\"+inputname, 'w') as f:\n num = 250\n f.write(str(num)+'\\n')\n for x in range(num):\n for i in range(100):\n row = 
[str(uniform(-MAX, MAX)) for n in range(100)]\n row = ','.join(row)+'\\n'\n f.write(row)\n f.write('\\n')\n\nwith open(\"input/\"+cnnname, 'w') as f:\n for i in range(10):\n row = [str(uniform(-MAX, MAX)) for n in range(25)]\n row = ','.join(row)+'\\n'\n f.write(row)\n\n f.write('\\n')\n for i in range(10):\n row = [str(uniform(-MAX, MAX)) for n in range(4000)]\n row = ','.join(row)+'\\n'\n f.write(row)\n","sub_path":"a3/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"218898256","text":"from .constants import Direction, TypePiece\nfrom .piece import Piece\nfrom .mouvement_unitaire import MouvementUnitaire\nfrom .position_damier import PositionsDamier\n\n\nclass Pion(Piece):\n \"\"\"\n La classe Pion représente un pion. Cette classe hérite la classe « Pièce ».\n \"\"\"\n @property\n def typePiece(self):\n return TypePiece.PION\n\n def __init__(self, couleur):\n \"\"\"\n Crée un pion, en initialisant sa couleur\n @param couleur\n couleur du pion (noir ou blanc)\n \"\"\"\n super(Pion, self).__init__(couleur)\n\n def getDestinationsPossiblesSansCapture(self):\n result = []\n # sans capture, un pion peut avancer d'une case sur la diagonale\n # le noir avance vers le bas, le blanc vers le haut\n directionsAvancement = Direction.avancement[self.couleur]\n for direction in directionsAvancement:\n positionVoisine = PositionsDamier.getPositionVoisine(\n self.position, direction)\n # s'il y a une position voisine\n if positionVoisine > 0:\n # et si elle est libre\n if self.damier.estPositionLibre(positionVoisine):\n # on deduit un mouvement unitaire sans capture (positionCapturee = -1)\n mu = MouvementUnitaire(self.position, positionVoisine, -1)\n # on rajoute le mouvement unitaire de la position de départ vers positionVoisine à la liste des\n # destinations possibles sans capture\n result.append(mu)\n return result\n\n def getDestinationsPossiblesAvecUneCapture(self, piecesCapturees):\n result = []\n # avec capture, un pion peut avancer/reculer d'une case sur la diagonale\n # dans toutes les directions\n # en sautant la pièce voisine adverse à capturer\n directionsAvancement = Direction.all\n for direction in directionsAvancement:\n positionVoisine = PositionsDamier.getPositionVoisine(\n self.position, direction)\n # si position voisine n'est pas sur le damier\n # on ne fait rien\n if positionVoisine <= 0:\n continue\n\n # si la pièce de la position voisine a déjà été sauté une fois (considérée\n # capturée)\n # on ne fait rien\n if positionVoisine in piecesCapturees:\n continue\n\n pieceVoisine = self.damier.getPiece(positionVoisine)\n # si pas de pièce voisine ou si la pièce voisine n'est pas une pièce adverse,\n # on ne fait rien\n if pieceVoisine is None or pieceVoisine.couleur == self.couleur:\n continue\n\n caseVoisineALaPieceVoisine = PositionsDamier.getPositionVoisine(\n positionVoisine, direction)\n # si la case voisine à la pièce voisine n'est pas sur le damier,\n # on ne fait rien\n if caseVoisineALaPieceVoisine <= 0:\n continue\n\n # si la case voisine à la pièce voisine est ocuppée,\n # on ne fait rien\n if self.damier.estPositionOccupee(caseVoisineALaPieceVoisine):\n continue\n\n # on deduit un mouvement unitaire avec capture\n mu = MouvementUnitaire(\n self.position, caseVoisineALaPieceVoisine, pieceVoisine.position)\n # on rajoute le mouvement unitaire de la position de départ vers la caseVoisineALaPieceVoisine à la liste des\n # destinations 
possibles\n result.append(mu)\n return result\n\n def deplacer(self, destination):\n super(Pion, self).deplacer(destination)\n position = self.position\n # si on est sur une ligne de fond,\n if PositionsDamier.estLigneDeFond(position, self.couleur):\n # le pion est promu en dame\n self.promotion()\n\n def promotion(self):\n \"\"\"\n Promotion d'un pion en dame\n \"\"\"\n d = self.damier\n position = self.position\n couleur = self.couleur\n # on retire le pion depuis le damier\n self.retirer()\n # on créé une dame à la place du pion, de la même couleur\n d.creerDame(position, couleur)\n","sub_path":"damier_game/pion.py","file_name":"pion.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"446382402","text":"import os\r\nimport git\r\nimport shutil\r\nimport tempfile\r\n\r\n\r\n#always true, pain in the ass to check for version update, it's a tiny file and it deletes itself afterwards, it's fine.\r\nupdate = True\r\n\r\n#update Host\r\nif update == True:\r\n # Create temporary dir\r\n t = tempfile.mkdtemp()\r\n print(t)\r\n # Clone into temporary dir\r\n git.Repo.clone_from('https://github.com/TheGoatIsBetter/InnocentBot.git', t, branch='main', depth=1)\r\n # Copy desired file tree from temporary dir\r\n path = os.path.realpath(__file__)\r\n path = path[:-16]\r\n path = path + '\\Client'\r\n print(path)\r\n shutil.copy(f'{t}\\InnocentBotClient\\Client', f'{path}')\r\n # Remove temporary dir\r\n shutil.rmtree(t)\r\n\r\nos.chdir('Client')\r\nos.system('python3 InnocentBotClient.py')","sub_path":"InnocentBotClient/ClientLauncher.py","file_name":"ClientLauncher.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553860200","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 25 12:43:51 2020\n\n@author: P. M. 
Harrington, 25 January 2020\n\"\"\"\n\nfrom __future__ import division\nimport ctypes\nimport numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport atsapi as ats\nimport srs_dg535\nfrom Nop_class import Nop\n\ndef get_alazar_parameters(daq_params=None):\n alazar_params = Nop()\n \n #\n alazar_params.post_trigger_samples = 2048\n alazar_params.samples_per_sec = 1e9\n alazar_params.buffer_count = 64 # 64 for ATS9870, found in SDK manual\n \n #\n alazar_params.num_total_records = daq_params.num_patterns * daq_params.num_records_per_pattern \n alazar_params.records_per_buffer = min(1024, alazar_params.num_total_records) \n alazar_params.samples_per_buffer = alazar_params.post_trigger_samples * alazar_params.records_per_buffer \n alazar_params.buffers_per_acquisition = int(np.ceil(alazar_params.num_total_records/alazar_params.records_per_buffer))\n \n print(\"Patterns: {}\".format(daq_params.num_patterns))\n print(\"Records per pattern: {}\".format(daq_params.num_records_per_pattern))\n print(\"Buffers per acquistion: {}\".format(alazar_params.buffers_per_acquisition))\n print(\"DAQ samples per pattern: {}\".format(alazar_params.post_trigger_samples))\n \n return alazar_params\n\ndef rotate_iq(angle_deg=0., rec=[None, None]):\n rec_rotated = [None, None]\n \n ch_cmplx = rec[0] + 1j*rec[1]\n ch_cmplx_rot = abs(ch_cmplx)*np.exp(1j*np.angle(ch_cmplx))*np.exp(1j*np.pi*angle_deg/180)\n rec_rotated[0] = np.real(ch_cmplx_rot)\n rec_rotated[1] = np.imag(ch_cmplx_rot)\n \n return rec_rotated\n\n# Configures a board for acquisition\ndef configure_board(alazar_params, board):\n # TODO: Select clock parameters as required to generate this\n # sample rate\n #\n # For example: if samples_per_sec is 100e6 (100 MS/s), then you can\n # either:\n # - select clock source INTERNAL_CLOCK and sample rate\n # SAMPLE_RATE_100MSPS\n # - or select clock source FAST_EXTERNAL_CLOCK, sample rate\n # SAMPLE_RATE_USER_DEF, and connect a 100MHz signal to the\n # EXT CLK BNC connector\n \n samples_per_sec = alazar_params.samples_per_sec #1000000000.0\n board.setCaptureClock(ats.INTERNAL_CLOCK,\n ats.SAMPLE_RATE_250MSPS,\n ats.CLOCK_EDGE_RISING,\n 0)\n \n # TODO: Select channel A input parameters as required.\n board.inputControlEx(ats.CHANNEL_A,\n ats.DC_COUPLING,\n ats.INPUT_RANGE_PM_100_MV,\n ats.IMPEDANCE_50_OHM)\n \n # TODO: Select channel A bandwidth limit as required.\n board.setBWLimit(ats.CHANNEL_A, 0)\n \n \n # TODO: Select channel B input parameters as required.\n board.inputControlEx(ats.CHANNEL_B,\n ats.DC_COUPLING,\n ats.INPUT_RANGE_PM_100_MV,\n ats.IMPEDANCE_50_OHM)\n \n # TODO: Select channel B bandwidth limit as required.\n board.setBWLimit(ats.CHANNEL_B, 0)\n \n # TODO: Select trigger inputs and levels as required.\n board.setTriggerOperation(ats.TRIG_ENGINE_OP_J,\n ats.TRIG_ENGINE_J,\n ats.TRIG_EXTERNAL,\n ats.TRIGGER_SLOPE_POSITIVE,\n 135,\n ats.TRIG_ENGINE_K,\n ats.TRIG_DISABLE,\n ats.TRIGGER_SLOPE_POSITIVE,\n 128)\n\n# # TODO: Select external trigger parameters as required.\n# board.setExternalTrigger(ats.DC_COUPLING,\n# ats.ETR_1V)\n\n # TODO: Set trigger delay as required.\n triggerDelay_sec = 0\n triggerDelay_samples = int(triggerDelay_sec * samples_per_sec + 0.5)\n board.setTriggerDelay(triggerDelay_samples)\n\n # TODO: Set trigger timeout as required.\n #\n # NOTE: The board will wait for a for this amount of time for a\n # trigger event. If a trigger event does not arrive, then the\n # board will automatically trigger. 
Set the trigger timeout value\n # to 0 to force the board to wait forever for a trigger event.\n #\n # IMPORTANT: The trigger timeout value should be set to zero after\n # appropriate trigger parameters have been determined, otherwise\n # the board may trigger if the timeout interval expires before a\n # hardware trigger event arrives.\n triggerTimeout_sec = 0\n triggerTimeout_clocks = int(triggerTimeout_sec / 10e-6 + 0.5)\n board.setTriggerTimeOut(triggerTimeout_clocks)\n\n # Configure AUX I/O connector as required\n board.configureAuxIO(ats.AUX_OUT_TRIGGER,\n 0)\n \ndef acquire_data(daq_params, alazar_params, board):\n rec_avg_all = []\n rec_readout = []\n \n # No pre-trigger samples in NPT mode\n preTriggerSamples = 0\n\n # TODO: Select the number of samples per record.\n post_trigger_samples = alazar_params.post_trigger_samples\n\n # TODO: Select the number0 of records per DMA buffer.\n records_per_buffer = alazar_params.records_per_buffer #2**10 # up to 2**14\n\n # TODO: Select the number of buffers per acquisition.\n buffers_per_acquisition = alazar_params.buffers_per_acquisition\n \n records_per_acquisition = records_per_buffer * buffers_per_acquisition\n \n # TODO: Select the active channels.\n channels = ats.CHANNEL_A | ats.CHANNEL_B\n channelCount = 0\n for c in ats.channels:\n channelCount += (c & channels == c)\n\n # TODO: Should data be saved to file?\n saveData = False\n dataFile = None\n if saveData:\n dataFile = open(os.path.join(os.path.dirname(__file__),\n \"data.bin\"), 'wb')\n\n # Compute the number of bytes per record and per buffer \n memorySize_samples, bitsPerSample = board.getChannelInfo()\n bytesPerSample = (bitsPerSample.value + 7) // 8\n samplesPerRecord = preTriggerSamples + post_trigger_samples\n bytesPerRecord = bytesPerSample * samplesPerRecord\n bytesPerBuffer = bytesPerRecord * records_per_buffer * channelCount\n\n # TODO: Select number of DMA buffers to allocate\n buffer_count = alazar_params.buffer_count\n\n # Allocate DMA buffers\n\n sample_type = ctypes.c_uint8\n if bytesPerSample > 1:\n sample_type = ctypes.c_uint16\n\n buffers = []\n for i in range(buffer_count):\n buffers.append(ats.DMABuffer(board.handle, sample_type, bytesPerBuffer))\n \n # Set the record size\n board.setRecordSize(preTriggerSamples, post_trigger_samples)\n\n # Configure the board to make an NPT AutoDMA acquisition\n board.beforeAsyncRead(channels,\n -preTriggerSamples,\n samplesPerRecord,\n records_per_buffer,\n records_per_acquisition,\n ats.ADMA_EXTERNAL_STARTCAPTURE | ats.ADMA_NPT)\n\n index_avg_start = daq_params.readout_start\n index_avg_end = daq_params.readout_start + daq_params.readout_duration - 1\n\n index_ch = [None]*2\n index_ch[0] = np.arange(0,post_trigger_samples*records_per_buffer) # channel A\n index_ch[1] = post_trigger_samples*records_per_buffer + np.arange(0,post_trigger_samples*records_per_buffer) # channel B\n \n rec_all_raw = [None]*2\n rec_avg_all = [None]*2\n rec_readout = [[]]*2\n\n # Post DMA buffers to board\n for buffer in buffers:\n board.postAsyncBuffer(buffer.addr, buffer.size_bytes)\n\n start = time.clock() # Keep track of when acquisition started\n \n # start SRS DG535 triggers\n srs_dg535.set_trigger_level('high')\n \n try:\n board.startCapture() # Start the acquisition\n print(\"Capturing %d buffers. 
Press to abort\" %\n buffers_per_acquisition)\n buffersCompleted = 0\n bytesTransferred = 0\n while (buffersCompleted < buffers_per_acquisition and not\n ats.enter_pressed()):\n # Wait for the buffer at the head of the list of available\n # buffers to be filled by the board.\n buffer = buffers[buffersCompleted % len(buffers)]\n board.waitAsyncBufferComplete(buffer.addr, timeout_ms=5000)\n buffersCompleted += 1\n bytesTransferred += buffer.size_bytes\n\n #\n for idx, idx_ch in enumerate(index_ch):\n rec_all_raw[idx] = np.reshape(buffer.buffer[idx_ch], (records_per_buffer, post_trigger_samples))\n \n #\n rec_all = rotate_iq(daq_params.iq_angle_deg, rec_all_raw)\n \n #\n for idx in [0, 1]:\n rec_avg_all[idx] = np.mean(rec_all[idx], axis=0) # is this just the avg of the last loop?\n rec_readout[idx] = np.concatenate((rec_readout[idx], np.mean(rec_all[idx][:,index_avg_start:index_avg_end], axis=1)))\n \n # NOTE:\n #\n # While you are processing this buffer, the board is already\n # filling the next available buffer(s).\n #\n # You MUST finish processing this buffer and post it back to the\n # board before the board fills all of its available DMA buffers\n # and on-board memory.\n #\n # Samples are arranged in the buffer as follows:\n # S0A, S0B, ..., S1A, S1B, ...\n # with SXY the sample number X of channel Y.\n #\n # Sample code are stored as 8-bit values.\n #\n # Sample codes are unsigned by default. As a result:\n # - 0x00 represents a negative full scale input signal.\n # - 0x80 represents a ~0V signal.\n # - 0xFF represents a positive full scale input signal.\n # Optionaly save data to file\n if dataFile:\n buffer.buffer.tofile(dataFile)\n\n # Add the buffer to the end of the list of available buffers.\n board.postAsyncBuffer(buffer.addr, buffer.size_bytes)\n finally:\n board.abortAsyncRead()\n \n # stop SRS DG535 triggers\n srs_dg535.set_trigger_level('low')\n \n # Compute the total transfer time, and display performance information.\n transferTime_sec = time.clock() - start\n print(\"Capture completed in %f sec\" % transferTime_sec)\n buffersPerSec = 0\n bytesPerSec = 0\n recordsPerSec = 0\n if transferTime_sec > 0:\n buffersPerSec = buffersCompleted / transferTime_sec\n bytesPerSec = bytesTransferred / transferTime_sec\n recordsPerSec = records_per_buffer * buffersCompleted / transferTime_sec\n print(\"Captured %d buffers (%f buffers per sec)\" %\n (buffersCompleted, buffersPerSec))\n print(\"Captured %d records (%f records per sec)\" %\n (records_per_buffer * buffersCompleted, recordsPerSec))\n print(\"Transferred %d bytes (%f bytes per sec)\" %\n (bytesTransferred, bytesPerSec))\n \n return (rec_avg_all, rec_readout)\n \nif __name__ == \"__main__\":\n pass","sub_path":"instruments/old_python/daq_alazar.py","file_name":"daq_alazar.py","file_ext":"py","file_size_in_byte":11080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313787815","text":"#! 
/usr/bin/env python2.7\n# -*- coding: UTF-8 -*-\n# File: split_english_list.py\n# Author: tim \n# Date: 15/5/19\nimport json\nimport string\nimport os\nimport re\n\nartist_types = ['english_M', 'english_F', 'english_B']\npinyin_dict = {}\n\n\ndef split(artist_type):\n with open('../../basesite/static/sort_%s_type_artists.json' % (artist_type,), 'r') as fp:\n artists = json.load(fp)\n result = {'#': []}\n for a in string.ascii_uppercase:\n result[a] = []\n for artist in artists['data']:\n a = artist['artist_name'][0].encode('utf-8').upper()\n if a in result:\n result[a].append(artist)\n else:\n result['#'].append(artist)\n\n for key in result.keys():\n if key == '#':\n k = ''\n else:\n k = key\n with open('../../basesite/static/singerlist/%s_type/%s_artists.json' % (artist_type, k), 'w') as fp:\n json.dump({\"ret\": 'true', \"data\": result[key]}, fp)\n\n\nif __name__ == '__main__':\n for artist_type in artist_types:\n split(artist_type)","sub_path":"src/util/artists_get/split_english_list.py","file_name":"split_english_list.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"455032438","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 12 14:30:50 2021\n\n@author: kaan\n\"\"\"\n\n#Needs to be trained then tuned\n\nfrom gensim.models.fasttext import FastText\nimport nltk\nimport gc\nimport csv\nimport pickle\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn import model_selection\nfrom sklearn.model_selection import train_test_split # train-test split'i bununla yapıyoruz\nfrom sklearn.feature_extraction.text import TfidfVectorizer # TFIDF kullanmak için \nfrom sklearn.preprocessing import StandardScaler # to scale input data\n\n# for metrics and evaluation of results\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom torch import nn, optim, topk\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchtext\nfrom torchtext.data import get_tokenizer\nimport scipy\nimport torch.nn.functional as F\n\nimport io\ntokenizer = get_tokenizer(\"basic_english\")\n\n\n#%% parameters\n'''\n#Optimizer parameters # 91.5% for ttc3600\nBATCH_SIZE = 200\nEPOCHS = 50\nLEARNING_RATE =0.5\nMOMENTUM = 0.9\nDROPOUT = 0.1\n\n#LSTM parameters\nVECTOR_SIZE = 300 #Input size and the vector size are the same\nINPUT_SIZE = 300\n\nHIDDEN_SIZE = 300\n\nNUM_LAYERS = 1\n\nSENTENCE_LENGTH = 200 # max text length is 3125\n\nSHUFFLE = True\n\n'''\n#Optimizer parameters\nBATCH_SIZE = 200\nEPOCHS = 50\nLEARNING_RATE =0.5\nMOMENTUM = 0.9\nDROPOUT = 0.1\n\n#LSTM parameters\nVECTOR_SIZE = 300 #Input size and the vector size are the same\nINPUT_SIZE = 300\n\nHIDDEN_SIZE = 300\n\nNUM_LAYERS = 1\n\nSENTENCE_LENGTH = 200 # max text length is 3125\n\nSHUFFLE = True\n\n#%%obtain vectors\n\nprint('---------------------------------------------------------------------')\nprint('reading FastText training vectors ...')\n\n\nfname ='/home/kaan/Desktop/cc.tr.300.vec'\n\n\nfin = io.open(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\nn, d = map(int, fin.readline().split())\nvectors = {}\nfor line in fin:\n tokens = line.rstrip().split(' ')\n vectors[tokens[0]] = [float(i) for i in tokens[1:]]\n# vector is a dictionary with keys 
corresponding to each vectorized word (with length 300)\n# vector[','] as an example ( to reach the vectors)\n\n\n#%%obtain data\nfile_name = '/home/kaan/Downloads/1-ttc3600.xlsx'\npage = 'cleaned'\n\n#file_name = '/home/kaan/Downloads/2-ttc4900.xlsx'\n#page = 'sw_lem'\n\nlanguage = 'turkish'\nscoring = 'accuracy' \nuseStopwords = False \nuseLemmatize = False\n\n# %% import data\ndata = pd.read_excel(file_name, page) #, encoding = 'utf-8')\n\n# summarize dataset\nprint(\"input data shape: \", data.shape)\nprint(data.groupby('class').size())\nprint(data['class'].nunique())\n\nNUM_LABEL = data['class'].nunique()\n\n\nle = LabelEncoder().fit(data[\"class\"])\ndata['encodedcat'] = le.transform(data['class'])\n\ntrain_news, test_news, train_topics, test_topics = train_test_split(data['description'],data['encodedcat'],test_size=.2,stratify=data['class'],random_state=42)\n\ntrain_news=train_news.tolist()\ntest_news = test_news.tolist()\n\ntraining_tokens=[] #training tokens\n\nfor i,row in enumerate(train_news):\n training_tokens.append(tokenizer(row))\n\ntesting_tokens = [] #testing tokens\n\nfor i,row in enumerate(test_news):\n testing_tokens.append(tokenizer(row))\n\ny_train = torch.tensor(train_topics.values) #converts the panda object to a tensor \ny_test = torch.tensor(test_topics.values)\n\n#%%\ntorch.cuda.empty_cache()\n\n\n#%% Prepare training dataset\n#train dataset for dataloader\n\n\ntraining_tuples = []\n\nfor j,_ in enumerate(train_news):\n a = torch.zeros( SENTENCE_LENGTH,VECTOR_SIZE)\n for num , i in enumerate(training_tokens[j]): \n \n if num < SENTENCE_LENGTH:\n \n try:\n a[num] = torch.Tensor(vectors[i]).reshape(1,VECTOR_SIZE) # construct matrices for each sentence\n \n except KeyError:\n continue\n \n else: continue\n a = StandardScaler(with_mean = 0).fit_transform(a)\n \n tuple1=(a,y_train[j])\n\n training_tuples.append(tuple1) # construct tuples with data and labels\n\n\n#%% Prepare test dataset\n#test dataset for test dataloader\ntesting_tuples = []\n\n# buraya j = 0 yazarsak çalışmıyor?? 
\n\nfor j,_ in enumerate(test_news):\n b = torch.zeros( SENTENCE_LENGTH,VECTOR_SIZE)\n for num2 , i2 in enumerate(testing_tokens[j]): \n if num2 < SENTENCE_LENGTH:\n \n try:\n b[num2] = torch.Tensor(vectors[i2]).reshape(1,VECTOR_SIZE) # construct matrices for each sentence\n \n except KeyError:\n continue\n \n else: continue \n b = StandardScaler(with_mean = 0).fit_transform(b)\n \n tuple2=(b,y_test[j])\n \n testing_tuples.append(tuple2) # construct tuples with data and labels\n\n#%%\n'''\nprint(testing_tokens[0])\nvec, label = testing_tuples[0]\nprint(vec.shape)\nprint(vec[1,:])\nprint(vec[1,:].shape)\nprint(vectors['5'])\n\n'''\n#%% Cuda\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda:2\")\n print(f\"There are {torch.cuda.device_count()} GPU(s) available.\")\n print('Device name:', torch.cuda.get_device_name(0))\nelse: \n print(\"No GPU available, using the CPU instead.\")\n device = torch.device(\"cpu\")\n\ntorch.cuda.empty_cache()\n\n\n#device = torch.device(\"cpu\")\n\n#%%\n\nNUM_LABEL = data['class'].nunique()\nTEST_BATCH = test_topics.__len__()\n\ntrain_dl= DataLoader(training_tuples,batch_size=BATCH_SIZE,shuffle=SHUFFLE,pin_memory=True)\ntest_dl = DataLoader(testing_tuples, batch_size =TEST_BATCH, shuffle = SHUFFLE, pin_memory = True)\n\n\n#%%Model design\n\nprint(\"---Model Design------------------------------\")\nclass RNNet(nn.Module):\n def __init__(self,input_layer, hidden_size,num_layers):\n super(RNNet,self).__init__()\n self.input_layer = input_layer\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n \n self.LSTM = nn.LSTM(input_size = input_layer,hidden_size = hidden_size, num_layers = num_layers)\n self.linear = nn.Linear(hidden_size,NUM_LABEL).to('cuda:1')\n \n \n def forward(self,x):\n h0 = torch.zeros(self.num_layers,x.shape[1], self.hidden_size, device = device)\n c0 = torch.zeros(self.num_layers,x.shape[1], self.hidden_size, device = device)\n \n \n output, (hn,cn) = self.LSTM(x,(h0,c0)) \n \n output= output[-1,:,:]\n\n output = self.linear(output)\n output = F.log_softmax(output, dim = 1)\n return output \n\n\nmodel = RNNet (INPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS).to(device)\n\n# Main function\n\ntrain_losses = []\ntest_losses = []\n#test_accuracies = []\n#val_outputs= []\nf1_score_list = []\n\ncriterion = nn.CrossEntropyLoss()\n\ndef train_model(model,train_dl,test_dl,epochs):\n \n #gc.collect()\n print(model)\n optimizer= optim.SGD(model.parameters(),lr=LEARNING_RATE,momentum=MOMENTUM)\n #optimizer = optim.Adam(model.parameters(),lr = LEARNING_RATE,weight_decay=WEIGHT_DECAY)\n for epoch in range(epochs):\n \n \n for train_data, train_label_data in train_dl: \n model.train()\n train_data = train_data.to(device).float()\n train_data =train_data.permute(1,0,2)\n train_label_data = train_label_data.to(device)\n output = model(train_data)\n\n loss = criterion(output,train_label_data)\n loss.backward()\n train_loss = loss.item()\n \n #train_losses.append(train_loss)\n optimizer.step()\n optimizer.zero_grad()\n loss.detach()\n \n validation_f1_score,val_loss = evaluate_model(model,test_dl)\n f1_score_list.append(validation_f1_score)\n #test_losses.append(val_loss)\n torch.cuda.empty_cache()\n print(f\"Epoch: {epoch+1}/{epochs}..\", f\"Training loss: {train_loss:.3f}\", f\"Validation loss: {val_loss:.3f} \" , f\"Validation F1 Score: {validation_f1_score:.3f}\")\n \n plt.figure(figsize=(12,5))\n plt.title(f\"Batch size / Learning rate = {BATCH_SIZE, LEARNING_RATE}\")\n plt.subplot(121)\n plt.xlabel('epochs')\n plt.ylabel('loss')\n 
plt.plot(train_losses,label='Training loss')\n plt.plot(test_losses, label ='Validation loss')\n plt.legend(frameon=True);\n plt.subplot(122)\n plt.xlabel('epochs')\n plt.ylabel('F1 score')\n plt.plot(f1_score_list,label='F1 score')\n \n\n\ndef evaluate_model(model,test_dl):\n model.eval()\n #actual = []\n for test_data,test_label_data in test_dl:\n test_data = test_data.to(device).float()\n test_data =test_data.permute(1,0,2)\n test_label_data = test_label_data.to(device)\n \n val_out = model(test_data)\n \n val_loss= criterion(val_out,test_label_data)\n \n val_loss.detach()\n #targets=test_label_data.cpu().numpy()\n \n _, predictions = torch.max(val_out,dim=1) \n #accuracy = accuracy_score(predictions, targets)\n #test_accuracies.append(accuracy)\n #actual.append(predictions) #Creates a list with all the outputs\n test_label_data = test_label_data.cpu()\n predictions = predictions.cpu()\n f1_score_result=f1_score(test_label_data,predictions,average='weighted') \n torch.cuda.empty_cache()\n return f1_score_result,val_loss\n #print(f\"Validation loss: {val_loss:.3f}\" ,f\"Validation accuracy:{accuracy:.3f}\")\n \n \n\n\nprint(\"Training....\")\ntrain_model(model,train_dl,test_dl, EPOCHS)\n","sub_path":"lstm_fasttext.py","file_name":"lstm_fasttext.py","file_ext":"py","file_size_in_byte":9908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242411051","text":"# coding:utf-8\nfrom celery import Celery\nimport os\nimport Lagou, Boss\nimport time\n\nbroker = 'redis://127.0.0.1:6379/1'\nbackend = 'redis://127.0.0.1:6379/2'\n\napp = Celery('tasks', broker=broker, backend=backend)\napp.conf.task_routes = {\n 'boss.tasks.*': {\n 'queue': 'boss',\n },\n 'lagou.tasks.*': {\n 'queue': 'lagou',\n }\n}\n\n\n@app.task\ndef lagou_info_task(url):\n return Lagou.info(url)\n\n\n@app.task\ndef lagou_url_task(url):\n urls = Lagou.main(url)\n if urls:\n for _url in urls:\n lagou_info_task.apply_async(args=[_url], queue='lagou')\n return 'Success(lagou)'\n else:\n return 'Fail(lagou)'\n\n\n@app.task\ndef boss_info_task(url):\n return Boss.info(url)\n\n\n@app.task\ndef boss_url_task(url):\n urls = Boss.main(url)\n if urls:\n for _url in urls:\n boss_info_task.apply_async(args=[_url], queue='boss')\n return 'Success(boss)'\n else:\n return 'Fail(boss)'\n\n\nif __name__ == '__main__':\n # os.system('celery -A Tasks worker -n worker1 -Q boss -c 4 --loglevel=info')\n # os.system('celery -A Tasks worker -n worker2 -Q lagou -c 4 --loglevel=info')\n os.system('celery flower -A Tasks')\n # pass","sub_path":"JobSpider/Tasks.py","file_name":"Tasks.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"108260885","text":"import random\n\nSIZE = 4\nEMPTY = \" \"\nPERSON = \"P\"\nPET = \"T\"\nPOOP = \"O\"\nERROR = \"!\"\nCLEANED = \".\"\n\n'''\nStudents write\n'''\n# clean()\n# @arg(world: a reference to a 2D list of Strings of length one)\n# @return(none)\ndef clean(world):\n print(\"Scooping the poop\")\n\n'''\nStudents write\n'''\n# count()\n# @arg(world: a reference to a 2D list of Strings of length one)\n# @return(string,int):\n# @first return value = single string character entered by the user\n# @second return value = the number of times that the string was found in the list\ndef count(world):\n print(\"Counting number of occurances of a character\")\n number = 0\n element = input(\"Enter character: \")\n return(element,number)\n\n# createElement()\n# @args: none\n# 
@return: a String of length one\n# Randomly generates an 'occupant' for a location in the world\ndef createElement():\n tempNum = random.randrange(10)+1\n\n # 50% chance empty\n if ((tempNum >= 1) and (tempNum <= 5)):\n tempElement = EMPTY\n\n # 20% of a person\n elif ((tempNum >= 6) and (tempNum <= 7)):\n tempElement = PERSON\n\n # 10% chance of pet\n elif (tempNum == 8):\n tempElement = PET\n\n # 20% chance of poop in that location (lots in this world)\n elif ((tempNum >= 9) and (tempNum <= 10)):\n tempElement = POOP\n\n # In case there's a bug in the random number genrator\n else:\n tempElement = ERROR\n\n return(tempElement)\n\n# createWorld()\n# @args: none\n# @return: a randomly initialized 2D list\n# Creates the SIZExSIZE world. Randomly populates it with the\n# return values from function createElement\ndef createWorld():\n world = [] # Create a variable that refers to a 1D list.\n r = 0\n\n # Outer 'r' loop traverses the rows.\n # Each iteration of the outer loop creates a new 1D list.\n while (r < SIZE):\n world.append([]) # Create new empty row\n c = 0\n # The inner 'c' loop traverses the columns of the newly\n # created 1D list creating and initializing each element\n # to the value returned by createElement()\n while (c < SIZE):\n element = createElement()\n world[r].append(element)\n c = c + 1\n r = r + 1\n return(world)\n\n# Shows the elements of the world. All the elements in a row will\n# appear on the same line.\ndef display(world):\n print(\"OUR WORLD\")\n print(\"========\")\n r = 0\n while (r < SIZE): # Each iteration accesses a single row\n c = 0\n while (c < SIZE): # Each iteration accesses an element in a row\n print(world[r][c], end=\"\")\n c = c + 1\n print() # Done displaying row, move output to the next line\n r = r + 1\n\ndef start():\n world = createWorld()\n display(world)\n element,number = count(world)\n print(\"# occurances of %s=%d\" %(element,number))\n clean(world)\n display(world)\n\nstart()\n","sub_path":"compsci courses/CPSC231 - Intro to CompSci in Python/miniAssignments/mini4.py","file_name":"mini4.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"616145289","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n\n\nfrom .models import Comment\nfrom .models import CommentForm\n\nimport datetime\n\n\ndef load_comments(request):\n result, comment_4, comment_3, comment_2, comment_1 = [], [], [], [], []\n comment_5 = Comment.objects.filter(rating=5)[:5]\n for i in comment_5:\n result.append(to_json(i))\n if len(comment_5) < 5:\n comment_4 = Comment.objects.filter(rating=4)[:5-len(comment_5)]\n for k in comment_4:\n result.append(to_json(k))\n if (len(comment_5) + len(comment_4)) < 5:\n comment_3 = Comment.objects.filter(rating=3)[:5-(len(comment_5) + len(comment_4))]\n for j in comment_3:\n result.append(to_json(j))\n return JsonResponse({'comments': result}, safe=False)\n\n\ndef add_comments(request):\n comment_form = CommentForm(request.POST)\n if request.method == 'POST':\n formset = comment_form\n if formset.is_valid():\n author = formset.save(commit=False)\n comment = Comment(user=author.user)\n comment.comment_text = author.comment_text\n comment.pub_date = datetime.datetime.now()\n comment.rating = author.rating\n comment.save()\n else:\n formset = comment_form\n return render(request, \"add_comment.html\", {\"formset\": formset})\n\n\ndef to_json(comment):\n # {'1 comment': {'user': asd, 'text': asfasd, 'rating': 
1/2/3/4/5, 'date': asojd},\n # '2 comment': {},\n # '3 comment': {},\n # '4 comment': {},\n # '5 comment': {}}\n results = {'user': comment.user.first_name,\n 'text': comment.comment_text,\n 'rating': comment.rating,\n 'date': comment.pub_date,\n 'city': comment.user.profile.city,\n 'avatar': comment.user.profile.user_avatar.url\n }\n return results\n","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"65211696","text":"import os\nimport pathlib\nimport json\n\nimport requests\nfrom flask import Flask, session, abort, redirect, request\nfrom google.oauth2 import id_token\nfrom google_auth_oauthlib.flow import Flow\nfrom pip._vendor import cachecontrol\nimport google.auth.transport.requests\n\n\nwith open('flask_secret.json', 'r') as myfile:\n CLIENT_SECRETS = json.load(myfile)\n\napp = Flask(\"PrideMatch\")\napp.secret_key = (CLIENT_SECRETS[\"APP_SECRET\"])\n\nos.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\n\nGOOGLE_CLIENT_ID = (CLIENT_SECRETS[\"GOOGLE_CLIENT_ID\"])\n\nclient_secret_file = os.path.join(pathlib.Path(__file__).parent, \"client_secret.json\")\nflow = Flow.from_client_secrets_file(\n client_secrets_file = client_secret_file,\n scopes = [\"https://www.googleapis.com/auth/userinfo.profile\", \"https://www.googleapis.com/auth/userinfo.email\", \"openid\"],\n redirect_uri = \"http://127.0.0.1:5000/callback\"\n )\n\n@ app.route(\"/\")\ndef index():\n return \"hello world Login\"\n\ndef login_is_required(function):\n def wrapper(*args, **kwargs):\n if \"google_id\" not in session:\n return abort(401) # auth req\n else: \n return function()\n\n return wrapper\n\n@ app.route(\"/login\")\ndef login():\n authorization_url, state = flow.authorization_url()\n session[\"state\"] = state\n return redirect(authorization_url)\n\n@ app.route(\"/callback\")\ndef callback():\n flow.fetch_token(authorization_response = request.url)\n\n if not session[\"state\"] == request.args[\"state\"]:\n abort(500)\n \n credentials = flow.credentials\n request_session = requests.session()\n cached_session = cachecontrol.CacheControl(request_session)\n token_request = google.auth.transport.requests.Request(session = cached_session)\n\n id_info = id_token.verify_oauth2_token(\n id_token = credentials._id_token,\n request = token_request,\n audience = GOOGLE_CLIENT_ID\n )\n\n session['google_id'] = id_info.get(\"sub\")\n session['name'] = id_info.get(\"name\")\n return redirect(\"/protected_area\")\n\n@ app.route(\"/logout\")\ndef logout():\n session.clear()\n return redirect('/')\n pass\n\n@ app.route(\"/protected_area\")\n@login_is_required\ndef protected_area():\n return \"PROTECTED PAGE logout\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629322812","text":"\nimport math\nimport random\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom collections import Counter \nimport matplotlib.pyplot as plt \nimport random\n\n# 决策树的最大深度\nmax_deep = 10\n# 达到分割的最少条件 (低于这个数量不用分割 统计个数)\nmin_samples_split = 2\n# 叶子节点的最少数据个数\nmin_samples_leaf = 2\n\n# 训练取的样本数\ntrain_data_size = 150\n# 测试取的样本树\ntest_data_size = 50\n# 学习速度 精度\nwindow_num = 100\n\n# criteria = entropy, by default\n\n# 计算熵值\ndef entroy(class_probability):\n return sum(-p * math.log(p, 2) for p in 
class_probability if p)\n\ndef class_probability(labels):\n totol_count = len(labels)\n return [count / totol_count for count in Counter(labels).values()]\n\n\n\n\n# 计算信息增益率\n# TODO 连续值 如何进行增益率的计算\n# 关键!!!\n'''\n 将连续转换为离散的方法\n 1. 取一定的位数 * 10^N\n 2. 取一定间隔 等份\n'''\ndef information_gain_ratio():\n pass\n\n\n\n# 寻找最好的特征 求得最大的信息增益率\ndef find_best_t(X,y, candidate_features):\n # class_num = len(candidate_features)\n \n # t_values = [t_value_bi_partition(X, y, t) for t in candidate_features]\n\n\n max_info_gain = -1\n best_t = -1\n for t in candidate_features:\n t_value = t_value_bi_partition(X, y, t)\n info_gain = information_gain(X, y, t, t_value)\n \n if info_gain > max_info_gain:\n best_t = t\n max_info_gain = info_gain\n\n return best_t\n\n'''\n@Function\n 给定特征值index,二分求得最佳的t_value,使得信息增益率最大\n TODO\n P84 如何处理 N个划分, 候选位点(中值) -> 变成连续函数\n (凸函数)\n@Parameter\n X 特征矩阵\n y target\n t 选中的特征值\n@Output\n t_value 二分求得的t_value\n info_gain 信息增益最大的时候的值\n'''\ndef t_value_bi_partition(X, y, t):\n\n x_min, x_max = X[:,t].min(), X[:,t].max()\n flag_a = x_min\n flag_b = x_max\n\n max_info_gain = -1\n flag_max = -1\n \n # x_groups = []\n # y_groups = []\n\n win_width = (x_max - x_min) / window_num\n\n for win_idx in range(window_num):\n x = x_min + win_width * (0.5 + win_idx)\n info_gain = information_gain(X, y, t, x)\n\n # x_groups.append(x)\n # y_groups.append(info_gain)\n\n #若相等\n if abs(info_gain - max_info_gain) <= 1e-10:\n flag_max = x\n elif info_gain - max_info_gain > 1e-10:\n flag_max = x\n max_info_gain = info_gain\n \n\n # print(\"x: %lf , info_gain: %lf\"%(x, info_gain))\n\n # print(\"X: %lf; MAX INFO_GAIN : %lf\"%(flag_max, max_info_gain))\n \n\n #if(len(x_groups)): \n #plt.plot(x_groups, y_groups, color='green')\n #plt.savefig(\"t_%d_t_value_%.5lf.png\"%(t,flag_max))\n #plt.close('all')\n '''\n print(\"======================================\")\n print(\"data: t_%d_t_value_%.5lf\"%(t,flag_max))\n print(\"x_groups\")\n print(x_groups)\n print(\"y_groups\")\n print(y_groups)\n '''\n return flag_max\n\n'''\n@Function\n 计算信息增益\n@Parameter\n X 特征矩阵\n y target\n t 选中的特征值\n t_value 划分点\n\n@Output\n info_gain :information gain 信息增益\n'''\ndef information_gain(X, y, t, t_value):\n\n ent_d = entroy(class_probability(y))\n \n (X_posi, y_posi, X_negi, y_negi) = partition_by(X, y, t, t_value)\n \n # 统计个数\n num_posi = len(X_posi)\n num_negi = len(X_negi)\n\n # 计算各自的entropy\n entroy_posi = entroy(class_probability(y_posi))\n entroy_negi = entroy(class_probability(y_negi))\n\n # 信息增益\n info_gain = ent_d - (num_posi * entroy_posi + num_negi * entroy_negi) / (num_posi + num_negi) \n \n return info_gain\n\n\n\n\n'''\n@Function\n根据t_value进行划分\n@Parameter\nX 特征矩阵\ny target\nt 特征值\nt_value 划分点\n@Output\n\n X: 输入的矩阵(连续值)\n col: \n'''\ndef partition_by(X,y,t,t_value):\n\n idx_negi = []\n idx_posi = []\n for i in range(len(X)):\n if X[i][t] - t_value > 1e-10:\n idx_posi.append(i)\n else:\n idx_negi.append(i)\n\n # D+ \n X_posi = X[idx_posi]\n y_posi = y[idx_posi]\n # D- \n X_negi = X[idx_negi]\n y_negi = y[idx_negi]\n\n return X_posi, y_posi, X_negi, y_negi\n\n # print(class_stat_dict)\n\n\n\nclass DecisionTree:\n def __init__(self, X, y, deep = 0):\n self.t = None #划分属性\n self.t_value = None #划分值\n self.deep = deep #树的深度\n self.target = None # 类标签\n self.leftChild = None #左子树为小于\n self.rightChild = None\n self.fit(X,y)\n\n # 训练模型 生成树\n def fit(self, X, y):\n \n class_stat_dict = Counter(y)\n num_total = len(X)\n \n # 纯度 100%\n if(len(class_stat_dict) == 1):\n self.target = y[0]\n return\n\n # 小于最小划分树 或者深度超标\n if num_total 
< min_samples_split or self.deep > max_deep:\n # 取占比最大的\n self.target = max(class_stat_dict)\n return\n \n split_candidate = [c for c in range(len(X[0]))]\n \n best_t = find_best_t(X,y, split_candidate)\n t_value = t_value_bi_partition(X, y, best_t)\n (X_posi, y_posi, X_negi, y_negi) = partition_by(X,y,best_t,t_value)\n\n self.t = best_t\n self.t_value = t_value\n\n # 分割小于最小子节点的数的要求\n if(len(y_posi) < min_samples_leaf or len(y_negi) < min_samples_leaf):\n self.target = max(class_stat_dict)\n return\n\n if(len(X_negi)):\n self.leftChild = DecisionTree(X_negi, y_negi, self.deep + 1)\n \n if(len(X_posi)):\n self.rightChild = DecisionTree(X_posi, y_posi, self.deep + 1)\n\n \n # 做出预测\n def predict(self,data):\n # target为空, 说明不可以在\n if(self.target != None):\n return self.target\n\n # 未判断左右树是否相等\n if data[self.t] > self.t_value and self.rightChild:\n return self.rightChild.predict(data)\n elif data[self.t] <= self.t_value and self.leftChild:\n return self.leftChild.predict(data)\n else:\n return None\n # 打印决策树\n def print_tree(self):\n if(self.target != None):\n print(\"\\t\"*(self.deep-1) + \"|\\t|\\n\" + \"\\t\"*(self.deep-1) + \"|\\t\" +\"|_____\" + \" [[__%d__]] \"%(self.target))\n return\n if(self.t == None or self.t_value == None):\n return\n print(\"\\t\"*self.deep + \"\\n\"+\"\\t\"*self.deep + \"|\\n\" + \"\\t\"*self.deep + \"|____T = %d, T_Value = %.4lf\"%(self.t, self.t_value))\n print(\"\\t\"*self.deep + \"|\")\n print(\"\\t\"*self.deep + \"|_______\" + \"T_VALUE <= %.4lf\"%(self.t_value))\n self.leftChild.print_tree()\n print(\"\\t\"*self.deep + \"|_______T_VALUE > %.4lf\"%(self.t_value))\n self.rightChild.print_tree()\n \n # 决策树的自我评价\n def evaluate_model(self, test_X, test_y):\n data_num = len(test_X)\n predict_target = [self.predict(data) for data in test_X]\n match_vector = [predict_target[i] == test_y[i] for i in range(data_num)]\n # 获取测试统计数据\n # Counter({True: 47, False: 3})\n return Counter(match_vector)[True]/ data_num\n\nif __name__ == \"__main__\":\n iris = load_iris()\n # 获取iris的数据\n X = np.array(iris.data)\n y = np.array(iris.target)\n data_size = len(X)\n\n # 放回抽样 train_data_size个\n train_idx = [random.randint(0, data_size-1) for __ in range(train_data_size)]\n train_X = X[train_idx]\n train_y = y[train_idx]\n\n # 放回抽样, 做为测试数据\n test_idx = [random.randint(0, data_size-1) for __ in range(test_data_size)]\n test_X = X[test_idx]\n test_y = y[test_idx]\n \n # 训练决策树\n DTree = DecisionTree(train_X, train_y)\n # 打印树\n DTree.print_tree()\n \n # 准确度\n precision = DTree.evaluate_model(test_X, test_y)\n print(\"Precision: %.5f\"%(precision))\n ","sub_path":"random_forest/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"330925178","text":"from flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\nfrom linebot.models import *\n\nfrom instaloader import Profile\n\nimport os, random, json, requests, secret, instaloader, urllib, threading, time\n\nkeys = secret.keys()\n\nbeauty_img_path = '/home/yopipi-ubuntu/桌面/yes_or_no/ok'\ndcard_img_path = '/home/yopipi-ubuntu/桌面/yes_or_no/dcard_test'\nig_save_path = '/home/yopipi-ubuntu/桌面/yes_or_no/ig'\nbeauty_img_list = os.listdir(beauty_img_path)\ndcard_img_list = os.listdir(dcard_img_path)\nnow_path = os.getcwd()\n\nimgur_url = 
'https://imgur.com/'\n\napp = Flask(__name__)\n\nline_bot_api = LineBotApi(keys.line_api)\nhandler = WebhookHandler(keys.line_secret)\n\n@app.route(\"/\", methods=['POST'])\ndef index():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n json_line = request.get_json()\n json_line = json.dumps(json_line)\n global decoded\n decoded = json.loads(json_line)\n print(decoded[\"events\"][0][\"message\"][\"text\"])\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n # 隨機抽我的表特版圖庫\n if event.message.text==u\"抽\":\n length = len(beauty_img_list) - 1\n img = beauty_img_list[random.randint(0,length)]\n url = imgur_url + img\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n line_bot_api.reply_message(\n event.reply_token, image_message)\n # 隨機抽我的dcard圖庫\n elif event.message.text==u\"卡\":\n length = len(dcard_img_list) - 1\n img = dcard_img_list[random.randint(0,length)]\n url = imgur_url + img\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n line_bot_api.reply_message(\n event.reply_token, image_message)\n # 推薦 ig 帳號\n elif event.message.text[:2]==u\"推薦\":\n # 分男女\n\n ig_id = event.message.text[3:]\n\n # Get instance\n L = instaloader.Instaloader(post_metadata_txt_pattern= '', download_videos=False, download_video_thumbnails=False, download_geotags=False, download_comments=False, compress_json=False)\n\n # 找前100篇圖\n profile = Profile.from_username(L.context, ig_id)\n all_likes = []\n all_urls = []\n count = 1\n for post in profile.get_posts():\n all_likes.append(post.likes)\n all_urls.append(post.url)\n #print(count)\n count += 1\n if count == 100: # 爬 100 張照片\n break\n\n try:\n person_path = os.path.join(ig_save_path, ig_id)\n os.mkdir(person_path)\n except Exception as e:\n print(e)\n\n sorted_urls = [all_urls for _,all_urls in sorted(zip(all_likes,all_urls), reverse=True)]\n # 找按讚數前50的載\n download_urls = sorted_urls[:50]\n count = 1\n for url in download_urls:\n print(url)\n final_path = os.path.join(person_path, str(count)+'.jpg')\n urllib.request.urlretrieve(url, final_path)\n count += 1\n \n # 把網址存在json裡給line bot用\n with open(os.path.join(person_path, 'url.json'), 'w') as f:\n json.dump(download_urls, f) \n\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text='圖片已抓取')) \n elif event.message.text==u\"更新\":\n # Get instance\n L = instaloader.Instaloader(post_metadata_txt_pattern= '', download_videos=False, download_video_thumbnails=False, download_geotags=False, download_comments=False, compress_json=False)\n all_person = os.listdir(ig_save_path)\n for person in all_person:\n person_path = os.path.join(ig_save_path, person)\n final_path = os.path.join(ig_save_path, person)\n # 找前100篇圖\n profile = Profile.from_username(L.context, person)\n all_likes = []\n all_urls = []\n count = 1\n for post in profile.get_posts():\n all_likes.append(post.likes)\n all_urls.append(post.url)\n #print(count)\n count += 1\n if count == 100: # 爬 100 張照片\n break\n\n sorted_urls = [all_urls for _,all_urls in sorted(zip(all_likes,all_urls), reverse=True)]\n # 找按讚數前50的載\n download_urls = sorted_urls[:50]\n count = 1\n for url in download_urls:\n print(url)\n final_path = os.path.join(person_path, 
str(count)+'.jpg')\n urllib.request.urlretrieve(url, final_path)\n count += 1\n\n # 把網址存在json裡給line bot用\n with open(os.path.join(person_path, 'url.json'), 'w') as f:\n json.dump(download_urls, f) \n\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text='更新已完成!'))\n #line_bot_api.push_message('U4a163602a7d66b0494cc38f4824d4d44', TextSendMessage(text='更新已完成!')) \n # 隨機抽 ig 圖庫裡的圖\n elif event.message.text.lower()==u\"ig\":\n person_folders = os.listdir(ig_save_path)\n global ran_folder\n ran_folder = person_folders[random.randint(0,len(person_folders) - 1)]\n folder = os.path.join(ig_save_path,ran_folder)\n image_list = os.listdir(folder)\n ran_image_num = random.randint(0,len(image_list) - 2)\n print(folder, ran_image_num)\n with open(os.path.join(folder, 'url.json'), 'r') as f:\n data = json.load(f)\n url = data[ran_image_num]\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n line_bot_api.reply_message(event.reply_token, image_message)\n elif event.message.text.lower()==u\"男ig\":\n print(\"test\")\n # 回答上一張圖是哪個 ig 帳號\n elif event.message.text==u\"誰\":\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text=ran_folder))\n # 指定抽某個 ig 帳號的圖出來\n elif event.message.text[:1]==u\"找\":\n direct_folder = event.message.text[2:]\n folder = os.path.join(ig_save_path,direct_folder)\n image_list = os.listdir(folder)\n ran_image_num = random.randint(0,len(image_list) - 2)\n print(folder, ran_image_num)\n with open(os.path.join(folder, 'url.json'), 'r') as f:\n data = json.load(f)\n url = data[ran_image_num]\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n line_bot_api.reply_message(event.reply_token, image_message)\n elif event.message.text[:1]==u\"投\":\n ig_id = event.message.text[2:]\n if os.path.isfile('vote.json'):\n with open('vote.json', 'r') as r:\n data = json.load(r)\n r.close()\n tmp_dict = {\"userid\" : event.source.user_id, \"igid\" : ig_id}\n if tmp_dict in data:\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=\"你已經投過摟!\"))\n else:\n data.append(tmp_dict)\n with open('vote.json', 'w') as w:\n json.dump(data, w)\n w.close()\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=\"你已經投票!\"))\n else:\n with open('vote.json', 'w') as w:\n data = [{\"userid\" : event.source.user_id, \"igid\" : ig_id}]\n json.dump(data, w)\n w.close()\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=\"你已經投票!\"))\n # 顯示 ig 功能\n elif event.message.text==u\"ig功能\":\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text='輸入ig即可抽ig妹子,想推薦妹子請打:推薦(空格)ig的id。 \\n例如:推薦 xxx \\n找特定帳號的圖,請打:找(空格)ig的id \\n想知道上一張圖是誰請打:誰 \\n如果你覺得哪個帳號的圖你不喜歡,打:投(空格)ig的id,投他一票,我會視票數決定是否從圖庫刪除。')) \n # 顯示虛擬幣功能\n elif event.message.text==u\"虛擬幣功能\":\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text='以下請注意中間要加空白鍵! \\n輸入:check(空格)xxxyyy 可查詢匯率。 \\n例如:check(空格)ethbtc。')) \n #line_bot_api.reply_message(\n #event.reply_token, TextSendMessage(text='以下請注意中間要加空白鍵! 
\\n輸入:check(空格)xxxyyy 可查詢匯率。 \\n例如:check(空格)ethbtc。 \\n 輸入:order(空格)xxxyyy(空格)>(空格)500 可設定到價通知。 \\n例如:order(空格)btcusdt(空格)>(空格)8000。 \\n目前到價通知測試中喔!')) \n # 查詢幣安的當下虛擬貨幣匯率\n elif event.message.text[:5]==u\"check\":\n binance_request = requests.get(\"https://api.binance.com/api/v1/ticker/24hr\")\n binance_info = json.loads(str(binance_request.content, encoding = \"utf-8\"))\n for info in binance_info:\n if event.message.text[6:].lower()==info['symbol'].lower():\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text=info['symbol'] + \":\" + info['lastPrice']))\n elif event.message.text[:3]==u\"給作者\":\n user_message = event.message.text[4:]\n line_bot_api.push_message(keys.my_user_id,TextSendMessage(text=user_message))\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=' 已送達!'))\n \n\n '''\n # 設定到價通知\n elif event.message.text[:5]==u\"order\":\n input_order = event.message.text[6:]\n split_order = input_order.split(' ')\n split_order.append(event.source.user_id)\n if len(split_order) != 4:\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text='我覺得不行! \\n請確認格式~'))\n else:\n if os.path.isfile('order.json'):\n with open('order.json', 'r') as r:\n old_order = json.load(r)\n r.close()\n with open('order.json', 'w') as w:\n old_order.append(split_order)\n json.dump(old_order, w)\n w.close()\n else:\n with open('order.json', 'w') as w:\n old_order = [split_order]\n json.dump(old_order, w)\n w.close()\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text='roger that!'))\n flagg = True\n while flagg:\n with open('binance_info.json', 'r') as r:\n binance_info = json.load(r)\n r.close()\n for info in binance_info:\n if split_order[0].lower()==info['symbol'].lower():\n if split_order[1] == \">\" or split_order[1] == \">=\":\n if info['lastPrice'] >= split_order[2]:\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text=input_order + \"\\n已達成!\"))\n if split_order in old_order:\n old_order.remove(split_order)\n with open('order.json', 'w') as w:\n json.dump(old_order, w)\n w.close()\n flagg = False\n elif split_order[1] == \">\" or split_order[1] == \"<=\":\n if info['lastPrice'] <= split_order[2]:\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text=input_order + \"\\n已達成!\"))\n if split_order in old_order:\n old_order.remove(split_order)\n with open('order.json', 'w') as w:\n json.dump(old_order, w)\n w.close()\n flagg = False \n time.sleep(30)\n # 重設到價通知 \n elif event.message.text[:5]==u\"reset\":\n print(\"reset order\")\n with open('order.json', 'r') as r:\n old_order = json.load(r)\n r.close()\n print(old_order)\n flagg = True\n while flagg:\n with open('binance_info.json', 'r') as r:\n binance_info = json.load(r)\n r.close()\n for info in binance_info:\n for order in old_order:\n if order[0].lower()==info['symbol'].lower():\n if order[1] == \">\" or order[1] == \">=\":\n if info['lastPrice'] >= order[2]:\n input_order = order[0] + order[1] + order[2]\n line_bot_api.push_message(order[3],TextSendMessage(text=input_order + \"\\n已達成!\"))\n if order in old_order:\n old_order.remove(order)\n with open('order.json', 'w') as w:\n json.dump(old_order, w)\n w.close()\n if len(old_order) > 0 :\n continue\n else:\n flagg = False\n elif order[1] == \"<\" or order[1] == \"<=\":\n if info['lastPrice'] <= order[2]:\n input_order = order[0] + order[1] + order[2]\n line_bot_api.push_message(order[3],TextSendMessage(text=input_order + \"\\n已達成!\"))\n if order in old_order:\n old_order.remove(order)\n with open('order.json', 
'w') as w:\n json.dump(old_order, w)\n w.close()\n if len(old_order) > 0:\n continue\n else:\n flagg = False \n time.sleep(30)\n '''\n\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1', port= 8080)","sub_path":"line_bot.py","file_name":"line_bot.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"287961123","text":"# This program gives details about mileage in a multi-legged trip.\n\n\ndef main():\n initial_odo = float(input(\"Enter the initial odometer reading in miles: \"))\n mpg_lst = []\n while True:\n user_input = input(\"Enter the current odometer reading and amount of gas consumed in gallons \"\n \"seperated by a space (Press to end your trip): \")\n if not user_input:\n break\n user_input_lst = user_input.split(\" \")\n new_odo = float(user_input_lst[0])\n gallons_consumed = float(user_input_lst[1])\n miles = new_odo - initial_odo\n mpg_lst.append(miles / gallons_consumed)\n print('Your fuel efficiency for this leg of the trip was '\n '{:.02} miles per gallon.'.format(miles/gallons_consumed))\n initial_odo = new_odo\n combined_mpg = 0\n for mpg in mpg_lst:\n combined_mpg += mpg\n combined_mpg = combined_mpg / len(mpg_lst)\n print('Your fuel efficiency for this trip was {:02} miles per gallon.'.format(combined_mpg))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"johnzellebook/ch8/ex8-9.py","file_name":"ex8-9.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"598173879","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\nimport tempfile\nimport json\n\nimport fabric\nimport fabric.api\n\nfrom cloudify import ctx\nfrom azurecloudify import constants\n\nPROVIDER_CONTEXT_RUNTIME_PROPERTY = 'provider_context'\n\n\ndef configure(azure_config):\n _set_provider_context()\n _copy_azure_configuration_to_manager(azure_config)\n\n\ndef _copy_azure_configuration_to_manager(azure_config):\n tmp = tempfile.mktemp()\n with open(tmp, 'w') as f:\n json.dump(azure_config, f)\n with open(tmp, 'r+') as f:\n json_data = json.load(f)\n json_data[\"auth_token\"] = \"\"\n json_data[\"token_expires\"] = \"0\" #unix timestamp for 1/1/1970 00:00:00 is 0\n f.seek(0)\n f.write(json.dumps(json_data))\n\n config_path = ctx.node.properties.get(\n 'cloudify').get(\n 'plugins').get(\n 'azure_config_path') or constants.default_path_to_azure_conf\n\n fabric.api.sudo('mkdir -p {0}'.format(os.path.dirname(config_path)))\n fabric.api.put(tmp, '{0}'.format(config_path), use_sudo=True)\n\n\ndef _set_provider_context():\n # Do not use this code section as a reference - it is a workaround for a\n # deprecated feature and will be removed in the near future\n\n resources = dict()\n\n # the reference to storage only works the workflow is executed as a\n # local workflow (i.e. 
in a local environment context)\n node_instances = ctx._endpoint.storage.get_node_instances()\n nodes_by_id = \\\n {node.id: node for node in ctx._endpoint.storage.get_nodes()}\n\n node_id_to_provider_context_field = {\n 'manager_host': 'manager_host',\n 'manager_resource_group': 'manager_resource_group',\n 'manager_security_group': 'manager_security_group',\n 'manager_storage_account': 'manager_storage_account',\n 'manager_public_ip': 'manager_public_ip',\n 'manager_nic': 'manager_nic',\n 'manager_vnet': 'manager_vnet'\n }\n for node_instance in node_instances:\n if node_instance.node_id in node_id_to_provider_context_field:\n run_props = node_instance.runtime_properties\n props = nodes_by_id[node_instance.node_id].properties\n provider_context_field = \\\n node_id_to_provider_context_field[node_instance.node_id]\n\n if 'use_external_resource' in props:\n resources[provider_context_field] = {\n 'use_external_resource': props['use_external_resource']\n }\n else:\n resources[provider_context_field] = {}\t\t\t\t\n\n for runtime_prop in run_props:\n resources[provider_context_field][runtime_prop] = run_props[runtime_prop]\n ctx.logger.info(\"field {} prop {} = {}\".format(provider_context_field,runtime_prop, run_props[runtime_prop]))\n\n if node_instance.node_id == 'manager_host':\n resources[provider_context_field]['ip'] = run_props['ip']\n ctx.logger.info(\"provider_context_field {} private IP is {}\".format(provider_context_field, run_props['ip']))\n\n provider = {\n 'resources': resources\n }\n\n ctx.instance.runtime_properties[PROVIDER_CONTEXT_RUNTIME_PROPERTY] = provider\n","sub_path":"new-manager-blueprint/components/manager/scripts/azure/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"622447932","text":"import logging\nimport time\nimport re\nimport dateutil.parser as dateparser\n\nfrom magnet.config import settings\n\nclass Item(object):\n\n name = None\n item = None\n fname = None\n\n post_fields = [\n 'title',\n 'author',\n 'subhead',\n 'summary',\n 'body',\n 'images',\n 'slug',\n 'tags',\n 'type',\n 'link',\n ]\n\n main_fields = [\n 'id',\n 'self_url',\n 'source',\n 'status',\n 'published',\n 'created',\n 'post',\n ]\n\n def __init__(self, name, item, fname):\n self.name = name\n self.item = item\n self.fname = fname\n\n def get_source(self):\n return {}\n\n def get_tags(self):\n return []\n\n def get_images(self):\n return []\n\n def get_author(self):\n author = self.item.get('author')\n return author or settings.get_blog(self.name, 'author')\n\n def get_self_url(self):\n url = '/post/%s/' % self.get_id()\n slug = self.get_slug()\n if slug:\n url += '%s/' % slug\n return url\n\n def callattrib(self, attrib):\n op = getattr(self, 'get_%s' % attrib, None)\n if callable(op):\n return op()\n return self.item.get(attrib, None)\n\n def get_post(self):\n converted_item = {}\n for n in self.post_fields:\n converted_item[n] = self.callattrib(n)\n return converted_item\n\n def get(self):\n converted_item = {}\n for n in self.main_fields:\n converted_item[n] = self.callattrib(n)\n return converted_item\n\n def to_timestamp(self, date):\n dt = dateparser.parse(date)\n ts = int(time.mktime(dt.timetuple()))\n return ts\n\n def extract_img_tags(self, html):\n p = re.compile(r'')\n imgs = []\n for n in re.findall(p, html):\n m = re.search('src=\"([^\"]+)', n)\n if m and m.group(1):\n imgs.append(m.group(1))\n return imgs, p.sub('[image]', html)\n\n def 
index(self, converted_item, path):\n return {\n 'id': converted_item.get('id'),\n 'published': converted_item.get('published'),\n 'path': path,\n 'status': converted_item.get('status'),\n 'title': converted_item.get('post').get('title'),\n 'url': converted_item.get('self_url'),\n 'tags': converted_item.get('post').get('tags'),\n }\n","sub_path":"magnet/converter/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"500407248","text":"# encoding:utf-8\n\n\"\"\"\n域名信息相关页面handler\n\"\"\"\n\nimport decimal\nimport tornado\nimport json\nfrom models.whois_infor import WhoisInfor\nfrom models.domain_db import TldInfor\nfrom base_handler import BaseHandler\n\nPATH = './domain/' # 模板地址\n\nclass DomainFindHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n\n if not self.get_current_user():\n self.redirect('/login')\n return\n\n self.render(\n PATH + 'domain_find.html'\n )\n\n# ajex异步加载查询结果\nclass DomainFindDataHandler(BaseHandler):\n\n @tornado.web.authenticated\n def get(self):\n if not self.get_current_user():\n self.redirect('/login')\n return\n\n results = []\n domain_name = self.get_argument('domain_name', '')\n option = self.get_argument('option', '')\n\n whois_infor = WhoisInfor().get_whois_infor(domain_name, option)\n\n self.write(json.dumps(whois_infor, default=self._json_serial))\n\n\n def _json_serial(self, obj):\n \"\"\"格式化时间\"\"\"\n\n if isinstance(obj, date):\n serial = obj.isoformat()\n return serial\n\nclass DomainFindWholeDataHandler(BaseHandler):\n\n @tornado.web.authenticated\n def get(self):\n if not self.get_current_user():\n self.redirect('/login')\n return\n\n whole_whois = WhoisInfor().get_whole_whois_infor()\n\n self.write(json.dumps(whole_whois, default=self._json_serial))\n\n\n def _json_serial(self, obj):\n \"\"\"格式化时间\"\"\"\n\n if isinstance(obj, date):\n serial = obj.isoformat()\n return serial\n\nclass TldDestributionHandler(BaseHandler):\n\n @tornado.web.authenticated\n def get(self):\n if not self.get_current_user():\n self.redirect('/login')\n return\n domains, total = TldInfor().get_tld_infor('db')\n self.render(\n PATH + 'domain_overall.html',\n domains=json.dumps(domains),\n total=total\n )","sub_path":"handlers/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"202201378","text":"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport seaborn as sns \nfrom sklearn.model_selection import train_test_split \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\n\npd.set_option('display.max_columns',None)\ndata=pd.read_csv('train.csv')\npredict_data=pd.read_csv('test.csv')\n\ndata.pickup_time=pd.to_datetime(data.pickup_time,format='%m/%d/%Y %H:%M')\ndata.drop_time=pd.to_datetime(data.drop_time,format='%m/%d/%Y %H:%M')\n\ndata['pickup_year']=data.apply(lambda row: row.pickup_time.year,axis=1)\ndata['pickup_month']=data.apply(lambda row: row.pickup_time.month,axis=1)\ndata['pickup_day']=data.apply(lambda row: row.pickup_time.day,axis=1)\ndata['pickup_hour']=data.apply(lambda row: row.pickup_time.hour,axis=1)\ndata['pickup_minute']=data.apply(lambda row: row.pickup_time.minute,axis=1)\n\ndata['drop_year']=data.apply(lambda row: row.drop_time.year,axis=1)\ndata['drop_month']=data.apply(lambda row: 
row.drop_time.month,axis=1)\ndata['drop_day']=data.apply(lambda row: row.drop_time.day,axis=1)\ndata['drop_hour']=data.apply(lambda row: row.drop_time.hour,axis=1)\ndata['drop_minute']=data.apply(lambda row: row.drop_time.minute,axis=1)\n\ndata=data.drop(columns=['tripid','pickup_time','drop_time'])\n\npredict_data.pickup_time=pd.to_datetime(predict_data.pickup_time,format='%m/%d/%Y %H:%M')\npredict_data.drop_time=pd.to_datetime(predict_data.drop_time,format='%m/%d/%Y %H:%M')\n\npredict_data['pickup_year']=predict_data.apply(lambda row: row.pickup_time.year,axis=1)\npredict_data['pickup_month']=predict_data.apply(lambda row: row.pickup_time.month,axis=1)\npredict_data['pickup_day']=predict_data.apply(lambda row: row.pickup_time.day,axis=1)\npredict_data['pickup_hour']=predict_data.apply(lambda row: row.pickup_time.hour,axis=1)\npredict_data['pickup_minute']=predict_data.apply(lambda row: row.pickup_time.minute,axis=1)\n\npredict_data['drop_year']=predict_data.apply(lambda row: row.drop_time.year,axis=1)\npredict_data['drop_month']=predict_data.apply(lambda row: row.drop_time.month,axis=1)\npredict_data['drop_day']=predict_data.apply(lambda row: row.drop_time.day,axis=1)\npredict_data['drop_hour']=predict_data.apply(lambda row: row.drop_time.hour,axis=1)\npredict_data['drop_minute']=predict_data.apply(lambda row: row.drop_time.minute,axis=1)\n\npredict_data=predict_data.drop(columns=['pickup_time','drop_time'])\npredict_index=predict_data.pop('tripid')\n\ntrain_data=data.sample(frac=0.8,random_state=200)\ntest_data=data.drop(train_data.index)\n\nlabel = {'correct':1, 'incorrect':0}\ndata.label=[label[item] for item in data.label]\ntrain_data.label=[label[item] for item in train_data.label]\ntest_data.label=[label[item] for item in test_data.label]\n\ndata=data.dropna()\ntrain_data=train_data.dropna()\ntest_data=test_data.dropna()\n\ndata_label=data.pop('label')\ntrain_label=train_data.pop('label')\ntest_label=test_data.pop('label')\n\nall_vars=data.columns.to_list()\ntop_vars=['duration','fare','meter_waiting','meter_waiting_till_pickup',\n 'drop_lon','pick_lat','pick_lon','drop_lat','meter_waiting_fare',\n 'additional_fare']\ntop_vars_reduced=['duration','fare','meter_waiting','meter_waiting_till_pickup',\n 'meter_waiting_fare','additional_fare']\nbottom_vars=[cols for cols in all_vars if cols not in top_vars]\nbottom_vars_reduced=[cols for cols in all_vars if cols not in top_vars_reduced]\n\ndata=data.drop(bottom_vars,axis=1)\ntrain_data=train_data.drop(bottom_vars,axis=1)\ntest_data=test_data.drop(bottom_vars,axis=1)\npredict_data=predict_data.drop(bottom_vars,axis=1)\n\n\nrandom_forest=RandomForestClassifier(n_estimators=200,max_depth=32,\n max_features=None,min_samples_split=4)\n\nrandom_forest=random_forest.fit(data,data_label)\n\npredictions = random_forest.predict(predict_data)\n\noutput = pd.DataFrame({'tripid': predict_index, 'prediction': predictions})\noutput.to_csv('sample_submission_random_forest_selected_tweeked_all_data.csv', index=False)\nprint(\"score : \"+str(random_forest.score(train_data,train_label)))\npred=pd.DataFrame(random_forest.predict(test_data))\nprint(\"Accuracy : \"+str(metrics.accuracy_score(test_label,pred)))\nprint(\"F1 score : \"+str(metrics.f1_score(test_label,pred)))\n","sub_path":"sklearn_random_forest_selected_tweeked_all_data.py","file_name":"sklearn_random_forest_selected_tweeked_all_data.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"51953699","text":"from urllib.parse import parse_qs\n\nimport utils\nfrom handler import Handler\n\n\nclass DownloadHandler(Handler):\n def handle(self, environ, start_response):\n c = utils.getCursor()\n o = parse_qs(environ['QUERY_STRING'])\n versionId = o['versionId']\n c.execute(\"SELECT * FROM versions WHERE id = ?\", versionId)\n version = c.fetchone()\n if version['existsOnMain'] == 1:\n dir = utils.missionMainDir\n else:\n dir = utils.missionMakerDir\n with open(dir + \"/\" + version['name'], mode=\"rb\", ) as stream:\n start_response('200 OK', [('Content-Disposition', 'attachment; filename=\"' + version['name'] + '\"')])\n return [stream.read()]\n\n def getHandled(self):\n return \"download\"\n","sub_path":"handlers/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"444959642","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#buat data (rumus sinus = sin(2wt + theta))\r\ndef sinusGenerator(amplitudo, frekuensi, tAkhir, theta):\r\n t = np.arange(0,tAkhir,0.1)\r\n y = amplitudo * np.sin(2*frekuensi*t + np.deg2rad(theta))\r\n return t,y\r\n\r\nt1, y1 = sinusGenerator(1,1,4,0)\r\nt2, y2 = sinusGenerator(1,1,4,90)\r\nt3, y3 = sinusGenerator(1,1,4,180)\r\n\r\n#buat plot\r\ndataPlot1 = plt.plot(t1,y1)\r\ndataPlot2 = plt.plot(t2,y2)\r\ndataPlot3 = plt.plot(t3,y3)\r\n\r\n#setting properties\r\nplt.setp(dataPlot1,color='r', linestyle='-', linewidth=0.75)\r\nplt.setp(dataPlot2,color='b', linestyle='-.', linewidth=4)\r\nplt.setp(dataPlot3,color='g', linestyle='--', linewidth=1.25)\r\n\r\n#menampikan plot\r\nplt.show()\r\n","sub_path":"matplotlib/set properties/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"564081448","text":"import os\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport random \n\n\nfrom collections import Counter\nfrom sklearn.neighbors import KDTree \nfrom mpl_toolkits.mplot3d import Axes3D\nfrom input.Inputdatas_2dcnn import * \nfrom model.model_2dcnn import * \nfrom sklearn.preprocessing import StandardScaler\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\n\nLOG_DIR = 'log'\nif not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)\nLOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')\n#LOG_FOUT.write(str(FLAGS)+'\\n')\nNUM_CLASSES = 40\nNUM_POINT = 2048\nBATCH_SIZE = 32\nnumkernel = 512\npgnump = 64\n\nMAX_EPOCH = 500\n\nMOMENTUM = 0.9\nOPTIMIZER = 'momentum' #'adam or momentum'\nDECAY_STEP = 200000\nBASE_LEARNING_RATE= 0.05\nDECAY_RATE = 0.7\n\n#load train file\nTRAIN_FILES = getDataFiles( \\\n os.path.join(BASE_DIR, 'input/train_files.txt'))\n\nTEST_FILES = getDataFiles( \\\n os.path.join(BASE_DIR, 'input/test_files.txt'))\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n BASE_LEARNING_RATE, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n DECAY_STEP, # Decay step.\n DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\n return learning_rate \n\ndef variable_summaries(var, name=\"layer\"):\n with tf.variable_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with 
tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef train():\n with tf.Graph().as_default():\n with tf.device('/cpu:0'):\n pointclouds_pl, pointclouds_pl_m4, pointclouds_kernel, pointclouds_all ,labels_pl = placeholder_inputs(BATCH_SIZE, numkernel, pgnump) \n is_training_pl = tf.placeholder(tf.bool, shape=())\n print(is_training_pl)\n # Note the global_step=batch parameter to minimize. \n # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.\n batch = tf.Variable(0)\n \n # Get model and loss \n pred = get_model(pointclouds_pl, pointclouds_pl_m4, pointclouds_kernel, pointclouds_all, is_training_pl)\n loss = get_loss(pred, labels_pl)\n tf.summary.scalar('loss', loss)\n\n correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))\n accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)\n tf.summary.scalar('accuracy', accuracy)\n\n # Get training operator\n learning_rate = get_learning_rate(batch)\n tf.summary.scalar('learning_rate', learning_rate)\n if OPTIMIZER == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)\n elif OPTIMIZER == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=batch)\n \n '''\n var_list_w = [var for var in tf.trainable_variables() if \"w\" in var.name]\n #var_list_b = [var for var in tf.trainable_variables() if \"b\" in var.name]\n\n gradient_w = optimizer.compute_gradients(loss, var_list=var_list_w)\n #gradient_b = optimizer.compute_gradients(loss, var_list=var_list_b)\n \n for idx, itr_g in enumerate(gradient_w):\n variable_summaries(itr_g[0], \"layer%d-w\"%idx)\n #for idx, itr_g in enumerate(gradient_b):\n # variable_summaries(itr_g[0], \"layer%d-b\"%idx)\n '''\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n \n # Create a session\n # config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n # config.allow_soft_placement = True\n # config.log_device_placement = False\n # sess = tf.Session(config=config)\n sess = tf.Session(config=tf.ConfigProto(device_count={'GPU':0})) \n\n # Add summary writers\n #merged = tf.merge_all_summaries()\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),\n sess.graph)\n test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))\n\n # Init variables\n init = tf.global_variables_initializer()\n sess.run(init, {is_training_pl: True})\n\n ops = {'pointclouds_pl': pointclouds_pl,\n 'pointclouds_pl_m4': pointclouds_pl_m4,\n 'pointclouds_kernel':pointclouds_kernel,\n 'pointclouds_all':pointclouds_all,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss,\n 'train_op': train_op,\n 'merged': merged,\n 'step': batch}\n \n max_acc=0\n\n for epoch in range(MAX_EPOCH):\n log_string('**** EPOCH %03d ****' % (epoch))\n sys.stdout.flush()\n \n allaccuracy = train_one_epoch(sess, ops, train_writer)\n eval_one_epoch(sess, ops, test_writer)\n \n # Save the variables to disk.\n if epoch % 1 == 0:\n log_string('epoch: '+str(epoch)+', val_acc: '+str(allaccuracy)+'\\n')\n if allaccuracy > max_acc:\n max_acc=allaccuracy\n save_path = saver.save(sess, os.path.join(LOG_DIR, \"model.ckpt\"), global_step=epoch)\n log_string(\"Model saved in file: %s\" 
% save_path)\n\ndef train_one_epoch(sess, ops, train_writer):\n is_training = True\n #shuffle train file\n train_file_idxs = np.arange(0, len(TRAIN_FILES))\n np.random.shuffle(train_file_idxs)\n allaccuracy = 0\n\n for fn in range(len(TRAIN_FILES)):\n log_string('train file NUM---' + str(fn) + '-----')\n current_data, current_label = loadDataFile(TRAIN_FILES[train_file_idxs[fn]])\n current_data = current_data[:,0:NUM_POINT,:]\n current_data, current_label, _ = shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n file_size = current_data.shape[0]\n\n num_batches = file_size // BATCH_SIZE\n log_string('train file size---' + str(file_size) + '-----')\n \n total_correct = 0\n total_seen = 0\n loss_sum = 0\n\n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n \n train_data_r = rotate_point_cloud(current_data[start_idx:end_idx, :, :])\n train_data_rt = rotate_point_cloud(train_data_r)\n train_label = current_label[start_idx:end_idx]\n train_data, train_data_m4, idx_data, train_data_all = point_group_first(train_data_rt,train_data_rt,numkernel,pgnump)\n print('train batch',batch_idx)\n # #disply point cloud\n # point = train_data[0][:][:][:]\n # point1 = idx_data[0][:][:]\n # print(point.shape)\n # print(train_label[0])\n\n # fig1=plt.figure(dpi=120) \n # ax1=fig1.add_subplot(111,projection='3d') \n # plt.title('point cloud1')\n # for i in range(numkernel): \n # col = random.sample(range(1, 100), 3)\n # ax1.scatter(point1[i,0],point1[i,1],point1[i,2],color=[float(col[0])/100.0, float(col[1])/100.0, float(col[2])/100.0],marker='.',s=10,linewidth=1,alpha=1,cmap='spectral') \n # ax1.axis('scaled') \n # ax1.set_xlabel('X Label') \n # ax1.set_ylabel('Y Label') \n # ax1.set_zlabel('Z Label') \n\n # fig=plt.figure(dpi=120) \n # ax=fig.add_subplot(111,projection='3d') \n # plt.title('point cloud')\n # for i in range(numkernel): \n # col = random.sample(range(1, 100), 3)\n # ax.scatter(point[i,:,0],point[i,:,1],point[i,:,2],color=[float(col[0])/100.0, float(col[1])/100.0, float(col[2])/100.0],marker='.',s=10,linewidth=1,alpha=1,cmap='spectral') \n # ax.axis('scaled') \n # ax.set_xlabel('X Label') \n # ax.set_ylabel('Y Label') \n # ax.set_zlabel('Z Label') \n # plt.show() \n \n #log_string('batch num---' + str(batch_idx) + '-----')\n feed_dict = {ops['pointclouds_pl']: train_data,\n ops['pointclouds_pl_m4']: train_data_m4,\n ops['pointclouds_kernel']: idx_data,\n ops['pointclouds_all']: train_data_all,\n ops['labels_pl']: train_label,\n ops['is_training_pl']: is_training,}\n summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],\n ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)\n train_writer.add_summary(summary, step)\n pred_val = np.argmax(pred_val, 1)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += BATCH_SIZE\n loss_sum += loss_val\n \n log_string('mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('accuracy: %f' % (total_correct / float(total_seen)))\n allaccuracy += (total_correct / float(total_seen))\n\n return (allaccuracy / float(len(TRAIN_FILES)))\n #print(train_data.shape)\n #print(train_label.shape)\n \n # #disply point cloud\n # point = train_data[0][:][:][:]\n # print(point.shape)\n # print(train_label[0]) \n # fig=plt.figure(dpi=120) \n # ax=fig.add_subplot(111,projection='3d') \n # plt.title('point cloud')\n # for i in range(128): \n # col = random.sample(range(1, 100), 
3)\n # ax.scatter(point[i,:,0],point[i,:,1],point[i,:,2],color=[float(col[0])/100.0, float(col[1])/100.0, float(col[2])/100.0],marker='.',s=10,linewidth=1,alpha=1,cmap='spectral') \n # ax.axis('scaled') \n # ax.set_xlabel('X Label') \n # ax.set_ylabel('Y Label') \n # ax.set_zlabel('Z Label') \n # plt.show() \n\n# def eval_one_epoch(sess, ops, test_writer):\n# \"\"\" ops: dict mapping from string to tf ops \"\"\"\n# is_training = False #test\n# total_correct = 0\n# total_seen = 0\n# loss_sum = 0\n# total_seen_class = [0 for _ in range(NUM_CLASSES)]\n# total_correct_class = [0 for _ in range(NUM_CLASSES)]\n \n# for fn in range(len(TEST_FILES)):\n# log_string('train file NUM---' + str(fn) + '-----')\n# current_data, current_label = loadDataFile(TEST_FILES[fn])\n# current_data = current_data[:,0:NUM_POINT,:]\n# current_label = np.squeeze(current_label)\n \n# file_size = current_data.shape[0]\n# num_batches = file_size // BATCH_SIZE\n# log_string('test file size---' + str(file_size) + '-----')\n\n# for batch_idx in range(num_batches):\n# start_idx = batch_idx * BATCH_SIZE\n# end_idx = (batch_idx+1) * BATCH_SIZE\n# test_data_p = current_data[start_idx:end_idx, :, :]\n# test_data=np.zeros([BATCH_SIZE,numkernel,pgnump,3])\n\n# for point_idx in range (BATCH_SIZE):\n# rerowcol=test_data_p[point_idx]\n# KernelList=random.sample(range(rerowcol.shape[0]),numkernel)\n# #rerowcol_downsample is downsample point\n# rerowcol_downsample=rerowcol[KernelList,:]\n# kdt = KDTree(rerowcol, leaf_size=30, metric='euclidean')\n# pgi=kdt.query(rerowcol_downsample, k=pgnump, return_distance=False)\n# pgr=np.zeros([numkernel,pgnump,3])\n# for n in range(pgi.shape[0]):\n# pgr[n]=rerowcol[pgi[n,:],:]\n# test_data[point_idx]=pgr\n\n# feed_dict = {ops['pointclouds_pl']: test_data,\n# ops['labels_pl']: current_label[start_idx:end_idx],\n# ops['is_training_pl']: is_training}\n# summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],\n# ops['loss'], ops['pred']], feed_dict=feed_dict)\n \n# #test_writer.add_summary(summary, step)\n# pred_val = np.argmax(pred_val, 1)\n# correct = np.sum(pred_val == current_label[start_idx:end_idx])\n# total_correct += correct\n# total_seen += BATCH_SIZE\n# loss_sum += (loss_val*BATCH_SIZE)\n# for i in range(start_idx, end_idx):\n# l = current_label[i]\n# total_seen_class[l] += 1\n# total_correct_class[l] += (pred_val[i-start_idx] == l)\n \n# log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))\n# log_string('eval accuracy: %f'% (total_correct / float(total_seen)))\n# log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))\n\ndef eval_one_epoch(sess, ops, test_writer):\n is_training = False\n #shuffle train file\n test_file_idxs = np.arange(0, len(TEST_FILES))\n #np.random.shuffle(train_file_idxs)\n \n for fn in range(len(TEST_FILES)):\n log_string('test file NUM---' + str(fn) + '-----')\n current_data, current_label = loadDataFile(TEST_FILES[test_file_idxs[fn]])\n current_data = current_data[:,0:NUM_POINT,:]\n current_data, current_label, _ = shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n file_size = current_data.shape[0]\n\n num_batches = file_size // BATCH_SIZE\n log_string('test file size---' + str(file_size) + '-----')\n \n total_correct = 0\n total_seen = 0\n loss_sum = 0\n\n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n \n test_data = current_data[start_idx:end_idx, :, 
:]\n test_label = current_label[start_idx:end_idx]\n test_data_pg, test_data_pg_m4, idx_data,test_data_all= point_group_first(test_data,test_data,numkernel,pgnump)\n \n print('test batch',batch_idx)\n #log_string('batch num---' + str(batch_idx) + '-----')\n feed_dict = {ops['pointclouds_pl']: test_data_pg,\n ops['pointclouds_pl_m4']: test_data_pg_m4,\n ops['pointclouds_kernel']: idx_data,\n ops['pointclouds_all']: test_data_all,\n ops['labels_pl']: test_label,\n ops['is_training_pl']: is_training,}\n summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],\n ops['loss'], ops['pred']], feed_dict=feed_dict)\n test_writer.add_summary(summary, step)\n pred_val = np.argmax(pred_val, 1)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += BATCH_SIZE\n loss_sum += loss_val\n \n log_string('test mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('test accuracy: %f' % (total_correct / float(total_seen)))\n\n\n\nif __name__ == \"__main__\":\n train()\n LOG_FOUT.close()\n","sub_path":"train_2dcnn.py","file_name":"train_2dcnn.py","file_ext":"py","file_size_in_byte":16133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"86754356","text":"import matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\n\ntimes = {}\n\nwith open(\"times.dat\", \"rb\") as f:\n times = pickle.load(f)\n\n # Prepare the data\n x = np.array(range(len(times[\"calculateMatrix\"])))\n\n plt.yscale(\"log\")\n plt.title(\"Time taken to produce 10 million element buffer\")\n plt.ylabel(\"Time taken (ns) [log scale]\")\n plt.xlabel(\"Trial number\")\n\n for fn in times:\n plt.plot(x,times[fn], label=fn)\n\n print(\"Mean time for {}: {} ns\".format(fn, sum(times[fn])/len(times[fn])))\n\n # Add a legend\n plt.legend()\n\n # Show the plot\n plt.show()","sub_path":"times_plotter.py","file_name":"times_plotter.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"437437234","text":"class Truck:\r\n '''-> Truck - a type of automobile. 
'''\r\n\r\n def dumpload(self, value=None):\r\n if value is None:\r\n print(\"Truck has nothing to dump\")\r\n else:\r\n print(\"Truck is dumping \" + str(value))\r\n\r\nmyTruck = Truck()\r\nmyTruck.dumpload(5)\r\n\r\n\r\n \r\n\r\n \r\n","sub_path":"OO_Method_DefaultValues.py","file_name":"OO_Method_DefaultValues.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"618415770","text":"import requests\nimport pickle\nimport time\nfrom lxml import html as lxhtml\nfrom lxml import etree\nfrom urllib.parse import unquote\nimport html\n\n# Проверка наличия лида в лидроке\n# Открываем лидрок, грузим куки, ищем lead\ndef lead_check(self):\n time.sleep(5) #ждем появления лида в базе\n\n tree = self.get_source('https://leadrock.com/administrator/lead')\n lead = self.get_text(tree, \"//td[contains(.,'%s') and contains(.,'%s')]\" % (self.lead, self.LEAD_NAME))\n if lead:\n self.log(\"лид дошел\")\n else:\n self.logBad(\"не удалось найти лид c телефоном %s\" % self.lead)\n return False\n\n # Получаем данные лида, оставленные пользователем\n lead_data = self.get_text(tree, \"//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[3]\" % (self.lead, self.LEAD_NAME))\n lead_data = lead_data.split('\\n')\n lead_data = [x for x in lead_data if x]\n for s in lead_data:\n if ('Подробнее' not in s) and ('More information' not in s):\n self.log(' %s' % s)\n\n # Получаем номер постбэка\n self.postback_id = self.get_text(tree, \"//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[2]\" % (self.lead, self.LEAD_NAME))\n\n # Получаем номер лида\n self.lead_id = self.get_text(tree, \"//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[1]/span[1]\" % (self.lead, self.LEAD_NAME))\n\n # Получаем сабы\n lead_data = self.get_text(tree, \".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[6]\" % (self.lead, self.LEAD_NAME))\n lead_subs = lead_data.split('\\n')[2]\n if 'subid ' not in lead_subs:\n lead_subs = lead_subs.split(', ')\n for sub in lead_subs:\n self.log(' %s' % sub)\n\n return True\n\n\n\n\n\n\n\n\n\n\n\n\n\n # try:\n # # WebDriverWait(self.driver, 6).until(\n # # EC.presence_of_element_located((By.XPATH, \"//td[contains(.,'%s') and contains(.,'%s')]\" % (self.lead, self.LEAD_NAME))))\n # self.f_xp(\"//td[contains(.,'%s') and contains(.,'%s')]\" % (self.lead, self.LEAD_NAME))\n # self.log(\"лид дошел\")\n\n # try:\n # self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[3]/a[@class='more-info']\" % (self.lead, self.LEAD_NAME)).click()\n # except:\n # pass\n \n # lead_data = self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[3]\" % (self.lead, self.LEAD_NAME)).text.split('\\n')\n # for s in lead_data:\n # if ('Скрыть' not in s) and ('Less information' not in s):\n # self.log(' %s' % s)\n\n\n # try:\n # self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[4]/i[contains(@class,'fa-spinner')]\" % s(elf.lead, self.LEAD_NAME))\n # self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[10]/a[contains(@class,'trash')]\" % (self.lead, self.LEAD_NAME)).click()\n # time.sleep(1)\n # self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[4]/i[contains(@class,'fa-trash-o')]\" % (self.lead, self.LEAD_NAME))\n # self.log(\"лид отправлен в trash\")\n # except:\n # #f_xp(\".//tr[td[contains(.,'\" + str(lead) + \"')]]/td[9]/a[contains(@class,'hold')]\")\n # try:\n # self.f_xp(\".//tr[td[contains(.,'%s') and contains(.,'%s')]]/td[4]/i[contains(@class,'fa-trash-o')]\" % (self.lead, 
self.LEAD_NAME))\n # self.log(\"лид уже в trash\")\n # except:\n # self.logBad(\"не удалось отправить в trash лид c телефоном %s\" % self.lead)\n # except:\n # self.logBad(\"не удалось найти лид c телефоном %s\" % self.lead)\n # return False","sub_path":"checks/lead_check.py","file_name":"lead_check.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"235598251","text":"# -*- encoding: utf-8 -*-\n\n\ndef _render_value(value):\n \"\"\"Renders a value.\"\"\"\n if value is None or value == 'None':\n return None\n else:\n try:\n return value.strip() or None\n except AttributeError:\n return value\n\n\ndef _elem_to_list(elem):\n \"\"\"Convert an lxml list-like Element to a Python list.\n\n Some of the Miro data stores lists in XML by using ``<_>`` keys, e.g.:\n\n \n <_>apple\n <_>banana\n <_>cherry\n \n\n This function takes such elements and turns them into lists.\n \"\"\"\n assert all(child.tag == '_' for child in elem.iterchildren())\n return [_render_value(child.text) for child in elem.iterchildren()]\n\n\ndef elem_to_dict(elem):\n \"\"\"Converts an lxml Element to a Python dict.\"\"\"\n res = {}\n for child in elem.iterchildren():\n name = child.tag\n assert name not in res\n\n # Miro stores lists with <_> keys, so if we spot one, this element\n # should actually be treated as a list.\n if name == '_':\n return _elem_to_list(elem)\n\n if child.getchildren():\n res[name] = elem_to_dict(child)\n else:\n res[name] = _render_value(child.text)\n return res\n","sub_path":"miro_adapter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"321530709","text":"import numpy as np \nimport cv2\n\ndef video2imgs(video_name, size):\n \"\"\"\n\n :param video_name: 字符串, 视频文件的路径\n :param size: 二元组,(宽, 高),用于指定生成的字符画的尺寸\n :return: 一个 img 对象的列表,img对象实际上就是 numpy.ndarray 数组\n \"\"\"\n\n img_list = []\n\n # 从指定文件创建一个VideoCapture对象\n cap = cv2.VideoCapture(video_name)\n\n # 如果cap对象已经初始化完成了,就返回true,换句话说这是一个 while true 循环\n while cap.isOpened():\n # cap.read() 返回值介绍:\n # ret 表示是否读取到图像\n # frame 为图像矩阵,类型为 numpy.ndarry.\n ret, frame = cap.read()\n if ret:\n # 转换成灰度图,也可不做这一步,转换成彩色字符视频。\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # resize 图片,保证图片转换成字符画后,能完整地在命令行中显示。\n img = cv2.resize(gray, size, interpolation=cv2.INTER_AREA)\n\n # 分帧保存转换结果\n img_list.append(img)\n else:\n break\n\n # 结束时要释放空间\n cap.release()\n\n return img_list\n\nif __name__ == \"__main__\":\n imgs = video2imgs(\"BadApple.mp4\", (64, 48))\n assert len(imgs) > 10","sub_path":"projects/character_animation/19-4-22/trial1.py","file_name":"trial1.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"426001263","text":"#!/usr/bin/env python3\n\nimport time\nimport json\nimport logging\nimport websocket\n\ndef connect():\n\n try:\n\n url = \"wss://livecodecreator.herokuapp.com/public/raspi/message\"\n\n header = [\n \"TokenCode: %s\" % \"XXXXXXXX\"\n ]\n\n # websocket.enableTrace(True)\n ws = websocket.WebSocketApp(url,\n header = header,\n on_open = on_open,\n on_close = on_close,\n on_error = on_error,\n on_message = on_message,\n )\n\n ws.run_forever()\n\n except Exception as e:\n\n logging.error(e)\n\ndef on_open(ws):\n\n pass\n\ndef on_close(ws):\n\n logging.info(\"websocket close\")\n\ndef on_error(ws, error):\n\n 
logging.error(\"websocket error: {}\".format(error))\n\ndef on_message(ws, message):\n\n data = json.loads(message)\n if data.get(\"message\") == \"keepalive\": return\n logging.info(\"websocket message: {}\".format(data.get(\"message\")))\n\ndef main():\n\n logging.basicConfig(level=logging.DEBUG, format=\"%(levelname)s: %(message)s\")\n\n while True:\n\n connect()\n time.sleep(5)\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"cmd/raspi/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139914494","text":"from func import primes\n\n__author__ = \"Bungogood\"\n\n'''\nProblem 10\n\nSummation of primes\n'''\n\ndef f(x):\n total = 0\n for p in primes():\n if p > x:\n break\n total += p\n return total\n\nif __name__ == \"__main__\":\n print(f(2000000))","sub_path":"Problem-010.py","file_name":"Problem-010.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"486419167","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def print(self):\n temp = self.head\n while temp:\n print(temp.data, end=' ')\n temp = temp.next\n print()\n\n def push(self, data):\n # insert first\n node = Node(data)\n node.next = self.head\n self.head = node\n\n def insert(self, data):\n # insert last\n node = Node(data)\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = node\n\n def pop(self):\n # remove last\n temp = self.head\n self.head = self.head.next\n return temp.data\n\n\nif __name__ == '__main__':\n llst = LinkedList()\n llst.head = Node(1)\n\n second = Node(2)\n llst.head.next = second\n\n third = Node(3)\n second.next = third\n\n llst.print()\n print(llst.pop())\n print(llst.pop())\n print(llst.pop())\n\n llst.push(4)\n llst.push(5)\n llst.push(6)\n llst.print()\n\n llst.insert(1)\n llst.insert(2)\n llst.insert(3)\n llst.print()\n","sub_path":"Algorithm_Py/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"154285153","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 12 12:05:47 2019\r\nWrite a program that outputs the string representation of numbers from 1 to n.\r\n\r\nBut for multiples of three it should output “Fizz” instead of the number and for the multiples of five output “Buzz”. 
\r\nFor numbers which are multiples of both three and five output “FizzBuzz”.\r\n@author: swang\r\n\"\"\"\r\n\r\nclass Solution:\r\n def fizzBuzz(self, n):\r\n myList = []\r\n for num in range(1,n+1):\r\n if num%3 == 0 and num%5!=0:\r\n myList.append('Fizz')\r\n continue\r\n elif num%5 == 0 and num%3!=0:\r\n myList.append('Buzz')\r\n continue\r\n elif num%5==0 and num%3==0:\r\n myList.append('FizzBuzz')\r\n else:\r\n myList.append(str(num))\r\n return myList","sub_path":"Python/Fizz.py","file_name":"Fizz.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"188740919","text":"from __future__ import absolute_import\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport h5py as h5\nimport time, os\nimport pysingfel as ps\n\nnumOp = 1\nnumCl = 1\nnum = 2\npwd = os.path.dirname(__file__)\n\n# create particle object(s)\nparticleOp = ps.Particle()\nparticleOp.read_pdb(os.path.join(pwd,'../input/pdb/3iyf.pdb'), ff='WK')\n\nparticleCl = ps.Particle()\nparticleCl.read_pdb(os.path.join(pwd,'../input/pdb/3j03.pdb'), ff='WK')\n\n# load beam\nbeam = ps.Beam(os.path.join(pwd,'../input/beam/amo86615.beam'))\n\ngeom = os.path.join(pwd,'../input/lcls/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data')\n\n# load and initialize the detector\ndet = ps.PnccdDetector(geom=geom, beam=beam)\n\ntic = time.time()\nexperiment = ps.FXSExperiment(det, beam, [particleOp], numOp)\npatternOp = experiment.generate_image_stack()\ntoc = time.time()\nprint(\"It took {:.2f} seconds to finish SPI calculation.\".format(toc-tic))\n\nexperiment = ps.FXSExperiment(det, beam, [particleCl], numCl)\npatternCl = experiment.generate_image_stack()\n\n# calculate 1 diffraction pattern from 2 particles, where each particle has 50% chance of being Open or Closed\n# (end up in 25% with two Open, 25% with two Closed, and 50% with one of each)\nexperiment = ps.FXSExperiment(det, beam, [particleOp, particleCl], num)\npattern = experiment.generate_image_stack()\n\npattern_no_polarization = det.remove_polarization(pattern, res=None)\nnp_img = det.assemble_image_stack(pattern_no_polarization)\nmask = np.ones_like(pattern_no_polarization)\nnp_mask = det.assemble_image_stack(mask)\n\n# write data to HDF5\nwith h5.File('mixed_chaperones_and_mask.hdf5', 'w') as f:\n f.create_dataset('img', data=np_img)\n f.create_dataset('mask', data=np_mask, dtype=np.int16)\n\nfig = plt.figure(figsize=(10, 8))\nplt.subplot(131)\nplt.imshow(det.assemble_image_stack(patternOp),norm=LogNorm())\nplt.colorbar()\nplt.title('Open chaperone (photons)')\nplt.subplot(132)\nplt.imshow(det.assemble_image_stack(patternCl),norm=LogNorm())\nplt.colorbar()\nplt.title('Closed chaperone (photons)')\nplt.subplot(133)\nplt.imshow(det.assemble_image_stack(pattern),norm=LogNorm())\nplt.colorbar()\nplt.title('Mixed chaperones (photons)')\n\nfig = plt.figure()\npolarization = det.polarization_correction\nplt.subplot(121)\nplt.imshow(det.assemble_image_stack(polarization),vmin=0.995, vmax=1)\nplt.colorbar()\nplt.title('Polarization')\nplt.subplot(122)\nplt.imshow(det.assemble_image_stack(mask),vmin=0, vmax=1)\nplt.colorbar()\nplt.title('Polarization corrected region')\nplt.show()\n","sub_path":"examples/scripts/ExampleMultipleChaperones.py","file_name":"ExampleMultipleChaperones.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"592471459","text":"\"\"\" The amount of energy required to increase the temperature of one gram of a material by one degree Celsius is the material’s specific heat capacity, C. The total amount of energy, q, required to raise m grams of a material by ΔT degrees Celsius can be computed using the formula:\nq = mCΔT\nWrite a program that reads the mass of some water and the temperature change from the user. Your program should display the total amount of energy that must be added or removed to achieve the desired temperature change.\nHint: The specific heat capacity of water is 4.186 J . Because water has a g◦C density of 1.0 grams per milliliter, you can use grams and milliliters inter- changeably in this exercise.\nExtend your program so that it also computes the cost of heating the water. Electricity is normally billed using units of kilowatt hours rather than Joules. In this exercise, you should assume that electricity costs 8.9 cents per kilowatt hour. Use your program to compute the cost of boiling the water needed for a cup of coffee.\nHint: You will need to look up the factor for converting between Joules and kilowatt hours to complete the last part of this exercise.\n\"\"\"\nimport math\nmass_water = float(input('Insert mass of water: '))\ntemperature_initial = float(input('Insert temperature initial: '))\ntemperature_final = float(input('Insert temperature initial: '))\n\nq = mass_water* 4.184 * (temperature_final-temperature_initial)\n\nkwh = q*2.77778\n\ncost = kwh * 8.9 \n\nprint('Energy: ' + '%.2f' % q + ' joule' )\nprint('Costs: ' + '%.2f' % cost + ' cents')\n\n","sub_path":"The-Python-Workbook/1-Introduction-to-Programming/exercise17.py","file_name":"exercise17.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"389195683","text":"import azure.cognitiveservices.speech as speechsdk\n\nimport requests\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom io import BytesIO\n\n\ndef get_text():\n # Creates an instance of a speech config with specified subscription key and service region.\n # Replace with your own subscription key and service region (e.g., \"westus\").\n speech_key, service_region = \"02773ba094fa4c55be7510e2e317d9be\", \"westus\"\n speech_config = speechsdk.SpeechConfig(\n subscription=speech_key, region=service_region)\n\n # Creates a recognizer with the given settings\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)\n\n print(\"Say something...\")\n\n # Starts speech recognition, and returns after a single utterance is recognized. The end of a\n # single utterance is determined by listening for silence at the end or until a maximum of 15\n # seconds of audio is processed. 
The task returns the recognition text as result.\n # Note: Since recognize_once() returns only a single utterance, it is suitable only for single\n # shot recognition like command or query.\n # For long-running multi-utterance recognition, use start_continuous_recognition() instead.\n result = speech_recognizer.recognize_once()\n\n # Checks result.\n if result.reason == speechsdk.ResultReason.RecognizedSpeech:\n print(\"Recognized: {}\".format(result.text))\n return result.text\n elif result.reason == speechsdk.ResultReason.NoMatch:\n print(\"No speech could be recognized: {}\".format(result.no_match_details))\n elif result.reason == speechsdk.ResultReason.Canceled:\n cancellation_details = result.cancellation_details\n print(\"Speech Recognition canceled: {}\".format(\n cancellation_details.reason))\n if cancellation_details.reason == speechsdk.CancellationReason.Error:\n print(\"Error details: {}\".format(\n cancellation_details.error_details))\n\n\nsubscription_key = \"21b5911a1e084a9d8aa92127502e072f\"\nsearch_url = \"https://eastus.api.cognitive.microsoft.com/bing/v7.0/images/search\"\nsearch_term = get_text()\n\nterms = search_term.split(' ')\n\nimgs = []\nfor term in terms:\n print(term)\n headers = {\"Ocp-Apim-Subscription-Key\": subscription_key}\n params = {\"q\": term, \"license\": \"public\", \"imageType\": \"photo\"}\n response = requests.get(search_url, headers=headers, params=params)\n response.raise_for_status()\n search_results = response.json()\n imgs.append(search_results['value'][0]['thumbnailUrl'])\n\nf, axes = plt.subplots(len(imgs))\nfor i, imgLink in enumerate(imgs):\n image_data = requests.get(imgLink)\n image_data.raise_for_status()\n image = Image.open(BytesIO(image_data.content))\n axes[i].imshow(image)\n axes[i].axis(\"off\")\n\nplt.show()\n","sub_path":"get_speech.py","file_name":"get_speech.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"231944965","text":"## Copyright (c) 2012, 2013 Aldebaran Robotics. 
All rights reserved.\n## Use of this source code is governed by a BSD-style license that can be\n## found in the COPYING file.\n\nimport pytest\nfrom qibuild.test.test_toc import TestToc\n\n# pylint: disable-msg=E1101\n@pytest.mark.slow\ndef test_build():\n with TestToc() as toc:\n proj = toc.get_project(\"submodule\")\n toc.configure_project(proj)\n toc.build_project(proj)\n","sub_path":"python/qibuild/test/test_submodule.py","file_name":"test_submodule.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"154966147","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 13 00:04:56 2016\n\n@author: ozawa\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom chainer import Function, gradient_check, Variable \nfrom chainer import optimizers, utils\nfrom chainer import Link, Chain, ChainList\nfrom chainer import iterators, serializers\nfrom chainer import report, training , datasets\nfrom chainer.datasets import tuple_dataset\nimport chainer.functions as F\nimport chainer.links as L\nimport time\nfrom chainer.training import extensions\n\n\n#%%\n# titanicdataの読み込み\nos.chdir('/Users/ozawa/Desktop/python/titanic_py')\nOriginal_train = pd.read_csv('train.csv')\nCabin = np.asarray( Original_train['Cabin'])\nCabin = pd.get_dummies(Cabin)\nCabin = Cabin.dot(np.ones(147))\n# 使わない列削除\nno_use_name = ['PassengerId','Name','Ticket','Cabin',]\nOriginal_train = Original_train.drop(no_use_name,axis =1) \n\n# 教師データ生成\ny_train = pd.get_dummies(Original_train['Survived'].astype(object))\ny_train = np.asarray(y_train).astype(np.int32)\n\ny_train = y_train[:,1]\nOriginal_train = Original_train.drop('Survived',axis =1)\n\n# ダミー変数生成\ndtypes = Original_train.dtypes\nnames = list(dtypes[dtypes != 'float64'].index) # float型以外をダミー変数に\ndum =pd.get_dummies(Original_train[names].astype(object)) # ダミー変数生成\nx_train = Original_train\nx_train = x_train.drop(names,axis =1) # ダミーにした変数を一旦除く \nx_train = pd.concat([x_train,dum,Cabin],axis = 1) # ダミー変数くっつける\n# 前処理\nx_train[np.isnan(x_train)] = 0 # 欠損値を0に変える。\nx_train = np.asarray(x_train).astype(np.float32)\n\n#%%\ntrain = tuple_dataset.TupleDataset(x_train, y_train)\n#%%\n\nclass TitanicChain(Chain):\n def __init__(self):\n super(TitanicChain, self).__init__(\n l1 = L.Linear(25,6),\n l2 = L.Linear(6,5), \n l3 = L.Linear(5,2),\n )\n\n def __call__(self, x, train=True):\n h1 = F.sigmoid(self.l1(x))\n h2 = F.sigmoid(self.l2(h1))\n h3 = F.sigmoid(self.l3(h2))\n return h3\n#%%\nmodel1 = TitanicChain()\nmodel = L.Classifier(model1)\noptimizer = optimizers.Adam()\noptimizer.setup(model)\n#%%\ntrain_iter = iterators.SerialIterator(train, 1)\ntest_iter = iterators.SerialIterator(train, 1,repeat=False, shuffle=False)\n#%%\nupdater = training.StandardUpdater(train_iter, optimizer)\ntrainer = training.Trainer(updater, (100, 'epoch'), out='result')\n\ntrainer.extend(extensions.Evaluator(test_iter, model))\ntrainer.extend(extensions.LogReport(trigger = (1,'epoch')))\ntrainer.extend(extensions.PrintReport( ['epoch', 'main/accuracy', 'validation/main/accuracy']))\ntrainer.extend(extensions.ProgressBar())\ntrainer.run()\n\n#%%\nmodel1.l1.W.data\n#%%\n# titanicdataの読み込み\nos.chdir('/Users/ozawa/Desktop/python/titanic_py')\nOriginal_test = pd.read_csv('test.csv')\nCabin = np.asarray( Original_test['Cabin'])\nCabin = pd.get_dummies(Cabin)\nCabin = Cabin.dot(np.ones(76))\n\n\n\n# 使わない列削除\nno_use_name = ['PassengerId','Name','Ticket','Cabin']\nPassengerId = 
Original_test.PassengerId\nOriginal_test = Original_test.drop(no_use_name,axis =1)\n\n\n# ダミー変数生成\ndtypes = Original_test.dtypes\nnames = list(dtypes[dtypes != 'float64'].index) # float型以外をダミー変数に\ndum =pd.get_dummies(Original_test[names].astype(object)) # ダミー変数生成\nx_test = Original_test\nx_test = x_test.drop(names,axis =1) # ダミーにした変数を一旦除く \nx_test = pd.concat([x_test,dum,Cabin],axis = 1) # ダミー変数くっつける\n# 前処理\nx_test = x_test.drop('Parch_9',axis =1)\nx_test[np.isnan(x_test)] = 0 # 欠損値を0に変える。\nx_test = np.asarray(x_test).astype(np.float32)\n#%%\nserializers.save_npz('model1', model)\nserializers.save_npz('state1', optimizer)\n\n#%%\nxte = Variable(x_test, volatile='on') \nyte = model.predictor(xte)\npermmit = yte.data\npermmit = permmit[:,1]\npermmit[permmit > 0.5] = 1\npermmit[permmit <= 0.5] = 0\npermmit = pd.Series(permmit)\ndf = pd.DataFrame({ 'PassengerId':PassengerId ,\n 'Survived' : permmit})\ndf.to_csv(\"permmit.csv\" )\n\n","sub_path":"titanic_py/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"617668436","text":"\"\"\"\nThis module provides one method (:func:`notify_course_of_action`), which serves to\ngive a slightly more helpful error message when a student tries to complete this\nassignment with an incompatible version of Python installed.\n\nOften times in the wild you will encounter long and complicated ``__main__.py`` (as\nwell as more often, long and complicated ``__init__.py`` files). We were concerned\nabout students being able to run the assignment, but did not want to detract from what\n``__main__.py`` was actually doing. That is, the core mechanics of ``__main__.py``\nare relatively straightforward:\n\n1. Import the relevant utilities from the modules in the assignment.\n2. Run the main method.\n\nHowever, it seemed likely that some students may not have had the right version of\nAnaconda Python. Depending on when you installed it, there were certain crucial updates\nrelated to the core library we used for displaying the assignment. We elected to\nseparate this out into a different file so that when you read ``__main__.py`` you would\nbe able to see more clearly what it is doing.\n\nIn short, this is a long way of saying the use of ``error_out.py`` is non-standard, and\nonly exists due to the circumstances surrounding this particular assignment's code-base.\n\"\"\"\n\n# FILE VERSION: released 5/5/2017 @ 13:00\n\nimport sys\nimport re\nimport textwrap\n\n\ndef notify_course_of_action():\n '''\n This method checks to see what version of python is installed, and informs the user\n where to go to get the correct version of python for this semester.\n\n If this code-base is used in the future, the default library will be ``PyQt5``, so\n\n 1. Switch the imports for ``PyQt4`` from ``__main__.py`` and ``PyQt5`` from below.\n 2. Update the ``semester`` variable, and ``install_base`` if the website layout\n has changed to a new setup.\n 3. Update the ``anaconda_needs`` variable for the correct version.\n\n Refer to the `PyQt documentation`_ for more information on switching to ``PyQt5``.\n\n .. 
_PyQt documentation: http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html\n\n :Return:\n ``int``\n Always returns ``1`` (indicates error).\n Intended use: ``sys.exit(notify_course_of_action())``.\n '''\n # Generate an error message to guide what the problem is.\n website_base = \"https://www.cs.cornell.edu/courses/cs1110\"\n semester = \"2017sp\"\n install_base = \"materials/python.php#install\"\n install_instructions = \"{}/{}/{}\".format(website_base, semester, install_base)\n\n # The Anaconda we installed at the beginning of the semester should have been 4.1.1\n # but starting with 4.2.0, PyQt5 was vendored instead. Hopefully no students actually\n # hit this code, but better to try and give as much information as possible.\n anaconda_needs = \"4.1.1\"\n anaconda_match = re.match(r\".*\\|Anaconda (\\d\\.\\d\\.\\d).*\\|.*\", sys.version)\n if anaconda_match:\n anaconda_version = anaconda_match.groups()[0]\n else:\n anaconda_version = \"No Anaconda python detected!\"\n\n # If they installed a newer version of Anaconda, PyQt4 and PyQt5 are not compatible\n # and cannot be used interchangeably. They should be very similar, but the author\n # of this file does not have time to discover any such bugs. What is evident is\n # that the import paths have changed.\n #\n # Excellent documentation for potential forward port here:\n #\n # http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html\n #\n # You may get lucky, and just have to change imports?\n err_msg_front = \"There seems to be an issue with the version of python you have \" \\\n \"installed. Please make sure you have the correct version of \" \\\n \"Anaconda python for *THIS* semester ({0}), instructions can be \" \\\n \"found here:\".format(semester)\n err_msg_expects = \"The version of Anaconda python expected\"\n err_msg_located = \"The version of Anaconda python we found\"\n\n try:\n from PyQt5.QtWidgets import QApplication, QMessageBox\n app = QApplication([]) # noqa: F841\n err_msg = textwrap.dedent('''\n {0}\n\n {1}\n\n {2}: {3}\n {4}: {5}\n '''.format(err_msg_front, install_instructions,\n err_msg_expects, anaconda_needs,\n err_msg_located, anaconda_version))\n QMessageBox.critical(None, \"MacPan\", err_msg.replace(\"\\n\", \"
\"))\n except:\n # Same error message as before, just don't use the html formatting since this\n # is getting sent to the console.\n err_msg = textwrap.dedent('''\n {0}\n\n {1}\n\n {2}: {3}\n {4}: {5}\n '''.format(err_msg_front, install_instructions,\n err_msg_expects, anaconda_needs,\n err_msg_located, anaconda_version))\n\n sys.stderr.write(err_msg)\n\n return 1\n","sub_path":"error_out.py","file_name":"error_out.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"586882430","text":"f = open('poem.txt', 'w')\nf.write(\"Roses are red,\\nbleach is white,\\ndrink some more\\nand it's Letalzeit.\\n\")\nf.close()\n\nout = open('poem.txt', 'r')\nprint(''.join(out.readlines()))\n\nimport pickle\n\nsp3 = ['Vodka', 'Martini', 'Beer', 'Scotch']\nfile = open('drinks.dat', 'wb')\npickle.dump(sp3, file)\nfile.close()\n\nfile = open('drinks.dat', 'rb')\ndr = pickle.load(file)\nprint(dr)\nfile.close()\n\nsurnames_file = open('surnames.txt', 'w')\n\nsurnames_file.write(\"Martin\\nStollman\\nRitchie\\n\")\n\nsurnames_file.close()\n\ntry:\n surnames_file = open(\"surnames.txt\", 'r')\n surnames_r = [line.strip() for line in surnames_file if line[0] == 'R']\n lines = [line for line in surnames_file]\nexcept IOError:\n print(\"Can\\'t read file\")\n\n","sub_path":"3/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496126029","text":"# encoding:utf-8\nimport re\nimport requests\nimport json\nimport urllib\nimport time\nimport timeit\nimport math\nimport sys\nfrom datetime import datetime\nfrom dateutil import tz\nimport os\n\nosenviron={}\nheaders={}\ninfo={}\nuserInfo={}\ndjj_bark_cookie=''\ndjj_sever_jiang=''\ndjj_tele_cookie=''\ndjj_djj_cookie=''\n\nencryptProjectId=''\n'''\n抽奖可获得京豆和快递优惠券\n活动时间:2021年1月15日至2021年2月19日\n活动入口:https://snsdesign.jd.com/babelDiy/Zeus/4N5phvUAqZsGWBNGVJWmufXoBzpt/index.html?channel=lingsns003&scope=0&sceneid=9001&btnTips=&hideApp=0\n\n'''\n\n\nheaders={'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5'}\n\n\n\n\n\n\n\n\n\n#删除\nresult=''\n\ndef TotalBean(cookies,checkck):\n print('检验过期')\n signmd5=False\n headers= {\n \"Cookie\": cookies,\n \"Referer\": 'https://home.m.jd.com/myJd/newhome.action?',\n \"User-Agent\": 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Mobile/15E148 Safari/604.1'\n }\n try:\n ckresult= requests.get('https://wq.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New',headers=headers,timeout=10).json()\n if ckresult['retcode']==0:\n signmd5=True\n print(f'''【京东{checkck}】''')\n else:\n \t signmd5=False\n \t msg=f'''【京东账号{checkck}】cookie已失效,请重新登录京东获取'''\n \t print(msg)\n pushmsg(msg)\n except Exception as e:\n signmd5=False\n msg=str(e)\n print(msg)\n pushmsg('京东cookie',msg)\n return signmd5\n\n\ndef JD_Geziyaya():\n JD_getInfo()\n JD_getUserInfo()\n while (userInfo['bless'] >= userInfo['cost_bless_one_time']):\n JD_draw()\n JD_getUserInfo()\n time.sleep(2)\ndef JD_getInfo():\n print('getInfo\\n')\n global info\n try:\n rs= requests.get('https://snsdesign.jd.com/babelDiy/Zeus/4N5phvUAqZsGWBNGVJWmufXoBzpt/index.html?channel=lingsns003&scope=0&sceneid=9001&btnTips=&hideApp=0',headers=headers,timeout=10)\n rs.encoding=rs.apparent_encoding\n rs=rs.text\n txt=re.compile('var snsConfig = (.*)').findall(rs)\n tmp=txt[0]\n \n info=json.loads(tmp)\n 
#print(info)\n \n except Exception as e:\n msg=str(e)\n print(msg)\ndef JD_getUserInfo():\n print('getUserInfo\\n')\n try:\n global userInfo\n body = 'activeid='+info['activeId']+'&token='+info['actToken']+'&sceneval=2&shareid=&_=&callback=query&'\n url='https://wq.jd.com/activet2/piggybank/query?'+body\n headers['Referer']='https://anmp.jd.com/babelDiy/Zeus/xKACpgVjVJM7zPKbd5AGCij5yV9/index.html?wxAppName=jd'\n \n rs= requests.get(url,headers=headers,timeout=10)\n rs.encoding=rs.apparent_encoding\n rs=rs.text\n #print(rs)\n txt=re.compile('query\\((.*)').findall(rs)\n tmp=txt[0]\n userInfo=json.loads(tmp)\n userInfo=userInfo['data']\n print('当前幸运值:'+str(userInfo['bless']))\n print(userInfo['complete_task_list'])\n for task in info['config']['tasks']:\n if len(userInfo['complete_task_list'])==0:\n print('去做任务'+task['_id'])\n doTask(task['_id'])\n else:\n if task['_id'] not in userInfo['complete_task_list']:\n print('去做任务'+task['_id'])\n doTask(task['_id'])\n except Exception as e:\n msg=str(e)\n print(msg)\n \n \n \n\n\n\ndef doTask(taskId):\n print('doTask\\n')\n global userInfo\n try:\n body = 'activeid='+info['activeId']+'&token='+info['actToken']+'&sceneval=2&shareid=&_=&callback=query&'+'task_bless=10&taskid='+taskId\n url='https://wq.jd.com/activet2/piggybank/completeTask?'+body\n headers['Referer']='https://anmp.jd.com/babelDiy/Zeus/xKACpgVjVJM7zPKbd5AGCij5yV9/index.html?wxAppName=jd'\n \n rs= requests.get(url,headers=headers,timeout=10)\n rs.encoding=rs.apparent_encoding\n rs=rs.text\n txt=re.compile('query\\((.*)').findall(rs)\n tmp=txt[0]\n if tmp.find('curbless')>0:\n tmp=json.loads(tmp)\n userInfo['bless'] = tmp['data']['curbless']\n print('任务完成成功,当前幸运值:'+str(tmp['data']['curbless']))\n \n else:\n print('任务重复======')\n \n \n\n time.sleep(1)\n \n\n except Exception as e:\n msg=str(e)\n print(msg)\n\n\ndef JD_draw():\n print('draw\\n')\n try:\n body = 'activeid='+info['activeId']+'&token='+info['actToken']+'&sceneval=2&shareid=&_=&callback=query&'\n url='https://wq.jd.com/activet2/piggybank/draw?'+body\n headers['Referer']='https://anmp.jd.com/babelDiy/Zeus/xKACpgVjVJM7zPKbd5AGCij5yV9/index.html?wxAppName=jd'\n rs= requests.get(url,headers=headers,timeout=10)\n rs.encoding=rs.apparent_encoding\n rs=rs.text\n txt=re.compile('query\\((.*)').findall(rs)\n tmp=txt[0]\n if tmp.find('drawflag')>0:\n tmp=json.loads(tmp)['data']\n print('转盘bless:'+str(tmp['bless']))\n else:\n print('结束======')\n \t return \n except Exception as e:\n msg=str(e)\n print(msg)\n\n\n \ndef check(flag,list):\n vip=''\n global djj_bark_cookie\n global djj_sever_jiang\n if \"DJJ_BARK_COOKIE\" in os.environ:\n djj_bark_cookie = os.environ[\"DJJ_BARK_COOKIE\"]\n if \"DJJ_SEVER_JIANG\" in os.environ:\n djj_sever_jiang = os.environ[\"DJJ_SEVER_JIANG\"]\n if flag in os.environ:\n vip = os.environ[flag]\n if flag in osenviron:\n vip = osenviron[flag]\n if vip:\n for line in vip.split('\\n'):\n if not line:\n continue \n list.append(line.strip())\n return list\n else:\n print(f'''【{flag}】 is empty,DTask is over.''')\n exit()\n \ndef pushmsg(title,txt,bflag=1,wflag=1,tflag=1):\n try:\n txt=urllib.parse.quote(txt)\n title=urllib.parse.quote(title)\n if bflag==1 and djj_bark_cookie.strip():\n print(\"\\n【通知汇总】\")\n purl = f'''https://api.day.app/{djj_bark_cookie}/{title}/{txt}'''\n response = requests.post(purl)\n #print(response.text)\n if tflag==1 and djj_tele_cookie.strip():\n print(\"\\n【Telegram消息】\")\n id=djj_tele_cookie[djj_tele_cookie.find('@')+1:len(djj_tele_cookie)]\n 
botid=djj_tele_cookie[0:djj_tele_cookie.find('@')]\n\n turl=f'''https://api.telegram.org/bot{botid}/sendMessage?chat_id={id}&text={title}\\n{txt}'''\n\n response = requests.get(turl)\n #print(response.text)\n if wflag==1 and djj_sever_jiang.strip():\n print(\"\\n【微信消息】\")\n purl = f'''http://sc.ftqq.com/{djj_sever_jiang}.send'''\n headers={\n 'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'\n }\n body=f'''text={txt})&desp={title}'''\n response = requests.post(purl,headers=headers,data=body)\n #print(response.text)\n except Exception as e:\n msg=str(e)\n print(msg)\ndef loger(m):\n print(m)\n global result\n result +=m+'\\n'\ndef getid(st):\n for k in st.split(';'):\n if k.strip().find('pt_pin=')==0:\n nm=k[(k.find('pt_pin=')+7):len(k)]\n nm=urllib.parse.unquote(nm)\n return nm\ndef islogon(j,count):\n JD_islogn=False \n for i in count.split(';'):\n if i.find('pin=')>=0:\n newstr=i.strip()[i.find('pin=')+4:len(i)]\n print(f'''>>>>>>>>>【账号{str(j)}开始】{newstr}''')\n if(TotalBean(count,newstr)):\n JD_islogn=True\n return JD_islogn\n \ndef clock(func):\n def clocked(*args, **kwargs):\n t0 = timeit.default_timer()\n result = func(*args, **kwargs)\n elapsed = timeit.default_timer() - t0\n name = func.__name__\n arg_str = ', '.join(repr(arg) for arg in args)\n print('[🔔运行完毕用时%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result\n return clocked\n \n@clock\ndef start():\n cookiesList=[]\n xfj_hdlist=[]\n global headers,result\n global djj_djj_cookie\n check('DJJ_DJJ_COOKIE',cookiesList)\n j=0\n for count in cookiesList:\n j+=1\n #if j!=3:\n #continue\n headers['Cookie']=count\n result+='【count】'+getid(count)\n if(islogon(j,count)):\n JD_Geziyaya()\n print('任务执行结束........+')\nif __name__ == '__main__':\n start()\n","sub_path":"djj/JD_geziyaya.py","file_name":"JD_geziyaya.py","file_ext":"py","file_size_in_byte":8240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"476665464","text":"class Translator:\n mapping = {\n \"CURRENCY\" : {\n '南非幣':'ZAR',\n '美元':'USD',\n '歐元':'EUR',\n '澳幣':'AUD',\n '港幣':'HKD',\n '人民幣':'CNH',\n '英鎊':'GBP',\n '日圓':'JPY',\n '加幣':'CAD',\n '紐西蘭幣':'NZD',\n '瑞士法郎':'CHF'\n },\n \"UNDERLYING\" : {\n '指數':'INDEX',\n '股權':'EQUITY',\n '匯率':'FX',\n '其他利益':'MUTUAL_FUND',\n '利率':'RATE',\n '商品':\"COMMODITY\",\n '信用事件':\"CREDIT_EVENT\",\n },\n \"PI\" : {\n \"專業投資人\":\"PI\",\n \"非專業投資人\":\"NON_PI\"\n },\n \"MASTER_AGENT\" : {\n '台灣摩根士丹利證券股份有限公司':\"MS\",\n '法商法國外貿銀行台北分行':\"NATIXIS\",\n '法商東方匯理銀行台北分行':\"CA\",\n '法商法國巴黎銀行股份有限公司台北分公司':\"BNP\",\n '法商法國興業銀行台北分行':\"SG\",\n '美商高盛亞洲證券有限公司台北分公司':\"GS\",\n '花旗環球證券股份有限公司':\"CITI\",\n '美商摩根大通銀行台北分行':\"JPM\",\n '香港商野村國際證券有限公司台北分公司':\"NOMURA\",\n '英商渣打銀行股份有限公司台北分公司':\"SC\",\n '英商巴克萊銀行股份有限公司台北分行':\"BARC\",\n '香港商蘇皇證券亞洲有限公司台北分公司':\"RBS\",\n '香港商香港上海匯豐銀行股份有限公司台北分公司':\"HSBC\",\n '荷商安智銀行台北分行':\"ING\",\n '新加坡商星展銀行股份有限公司台北分行':\"DBS\",\n '德商德意志銀行股份有限公司台北分公司':\"DB\",\n '瑞士商瑞士信貸銀行股份有限公司台北證券分公司':\"CS\",\n '澳商澳盛銀行集團股份有限公司台北分公司':\"ANZ\",\n '瑞士商瑞士銀行台北分行':\"UBS\"\n },\n \"DISTRIBUTOR\" : {\n '臺灣新光商業銀行股份有限公司':\"SHIN_KONG_BANK\",\n '瑞士商瑞士銀行台北分行' :\"UBS_BANK\",\n '中國信託商業銀行股份有限公司':\"CTBC_BANK\",\n '台中商業銀行股份有限公司':\"TAICHUNG_BANK\",\n '台北富邦商業銀行股份有限公司':\"FUBON_BANK\",\n '台新國際商業銀行股份有限公司':\"TAISHIN_BANK\",\n '玉山商業銀行股份有限公司':\"ESUN_BANK\",\n '國泰綜合證券股份有限公司':\"CATHAY_SEC\",\n '第一金證券股份有限公司':\"FIRST_SEC\",\n '法商法國巴黎銀行股份有限公司台北分公司':\"BNP_BANK\",\n '永豐金證券股份有限公司':\"SINOPAC_SEC\",\n '國泰世華商業銀行股份有限公司':\"CATHAY_BANK\",\n '富邦綜合證券股份有限公司':\"FUBON_SEC\",\n '兆豐國際商業銀行股份有限公司':\"MEGA_BANK\",\n 
'元大商業銀行股份有限公司':\"YUANTA_BANK\",\n '渣打國際商業銀行股份有限公司':\"STANCHAR\",\n '星展(台灣)商業銀行股份有限公司':\"DBS_BANK\",\n '日盛國際商業銀行股份有限公司':\"JIHSUN_BANK\",\n '日盛證券股份有限公司':\"JIHSUN_SEC\",\n '合作金庫商業銀行股份有限公司':\"COOP_BANK\",\n '花旗(台灣)商業銀行股份有限公司':\"CITI_BANK\",\n '玉山證券公司':\"ESUN_SEC\",\n '東亞證券股份有限公司':\"BEA_SEC\",\n '法商法國巴黎人壽保險股份有限公司台灣分公司':\"BNP_LIFE\",\n '上海商業儲蓄銀行股份有限公司':\"SCSB_BANK\",\n '華泰商業銀行股份有限公司':\"HWATAI_BANK\",\n '花旗證券股份有限公司':\"CITI_SEC\",\n '臺灣土地銀行':\"LAND_BANK\",\n '遠智證券股份有限公司':\"FAR_EASTERN_SEC\",\n '凱基證券股份有限公司':\"KGI_SEC\",\n '澳商澳盛銀行集團股份有限公司台北分公司':\"ANZ\",\n '元大證券股份有限公司':\"YUANTA_SEC\",\n '兆豐證券股份有限公司':\"MEGA_SEC\",\n '匯豐(台灣)商業銀行股份有限公司':\"HSBC_BANK\",\n '國泰人壽保險股份有限公司':\"CATHAY_LIFE\",\n '新光人壽保險股份有限公司':\"SHIN_KONG_LIFE\",\n '保誠人壽保險股份有限公司':\"PRUDENTIAL_LIFE\",\n '群益金鼎證券股份有限公司':\"CAPITAL_SEC\",\n '富邦人壽保險股份有限公司':\"FUBON_LIFE\",\n '台灣人壽保險股份有限公司':\"TAIWAN_LIFE\",\n '遠雄人壽保險事業股份有限公司':\"FARGLORY_LIFE\",\n '三商美邦人壽保險股份有限公司':\"MERCURIES_LIFE\",\n '安泰商業銀行股份有限公司':\"ENTIE_BANK\",\n '英屬百慕達商中泰人壽保險股份有限公司臺灣分公司':\"ACE_LIFE\",\n '合作金庫人壽保險股份有限公司':\"COOP_LIFE\",\n },\n }\n def translate(self, header, series):\n mapping = self.mapping[header]\n for chinese, english in mapping.items():\n series = series.str.replace(chinese, english)\n return series\n","sub_path":"tdcc/Translator.py","file_name":"Translator.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"566720601","text":"from imageai.Detection import ObjectDetection\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport json\nimport requests\ndef getHelmets(frame):\n net = cv2.dnn.readNetFromDarknet(\"../static/models/yolov3-obj.cfg\", \"../static/models/yolov3-obj_2400.weights\")\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n (H, W) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n \t\tswapRB=True, crop=False)\n net.setInput(blob)\n layerOutputs = net.forward(ln) \n boxes = []\n confidences = []\n classIDs = []\n confThreshold=0.5\n \t\n for output in layerOutputs:\t\n \tfor detection in output:\n \t\tscores = detection[5:]\n \t\tclassID = np.argmax(scores)\n \t\tconfidence = scores[classID]\n \t\tif confidence > confThreshold:\n \t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n \t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n \t\t\tx = int(centerX - (width / 2))\n \t\t\ty = int(centerY - (height / 2))\n \t\t\tboxes.append([x, y, int(width), int(height)])\n \t\t\tconfidences.append(float(confidence))\n \t\t\tclassIDs.append(classID)\n \n if len(boxes)==0:\n return boxes\n def sortbyX1(temp):\n return temp[0]\n boxes=sorted(boxes,key=sortbyX1)\n new_boxes=[]\n for i in range(0,len(boxes)-1):\n temp1=boxes[i]\n temp2=boxes[i+1]\n \n if abs(temp1[0]-temp2[0])<=5 and abs(temp1[1]-temp2[1])<=5:\n continue\n \n new_boxes.append(boxes[i])\n \n \n new_boxes.append(boxes[len(boxes)-1])\n return new_boxes\n\ndef markItems(frame,numberOfHelmets,detections1):\n \n for i in numberOfHelmets:\n x1,y1,x2,y2=i\n frame=cv2.rectangle(frame,(x1,y1),(x2,y2),(0,255,0),3)\n \n for i in detections1:\n if i['name']=='person':\n x1,y1,x2,y2=i['box_points']\n frame=cv2.rectangle(frame,(x1,y1),(x2,y2),(150,120,0),3)\n return frame\n\ndef makeChallan(people,helmet):\n if people=3:\n triplingFault=True\n \n if helmet\\d+)/files/(?P.+)/delete/$',\n name='staging_file_delete', view=StagingFileDeleteView.as_view()\n ),\n\n # Document create views\n\n url(\n 
regex=r'^documents/create/from/local/multiple/$',\n name='document_create_multiple', view=DocumentCreateWizard.as_view()\n ),\n url(\n regex=r'^documents/upload/new/interactive/(?P\\d+)/$',\n name='document_upload_interactive',\n view=UploadInteractiveView.as_view()\n ),\n url(\n regex=r'^documents/upload/new/interactive/$',\n name='document_upload_interactive',\n view=UploadInteractiveView.as_view()\n ),\n url(\n regex=r'^documents/(?P\\d+)/files/upload/interactive/(?P\\d+)/$',\n name='document_file_upload',\n view=DocumentFileUploadInteractiveView.as_view()\n ),\n url(\n regex=r'^documents/(?P\\d+)/files/upload/interactive/$',\n name='document_file_upload',\n view=DocumentFileUploadInteractiveView.as_view()\n ),\n\n # Setup views\n\n url(\n regex=r'^sources/$', name='setup_source_list',\n view=SourceListView.as_view()\n ),\n url(\n regex=r'^sources/create/(?P\\w+)/$',\n name='setup_source_create', view=SourceCreateView.as_view()\n ),\n url(\n regex=r'^sources/(?P\\d+)/check/$',\n name='setup_source_check', view=SourceCheckView.as_view()\n ),\n url(\n regex=r'^sources/(?P\\d+)/delete/$',\n name='setup_source_delete', view=SourceDeleteView.as_view()\n ),\n url(\n regex=r'^sources/(?P\\d+)/edit/$', name='setup_source_edit',\n view=SourceEditView.as_view()\n ),\n]\n\napi_urls = [\n url(\n regex=r'^staging_folders/file/(?P[0-9]+)/(?P.+)/image/$',\n name='stagingfolderfile-image',\n view=APIStagingSourceFileImageView.as_view()\n ),\n url(\n regex=r'^staging_folders/file/(?P[0-9]+)/(?P.+)/upload/$',\n name='stagingfolderfile-upload',\n view=APIStagingSourceFileUploadView.as_view()\n ),\n url(\n regex=r'^staging_folders/file/(?P[0-9]+)/(?P.+)/$',\n name='stagingfolderfile-detail',\n view=APIStagingSourceFileView.as_view()\n ),\n url(\n regex=r'^staging_folders/$', name='stagingfolder-list',\n view=APIStagingSourceListView.as_view()\n ),\n url(\n regex=r'^staging_folders/(?P[0-9]+)/$',\n name='stagingfolder-detail', view=APIStagingSourceView.as_view()\n )\n]\n","sub_path":"mayan/apps/sources/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"430605597","text":"from pyblish import api\n\n\nclass ValidateNames(api.InstancePlugin):\n \"\"\"Validate sequence, video track and track item names.\n\n When creating output directories with the name of an item, ending with a\n whitespace will fail the extraction.\n Exact matching to optimize processing.\n \"\"\"\n\n order = api.ValidatorOrder\n families = [\"clip\"]\n match = api.Exact\n label = \"Names\"\n hosts = [\"nukestudio\"]\n\n def process(self, instance):\n\n item = instance.data[\"item\"]\n\n msg = \"Track item \\\"{0}\\\" ends with a whitespace.\"\n assert not item.name().endswith(\" \"), msg.format(item.name())\n\n msg = \"Video track \\\"{0}\\\" ends with a whitespace.\"\n msg = msg.format(item.parent().name())\n assert not item.parent().name().endswith(\" \"), msg\n\n msg = \"Sequence \\\"{0}\\\" ends with a whitespace.\"\n msg = msg.format(item.parent().parent().name())\n assert not item.parent().parent().name().endswith(\" \"), msg\n","sub_path":"pype/plugins/nukestudio/publish/validate_names.py","file_name":"validate_names.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"611276880","text":"from collections import defaultdict\nimport random\nimport copy\nimport dill\n\n\n\ndef MakeData(path_data_in):\n data = list()\n 
queue = list()\n heads = [-1]\n with open(path_data_in) as data_in:\n for line in data_in:\n if line == '\\n':\n data.append((queue, heads))\n queue = list()\n heads = [-1]\n else:\n labels = line.strip().split('\\t')\n ID, word, pos, head = int(labels[0]), labels[1], labels[3], int(labels[6])\n queue.append((ID, word, pos))\n heads.append(head)\n\n return data\n\n\n\ndef MakeFeatures(stack, queue):\n features = defaultdict(lambda: 0)\n if len(stack) > 0 and len(queue) > 0:\n features['W-1' + stack[-1][1] + 'W0' + queue[0][1]] += 1\n features['W-1' + stack[-1][1] + 'P0' + queue[0][2]] += 1\n features['P-1' + stack[-1][2] + 'W0' + queue[0][1]] += 1\n features['P-1' + stack[-1][2] + 'P0' + queue[0][2]] += 1\n if len(stack) > 1:\n features['W-2' + stack[-2][1] + 'W-1' + stack[-1][1]] += 1\n features['W-2' + stack[-2][1] + 'P-1' + stack[-1][2]] += 1\n features['P-2' + stack[-2][2] + 'W-1' + stack[-1][1]] += 1\n features['P-2' + stack[-2][2] + 'P-1' + stack[-1][2]] += 1\n\n return features\n\n\n\ndef PredictScore(weight, features):\n score = 0\n for key, value in features.items():\n if key in weight.keys():\n score += value*weight[key]\n\n return score\n\n\n\ndef Update_Weight(weights, features, predict, correct):\n for key in features.keys():\n weights[predict][key] -= features[key]\n weights[correct][key] += features[key]\n\n\n\ndef ShiftReduceTrain(queue, heads, weights):\n stack = [(0, 'ROOT', 'ROOT')]\n unproc = list()\n\n for i in range(len(heads)):\n unproc.append(heads.count(i))\n\n while len(queue) > 0 or len(stack) > 1:\n features = MakeFeatures(stack, queue)\n scores = {}\n scores['SHIFT'] = PredictScore(weights['SHIFT'], features)\n scores['LEFT'] = PredictScore(weights['LEFT'], features)\n scores['RIGHT'] = PredictScore(weights['RIGHT'], features)\n\n if (max(scores.items(), key=lambda x: x[1])[0] == 'SHIFT' and len(queue) > 0) or len(stack) < 2:\n predict = 'SHIFT'\n elif max(scores.items(), key=lambda x: x[1])[0] == 'LEFT':\n predict = 'LEFT'\n else:\n predict = 'RIGHT'\n\n if len(stack) < 2:\n correct = 'SHIFT'\n elif heads[stack[-2][0]] == stack[-1][0] and unproc[stack[-2][0]] == 0:\n correct = 'LEFT'\n elif heads[stack[-1][0]] == stack[-2][0] and unproc[stack[-1][0]] == 0:\n correct = 'RIGHT'\n else:\n correct = 'SHIFT'\n\n if predict != correct:\n Update_Weight(weights, features, predict, correct)\n if correct == 'SHIFT':\n stack.append(queue.pop(0))\n elif correct == 'LEFT':\n unproc[stack[-1][0]] -= 1\n stack.pop(-2)\n elif correct == 'RIGHT':\n unproc[stack[-2][0]] -= 1\n stack.pop(-1)\n\n\n\nif __name__ == '__main__':\n path_data_train = '../../data/mstparser-en-train.dep'\n path_weights = 'weights.dump'\n n_epoch = 12\n\n data = MakeData(path_data_train)\n\n weights = dict()\n weights['SHIFT'] = defaultdict(lambda: 0)\n weights['LEFT'] = defaultdict(lambda: 0)\n weights['RIGHT'] = defaultdict(lambda: 0)\n\n for epoch in range(n_epoch):\n data_ = copy.deepcopy(data)\n random.seed(epoch)\n random.shuffle(data_)\n for queue, heads in data_:\n ShiftReduceTrain(queue, heads, weights)\n\n with open(path_weights, 'wb') as data_weights:\n dill.dump(weights, data_weights)\n","sub_path":"Shi-ma/tutorial11/train_sr.py","file_name":"train_sr.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"170569404","text":"import pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\nchannel = 
connection.channel()\n\nchannel.queue_declare(queue='example1')\nprint ('[*] Waiting for messages. To exit press CTRL+C')\n\ndef callback(ch, method, properties, body):\n print (\" [x] Received %r\" % body)\n\nchannel.basic_consume('example1', callback, auto_ack=True)\nchannel.start_consuming()","sub_path":"receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"97297843","text":"import sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtPrintSupport import *\nimport urllib.request\n\nclass MainApp(QMainWindow):\n \"\"\" the main class of our app \"\"\"\n def __init__(self):\n \"\"\" init things here \"\"\"\n super().__init__() # parent class initializer\n\n # window title\n self.title = \"NoteWord\"\n self.setWindowTitle(self.title)\n \n # editor section\n self.editor = QTextEdit(self) \n self.setCentralWidget(self.editor)\n\n # create menubar and toolbar first\n self.create_menu_bar()\n self.create_toolbar()\n\n # after creating toolbar we can call and select font size\n font = QFont('Times', 12)\n self.editor.setFont(font)\n self.editor.setFontPointSize(12)\n\n # stores path\n self.path = ''\n\n def create_menu_bar(self):\n menuBar = QMenuBar(self)\n\n \"\"\" add elements to the menubar \"\"\"\n # App icon will go here\n url13 = \"https://raw.githubusercontent.com/Harsh-0986/google-doc-clone/main/ico.ico\"\n data13 = urllib.request.urlopen(url13).read()\n pixmap13 = QPixmap()\n pixmap13.loadFromData(data13)\n app_icon = menuBar.addMenu(QIcon(pixmap13), \"icon\")\n\n # file menu **\n file_menu = QMenu(\"File\", self)\n menuBar.addMenu(file_menu)\n\n save_action = QAction('Save', self)\n save_action.triggered.connect(self.file_save)\n file_menu.addAction(save_action)\n\n open_action = QAction('Open', self)\n open_action.triggered.connect(self.file_open)\n file_menu.addAction(open_action)\n\n rename_action = QAction('Rename', self)\n rename_action.triggered.connect(self.file_saveas)\n file_menu.addAction(rename_action)\n\n pdf_action = QAction(\"Save as PDF\", self)\n pdf_action.triggered.connect(self.save_pdf)\n file_menu.addAction(pdf_action)\n \n\n # edit menu **\n edit_menu = QMenu(\"Edit\", self)\n menuBar.addMenu(edit_menu)\n\n # paste\n paste_action = QAction('Paste', self)\n paste_action.triggered.connect(self.editor.paste)\n edit_menu.addAction(paste_action)\n\n # clear \n clear_action = QAction('Clear', self)\n clear_action.triggered.connect(self.editor.clear)\n edit_menu.addAction(clear_action)\n\n # select all\n select_action = QAction('Select All', self)\n select_action.triggered.connect(self.editor.selectAll)\n edit_menu.addAction(select_action)\n\n # view menu **\n view_menu = QMenu(\"View\", self)\n menuBar.addMenu(view_menu)\n\n # fullscreen\n fullscr_action = QAction('Full Screen View', self)\n fullscr_action.triggered.connect(lambda : self.showFullScreen())\n view_menu.addAction(fullscr_action)\n\n # normal screen\n normscr_action = QAction('Normal View', self)\n normscr_action.triggered.connect(lambda : self.showNormal())\n view_menu.addAction(normscr_action)\n\n # minimize\n minscr_action = QAction('Minimize', self)\n minscr_action.triggered.connect(lambda : self.showMinimized())\n view_menu.addAction(minscr_action)\n\n self.setMenuBar(menuBar)\n\n def create_toolbar(self):\n # Using a title\n ToolBar = QToolBar(\"Tools\", self)\n\n # undo\n url3 = 
\"https://cdn4.iconfinder.com/data/icons/navigation-40/24/rotate-2-512.png\"\n data3 = urllib.request.urlopen(url3).read()\n pixmap3 = QPixmap()\n pixmap3.loadFromData(data3)\n \n undo_action = QAction(QIcon(pixmap3), 'Undo', self)\n undo_action.triggered.connect(self.editor.undo)\n ToolBar.addAction(undo_action)\n\n # redo\n url4 = \"https://cdn4.iconfinder.com/data/icons/vectory-multimedia-1/40/redo_2-512.png\"\n data4 = urllib.request.urlopen(url4).read()\n pixmap4 = QPixmap()\n pixmap4.loadFromData(data4)\n\n redo_action = QAction(QIcon(pixmap4), 'Redo', self)\n redo_action.triggered.connect(self.editor.redo)\n ToolBar.addAction(redo_action)\n\n # adding separator\n ToolBar.addSeparator()\n\n # copy\n url5 = \"https://cdn3.iconfinder.com/data/icons/business-912/24/copy-512.png\"\n data5 = urllib.request.urlopen(url5).read()\n pixmap5 = QPixmap()\n pixmap5.loadFromData(data5)\n\n copy_action = QAction(QIcon(pixmap5), 'Copy', self)\n copy_action.triggered.connect(self.editor.copy)\n ToolBar.addAction(copy_action)\n\n # cut \n url6 = \"https://cdn2.iconfinder.com/data/icons/picons-basic-2/57/basic2-032_scissors_cut-512.png\"\n data6 = urllib.request.urlopen(url6).read()\n pixmap6 = QPixmap()\n pixmap6.loadFromData(data6)\n\n cut_action = QAction(QIcon(pixmap6), 'Cut', self)\n cut_action.triggered.connect(self.editor.cut)\n ToolBar.addAction(cut_action)\n\n # paste\n url7 = \"https://cdn1.iconfinder.com/data/icons/material-core/22/content-paste-512.png\"\n data7 = urllib.request.urlopen(url7).read()\n pixmap7 = QPixmap()\n pixmap7.loadFromData(data7)\n\n paste_action = QAction(QIcon(pixmap7), 'Paste', self)\n paste_action.triggered.connect(self.editor.paste)\n ToolBar.addAction(paste_action)\n\n # adding separator\n ToolBar.addSeparator()\n ToolBar.addSeparator()\n\n # fonts\n self.font_combo = QComboBox(self)\n self.font_combo.addItems([\"Courier Std\", \"Hellentic Typewriter Regular\", \"Helvetica\", \"Arial\", \"SansSerif\", \"Helvetica\", \"Times\", \"Monospace\"])\n self.font_combo.activated.connect(self.set_font) # connect with function\n ToolBar.addWidget(self.font_combo) \n\n # font size\n self.font_size = QSpinBox(self) \n self.font_size.setValue(12) \n self.font_size.valueChanged.connect(self.set_font_size) # connect with funcion\n ToolBar.addWidget(self.font_size)\n\n # separator\n ToolBar.addSeparator()\n\n # bold\n url = \"https://cdn2.iconfinder.com/data/icons/font-awesome/1792/bold-512.png\"\n data = urllib.request.urlopen(url).read()\n pixmap = QPixmap()\n pixmap.loadFromData(data)\n bold_action = QAction(QIcon(pixmap), 'Bold', self)\n bold_action.triggered.connect(self.bold_text)\n ToolBar.addAction(bold_action)\n\n # underline\n url2 = \"https://cdn1.iconfinder.com/data/icons/feather-2/24/underline-512.png\"\n data2 = urllib.request.urlopen(url2).read()\n pixmap2 = QPixmap()\n pixmap2.loadFromData(data2)\n underline_action = QAction(QIcon(pixmap2), 'Underline', self)\n underline_action.triggered.connect(self.underline_text)\n ToolBar.addAction(underline_action)\n\n # italic\n url1 = \"https://cdn4.iconfinder.com/data/icons/feather/24/italic-512.png\"\n data1 = urllib.request.urlopen(url1).read()\n pixmap1 = QPixmap()\n pixmap1.loadFromData(data1)\n italic_action = QAction(QIcon(pixmap1), 'Italic', self)\n italic_action.triggered.connect(self.italic_text)\n ToolBar.addAction(italic_action)\n\n # separator\n ToolBar.addSeparator()\n\n # text alignment\n url8 = \"https://cdn1.iconfinder.com/data/icons/feather-2/24/align-right-512.png\"\n data8 = 
urllib.request.urlopen(url8).read()\n pixmap8 = QPixmap()\n pixmap8.loadFromData(data8)\n\n right_alignment_action = QAction(QIcon(pixmap8), 'Align Right', self)\n right_alignment_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignRight))\n ToolBar.addAction(right_alignment_action)\n\n url9 = \"https://cdn1.iconfinder.com/data/icons/feather-2/24/align-left-512.png\"\n data9 = urllib.request.urlopen(url9).read()\n pixmap9 = QPixmap()\n pixmap9.loadFromData(data9)\n\n left_alignment_action = QAction(QIcon(pixmap9), 'Align Left', self)\n left_alignment_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignLeft))\n ToolBar.addAction(left_alignment_action)\n\n url10 = \"https://cdn2.iconfinder.com/data/icons/viiva-content-editor/32/justify-512.png\"\n data10 = urllib.request.urlopen(url10).read()\n pixmap10 = QPixmap()\n pixmap10.loadFromData(data10)\n\n justification_action = QAction(QIcon(pixmap10), 'Center/Justify', self)\n justification_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignCenter))\n ToolBar.addAction(justification_action)\n\n # separator\n ToolBar.addSeparator()\n\n # zoom in\n url11 = \"https://cdn1.iconfinder.com/data/icons/feather-2/24/zoom-in-512.png\"\n data11 = urllib.request.urlopen(url11).read()\n pixmap11 = QPixmap()\n pixmap11.loadFromData(data11)\n\n zoom_in_action = QAction(QIcon(pixmap11), 'Zoom in', self)\n zoom_in_action.triggered.connect(self.editor.zoomIn)\n ToolBar.addAction(zoom_in_action)\n\n # zoom out\n url12 = \"https://cdn1.iconfinder.com/data/icons/feather-2/24/zoom-out-512.png\"\n data12 = urllib.request.urlopen(url12).read()\n pixmap12 = QPixmap()\n pixmap12.loadFromData(data12)\n\n zoom_out_action = QAction(QIcon(pixmap12), 'Zoom out', self)\n zoom_out_action.triggered.connect(self.editor.zoomOut)\n ToolBar.addAction(zoom_out_action)\n\n\n # separator\n ToolBar.addSeparator()\n \n self.addToolBar(ToolBar)\n\n def italic_text(self):\n # if already italic, change into normal, else italic\n state = self.editor.fontItalic()\n self.editor.setFontItalic(not(state))\n\n def underline_text(self):\n # if already underlined, change into normal, else underlined\n state = self.editor.fontUnderline()\n self.editor.setFontUnderline(not(state))\n\n def bold_text(self):\n # if already bold, make normal, else make bold\n if self.editor.fontWeight() != QFont.Bold:\n self.editor.setFontWeight(QFont.Bold)\n return\n self.editor.setFontWeight(QFont.Normal)\n\n def set_font(self):\n font = self.font_combo.currentText()\n self.editor.setCurrentFont(QFont(font))\n\n def set_font_size(self):\n value = self.font_size.value()\n self.editor.setFontPointSize(value)\n\n # we can also make it one liner without writing such function.\n # by using lamba function -\n # self.font_size.valueChanged.connect(self.editor.setFontPointSize(self.font_size.value())) \n\n\n def file_open(self):\n self.path, _ = QFileDialog.getOpenFileName(self, \"Open file\", \"\", \"Text Files(*.txt)\")\n\n try:\n # if self.path.endswith(('.doc', '.docx')):\n # text = textract.process(self.path)\n # print(text)\n # doc = Document(self.path)\n # text = ''\n # for line in doc.paragraphs:\n # text+=line.text\n # else:\n with open(self.path, 'r') as f:\n text = f.read()\n except Exception as e:\n print(e)\n else:\n self.editor.setText(text)\n self.update_title()\n\n def file_save(self):\n\n if self.path == '':\n # If we do not have a path, we need to use Save As.\n self.file_saveas()\n\n text = self.editor.toPlainText()\n\n try:\n with open(self.path, 'w') as f:\n 
f.write(text)\n self.update_title()\n except Exception as e:\n print(e)\n\n def file_saveas(self):\n self.path, _ = QFileDialog.getSaveFileName(self, \"Save file\", \"\", \"Text Files(*.txt)\")\n if self.path == '':\n return # If dialog is cancelled, will return ''\n\n text = self.editor.toPlainText()\n\n try:\n with open(self.path, 'w') as f:\n f.write(text)\n self.update_title()\n except Exception as e:\n print(e)\n\n def update_title(self):\n self.setWindowTitle(self.title + ' ' + self.path)\n\n def save_pdf(self):\n f_name, _ = QFileDialog.getSaveFileName(self, \"Export PDF\", None, \"Pdf files(*.pdf)\")\n\n if f_name != '': # if name not empty\n printer = QPrinter(QPrinter.HighResolution)\n printer.setOutputFormat(QPrinter.PdfFormat)\n printer.setOutputFileName(f_name)\n self.editor.document().print_(printer)\n \n\napp = QApplication(sys.argv)\nwindow = MainApp()\nwindow.show()\nsys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"648198091","text":"# Анонимная функия\n# mult = lambda a, x: a * x\n# mult(2,6)\n# ZZ = 0\n# while 5 > ZZ:\n# print(mult(1,2))\n#\n# ZZ += 1\n\n# ПРОСТАЯ ФУНКЦИЯ ВЫВОДА\n\n# x = 50\n#\n# def vers():\n# # print('vot')\n# global x\n# print(x + 1)\n# x = 10\n# print(x - 1)\n#\n# vers()\n#print(x)\n# vers(2, 2)\n\n# ФУНКЦИЯ СУМИРОВАНИЯ\n\n# def summa(x = 0, y = 0, z = 0):\n# pass\n# xyz = x + y + z\n# return xyz\n# print(xyz)\n#\n#\n# a = summa(1, 2, 3)\n# print(a, summa(45,10))\n\n# ФУНКЦИЯ с кортежем ()\n\n# def printAll(*params):\n# for i in params:\n# print(i * 2)\n#\n# printAll(6, 'word', 7, 'asd')\n\n# ФУНКЦИЯ с СЛОВАРЕМ {}\n\n# def printDict(**paramsdict):\n#\n# for key, ke2 in paramsdict.items():\n# print('ключ:', key, \", значение:\", ke2)\n#\n# printDict(long = \"гоша\", short = \"Лиля\", x = 7, ss = True)\n\n# dict_ = {'long': 3, 'b': 'super'}\n# print(dict_)\n#\n# sse = (dict([1,2],[3,'gjhkg'])\n# print(sse)\n\nx = 33333\n\n\ndef inner():\n x = 3\n print(x)\n\n def outer():\n global x\n x = 4\n print(x, 'х в OUTER')\n outer()\n print(x, 'конец INNER')\n\ninner()\nprint(x, \"значение х до функции\")","sub_path":"def.py","file_name":"def.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70359360","text":"import os\nimport argparse\n\ndef prefix_groups(data):\n \"\"\"Return a dictionary of {prefix:[items]}.\"\"\"\n lines = data[:]\n groups = dict()\n while lines:\n longest = None\n first = lines.pop()\n for line in lines:\n prefix = os.path.commonprefix([first, line])\n if not longest:\n longest = prefix\n elif len(prefix) > len(longest):\n longest = prefix\n if longest:\n group = [first]\n rest = [item for item in lines if longest in item]\n [lines.remove(item) for item in rest]\n group.extend(rest)\n groups[longest] = group\n else:\n # Singletons raise an exception\n raise IndexError(\"No prefix match for {}!\".format(first))\n return groups\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--inputdir',default=\"input\", type=str)\nargs = parser.parse_args()\nprint(args)\n\nfiles = sorted(os.listdir(args.inputdir))\npfg = prefix_groups(files)\n#print(pfg)\nfor el in pfg:\n print(el)\n 
print(pfg[el])\n","sub_path":"tasks/filterModels/filterModels.py","file_name":"filterModels.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"179520966","text":"import MCM2020.A.mcmutil as mu\nimport MCM2020.A.evaK as ek\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#wrong version\n\n\n# datafile = r'C:\\Users\\蔡小蔚\\Desktop\\2020\\A\\dataA.xlsx'\n# res = mu.excel_to_matrix(datafile, False)\n#\n# print(res.shape)\n#\n# x = res[:, 0]\n# y = res[:, 1]\n\ntempatures = [173, 198, 230, 257, 25]\nxx, yy = ek.getKlist()\nxx, yy = ek.completeAllKs2(xx, yy)\n# print(xx)\n# print(yy)\n# print(len(xx))\n\ndef getAlphaWhenMinIndex(x, y, tempnow):\n index = 0\n minSub = 100\n # size = x.shape[0]\n size = len(x)\n for i in range(size):\n tempSub = abs(x[i] - tempnow)\n if tempSub < minSub:\n minSub = tempSub\n index = i\n\n return y[index]\n\n\ndef place(carv, step):\n lentemp = 0.5*step*carv\n if lentemp<=197.5:\n return 1\n if lentemp<=233:\n return 2\n if lentemp<=268.5:\n return 3\n if lentemp<=339.5:\n return 4\n return 5\n\n\n\nlenAll = 435.5\ncarv = 1.3\ntimaAll = lenAll / carv\nstep = 0\ntimelist = []\ntempaturelist = []\nsteplist = []\nT0 = 25\ndalt = 0.5\nwhile 0.5*step*carv<=lenAll:\n k = getAlphaWhenMinIndex(xx, yy, T0)\n idx = place(carv, step)\n Ti = tempatures[idx - 1]\n T = Ti - (Ti - T0)*np.exp(-k*0.5)\n steplist.append(step)\n tempaturelist.append(T)\n T0 = T\n step+=1\n timelist.append(step*dalt)\n\n\nprint(\"time:\", timelist)\nprint(\"tempreature: \", tempaturelist)\n\nfilepathTimeList = \"timelist.txt\"\nfilepathTempreatureList = \"templist.txt\"\nprint(\"size:\", len(timelist))\nmu.WriteFile(timelist, filepathTimeList)\nmu.WriteFile(tempaturelist, filepathTempreatureList)\n\n\nplt.plot(timelist, tempaturelist)\nplt.xlabel(\"Time (s) \")\nplt.ylabel(\"Circuit Board's Temperature (°C)\")\nplt.show()\n\n\nprint(tempaturelist)","sub_path":"2020_CUMCM/MCM2020/A/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"366536989","text":"from rest_framework.viewsets import ModelViewSet\n\nfrom heat_map.models import Ipv\nfrom heat_map.serializer import IpvSerializer\n\n\nclass IpvViewSet(ModelViewSet):\n serializer_class = IpvSerializer\n entity_name = 'ipvfour'\n\n def get_queryset(self):\n bounds = get_query_parameters(self.request)\n for each in bounds:\n if not each:\n return Ipv.objects.none()\n cluster = Ipv.objects.filter(\n longitude__gte=float(bounds[0]),\n longitude__lte=float(bounds[1]),\n latitude__gte=float(bounds[2]),\n latitude__lte=float(bounds[3])\n )\n return cluster\n\n\ndef get_query_parameters(request):\n lower_long = request.query_params.get('lower_long')\n upper_long = request.query_params.get('upper_long')\n lower_lat = request.query_params.get('lower_lat')\n upper_lat = request.query_params.get('upper_lat')\n return [lower_long, upper_long, lower_lat, upper_lat]","sub_path":"heat_map/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"256551492","text":"\n# X4DF\n# Copyright (C) 2017 Eric Kerfoot, King's College London, all rights reserved\n\nfrom .x4df import readFile, writeFile, idTransform, validFieldTypes, ASCII, BASE64, BASE64_GZ, BINARY, BINARY_GZ, NODE, ELEM, INDEX\nfrom .x4df import dataset, meta, nodes, 
topology, field, imagedata, mesh, image, transform, array\n\n__appname__='x4df'\n__version_info__=(0,1,0) # global application version, major/minor/patch\n__version__='%i.%i.%i'%__version_info__\n__author__='Eric Kerfoot'\n__copyright__=\"Copyright (c) 2016-7 Eric Kerfoot, King's College London, all rights reserved. Licensed under the GPL (see LICENSE.txt).\"\n__website__=\"https://ericspod.github.io/X4DF\"\n","sub_path":"python/x4df/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"267582593","text":"class User(object):\n def __init__(self):\n self.name = \"Tom\"\n self.email = \"tom@email.com\"\n self.account_balance = 0\n\n def make_deposit(self, amount):\t\n self.account_balance += amount\n return self\n \n def make_withdrawal(self, amount):\n self.account_balance -= amount\n return self\n def display_user_balance(self):\n print(\"User: {}, Balance: {}\".format(self.name, self.account_balance))\n return self\n\n def transfer_money(self, other_user, amount):\n self.make_withdrawal(amount)\n other_user.make_deposit(amount)\n return self\n\nuser1 = User()\nuser2 = User()\nuser3 = User()\n\nuser1.make_deposit(10).make_deposit(20).make_deposit(30).make_withdrawal(5).display_user_balance()\n\nuser2.make_deposit(10).make_deposit(20).make_withdrawal(5).make_withdrawal(5).display_user_balance()\n\nuser3.make_deposit(3).make_withdrawal(1).make_withdrawal(1).make_withdrawal(1).display_user_balance()\n\nuser1.transfer_money(user3, 55).display_user_balance()\nuser3.display_user_balance()","sub_path":"OOP/chaining_methods.py","file_name":"chaining_methods.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"611620181","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom discord import Member\n\nfrom cogbot.cogs.robo_mod.robo_mod_action import RoboModAction\nfrom cogbot.cogs.robo_mod.robo_mod_action_log_entry import RoboModActionLogEntry\nfrom cogbot.cogs.robo_mod.robo_mod_trigger import RoboModTrigger\n\n\nclass LogMemberBannedAction(RoboModAction):\n async def log(self, trigger: RoboModTrigger) -> Optional[RoboModActionLogEntry]:\n member: Member = trigger.member\n # Name\n name_str = f\"{member}\"\n # User ID\n user_id_str = f\"{member.id}\"\n # Member for\n now = datetime.utcnow()\n member_for = now - member.joined_at\n member_for_str = f\"{member_for.days} days\"\n if member_for.days < 7:\n hh = int(member_for.total_seconds() / 3600)\n mm = int(member_for.total_seconds() / 60) % 60\n member_for_str = f\"{hh} hours, {mm} minutes\"\n return RoboModActionLogEntry(\n content=f\"{member.mention} was banned.\",\n fields={\n \"Name\": name_str,\n \"User ID\": user_id_str,\n \"Member for\": member_for_str,\n },\n )\n","sub_path":"cogbot/cogs/robo_mod/actions/log_member_banned.py","file_name":"log_member_banned.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"284551866","text":"import cv2\nimport numpy as np\nimport struct\n\ndef parse_image_file(image_file_path):\n \n return cv2.imread(image_file_path)\n\ndef parse_velodyne_file(velodyne_file_path):\n \n with open(velodyne_file_path, 'rb') as velodyne_file:\n \n points = []\n \n point = velodyne_file.read(16)\n \n while len(point) == 16:\n \n x, y, z, r = struct.unpack('4f', point)\n \n points.append([x, 
y, z, r])\n \n point = velodyne_file.read(16)\n \n return np.asarray(points)\n\ndef parse_calib_file(calib_file_path):\n \n with open(calib_file_path, 'r') as calib_file:\n \n lines = calib_file.readlines()\n \n P_line = lines[2]\n R_line = lines[4]\n T_line = lines[5]\n \n P_strings = P_line.split()[1:]\n R_strings = R_line.split()[1:]\n T_strings = T_line.split()[1:]\n \n P_floats = [float(P_string) for P_string in P_strings]\n R_floats = [float(R_string) for R_string in R_strings]\n T_floats = [float(T_string) for T_string in T_strings]\n \n P = np.asarray(P_floats)\n P = np.reshape(P, (3,4))\n \n R = np.asarray(R_floats)\n R = np.reshape(R, (3,3))\n R = np.concatenate([R,np.zeros((3,1))], axis=1)\n R = np.concatenate([R,np.array([[0,0,0,1]])])\n \n T = np.asarray(T_floats)\n T = np.reshape(T, (3,4))\n T = np.concatenate([T,np.array([[0,0,0,1]])])\n \n return P, R, T\n\ndef parse_label_file(label_file_path):\n \n with open(label_file_path, 'r') as label_file:\n \n labels = []\n \n lines = label_file.readlines()\n \n for line in lines:\n \n strings = line.split()\n \n label = {}\n label['type'] = strings[0]\n label['truncated'] = float(strings[1])\n label['occluded'] = int(strings[2])\n label['alpha'] = float(strings[3])\n label['bbox'] = [float(x) for x in strings[4:8]]\n label['dimensions'] = [float(x) for x in strings[8:11]]\n label['location'] = [float(x) for x in strings[11:14]]\n label['rotation_y'] = float(strings[14])\n \n labels.append(label)\n \n return labels\n","sub_path":"kitti_utils.py","file_name":"kitti_utils.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"621813966","text":"# from django.views import generic\nfrom django.contrib.gis.geos import fromstr, Point\n# from django.contrib.gis.measure import Distance\nfrom . models import *\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom . 
serializer import *\n\n# Create your views here.\n\nlongitude = 19.127730749999998\nlatitude = 72.83693882433897\n\nuser_location = Point(longitude, latitude, srid=4326)\n\ndef index(request):\n locations = Location.objects.all().order_by('?')\n # random.shuffle(locations)\n # locations = Location.objects.annotate(distance=Distance('location',user_location))\n print(str(locations))\n return render(request, 'sports/index.html', {'locations' : locations})\n\nclass ReactLocationView(APIView):\n \n serializer_class = ReactSerializer\n \n def get(self, request):\n detail = [ {\"name\": loc.name} \n for loc in Location.objects.all().order_by('?')]\n detail = detail[0:10]\n return Response(detail)\n \n def post(self, request):\n \n serializer = ReactSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n\n","sub_path":"backend/sports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"75864758","text":"import asyncio\nimport getpass\nimport logging\nimport os\nimport sys\nfrom typing import Dict, List, Sequence, Tuple\n\nfrom funcy import func_partial, walk, with_next\nfrom telethon import TelegramClient, errors, functions, utils\nfrom telethon.crypto import CdnDecrypter\nfrom telethon.errors import FileMigrateError, SessionPasswordNeededError\nfrom telethon.network.connection import tcpabridged\nfrom telethon.tl.custom import dialog\nfrom telethon.tl.functions.upload import GetFileRequest\nfrom telethon.tl.types import (DocumentAttributeAudio,\n DocumentAttributeFilename,\n InputDocumentFileLocation,\n InputMessagesFilterMusic, InputPeerEmpty)\nfrom telethon.tl.types.upload import FileCdnRedirect\nfrom telethon.utils import get_display_name\n\nlogger = logging.getLogger('tgclient')\n\nMB = 1048576\nKB = 1024\nBLOCK_SIZE = 128 * KB\n\n# 178\n# 356\n# 711\n# requests_made = 0\n\ndef block(byte_idx: int):\n return byte_idx//BLOCK_SIZE\n\n\ndef block_mb(block_idx: int):\n return (block_idx*BLOCK_SIZE)//MB\n\n\ndef mb(byte: int):\n return byte//MB\n\n\ndef split_range(offset: int, limit: int):\n \"\"\"\n Restrictions on upload.getFile and upload.getCdnFile parameters\n offset must be divisible by 4096 bytes\n limit must be divisible by 4096 bytes\n 10485760 (1MB) must be divisible by limit\n offset / (1024 * 1024) == (offset + limit - 1) / (1024 * 1024)\n (file parts that are being downloaded must always be inside the same megabyte-sized fragment)\n \"\"\"\n if offset % 4096 != 0:\n offset = (offset // 4096) * 4096\n\n if limit % 4096 != 0:\n limit = (limit // 4096 + 1) * 4096\n\n a = offset\n b = offset + limit\n\n starting_block = block(a)\n ending_block = block(b - 1)\n\n blocks = list(range(starting_block, ending_block + 1))\n\n rngs = list(map(lambda b: b * BLOCK_SIZE, blocks))\n rngs.append(rngs[-1] + BLOCK_SIZE)\n\n return rngs\n\n\ndef msg_to_inputlocation(msg):\n return InputDocumentFileLocation(id=msg.media.document.id,\n access_hash=msg.media.document.access_hash,\n file_reference=msg.media.document.file_reference,\n thumb_size='')\n\n\nclass TelegramFsClient(TelegramClient):\n def __init__(self, session_user_id, api_id, api_hash, proxy):\n\n super().__init__(\n session_user_id,\n api_id,\n api_hash,\n proxy=proxy\n )\n\n self.api_id = api_id\n self.api_hash = api_hash\n\n async def auth(self):\n logger.debug('Connecting to Telegram servers...')\n\n try:\n await self.connect()\n except 
ConnectionError:\n logger.debug('Initial connection failed. Retrying...')\n if not await self.connect():\n logger.debug('Could not connect to Telegram servers.')\n return\n\n logger.debug('Connected')\n\n if not await self.is_user_authorized():\n\n user_phone = input('Enter your phone number: ')\n\n logger.debug('First run. Sending code request...')\n\n await self.sign_in(user_phone)\n\n self_user = None\n while self_user is None:\n code = input('Enter the code you just received: ')\n try:\n self_user = await self.sign_in(code=code)\n except SessionPasswordNeededError:\n pw = getpass.getpass('Two step verification is enabled. '\n 'Please enter your password: ')\n\n self_user = await self.sign_in(password=pw)\n\n async def get_dialogs_dict(self, limit=150, offset_id=0) -> Dict:\n \"\"\"\n Returns mapping dialog_display_name -> dialog\n \"\"\"\n logger.debug(\"get_dialogs_map(limit=%s, offset_id=%d)\" %\n (limit, offset_id))\n dialogs = await self.get_dialogs(limit=limit, offset_id=offset_id)\n\n entities = [(get_display_name(d.entity), d.entity) for d in dialogs]\n\n return dict(entities)\n\n async def get_file_chunk(self, input_location, offset, limit):\n \"\"\"\n This reimplementation of telethon's `download_file` method adds offset and limit parametres \n in order to download arbitrary chunks of a file\n \"\"\"\n logger.debug(\"get_file_chunk(%s, %s,%s)\" %\n (input_location.id, offset, limit))\n\n ranges = split_range(offset, limit)\n\n received_bytes = bytes()\n\n assert not offset % 4096\n\n dc_id, input_location = utils.get_input_location(input_location)\n exported = dc_id and self.session.dc_id != dc_id\n\n if exported:\n try:\n sender = await self._borrow_exported_sender(dc_id)\n except errors.DcIdInvalidError:\n # Can't export a sender for the ID we are currently in\n config = await self(functions.help.GetConfigRequest())\n for option in config.dc_options:\n if option.ip_address == self.session.server_address:\n self.session.set_dc(\n option.id, option.ip_address, option.port)\n self.session.save()\n break\n\n # TODO Figure out why the session may have the wrong DC ID\n sender = self._sender\n exported = False\n else:\n # The used sender will also change if ``FileMigrateError`` occurs\n sender = self._sender\n\n try:\n for a, b in with_next(ranges):\n if not b:\n break\n\n _offset = a\n _limit = b - a\n\n logger.debug(\"Quering range offset=%s limit=%s\" %\n (_offset, _limit))\n\n try:\n result = await sender.send(functions.upload.GetFileRequest(\n input_location, _offset, _limit\n ))\n\n # global requests_made\n # requests_made += 1\n\n # print(\"requests_made = %d\" % requests_made)\n\n if isinstance(result, FileCdnRedirect):\n logger.debug(\"FileCdnRedirect was received\")\n raise NotImplementedError\n\n except FileMigrateError as e:\n logger.debug(\"Caught FileMigrateError\")\n\n sender = await self._borrow_exported_sender(e.new_dc)\n exported = True\n continue\n\n if not result.bytes:\n return getattr(result, 'type', '')\n\n received_bytes += result.bytes\n\n except Exception as e:\n logger.error(\"Zero chunk received %s\" % e)\n finally:\n logger.debug(\"Finalization %s\" % len(received_bytes))\n\n if exported:\n await self._return_exported_sender(sender)\n elif sender != self._sender:\n await sender.disconnect()\n\n ret_data = received_bytes[offset -\n ranges[0]: offset - ranges[0] + limit]\n\n if len(ret_data) == 0:\n logger.error(\"Empty result\")\n\n logger.debug(\"Returning %s bytes\" % len(ret_data))\n\n return ret_data\n\n def document_from_message(self, msg):\n 
\"\"\"\n transforms a message containing a document to a dictionary\n \"\"\"\n\n if not getattr(msg, 'media', None):\n return None\n\n if not getattr(msg.media, 'document', None):\n return None\n\n document = msg.media.document\n document_data = dict.fromkeys([\n 'id',\n 'message_id',\n 'message_date',\n 'document_date',\n 'mime_type',\n 'size',\n 'attributes',\n 'download_func'])\n\n document_atrributes = dict.fromkeys([\n 'file_name',\n 'title',\n 'performer',\n 'duration'])\n\n document_data['attributes'] = document_atrributes\n document_data['download_func'] = func_partial(\n self.get_file_chunk, msg_to_inputlocation(msg))\n\n document_data.update(id=document.id,\n message_date=msg.date,\n document_date=document.date,\n mime_type=document.mime_type,\n size=document.size, message_id=msg.id)\n\n for attr in msg.media.document.attributes:\n if isinstance(attr, DocumentAttributeAudio):\n document_atrributes['title'] = getattr(attr, 'title', None)\n document_atrributes['performer'] = getattr(\n attr, 'performer', None)\n document_atrributes['duration'] = int(\n getattr(attr, 'duration', 0))\n\n elif isinstance(attr, DocumentAttributeFilename):\n document_atrributes['file_name'] = attr.file_name\n\n return document_data\n\n async def _get_documents(self, entity, limit=None, offset_id=0, reverse=False, filter_music=False, ids=None):\n \"\"\"\n Returns two lists: list of processed messages and list of tuples (message, document)\n \"\"\"\n documents = []\n\n logger.debug(\"_get_documents(entity=%s, limit=%s, offset_id=%s, reverse=%s, filter_music=%s, ids=%s)\"\n % (entity.id, limit, offset_id, reverse, filter_music, ids))\n\n messages = await self.get_messages(entity, limit=limit, offset_id=offset_id, reverse=reverse,\n filter=filter_music and InputMessagesFilterMusic, ids=ids)\n\n logger.debug(\"Received %d messages\" % len(messages))\n\n for msg in messages:\n document = self.document_from_message(msg)\n if document:\n documents.append((msg, document))\n\n return [messages, documents]\n\n async def get_documents(self, entity, limit=None, offset_id=0, reverse=False, filter_music=False, ids=None):\n \"\"\"\n Returns list of tuples (message, document)\n \"\"\"\n documents = []\n\n logger.debug(\"get_documents(entity=%s, limit=%s, offset_id=%s, reverse=%s, filter_music=%s, ids=%s)\"\n % (entity.id, limit, offset_id, reverse, filter_music, ids))\n\n [messages, documents] = await self._get_documents(entity,\n limit=limit,\n offset_id=offset_id,\n reverse=reverse,\n filter_music=filter_music,\n ids=ids)\n\n logger.debug(\"Received %d documents\" % len(documents))\n\n while not ids and limit and limit > len(documents):\n logger.debug(\"Loading more documents\")\n [messages, more] = await self._get_documents(entity,\n offset_id=messages[-1].id,\n limit=100,\n reverse=reverse,\n filter_music=filter_music)\n logger.debug(\"Received %d documents\" % len(more))\n\n if len(messages) == 0:\n break\n\n documents.extend(more)\n\n return documents[:limit]\n","sub_path":"tgmount/tgmount/tgclient.py","file_name":"tgclient.py","file_ext":"py","file_size_in_byte":11635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"466486231","text":"from django.urls import path, re_path\nimport ordersapp.views as ordersapp\n\napp_name = 'ordersapp'\n\nurlpatterns = [\n re_path(r'^$', ordersapp.OrderList.as_view(), name='index'),\n re_path(r'^order/create/$', ordersapp.OrderItemsCreate.as_view(), name='order_create'),\n re_path(r'^order/read/(?P\\d+)/$', 
ordersapp.OrderRead.as_view(), name='order_read'),\n re_path(r'^order/update/(?P\\d+)/$', ordersapp.OrderItemsUpdate.as_view(), name='order_update'),\n re_path(r'^order/delete/(?P\\d+)/$', ordersapp.OrderDelete.as_view(), name='order_delete'),\n\n re_path(r'^order/forming/complete/(?P\\d+)/$', ordersapp.order_forming_complete, name='order_forming_complete'),\n re_path(r'^product/(?P\\d+)/price/$', ordersapp.get_product_price),\n\n]\n","sub_path":"ordersapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"202176634","text":"from .db.leaderboard import LeaderboardDb\nfrom .leaderboard_entry import LeaderboardEntry\nfrom .db.leaderboard_entry import LeaderboardEntryDb\nimport flask_cot.exceptions as Exceptions\nfrom sqlalchemy import and_, text\nfrom flask_cot.db.models import BaseModel\n\n\nclass Leaderboard(BaseModel):\n \"\"\"docstring for Leaderboard.\n\n\n _pref_score_col\n 0 = a only\n 1 = a then b\n _pref_score_X_direction\n ASC lower scores are better\n DESC higher scores are better\n _pref_created_direction\n ASC early scores are better\n DESC recent scores are better\n\n \"\"\"\n def __init__(self, id=None):\n super(Leaderboard, self).__init__()\n self._db_model = self._get_db_model(LeaderboardDb)\n self.reset()\n if id:\n self.load(id)\n\n def add_entry(\n self,\n leaderboard_id=None, # Allows addition without preloading the board\n dim_1=None,\n dim_2=None,\n dim_3=None,\n dim_4=None,\n dim_5=None,\n user_id=None,\n display_name=None,\n display_avatar=None,\n score_a=None,\n score_b=None,\n _return_position=True\n ):\n if leaderboard_id is None:\n leaderboard_id = self.id\n if leaderboard_id is None:\n raise ValueError('No Leaderboard id set.')\n # TODO: check for multy entry\n lbe = LeaderboardEntry()\n lbe.create_new(\n leaderboard_id,\n dim_1=dim_1,\n dim_2=dim_2,\n dim_3=dim_3,\n dim_4=dim_4,\n dim_5=dim_5,\n user_id=user_id,\n display_name=display_name,\n display_avatar=display_avatar,\n score_a=score_a,\n score_b=score_b,\n _return_position=_return_position\n )\n\n def get_top(\n self,\n limit=10,\n offset=0,\n dim_1=None,\n dim_2=None,\n dim_3=None,\n dim_4=None,\n dim_5=None,\n ):\n filter_group = []\n filter_group.append(text(\"leaderboard_id='\"+self.id+\"'\"))\n if dim_1:\n filter_group.append(text(\"_dim_1=\" + str(dim_1)))\n if dim_2:\n filter_group.append(text(\"_dim_2=\" + str(dim_2)))\n if dim_3:\n filter_group.append(text(\"_dim_3=\" + str(dim_3)))\n if dim_4:\n filter_group.append(text(\"_dim_4='\" + self.data['_dim_4'] + \"'\"))\n if dim_5:\n filter_group.append(text(\"_dim_5='\" + self.data['_dim_5'] + \"'\"))\n\n order_group = []\n order_group.append(\n text('score_a ' + self.data['_pref_score_a_direction'])\n )\n if self.data['_pref_score_col']:\n order_group.append(\n text('score_b ' + self.data['_pref_score_b_direction'])\n )\n order_group.append(\n text('created ' + self.data['_pref_created_direction'])\n )\n\n objs = self._db_model.query.filter(\n and_(*filter_group)\n )\\\n .order_by(*order_group)\\\n .limit(limit)\\\n .offset(offset)\n ob = []\n pos = offset\n for o in objs:\n pos += 1\n i = o.to_dict()\n i['position'] = pos\n ob.append(i)\n\n return ob\n","sub_path":"leaderboards/models/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"339792338","text":"import torch\nfrom torch import 
ByteTensor\nfrom cloudvolume.lib import Bbox, Vec\n\nimport util\nimport math\nimport argparse\n\ndef logical_or(tensors):\n \"\"\"Combine list of byte tensors with disjunction\n\n Args:\n tensors: list of tensors\n \"\"\"\n assert(len(tensors) > 1)\n o = tensors[0] | tensors[1]\n for t in tensors[2:]:\n o = o | t\n return o\n\ndef logical_and(tensors):\n \"\"\"Combine list of byte tensors with conjunction\n\n Args:\n tensors: list of tensors\n \"\"\"\n assert(len(tensors) > 1)\n o = tensors[0] | tensors[1]\n for t in tensors[2:]:\n o = o & t\n return o\n\nclass MaskCompiler():\n\n def __init__(self, src_paths, dst_path, mip, bbox_start, bbox_stop, \n bbox_mip, thresholds, disable_cuda, **kwargs):\n assert(len(src_paths) == len(thresholds))\n self.thresholds = thresholds\n bbox = Bbox(bbox_start, bbox_stop)\n self.device = None\n if not disable_cuda and torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n \n self.srcs = [util.get_cloudvolume(path, mip=mip) for path in src_paths]\n self.src_bbox = self.srcs[0].bbox_to_mip(bbox, bbox_mip, mip)\n self.dst = util.create_cloudvolume(dst_path, self.srcs[0].info, mip, mip)\n\n def get_bytetensor(self, src, bbox, threshold):\n img = util.get_image(src, bbox)\n S = util.uint8_to_R(img)\n S = util.to_tensor(S, device=self.device).float()\n return S >= threshold\n \n def run(self):\n print('src_bbox {0}'.format(self.src_bbox))\n print('src_path {0}'.format(self.srcs[0].path))\n print('threshold {0}'.format(self.thresholds[0]))\n M = self.get_bytetensor(self.srcs[0], self.src_bbox, self.thresholds[0])\n for src, th in zip(self.srcs[1:], self.thresholds[1:]):\n print('src_path {0}'.format(src.path))\n print('threshold {0}'.format(th))\n op = self.get_bytetensor(src, self.src_bbox, th)\n M = logical_or([M, op])\n mask = util.to_numpy(M)\n util.save_image(self.dst, self.src_bbox, mask)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description='Threshold & combine CloudVolumes')\n parser.add_argument('--src_paths', nargs='+', type=str, \n help='List of CloudVolume paths for images to be combined')\n parser.add_argument('--dst_path', type=str,\n help='CloudVolume path for where combined image written')\n parser.add_argument('--mip', type=int,\n help='MIP level of images to be combined')\n parser.add_argument('--bbox_start', nargs=3, type=int,\n help='bbox origin, 3-element int list')\n parser.add_argument('--bbox_stop', nargs=3, type=int,\n help='bbox origin+shape, 3-element int list')\n parser.add_argument('--bbox_mip', type=int, default=0,\n help='MIP level at which bbox_start & bbox_stop are specified')\n parser.add_argument('--thresholds', nargs='+', type=float,\n help='List of thresholds; length should match length of SRC_PATHS')\n parser.add_argument('--disable_cuda', action='store_true', help='Disable CUDA')\n args = parser.parse_args()\n\n m = MaskCompiler(**vars(args))\n m.run()\n\n","sub_path":"inference/mask_compiler.py","file_name":"mask_compiler.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629246262","text":"import os\n\nfrom setuptools import setup, find_packages\n\n# Versioning autover depends on autover.\nfrom autover.version import get_setup_version\n\nsetup_args = dict(\n name='autover',\n version=get_setup_version(__file__, \"autover\", archive_commit=\"$Format:%h$\"),\n description='Autover provides consistent and up-to-date `__version__` strings for 
Python packages.',\n long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',\n author= \"IOAM\",\n author_email= \"developers@topographica.org\",\n maintainer=\"IOAM\",\n maintainer_email=\"developers@topographica.org\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='http://github.com/ioam/autover/',\n packages = find_packages(),\n provides = [\"autover\"],\n package_data = {'autover':['.version']},\n include_package_data=True,\n scripts = [\"scripts/autover\"],\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Software Development :: Libraries\"]\n)\n\n\n\nif __name__==\"__main__\":\n setup(**setup_args)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49634393","text":"#encoding=utf8\n\nimport logging\nimport logging.handlers #3.6版本必须要导入\n\n# Define some constants\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'\nBLACK_DOMAIN = ['www.google.gf', 'www.google.io', 'www.google.com.lc']\n\nDOMAIN = 'www.google.com'\nURL_SEARCH = \"https://{domain}/search?hl={language}&q={query}&btnG=Search&gbv=1\"\nURL_NUM = \"https://{domain}/search?hl={language}&q={query}&btnG=Search&gbv=1&num={num}\"\nURL_NEXT = \"https://{domain}/search?hl={language}&q={query}&btnG=Search&gbv=1&num={num}&start={start}\"\n\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\nlogging.getLogger(\"chardet\").setLevel(logging.WARNING)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\n#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', filename=r\"..\\log\\crawlerlog.log\")\nformat='%(asctime)s - %(name)s - %(levelname)s: %(message)s'\n#file_handler = logging.handlers.RotatingFileHandler(filename=r\"..\\log\\crawlerlog.log\", mode='a', encoding='utf-8')\n#formatter = logging.Formatter('%(name)-12s %(asctime)s level-%(levelname)-8s thread-%(thread)-8d %(message)s') # 每行日志的前缀设置\nfile_handler = logging.handlers.TimedRotatingFileHandler(filename=r\"..\\log\\ContentCrawlerLog.log\",when='H',interval=1,backupCount=24,encoding='utf-8')#往文件里写入#指定间隔时间自动生成文件的处理器\nsh_handler = logging.StreamHandler()#往屏幕上输出\nformatter = logging.Formatter(format) # 实例化formatter\nfile_handler.setFormatter(formatter) # 为handler添加formatter\nLOGGER = logging.getLogger('Content_Search')\nLOGGER.addHandler(sh_handler)\nLOGGER.addHandler(file_handler)\nLOGGER.setLevel(logging.DEBUG)","sub_path":"MyGoogleTool/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499570988","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 5 11:46:17 2019\n\n@author: Aditya Tanwar\n\"\"\"\n\n# Importing the libraries\nimport pandas as pd\nfrom apyori import apriori\nfrom matplotlib import pyplot as plt\n\n# Dataset Loading\ndataset = pd.read_csv('BreadBasket_DMS.csv')\n\n\n# Ploting Pie Chart of top 15 selling items\nmost_selled_15 = dataset[\"Item\"].value_counts().head(15)\n\nlabels = list(most_selled_15.index)\nslices = 
list(most_selled_15)\nplt.pie(slices,labels=labels, wedgeprops={'edgecolor':'black'})\nplt.axis('equal')\nplt.show()\n\n\n\n# Preparing data\n\n#transactions = []\n#for i in set(dataset['Transaction']):\n# transactions.append( list(dataset['Item'][dataset['Transaction']==i]) )\n# \n#counter = 0\n#for i in transactions:\n# transactions[ counter ] = list(set(transactions[counter]).discard(\"None\"))\n# counter +=1\n#\n\ndataset = dataset[dataset[\"Item\"]!= \"NONE\"]\n\ndef cart(values):\n return list(set(values))\n\ntransactions = list(dataset.groupby(\"Transaction\")[\"Item\"].apply(cart))\n\n\n# Training Apriori on the dataset\nresults = list( apriori(transactions, min_support = 0.0025, min_confidence = 0.2, min_lift = 3) )\n\nfor item in results:\n\n # first index of the inner list\n # Contains base item and add item\n pair = item[0] \n items = [x for x in pair]\n print(\"Rule: \" + items[0] + \" -> \" + items[1])\n\n #second index of the inner list\n print(\"Support: \" + str(item[1]))\n\n #third index of the list located at 0th\n #of the third index of the inner list\n\n print(\"Confidence: \" + str(item[2][0][2]))\n print(\"Lift: \" + str(item[2][0][3]))\n print(\"=====================================\")\n\n\n","sub_path":"Day24/BreadBasket_DMS.py","file_name":"BreadBasket_DMS.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"210468584","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 22 12:51:47 2019\r\n\r\n@author: michael.schulte\r\n\"\"\"\r\n\r\nimport gym\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nenv = gym.make('MountainCar-v0')\r\n\r\nLEARNING_RATE = 0.1\r\nDISCOUNT = 0.9\r\n\r\nEPISODES = 3000\r\nSHOW_EVERY = 100\r\n\r\nepsilon = 0.2\r\nSTART_EPSILON_DECAYING = 1\r\nEND_EPSILON_DECAYING = EPISODES // 2\r\nepsilon_decay_value = epsilon / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)\r\n\r\nDISCRETE_OBS_SPACE_SIZE = [15] * len(env.observation_space.high)\r\ndiscrete_obs_space_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OBS_SPACE_SIZE\r\n\r\nq_table = np.random.uniform(low = -2, high = 0,\r\n size = (DISCRETE_OBS_SPACE_SIZE + [env.action_space.n]))\r\n\r\nep_rewards = []\r\naggr_ep_rewards = {'ep': [], 'avg': [], 'min': [], 'max': []}\r\n\r\ndef get_discrete_state(state):\r\n discrete_state = (state - env.observation_space.low) / discrete_obs_space_win_size\r\n\r\n return tuple(discrete_state.astype(np.int))\r\n\r\n\r\nfor episode in range(EPISODES):\r\n episode_reward = 0\r\n\r\n if episode % SHOW_EVERY == 0:\r\n render = True\r\n #print(episode)\r\n else:\r\n render = False\r\n\r\n discrete_state = get_discrete_state(env.reset())\r\n\r\n done = False\r\n\r\n while not done:\r\n if np.random.random() > epsilon:\r\n action = np.argmax(q_table[discrete_state])\r\n else:\r\n action = np.random.randint(low = 0, high = env.action_space.n)\r\n\r\n new_state, reward, done, _ = env.step(action)\r\n episode_reward += reward\r\n\r\n new_discrete_state = get_discrete_state(new_state)\r\n\r\n if render == True:\r\n env.render()\r\n\r\n if not done:\r\n max_future_q = np.max(q_table[new_discrete_state])\r\n current_q = q_table[discrete_state + (action, )]\r\n new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)\r\n q_table[discrete_state + (action, )] = new_q\r\n elif new_state[0] >= env.goal_position:\r\n q_table[discrete_state + (action, )] = 0\r\n print('we made it on episode: 
{}'.format(episode))\r\n\r\n discrete_state = new_discrete_state\r\n\r\n if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:\r\n epsilon -= epsilon_decay_value\r\n\r\n ep_rewards.append(episode_reward)\r\n\r\n if episode % SHOW_EVERY == 0:\r\n average_reward = sum(ep_rewards[-SHOW_EVERY:]) / len(ep_rewards[-SHOW_EVERY:])\r\n aggr_ep_rewards['ep'].append(episode)\r\n aggr_ep_rewards['avg'].append(average_reward)\r\n aggr_ep_rewards['min'].append(np.min(ep_rewards[-SHOW_EVERY:]))\r\n aggr_ep_rewards['max'].append(np.max(ep_rewards[-SHOW_EVERY:]))\r\n\r\nenv.close()\r\n\r\nplt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'], color = 'r', label = 'avg')\r\nplt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['min'], color = 'g', label = 'min')\r\nplt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['max'], color = 'b', label = 'max')\r\n#plt.legend(loc = 4)\r\nplt.show()","sub_path":"rl/mountain-car.py","file_name":"mountain-car.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"472541963","text":"import random\n\nclass Thing:\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n \n __repr__ = __str__\n\nthings = [Thing(random.randint(-100, 100)) for n in range(20)]\n\n# # Good idea but confuses Python\n# print(things)\n# for thing in things:\n# if thing.value < 0:\n# things.remove(thing)\n\nprint(things)\ntemp_things = []\nfor thing in things:\n if not thing.value < 0:\n temp_things.append(thing)\n\nprint(temp_things) # This now contains all the positive values\n\n# replace things with temp_things\nthings[:] = temp_things\nprint(things)","sub_path":"examples/misc/removefromlist.py","file_name":"removefromlist.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"458137776","text":"from datetime import datetime\nfrom hashlib import sha1\nfrom hmac import new as hmac_new\nfrom django.conf import settings\nfrom AuroraUser.models import AuroraUser\n\nimport logging\nlogger = logging.getLogger(\"ZidSSOBackend\")\n\n\nclass ZIDAuthenticationMiddleware(object):\n \"\"\"\n This class is used as the AuthenticationMiddleware on the production server\n \"\"\"\n def authenticate(self, params):\n\n param_keys = params.keys()\n\n if 'sKey' in param_keys:\n hmac_received = params['sKey']\n elif 'logout' in param_keys:\n hmac_received = params['logout']\n else:\n logger.error(\"Missing parameters for authentication. 
Abort.\")\n return None\n\n # make sure order is correct by creating a new list and putting in the available keys one by one\n values = ''\n for key in ['oid', 'mn', 'firstName', 'lastName', 'mail']:\n if key in param_keys:\n values += params[key]\n\n shared_secret = settings.SSO_SHARED_SECRET.encode(encoding='latin1')\n utc_now = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()\n now = int(utc_now / 10)\n user = None\n for offset in [0, -1, 1, -2, 2]:\n values_string = values + str(now + offset)\n values_string = values_string.encode(encoding='latin1')\n hmac_calced = hmac_new(shared_secret, values_string, sha1).hexdigest()\n\n if hmac_calced == hmac_received:\n try:\n user = AuroraUser.objects.get(matriculation_number=params['mn'])\n except AuroraUser.DoesNotExist:\n logger.debug(\"User with Matr.Nr.: %s not found\" % params['mn'])\n try:\n user = AuroraUser.objects.get(oid=params['oid'])\n except AuroraUser.DoesNotExist:\n logger.debug(\"User with OID:%s not found\" % params['oid'])\n user = None\n\n if user is None:\n logger.debug(\"HMAC doesn't match. Abort authentication.\")\n\n return user\n\n def get_user(self, user_id):\n try:\n user = AuroraUser.objects.get(pk=user_id)\n except AuroraUser.DoesNotExist:\n user = None\n\n return user\n\n def __str__(self):\n return('ZidSSOBackend')\n\n def __unicode__(self):\n return('ZidSSOBackend')","sub_path":"middleware/ZIDAuthenticationMiddleware.py","file_name":"ZIDAuthenticationMiddleware.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"87082820","text":"#!/usr/bin/python\n__author__ = 'ejk'\n\nimport subprocess\nfrom os import path\nfrom time import sleep\n\nsalt_master = 'salt-master.cisco.com'\nsalt_name = 'virl'\nsalt_append_domain = 'virl.info'\nwhile_exit = 0\ncwd = path.realpath('./')\nproxy = 'None'\nhostname = 'virl'\ndomain = 'virl.info'\n\nwhile not while_exit:\n print (30 * '-')\n print (\" V I R L - I N S T A L L - M E N U\")\n print (30 * '-')\n print (\"1. Change salt master from {0} \".format(salt_master))\n print (\"2. Change salt id from {0} or salt domain from {1}\".format(salt_name, salt_append_domain))\n print (\"3. Change hostname from {0} or domain name {1}\".format(hostname, domain))\n print (\"4. Write out extra.conf\")\n print (\"5. Change http proxy from {0}\".format(proxy))\n print (\"6. install salt without preseed keys\")\n print (\"7. install salt with preseed keys in {0}\".format(cwd + '/preseed_keys'))\n print (\"8. Test if you are connected to salt-master\")\n print (\"9. Install virl installer and settings\")\n print (\"10. Edit /etc/virl.ini\")\n print (\"11. 
Exit\")\n print (30 * '-')\n\n choice = raw_input('Which step are you on [1-11] : ')\n\n choice = int(choice)\n\n if choice == 1:\n salt_master = raw_input('Salt master [%s] ' % salt_master)\n if choice == 2:\n salt_name = raw_input('Salt name [%s] ' % salt_name)\n salt_append_domain = raw_input('Salt domain name [%s] ' % salt_append_domain)\n if choice == 3:\n hostname = raw_input('System hostname [%s] ' % hostname)\n domain = raw_input('System Domain name [%s] ' % domain)\n if choice == 4:\n if not path.exists('/etc/salt/virl'):\n subprocess.check_output(['mkdir', '-p', '/etc/salt/virl'])\n if not path.exists('/etc/salt/minion.d'):\n subprocess.check_output(['mkdir', '-p', '/etc/salt/minion.d'])\n with open((\"/etc/salt/minion.d/extra.conf\"), \"w\") as extra:\n extra.write(\"\"\"master: {salt_master}\\n\"\"\".format(salt_master=salt_master))\n extra.write(\"\"\"id: {salt_name}\\n\"\"\".format(salt_name=salt_name))\n extra.write(\"\"\"append_domain: {salt_append_domain}\\n\"\"\".format(salt_append_domain=salt_append_domain))\n ##TODO waiting for salt to put this back in\n # extra.write(\"\"\"grains_dirs:\\n\"\"\")\n # extra.write(\"\"\" - /etc/salt/virl\\n\"\"\")\n\n if choice == 5:\n proxy = raw_input('Http proxy [%s] ' % proxy)\n if not proxy == 'None':\n if not path.exists('/etc/salt'):\n subprocess.check_output(['mkdir', '-p', '/etc/salt'])\n with open((\"/etc/salt/grains\"), \"w\") as grains:\n grains.write(\"\"\"proxy: True\\n\"\"\")\n grains.write(\"\"\"http proxy: {proxy}\\n\"\"\".format(proxy=proxy))\n if choice == 6:\n subprocess.call(['sh', '/home/virl/virl-bootstrap/install_salt.sh'])\n if choice == 7:\n subprocess.call(['sh', '/home/virl/virl-bootstrap/install_salt.sh', '-k', (cwd + '/preseed_keys')])\n if choice == 8:\n subprocess.call(['salt-call', 'test.ping'])\n if choice == 9:\n subprocess.call(['salt-call', 'state.sls', 'zero'])\n if choice == 10:\n if not path.exists('/etc/virl.ini'):\n subprocess.call(['cp', '/home/virl/vsettings.ini', '/etc/virl.ini'])\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'salt_master', salt_master])\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'salt_id', salt_name])\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'salt_domain', salt_append_domain])\n if not proxy == 'None':\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'proxy', 'True'])\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'http_proxy', proxy])\n if not hostname == 'virl' or not domain == 'virl.info':\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'hostname', hostname ])\n subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',\n 'domain', domain])\n subprocess.call(['/usr/bin/nano', '/etc/virl.ini'])\n\n if choice == 11:\n subprocess.call(['/usr/local/bin/vinstall', 'salt'])\n sleep(5)\n subprocess.call(['/usr/local/bin/vinstall', 'first'])\n while_exit = 1\n\n","sub_path":"virl-bootstrap.py","file_name":"virl-bootstrap.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"381426622","text":"from Tkinter import *\r\nimport tkMessageBox\r\n\r\nclass AddRecordWindow:\r\n \"\"\" Class is a user dialog window which allows to add new record to data base.\r\n\r\n Class fields:\r\n - root - Tkinter top level window(parent of all the widgets)\r\n - canvas - Tkinter.Canvas widget; parent for the widget frame, allows scrolling\r\n - frame - Tkinter.Frame 
widget; parent for all containing widgets(entries and buttons)\r\n - legend - list of strings; contains data base legend\r\n - value_list - list of Tkinter.StringVar objects attached to text entries\r\n \r\n \"\"\"\r\n def __init__(self, legend_list):\r\n self.legend = legend_list\r\n self.value_list = []\r\n self.arg_quantity = len(legend_list)\r\n\r\n def initWindowLoop(self):\r\n \"\"\" Method initiates process of building window forms and widgets.\r\n The returning reference is a list of values entered by user.\r\n This list is returned after destorying Tkinter instance of the window.\r\n\r\n \"\"\"\r\n self.buildWindow()\r\n return self.prepareToReturn(self.value_list)\r\n\r\n def buildWindow(self):\r\n \"\"\" Method creates Tkinter instance of the user dialog and \r\n sets up it initial configurations: \r\n + attaching Tkinter.Canvas and Tkinter.Frame\r\n + adding Scrollbars\r\n\r\n \"\"\"\r\n self.root = Tk()\r\n self.root.title('Add Entry')\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.closeEventHandler)\r\n self.root.geometry(\"260x660\")\r\n self.root.minsize(260,330)\r\n\r\n self.canvas = Canvas(self.root)\r\n self.frame = Frame(self.canvas)\r\n self.vsb = Scrollbar(self.root, orient=\"vertical\", command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=self.vsb.set)\r\n\r\n self.vsb.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((5,5), window=self.frame, anchor=\"nw\", tags=\"self.frame\")\r\n\r\n self.frame.bind(\"\", self.onFrameConfigure)\r\n self.buildForm()\r\n self.root.mainloop()\r\n\r\n def onFrameConfigure(self, event):\r\n \"\"\" Reset the scroll region to encompass the inner frame \"\"\"\r\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))\r\n\r\n def buildForm(self):\r\n \"\"\" Method creates and attaches buttons, test entries and labels to window frame \"\"\"\r\n for i in range(self.arg_quantity):\r\n self.value_list.append(StringVar())\r\n for row in range(self.arg_quantity):\r\n Label(self.frame, text=self.legend[row]).grid(row=row, column=0)\r\n Entry(self.frame, textvariable=self.value_list[row]).grid(row=row, column=1)\r\n Button(self.frame, text=\"Add entry\", command=self.submitReaction,\r\n cursor=\"hand2\").grid(row=self.arg_quantity+1, column=1)\r\n\r\n def submitReaction(self):\r\n \"\"\" Method is called on pressing the \"Add entry\" button.\r\n It validates data entered by user.\r\n\r\n \"\"\"\r\n for i in range(self.arg_quantity):\r\n if self.value_list[i].get() == \"\":\r\n tkMessageBox.showerror(\"Input error!\", \"All fields have to be filled.\")\r\n return\r\n self.root.destroy()\r\n return\r\n\r\n def closeEventHandler(self):\r\n \"\"\" Method is called on window destroy event \"\"\"\r\n self.value_list = []\r\n self.root.destroy()\r\n return\r\n\r\n def prepareToReturn(self,stringvar_list):\r\n \"\"\" Method extracts Tkinter.StringVar objects to list of strings. 
\"\"\"\r\n value_list = []\r\n if stringvar_list == []:\r\n return []\r\n for i in range(self.arg_quantity):\r\n value_list.append( stringvar_list[i].get() )\r\n return value_list","sub_path":"src/AddRecordWindow.py","file_name":"AddRecordWindow.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"309361250","text":"import os\nfrom astropy.io import fits\nfrom urllib.request import urlretrieve \n\n\ndef get_flat(file_name):\n '''\n Will check if user has proper reference file directories \n and files. Will also return flat field file appropriate for \n the input file. \n\n Parameters\n ----------\n file_name : string\n File name of input IMA. \n\n Returns\n ----------\n reffile_name : string\n File name of flat field for that file. \n\n '''\n os.environ['iref'] = '~/iref/'\n if not os.path.exists('iref'):\n os.mkdir('iref')\n \n base_url = 'https://hst-crds.stsci.edu/unchecked_get/references/hst/'\n \n with fits.open(file_name) as fitsfile:\n reffile_name = fitsfile[0].header['PFLTFILE'].replace('$', '/')\n if not os.path.exists(reffile_name):\n urlretrieve(base_url + os.path.basename(reffile_name), reffile_name)\n\n return reffile_name\n\ndef footprints_plot(root='icxe15010'):\n \n # import unicorn.survey_paper as sup #Mario: commenting out the sup dependencies. Will fix this later\n import matplotlib.colors as colors\n import matplotlib.cm as cmx\n \n if root == 'icxe15010':\n aspect = 1.75\n xlim = [150.265, 150.157]\n ylim = [2.45, 2.64]\n xticklab = [r'$10^\\mathrm{h}01^\\mathrm{m}00^\\mathrm{s}$', r'$10^\\mathrm{h}00^\\mathrm{m}45^\\mathrm{s}$']\n #xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True)]\n yticklab = [r'$+02^\\circ30^\\prime00^{\\prime\\prime}$',r'$+02^\\circ35^\\prime00^{\\prime\\prime}$']\n #ytickv = [sup.degrees(2, 30, 00, hours=False),sup.degrees(2, 35, 00, hours=False)]\n label = 'COSMOS-15'\n factor=10.\n\n if root == 'icxe16010':\n aspect=0.9\n xlim = [150.265, 150.1]\n ylim = [2.607, 2.74]\n xticklab = [r'$10^\\mathrm{h}01^\\mathrm{m}00^\\mathrm{s}$', r'$10^\\mathrm{h}00^\\mathrm{m}45^\\mathrm{s}$',r'$10^\\mathrm{h}00^\\mathrm{m}30^\\mathrm{s}$']\n #xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True)]\n yticklab = [r'$+02^\\circ38^\\prime00^{\\prime\\prime}$',r'$+02^\\circ40^\\prime00^{\\prime\\prime}$', r'$+02^\\circ42^\\prime00^{\\prime\\prime}$', r'$+02^\\circ44^\\prime00^{\\prime\\prime}$']\n #ytickv = [sup.degrees(2, 38, 00, hours=False),sup.degrees(2, 40, 00, hours=False),sup.degrees(2, 42, 00, hours=False),sup.degrees(2, 44, 00, hours=False)]\n label='COSMOS-16'\n factor=20.\n \n if root == 'icxe17010':\n aspect=1.4\n xlim = [150.2, 150.06]\n ylim = [2.52, 2.72]\n xticklab = [r'$10^\\mathrm{h}00^\\mathrm{m}45^\\mathrm{s}$', r'$10^\\mathrm{h}00^\\mathrm{m}30^\\mathrm{s}$',r'$10^\\mathrm{h}00^\\mathrm{m}15^\\mathrm{s}$']\n #xtickv = [sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True),sup.degrees(10,00,15, hours=True)]\n yticklab = [r'$+02^\\circ35^\\prime00^{\\prime\\prime}$',r'$+02^\\circ40^\\prime00^{\\prime\\prime}$']\n #ytickv = [sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False)]\n label='COSMOS-17'\n factor=240.\n\n if root == 'icxe18010':\n aspect=1.577\n xlim = [150.14, 150.01]\n ylim = [2.53, 2.735]\n xticklab = [r'$10^\\mathrm{h}00^\\mathrm{m}30^\\mathrm{s}$', 
r'$10^\\mathrm{h}00^\\mathrm{m}20^\\mathrm{s}$',r'$10^\\mathrm{h}00^\\mathrm{m}10^\\mathrm{s}$']\n #xtickv = [sup.degrees(10,00,30, hours=True),sup.degrees(10,00,20, hours=True),sup.degrees(10,00,10, hours=True)]\n yticklab = [r'$+02^\\circ35^\\prime00^{\\prime\\prime}$',r'$+02^\\circ40^\\prime00^{\\prime\\prime}$']\n #ytickv = [sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False)]\n label='COSMOS-18'\n factor=240.\n \n \n \n # fig = unicorn.catalogs.plot_init(square=True, xs=5., aspect=aspect, # MArio: changed this to a regular matplotlib call (below), until we fix the unicorn module dependency\n #fontsize=8, left=0.18, right=0.02, bottom=0.10, top=0.10)\n\n fig = plt.figure(square=True, xs=5., aspect=aspect, \n fontsize=8, left=0.18, right=0.02, bottom=0.10, top=0.10)\n ax = fig.add_subplot(111)\n jet = cm = plt.get_cmap('jet')\n cNorm = colors.Normalize(vmin=0, vmax=9)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet) \n \n reg_file = root+'_asn.reg'\n \n poly = []\n with open(reg_file) as f:\n for line in f:\n if not line.startswith('fk5'):\n region = line.split('#')[0]\n # poly.append(sup.polysplit(region=region, get_shapely=True))\n\n shifts = table.read('shifts_{}.txt'.format(root), format='ascii', \n names=('file','x','y','rot','scale','x_rms','y_rms'))\n \n cc = 0\n xcen_all = []\n ycen_all = []\n for j,(pp, x_off, y_off, file) in enumerate(zip(poly, shifts['x'], shifts['y'], shifts['file'])):\n cc += 1.\n color = scalarMap.to_rgba(cc)\n x, y = pp.exterior.xy\n flt = fits.open(file)\n xcen = flt[1].header['CRVAL1O']\n ycen = flt[1].header['CRVAL2O']\n x_off = (flt[1].header['CRVAL1B']-flt[1].header['CRVAL1O'])*20.\n y_off = (flt[1].header['CRVAL2B']-flt[1].header['CRVAL2O'])*20.\n #print file, xcen, xcen+x_off, ycen, ycen+y_off\n #xcen = (np.mean(x[:-1]))\n #ycen = (np.mean(y[:-1]))\n xcen_all.append(xcen)\n ycen_all.append(ycen)\n ax.plot(x,y,'-', color=color)\n #ax.annotate(\"\",xy=(xcen+(x_off*0.12)/factor, ycen+(y_off*0.12)/factor), xytext=(xcen, ycen), \n # arrowprops=dict(arrowstyle='->', color=color))\n #ax.plot([xcen, xcen+x_off], [ycen, ycen+y_off], '-')\n ax.annotate(\"\",xy=(xcen+x_off, ycen+y_off), xytext=(xcen, ycen), \n arrowprops=dict(arrowstyle='->', color=color))\n\n ax.plot(xcen_all, ycen_all, '+:', markersize=10., color='0.5', alpha=0.5) \n ax.set_xlim(xlim)\n ax.set_ylim(ylim) \n ax.set_xticklabels(xticklab)\n xtick = ax.set_xticks(xtickv)\n ax.set_yticklabels(yticklab)\n ytick = ax.set_yticks(ytickv)\n ax.set_title(label) \n plt.show(block=False)\n \n fig.savefig('footprint_{}.png'.format(label.lower()), dpi=200, transparent=False) \n\n\nclass ASNFile(object):\n \"\"\"\n ASNFile()\n \n Class for handling ASN fits files.\n \n >>> asn = ASNFile(file='ib3701050_asn.fits', grow=False)\n >>> asn.exposures\n ['ib3701ryq', 'ib3701sbq', 'ib3701sdq', 'ib3701sqq']\n >>> asn.product\n 'IB3701050'\n\n If grow=True, allow file rootnames to be 20 characters rather than 14.\n \"\"\"\n def _read_asn_file(self, grow=True):\n \"\"\"\n _read_asn_file(self)\n \n Read an ASN FITS file (self.file).\n \"\"\"\n import numpy as np\n from warnings import warn\n \n self.in_fits = pyfits.open(self.file)\n \n data = self.in_fits[1].data\n \n if grow:\n #### Allow more characters in the MEMNAME column\n memname = pyfits.Column(name='MEMNAME', format='40A', array=self.in_fits[1].columns[0].array.astype('S40'), disp='A40')\n memtype = self.in_fits[1].columns[1]\n memprsnt = self.in_fits[1].columns[2]\n coldefs = pyfits.ColDefs([memname, memtype, memprsnt])\n try:\n 
#print 'from_columns'\n hdu = pyfits.BinTableHDU.from_columns(coldefs)\n except:\n #print 'fail pyfits'\n hdu = pyfits.new_table(coldefs)\n \n hdu.header = self.in_fits[1].header\n hdu.header['TFORM1'] = '40A'\n hdu.header['TDISP1'] = 'A40'\n hdu.header['NAXIS1'] += 26\n self.in_fits[1] = hdu \n \n data = self.in_fits[1].data\n #print data\n \n self.header = self.in_fits[0].header\n \n names = data.field('MEMNAME')\n types = data.field('MEMTYPE')\n \n ##### Exposures\n #exp_idx = np.where(types == 'EXP-DTH')\n exp_idx = types == 'EXP-DTH'\n #### check if MEMTYPE starts with EXP, have other cases where type is \"EXP-RP#\"\n for ii, type in enumerate(types):\n if types[ii].startswith('EXP'):\n exp_idx[ii] = True\n \n if exp_idx.sum() == 0:\n warn('ASN file %s has no EXP-DTH items')\n else:\n self.exposures = []\n for exp in names[exp_idx]:\n self.exposures.append(exp.lower())\n \n ##### Products\n prod_idx = np.where(types == 'PROD-DTH')\n if prod_idx[0].shape[0] != 1:\n warn('ASN file %s has N != 1 PROD-DTH items' %self.file )\n self.product = None\n else:\n self.product = names[prod_idx[0]][0].upper()\n \n \n def __init__(self, file=None, grow=True):\n self.file = file\n self.exposures = []\n self.product = None\n if file:\n self._read_asn_file(grow=grow)\n \n \n def write(self, out_file=None, clobber=True):\n \"\"\"\n write(self,out_file=None, clobber=True)\n\n out_file='self' writes to `self.file`.\n \n \"\"\"\n if not out_file:\n print(\"USAGE:: asn.write(out_file='output_asn.fits')\")\n else:\n if out_file == 'self':\n out_file = self.file\n \n nexp = self.exposures.__len__()\n if self.product:\n nprod = 1\n else:\n nprod = 0\n nrows = nexp + nprod\n #### Primary HDU\n hdu = self.in_fits[0].copy()\n #### BinTable HDU\n tbhdu = pyfits.new_table(self.in_fits[1].columns, nrows=nrows, fill=True)\n for i in range(nexp):\n tbhdu.data[i] = (self.exposures[i].upper(), 'EXP-DTH', True)\n if nprod > 0:\n tbhdu.data[i+1] = (self.product, 'PROD-DTH', True)\n \n tbhdu.header = self.in_fits[1].header.copy()\n tbhdu.header.update('ASN_ID',out_file.split('_asn.fits')[0])\n tbhdu.header.update('ASN_TAB',out_file)\n #### Create HDUList and write it to output file\n self.out_fits = pyfits.HDUList([hdu,tbhdu])\n if 'EXTEND' not in list(hdu.header.keys()):\n hdu.header.update('EXTEND', True, after='NAXIS')\n \n self.out_fits.writeto(out_file, clobber=clobber)\n \n def showContents(self):\n \"\"\"\n showContents()\n \n >>> x = ASNFile(file='ib3702060_asn.fits')\n >>> x.showContents()\n 1 ib3703uxq EXP-DTH yes\n 2 ib3703vaq EXP-DTH yes\n 3 ib3703vcq EXP-DTH yes\n 4 ib3703vsq EXP-DTH yes\n 5 IB3703050 PROD-DTH yes\n \"\"\"\n if self.exposures.__len__() > 0:\n for i,exp in enumerate(self.exposures):\n print('%5d %s EXP-DTH yes' %(i+1,exp))\n print('%5d %s PROD-DTH yes' %(i+2,self.product))\n \n def append(self, new):\n \"\"\"\n append(self, new)\n \n `new` must be an instance of ASNFile.\n \n `new.exposures` are added to the `self.exposures` list.\n \"\"\"\n from warnings import warn\n if not isinstance(new,self.__class__):\n warn(\"argument is not an instance of ASNFile()\")\n else:\n self.exposures.extend(new.exposures)\n \ndef clean_wcsname(flt='ibhj15wyq_flt.fits', wcsname='TWEAK', ACS=False, WFPC2=False):\n \"\"\"\n Workaround for annoying TweakReg feature of not overwriting WCS solns\n \"\"\"\n im = pyfits.open(flt, mode='update')\n if ACS:\n exts = [1,4]\n elif WFPC2:\n exts = [1,2,3,4]\n else:\n exts = [1]\n \n for ext in exts:\n header = im[ext].header\n for key in header:\n if 
key.startswith('WCSNAME'):\n if header[key] == wcsname:\n wcs_ext = key[-1]\n if key == 'WCSNAME':\n header[key] = 'X' + wcsname+'X'\n #im.flush()\n #\n for key in ['WCSNAME', 'WCSAXES', 'CRPIX1', 'CRPIX2', 'CDELT1', 'CDELT2', 'CUNIT1', 'CUNIT2', 'CTYPE1', 'CTYPE2', 'CRVAL1', 'CRVAL2', 'LONPOLE', 'LATPOLE', 'CRDER1', 'CRDER2', 'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2', 'FITNAME', 'NMATCH', 'RMS_RA', 'RMS_DEC']:\n try:\n header.remove(key+wcs_ext)\n except:\n #print key\n pass\n \n im.flush()\n\ndef subtract_flt_background(root='GOODN-N1-VBA-F105W', scattered_light=False, sex_background=False, order=2):\n \"\"\"\n Subtract polynomial background\n \"\"\"\n import scipy.optimize\n \n import astropy.units as u\n \n from astropy.table import Table as table\n \n import stwcs\n from stwcs import updatewcs\n \n import drizzlepac\n from drizzlepac import astrodrizzle, tweakreg, tweakback\n \n import threedhst\n \n asn = threedhst.utils.ASNFile(root+'_asn.fits')\n for exp in asn.exposures:\n updatewcs.updatewcs('%s_%s.fits' %(exp, 'flt'))\n\n if not os.path.exists('%s_drz_sci.fits' %(root)): \n if len(asn.exposures) == 1:\n drizzlepac.astrodrizzle.AstroDrizzle(root+'_asn.fits', clean=False, context=False, preserve=False, skysub=True, driz_separate=False, driz_sep_wcs=False, median=False, blot=False, driz_cr=False, driz_cr_corr=False, driz_combine=True)\n else:\n drizzlepac.astrodrizzle.AstroDrizzle(root+'_asn.fits', clean=False, context=False, preserve=False, skysub=True, driz_separate=True, driz_sep_wcs=True, median=True, blot=True, driz_cr=True, driz_cr_corr=True, driz_combine=True)\n \n se = threedhst.sex.SExtractor()\n se.options['WEIGHT_IMAGE'] = '%s_drz_wht.fits' %(root)\n se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT'\n se.options['CHECKIMAGE_TYPE'] = 'SEGMENTATION,BACKGROUND'\n se.options['CHECKIMAGE_NAME'] = '%s_drz_seg.fits,%s_drz_bkg.fits' %(root, root)\n se.options['BACK_TYPE'] = 'AUTO'\n se.options['BACK_SIZE'] = '256'\n #\n se.params['X_IMAGE'] = True; se.params['Y_IMAGE'] = True\n se.params['MAG_AUTO'] = True\n #\n se.options['CATALOG_NAME'] = '%s_drz_sci.cat' %(root)\n se.options['FILTER'] = 'Y'\n se.copyConvFile()\n se.options['FILTER_NAME'] = 'gauss_4.0_7x7.conv'\n se.options['DETECT_THRESH'] = '0.8'\n se.options['ANALYSIS_THRESH'] = '0.8'\n #\n se.options['MEMORY_OBJSTACK'] = '30000'\n se.options['MEMORY_PIXSTACK'] = '3000000'\n se.options['MEMORY_BUFSIZE'] = '2048'\n \n se.sextractImage('%s_drz_sci.fits' %(root))\n #threedhst.sex.sexcatRegions('%s_flt.cat' %(exp), '%s_flt.reg' %(exp), format=1)\n \n #### Blot segmentation map to FLT images for object mask\n asn = threedhst.utils.ASNFile('%s_asn.fits' %(root))\n \n #print 'Read files...'\n ref = pyfits.open('%s_drz_sci.fits' %(root))\n ref_wcs = stwcs.wcsutil.HSTWCS(ref, ext=0)\n\n seg = pyfits.open('%s_drz_seg.fits' %(root)) \n #### Fill ref[0].data with zeros for seg mask\n #seg_data = ref[0].data\n #seg_data[seg[0].data == 0] = 0\n seg_data = np.cast[np.float32](seg[0].data)\n \n bkg_data = pyfits.open('%s_drz_bkg.fits' %(root))[0].data\n \n yi, xi = np.indices((1014,1014))\n if scattered_light: \n bg_components = np.ones((4,1014,1014))\n bg_components[1,:,:] = xi/1014.*2\n bg_components[2,:,:] = yi/1014.*2\n bg_components[3,:,:] = pyfits.open(os.getenv('THREEDHST') + '/CONF/G141_scattered_light.fits')[0].data\n #### Use flat-field itself for images affected by full-field \n #### persistence from the tungsten lamp\n if scattered_light == 2:\n bg_components[3,:,:] = pyfits.open(os.getenv('iref') + 
'flat_UDF_F140W_v0.fits')[1].data[5:-5,5:-5]\n \n NCOMP=4\n else:\n # bg_components = np.ones((3,1014,1014))\n # bg_components[1,:,:] = xi/1014.*2\n # bg_components[2,:,:] = yi/1014.*2\n # NCOMP=3\n #\n if order == 2:\n NCOMP=6\n bg_components = np.ones((NCOMP,1014,1014))\n bg_components[1,:,:] = (xi-507)/507.\n bg_components[2,:,:] = (yi-507)/507.\n bg_components[3,:,:] = ((xi-507)/507.)**2\n bg_components[4,:,:] = ((yi-507)/507.)**2\n bg_components[5,:,:] = (xi-507)*(yi-507)/507.**2\n else:\n NCOMP=3\n bg_components = np.ones((NCOMP,1014,1014))\n bg_components[1,:,:] = (xi-507)/507.\n bg_components[2,:,:] = (yi-507)/507.\n \n bg_flat = bg_components.reshape((NCOMP,1014**2))\n \n #### Loop through FLTs, blotting reference and segmentation\n models = []\n for exp in asn.exposures:\n flt = pyfits.open('%s_flt.fits' %(exp)) #, mode='update')\n flt_wcs = stwcs.wcsutil.HSTWCS(flt, ext=1)\n \n ### segmentation \n print('Segmentation image: %s_blot.fits' %(exp))\n blotted_seg = astrodrizzle.ablot.do_blot(seg_data+0, ref_wcs, flt_wcs, 1, coeffs=True, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)\n \n blotted_bkg = 0.\n if sex_background:\n blotted_bkg = astrodrizzle.ablot.do_blot(bkg_data+0, ref_wcs, flt_wcs, 1, coeffs=True, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)\n flt[1].data -= blotted_bkg\n \n mask = (blotted_seg == 0) & (flt['DQ'].data == 0) & (flt[1].data > -1) & (xi > 10) & (yi > 10) & (xi < 1004) & (yi < 1004) \n mask &= np.isfinite(flt[1].data) & np.isfinite(flt[2].data)\n mask &= (flt[1].data < 5*np.median(flt[1].data[mask]))\n data_range = np.percentile(flt[1].data[mask], [2.5, 97.5])\n mask &= (flt[1].data >= data_range[0]) & (flt[1].data <= data_range[1])\n data_range = np.percentile(flt[2].data[mask], [0.5, 99.5])\n mask &= (flt[2].data >= data_range[0]) & (flt[2].data <= data_range[1])\n \n ### Least-sq fit for component normalizations\n data = flt[1].data[mask].flatten()\n wht = (1./flt[2].data[mask].flatten())**2\n templates = bg_flat[:, mask.flatten()]\n p0 = np.zeros(NCOMP)\n p0[0] = np.median(data)\n obj_fun = threedhst.grism_sky.obj_lstsq\n print('XXX: %d' %(mask.sum()))\n popt = scipy.optimize.leastsq(obj_fun, p0, args=(data, templates, wht), full_output=True, ftol=1.49e-8/1000., xtol=1.49e-8/1000.)\n xcoeff = popt[0]\n model = np.dot(xcoeff, bg_flat).reshape((1014,1014))\n models.append(model)\n \n # add header keywords of the fit components\n flt = pyfits.open('%s_flt.fits' %(exp), mode='update')\n flt[1].data -= model+blotted_bkg\n for i in range(NCOMP):\n if 'BGCOMP%d' %(i+1) in flt[0].header:\n flt[0].header['BGCOMP%d' %(i+1)] += xcoeff[i]\n else:\n flt[0].header['BGCOMP%d' %(i+1)] = xcoeff[i] \n \n flt.flush()\n coeff_str = ' '.join(['%.4f' %c for c in xcoeff])\n threedhst.showMessage('Background subtraction, %s_flt.fits:\\n\\n %s' %(exp, coeff_str))\n \n return models\n","sub_path":"wfc3_dash/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"466778362","text":"# uncompyle6 version 3.3.5\r\n# Python bytecode 3.6 (3379)\r\n# Decompiled from: Python 3.7.1 (v3.7.1:260ec2c36a, Oct 20 2018, 14:57:15) [MSC v.1915 64 bit (AMD64)]\r\n# Embedded file name: tgmain.py\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport numpy as np, tensorflow as tf, random as rn, os, math, time, collections, numpy as np\r\nos.environ['PYTHONHASHSEED'] = 
'0'\r\nnp.random.seed(42)\r\nrn.seed(12345)\r\ntf.set_random_seed(1234)\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\nimport tensorflow.contrib.slim as slim, sys, shutil, subprocess\r\nfrom lib102.ops import *\r\nfrom lib102.dataloader import inference_data_loader\r\nfrom lib102.frvsr import generator_F, fnet\r\nFlags = tf.app.flags\r\nFlags.DEFINE_integer('rand_seed', 1, 'random seed')\r\nFlags.DEFINE_string('input_dir_LR', './input', 'The directory of the input resolution input data, for inference mode')\r\nFlags.DEFINE_string('mode', 'inference', 'train, or inference')\r\nFlags.DEFINE_string('output_dir', './output', 'The output directory of the checkpoint')\r\nFlags.DEFINE_string('output_ext', 'png', 'The format of the output when evaluating')\r\nFlags.DEFINE_string('checkpoint', './model/TecoGAN', 'If provided, the weight will be restored from the provided checkpoint')\r\nFlags.DEFINE_integer('num_resblock', 16, 'How many residual blocks are there in the generator')\r\nFlags.DEFINE_string('cudaID', '0', 'CUDA devices')\r\nFLAGS = Flags.FLAGS\r\nos.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cudaID\r\nmy_seed = FLAGS.rand_seed\r\nrn.seed(my_seed)\r\nnp.random.seed(my_seed)\r\ntf.set_random_seed(my_seed)\r\nif FLAGS.output_dir is None:\r\n raise ValueError('The output directory is needed')\r\nif not os.path.exists(FLAGS.output_dir):\r\n os.mkdir(FLAGS.output_dir)\r\n\r\ndef printVariable(scope, key=tf.GraphKeys.MODEL_VARIABLES):\r\n print('Scope %s:' % scope)\r\n variables_names = [v.name for v in tf.get_collection(key, scope=scope)]\r\n values = sess.run(variables_names)\r\n total_sz = 0\r\n for k, v in zip(variables_names, values):\r\n print('Variable: ' + k)\r\n print('Shape: ' + str(v.shape))\r\n total_sz += np.prod(v.shape)\r\n\r\n print('total size: %d' % total_sz)\r\n\r\n\r\nif FLAGS.mode == 'inference':\r\n if FLAGS.checkpoint is None:\r\n raise ValueError('The checkpoint file is needed to performing the test.')\r\n inference_data = inference_data_loader(FLAGS)\r\n input_shape = [1] + list(inference_data.inputs[0].shape)\r\n output_shape = [1, input_shape[1] * 4, input_shape[2] * 4, 3]\r\n oh = input_shape[1] - input_shape[1] // 8 * 8\r\n ow = input_shape[2] - input_shape[2] // 8 * 8\r\n paddings = tf.constant([[0, 0], [0, oh], [0, ow], [0, 0]])\r\n print(input_shape)\r\n print(output_shape)\r\n inputs_raw = tf.placeholder((tf.float32), shape=input_shape, name='inputs_raw')\r\n pre_inputs = tf.Variable((tf.zeros(input_shape)), trainable=False, name='pre_inputs')\r\n pre_gen = tf.Variable((tf.zeros(output_shape)), trainable=False, name='pre_gen')\r\n pre_warp = tf.Variable((tf.zeros(output_shape)), trainable=False, name='pre_warp')\r\n transpose_pre = tf.space_to_depth(pre_warp, 4)\r\n inputs_all = tf.concat((inputs_raw, transpose_pre), axis=(-1))\r\n with tf.variable_scope('generator'):\r\n gen_output = generator_F(inputs_all, 3, reuse=False, FLAGS=FLAGS)\r\n with tf.control_dependencies([tf.assign(pre_inputs, inputs_raw)]):\r\n outputs = tf.assign(pre_gen, deprocess(gen_output))\r\n inputs_frames = tf.concat((pre_inputs, inputs_raw), axis=(-1))\r\n with tf.variable_scope('fnet'):\r\n gen_flow_lr = fnet(inputs_frames, reuse=False)\r\n gen_flow_lr = tf.pad(gen_flow_lr, paddings, 'SYMMETRIC')\r\n gen_flow = upscale_four(gen_flow_lr * 4.0)\r\n gen_flow.set_shape(output_shape[:-1] + [2])\r\n pre_warp_hi = tf.contrib.image.dense_image_warp(pre_gen, gen_flow)\r\n before_ops = tf.assign(pre_warp, pre_warp_hi)\r\n print('Finish building the network')\r\n var_list = 
tf.get_collection((tf.GraphKeys.MODEL_VARIABLES), scope='generator')\r\n var_list = var_list + tf.get_collection((tf.GraphKeys.MODEL_VARIABLES), scope='fnet')\r\n weight_initiallizer = tf.train.Saver(var_list)\r\n init_op = tf.global_variables_initializer()\r\n local_init_op = tf.local_variables_initializer()\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n image_dir = FLAGS.output_dir\r\n if not os.path.exists(image_dir):\r\n os.makedirs(image_dir)\r\n with tf.Session(config=config) as (sess):\r\n sess.run(init_op)\r\n sess.run(local_init_op)\r\n print('Loading weights from ckpt model')\r\n weight_initiallizer.restore(sess, FLAGS.checkpoint)\r\n max_iter = len(inference_data.inputs)\r\n srtime = 0\r\n print('Frame evaluation starts!!')\r\n for i in range(max_iter):\r\n input_im = np.array([inference_data.inputs[i]]).astype(np.float32)\r\n feed_dict = {inputs_raw: input_im}\r\n t0 = time.time()\r\n if i != 0:\r\n sess.run(before_ops, feed_dict=feed_dict)\r\n output_frame = sess.run(outputs, feed_dict=feed_dict)\r\n srtime += time.time() - t0\r\n if i >= 5:\r\n name, _ = os.path.splitext(os.path.basename(str(inference_data.paths_LR[i])))\r\n filename = 'output_' + name\r\n print('saving image %s' % filename)\r\n out_path = os.path.join(image_dir, '%s.%s' % (filename, FLAGS.output_ext))\r\n save_img(out_path, output_frame[0])\r\n else:\r\n print('Warming up %d' % (5 - i))\r\n\r\n print('total time ' + str(srtime) + ', frame number ' + str(max_iter))\r\nelif FLAGS.mode == 'train':\r\n print('Comming Soon!')","sub_path":"runGan102.py","file_name":"runGan102.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"645500638","text":"import os\n\nimport itertools\n\nfrom utils.genome import Genome, get_extremity_by_gene\n# import bg.breakpoint_graph\n\n\ndef get_immediate_subdirectories(a_dir):\n \"\"\"\n This function get subdirectories\n \"\"\"\n return ((os.path.join(a_dir, name), name) for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name)))\n\n\ndef get_immediate_files(a_dir):\n \"\"\"\n This function get files\n \"\"\"\n return ((os.path.join(a_dir, name), name) for name in os.listdir(a_dir) if\n os.path.isfile(os.path.join(a_dir, name)))\n\n\nclass ILPAnswer(object):\n def __init__(self, ov=0, score=0, es=0, genome=None, tm=0):\n self.obj_val = ov\n self.score = score\n self.exit_status = es\n self.genome = genome\n self.solution_time = tm\n\n def write_stats_file(self, result_out_file):\n with open(result_out_file, 'w') as out:\n out.write(\"# Objective value\\n\")\n out.write(str(self.obj_val) + \"\\n\")\n out.write(\"# Total DCJ-Indel Distance\\n\")\n out.write(str(self.score) + \"\\n\")\n out.write(\"# Is it solved \\n\")\n status_to_string = {0: 'time limit exceeded',\n 1: 'solved',\n 2: 'interrupted'}\n out.write(status_to_string[self.exit_status] + \"\\n\")\n out.write(\"# Total time \\n\")\n out.write(str(self.solution_time))\n\n def write_genome_file(self, out_genome_file):\n with open(out_genome_file, 'w') as out:\n out.write(\">answer\\n\")\n for chr_ind, chromosome in self.genome.items():\n for chr_type, blocks in chromosome:\n string = \" \".join(value if sign == \"+\" else (sign + value) for sign, value in blocks)\n string += \" {chr_type}\".format(chr_type=chr_type)\n out.write(string + \"\\n\")\n\n\ndef get_genome_graph_from_vars(rs, r_dot, gene_set, edge_set, telomer_set, ind2vertex):\n result_graph = 
bg.breakpoint_graph.BreakpointGraph()\n\n # graph = nx.Graph()\n #\n # for gene in gene_set:\n # graph.add_node(get_extremity_by_gene(gene, True, 1)[:-2])\n # graph.add_node(get_extremity_by_gene(gene, False, 1)[:-2])\n\n name2vertices = {}\n for gene in gene_set:\n left, right = bg.vertices.TaggedBlockVertex(get_extremity_by_gene(gene, True, 1)[:-2]), \\\n bg.vertices.TaggedBlockVertex(get_extremity_by_gene(gene, False, 1)[:-2])\n left.mate_vertex = right\n right.mate_vertex = left\n name2vertices[left.name] = left\n name2vertices[right.name] = right\n\n # text_multiset = defaultdict(list)\n # text_set = set()\n # for u, v in edge_set:\n # i, j = tuple(sorted([u, v]))\n # if rs[i, j].x == 1:\n # if ind2vertex[j][:-2] == ind2vertex[i][:-2]:\n # print(\"ALLERT ALLERT\")\n # text_multiset[ind2vertex[i][:-2]].append(ind2vertex[j][:-2])\n # text_multiset[ind2vertex[j][:-2]].append(ind2vertex[i][:-2])\n # text_set.add(ind2vertex[i][:-2])\n # text_set.add(ind2vertex[j][:-2])\n # graph.add_edge(ind2vertex[i][:-2], ind2vertex[j][:-2])\n\n # for a, b in text_multiset.items():\n # if len(b) != 1:\n # print(a)\n # print(b)\n\n # text_set2 = set()\n # for u, cond in telomer_set.items():\n # if cond and r_dot[u].x == 1:\n # text_set2.add(ind2vertex[u][:-2])\n # if ind2vertex[u][:-2] in text_set:\n # print(\"ALERT! ALERT! ALERT!\")\n # graph.add_node(ind2vertex[u][:-2])\n #\n # print(\"----MISSING-----\")\n # for v in graph:\n # if len(list(graph.neighbors(v))) == 0:\n # if v not in text_set2:\n # print(\"Alert!\")\n # print(v)\n\n for u, v in edge_set:\n i, j = tuple(sorted([u, v]))\n if rs[i, j].x == 1:\n result_graph.add_edge(name2vertices[ind2vertex[i][:-2]],\n name2vertices[ind2vertex[j][:-2]],\n bg.multicolor.Multicolor(1), merge=False)\n\n for u, cond in telomer_set.items():\n if cond and r_dot[u].x == 1:\n result_graph.add_edge(name2vertices[ind2vertex[u][:-2]],\n bg.vertices.TaggedInfinityVertex(ind2vertex[u][:-2]),\n bg.multicolor.Multicolor(1), merge=False)\n\n return result_graph.get_blocks_order()\n\n\ndef complete_genes_multiset(gene_set, copies):\n complete_multiset = dict()\n for gene in gene_set:\n complete_multiset[gene] = copies\n return complete_multiset\n\n\ndef vertex_set_from_gene_multiset(gene_multiset):\n vertex_set = set()\n for gene, copies in gene_multiset.items():\n for left in [True, False]:\n for i in range(1, copies + 1):\n vertex_set.add(get_extremity_by_gene(gene, left, i))\n return vertex_set\n\n\ndef observed_edges_from_gene_multiset(gene_multiset):\n obverse_edges = set()\n for gene, copies in gene_multiset.items():\n for i in range(1, copies + 1):\n obverse_edges.add((get_extremity_by_gene(gene, True, i), get_extremity_by_gene(gene, False, i)))\n return obverse_edges\n\n\ndef general_allowable_set(vertex_set):\n return {tuple(sorted([u, v])) for u, v in itertools.combinations(vertex_set, 2)}\n\n\ndef product_set(vertex_set1, vertex_set2):\n return set(tuple(sorted([u, v])) for u, v in itertools.product(vertex_set1, vertex_set2) if u != v)\n\n\ndef general_conditional_set(vertex_set):\n return {u: {tuple(sorted((u, v))) for v in vertex_set if u != v} for u in vertex_set}\n\n\ndef conserved_allowable_set(vertex_set, graph, telomers):\n allowable_edge_set = set()\n for v in vertex_set:\n for u1, u2 in graph.edges(v):\n if u1 in vertex_set and u2 in vertex_set:\n allowable_edge_set.add(tuple(sorted([u1, u2])))\n allowable_telomer_set = {v for v in vertex_set if v in telomers}\n return allowable_edge_set, allowable_telomer_set\n\n\ndef 
define_equiv_function(gene_multiset, vertex2ind, bar_vertex2ind):\n equiv = {}\n for gene, copies in gene_multiset.items():\n for left in [True, False]:\n temp = []\n for i in range(1, copies + 1):\n temp.append(bar_vertex2ind[get_extremity_by_gene(gene, left, i)])\n equiv[vertex2ind[get_extremity_by_gene(gene, left, 1)]] = temp\n return equiv\n\n\ndef remove_singletons_dupl_wrt_gene_set(genome, gene_set):\n new_genome = Genome(genome.get_name())\n for chromosome in genome:\n if chromosome.is_circular():\n if len(chromosome.get_gene_set().intersection(gene_set)):\n new_genome.append(chromosome)\n else:\n new_genome.append(chromosome)\n return new_genome\n\n\ndef remove_singletons_in_ord_wrt_two_dupl(ord_genome, dupl_genome):\n new_genome = Genome(ord_genome.get_name())\n dupl_gene_set = set(dupl_genome.get_gene_multiset().keys())\n\n matching, _ = dupl_genome.convert_to_genome_graph()\n matching = set(matching)\n\n def get_partitioned_gene_set(gene_multiset):\n s_1, s_2 = set(), set()\n for gene, count in gene_multiset.items():\n if count == 1:\n s_1.add(gene)\n else:\n s_2.add(gene)\n return s_1, s_2\n\n s_1_all_dupl, s_2_all_dupl = get_partitioned_gene_set(dupl_genome.get_gene_multiset())\n\n for chromosome in ord_genome:\n if chromosome.is_circular():\n s_1_chr, s_2_chr = get_partitioned_gene_set(chromosome.get_gene_multiset())\n\n if not len(s_2_chr) and s_1_chr.intersection(s_1_all_dupl) == s_1_chr:\n chromosome_matching = []\n chromosome.convert_to_genome_graph(edges=chromosome_matching)\n chromosome_matching, _ = chromosome.convert_to_matching()\n\n flag = False\n for u, v in chromosome_matching:\n if (u, v) not in matching and (v, u) not in matching:\n flag = True\n\n if flag:\n new_genome.append(chromosome)\n\n elif len(chromosome.get_gene_set().intersection(dupl_gene_set)):\n new_genome.append(chromosome)\n else:\n new_genome.append(chromosome)\n\n return new_genome\n\n#######################################################\n#\n#\n# BEGINNING_INDEX = 1\n#\n#\n# def create_complete_genes_multiset(gene_set, copies):\n# complete_multiset = {}\n# for gene in gene_set:\n# complete_multiset[gene] = copies\n# return complete_multiset\n#\n#\n# def enumerate_vertex_multiset(multiset):\n# cbg_vertex2ind, cbg_ind2vertex = {}, {}\n# max_size = BEGINNING_INDEX\n#\n# for gene, copies in multiset.items():\n# for left in [True, False]:\n# for i in range(1, copies + 1):\n# cbg_vertex2ind[get_extremity_by_gene(gene, left, i)] = max_size\n# cbg_ind2vertex[max_size] = get_extremity_by_gene(gene, left, i)\n# max_size += 1\n#\n# return cbg_vertex2ind, cbg_ind2vertex\n#\n#\n# def indexing_obverse_edges_in_n_dupl_genome(all_genes, cbg_vertex2ind):\n# obverse_edges = set()\n# for gene, copies in all_genes.items():\n# for i in range(1, copies + 1):\n# obverse_edges.add(tuple(sorted([cbg_vertex2ind[get_extremity_by_gene(gene, True, i)],\n# cbg_vertex2ind[get_extremity_by_gene(gene, False, i)]])))\n# return obverse_edges\n#\n#\n# def indexing_graph_edges(matching, cbg_vertex2ind):\n# indexing_edges = set()\n# for u, v in matching:\n# indexing_edges.add(tuple(sorted([cbg_vertex2ind[u], cbg_vertex2ind[v]])))\n# return indexing_edges\n#\n#\n# def indexing_vertex_set(vertex_set, cbg_vertex2ind):\n# indexing_v_set = set()\n# for u in vertex_set:\n# indexing_v_set.add(cbg_vertex2ind[u])\n# return indexing_v_set\n#\n#\n# def indexing_gene_multiset(gene_multiset, cbg_vertex2ind):\n# indexing_v_set = set()\n# for gene, copies in gene_multiset.items():\n# for i in range(1, copies + 1):\n# for left in [True, 
False]:\n# indexing_v_set.add(cbg_vertex2ind[get_extremity_by_gene(gene, left, i)])\n# return indexing_v_set\n#\n#\n# def get_biggest_constant(number_of_vertices):\n# return BEGINNING_INDEX + number_of_vertices\n#\n#\n# def define_equiv_function(gene_set, copies, cbg_vertex2ind, bar_vertex2ind):\n# equiv = {}\n# for gene in gene_set:\n# for left in [True, False]:\n# temp = []\n# for i in range(1, copies + 1):\n# temp.append(bar_vertex2ind[get_extremity_by_gene(gene, left, i)])\n# equiv[cbg_vertex2ind[get_extremity_by_gene(gene, left, 1)]] = temp\n# return equiv\n#\n# def write_stats_file(result_out_file, obj_val, score, status):\n# with open(result_out_file, 'w') as out:\n# out.write(\"# Number of cycles\\n\")\n# out.write(str(obj_val) + \"\\n\")\n# out.write(\"# Total DCJ-Indel Distance\\n\")\n# out.write(str(score) + \"\\n\")\n# out.write(\"# Is it solved \\n\")\n# out.write(str(status) + \"\\n\")\n# out.write(str(1) + \"\\n\")\n#\n#\n# def write_genome_file(out_genome_file, genome):\n# with open(out_genome_file, 'w') as out:\n# out.write(\">answer\\n\")\n# for chr_ind, chromosome in genome.items():\n# for chr_type, blocks in chromosome:\n# string = \" \".join(value if sign == \"+\" else (sign + value) for sign, value in blocks)\n# string += \" {chr_type}\".format(chr_type=chr_type)\n# out.write(string + \"\\n\")\n#\n#\n# def create_gene_sets_for_pair_genomes(ordinary_genome, all_dupl_genome):\n# S_B = ordinary_genome.get_gene_set()\n# S_1_A, S_2_A = all_dupl_genome.get_partitioned_gene_set()\n# S_A = S_1_A | S_2_A\n# S_R = (S_B & S_1_A) | S_2_A\n# return S_B, S_R, S_A, S_1_A, S_2_A\n#\n#\n# def remove_singletons_in_A_wrt_set_B(genome_A, gene_set_B):\n# new_genome = Genome(genome_A.get_name())\n# for chromosome in genome_A:\n# if chromosome.is_circular():\n# if len(chromosome.get_gene_set().intersection(gene_set_B)):\n# new_genome.append(chromosome)\n# else:\n# new_genome.append(chromosome)\n# return new_genome\n#\n#\n# def remove_singletons_in_ord_A_wrt_2_dupl_B(ord_genome, all_dupl_genome):\n# new_genome = Genome(ord_genome.get_name())\n# all_dupl_gene_set = all_dupl_genome.get_gene_set()\n# matching, _ = all_dupl_genome.convert_to_matching()\n# matching = set(matching)\n# s_1_all_dupl, s_2_all_dupl = all_dupl_genome.get_partitioned_gene_set()\n#\n# for chromosome in ord_genome:\n# if chromosome.is_circular():\n# s_1_chr, s_2_chr = chromosome.get_partitioned_gene_set()\n#\n# if not len(s_2_chr) and s_1_chr.intersection(s_1_all_dupl) == s_1_chr:\n# chromosome_matching, _ = chromosome.convert_to_matching()\n# flag = False\n# for u, v in chromosome_matching:\n# if (u, v) not in matching and (v, u) not in matching:\n# flag = True\n#\n# if flag:\n# new_genome.append(chromosome)\n# elif len(chromosome.get_gene_set().intersection(all_dupl_gene_set)):\n# new_genome.append(chromosome)\n# else:\n# new_genome.append(chromosome)\n#\n# return new_genome\n","sub_path":"impl_gurobi/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":13513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533754682","text":"import random\n\n\"\"\"##########\"\"\"\n\"\"\"SYGNAL JAM\"\"\"\n\"\"\"##########\"\"\"\n\nclass JAM_Signal():\n def __init__(self, miejsce_stluczki,n ):\n self.miejsce_stluczki=miejsce_stluczki\n self.droga=[]\n for i in range(n+1): # tworze pasmo n+1-elementowe\n self.droga.append(0)\n self.droga[miejsce_stluczki+1]=\"JAM\"\n self.droga[miejsce_stluczki-1] = \"JAM\"\n self.pozycja_prawa=miejsce_stluczki+1\n 
self.pozycja_lewa=miejsce_stluczki-1\n self.koniec_internetu = False\n print('\\033[91m',\"Powstal JAM | miejsce stluczki: \",miejsce_stluczki,'\\033[0m')\n\n def move(self):\n for i in range(1,len(self.droga)-self.miejsce_stluczki): #w prawo\n self.droga[len(self.droga)-i]=self.droga[len(self.droga)-i-1]\n self.droga[len(self.droga) - i - 1]=0\n self.pozycja_prawa+=1\n for i in range(self.miejsce_stluczki): #w lewo\n self.droga[i]=self.droga[i+1]\n self.droga[i+1]=0\n self.pozycja_lewa-=1\n if(self.pozycja_lewa<0 and self.pozycja_prawa>len(self.droga)):\n print('\\033[90m',\"Jeden z sygnalow JAM dotarl do konca kabla, nie bedzie dluzej wyswietlany\",'\\033[0m')\n self.koniec_internetu=True\n\n def print(self):\n print(\"JAM: \",self.droga)\n\n\"\"\"#############\"\"\"\n\"\"\"SYGNAL ZWYKLY\"\"\"\n\"\"\"#############\"\"\"\n\nclass Signal():\n def __init__(self, wiadomosc,n ):\n self.droga=[]\n for i in range(n+1): # tworze pasmo n+1-elementowe\n self.droga.append(0)\n self.nie_wyslane = wiadomosc.tresc #to co mi jeszcze zostalo do wyslania (na poczatku calosc)\n self.wiadomosc=wiadomosc #cala wiadomosc\n if (wiadomosc.odbiorca > wiadomosc.nadawca): #w ktora strone idzie sygnal\n self.prawo = True\n else:\n self.prawo = False\n self.wyslana_cala=False #Czy wiadomosc cala wyslana\n self.koniec_internetu=False #Zebym w razie kolizji nie wysylal wiadomosci w nieskonczonosc, jak dojdzie do konca kabla to koniec. Dotarla do do nadawcy na 100%\n self.miejsce=wiadomosc.nadawca #gdzie jest moja wiadomosc\n self.key=1\n\n def move(self,t):\n if(self.prawo):\n for i in range(1,len(self.droga)-self.wiadomosc.nadawca):\n self.droga[len(self.droga)-i]=self.droga[len(self.droga)-i-1]\n self.droga[len(self.droga)-i-1]=0\n self.miejsce+=1\n if((self.miejsce-self.wiadomosc.nadawca)==len(self.wiadomosc.tresc)*self.key):\n self.wyslana_cala=True\n print('\\033[94m',\"wiadomosc uzytkownika:\",self.wiadomosc.nadawca,\"zostala cala wyslana\",'\\033[0m')\n if (self.miejsce - len(self.wiadomosc.tresc)*self.key == len(self.droga)):\n print('\\033[90m',\"Wiadomosc uzytkownika:\", self.wiadomosc.nadawca, \"dotarala do konca kabla, nie bedzie dluzej wyswietlana\",'\\033[0m')\n self.koniec_internetu = True\n else:\n for i in range(self.wiadomosc.nadawca+1):\n self.droga[i]=self.droga[i+1]\n self.droga[i+1]=0\n self.miejsce-=1\n if((self.wiadomosc.nadawca-len(self.wiadomosc.tresc)*self.key)==self.miejsce):\n self.wyslana_cala=True\n print('\\033[94m',\"wiadomosc uzytkownika:\",self.wiadomosc.nadawca,\"zostala cala wyslana\",'\\033[0m')\n if(self.miejsce+len(self.wiadomosc.tresc)*self.key==0):\n print('\\033[90m',\"Wiadomosc uzytkownika:\",self.wiadomosc.nadawca,\"dotarala do konca kabla, nie bedzie dluzej wyswietlana\",'\\033[0m')\n self.koniec_internetu=True\n\n if(len(self.nie_wyslane)!=0 and (t % self.key)==0):\n self.droga[self.wiadomosc.nadawca]=self.nie_wyslane[0]\n self.nie_wyslane=self.nie_wyslane[1:]\n return 0 #uzytkownik w tym ruchu nie moze juz nadawac zadnego innego sygnalu\n return 1 #w tym ruchu nic nie bylo wsadzane do kanalu wiec uzytkownik moze nadac jeszcze jakis inny sygna;\n\n def reset(self,id):\n self.nie_wyslane = self.wiadomosc.tresc #to co mi jeszcze zostalo do wyslania (na poczatku calosc)\n self.miejsce=self.wiadomosc.nadawca #gdzie jest moja wiadomosc\n #wylosuj liczbe i za kazdym wysylaniem sprawdzaj czy jest podzielne i gotowe\n self.key=random.randint(len(self.droga), len(self.droga)*int(len(self.droga)/5))\n print('\\033[93m',\"Uzytkownik\",id,\"wie ze byla stluczka | 
wylosowal:\",self.key, '\\033[0m')\n\n def print(self,id):\n print(\"syganl\",id,\": \",self.droga)\n #print(\"nie wyslane: \",self.nie_wyslane,\"miejsce: \",self.miejsce)","sub_path":"Technologie Sieciowe | Python/Lista3/zad2/Elementy_kabla/Signals.py","file_name":"Signals.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"124201557","text":"import json\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponse\n\nfrom data.models import StateResource\nfrom .decorators import login_check\n\n\n@csrf_exempt\n@login_check\ndef put_data(request):\n if not request.method == 'POST' or not request.body:\n return JsonResponse({'error': 'method not allowed'}, status=403)\n try:\n post = json.loads(request.body.decode('utf-8'))\n except ValueError:\n return JsonResponse({'error': 'empty data'},\n status=403)\n\n if not post.get('server_fqdn'):\n return JsonResponse({'error': 'empty data'}, status=403)\n\n fqdn = post.get('server_fqdn')\n res = StateResource()\n\n if not res.is_valid_fqdn(fqdn):\n return JsonResponse({'error': 'error fqdn'}, status=403)\n if not res.is_valid_time(fqdn):\n return JsonResponse({'error': 'error time post'}, status=403)\n\n res.put_fqdn(post.get('server_fqdn'))\n return JsonResponse({'message': 'OK'}, status=200)\n\n\n@login_check\ndef get_all_uptime(request):\n if not request.method == 'GET':\n return JsonResponse({'error': 'method not allowed or empty data'},\n status=403)\n\n res = StateResource()\n return HttpResponse(json.dumps(res.get_uptime_servers()),\n content_type='application/json',\n status=200)","sub_path":"test_job/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"236279562","text":"# Problem 6\n#\n# The sum of the squares of the first ten natural numbers is,\n#\n# 1^2 + 2^2 + ... 
+ 10^2 = 385\n#\n# The square of the sum of the first ten natural numbers is,\n#\n# Hence the difference between the sum of the squares of the first ten natural\n# numbers and the square of the sum is 3025 - 385 = 2640.\n#\n# Find the difference between the sum of the squares of the first one hundred\n# natural numbers and the square of the sum.\n\ndef solution1():\n nums = range(1, 101)\n sum_of_squares = sum(a**2 for a in nums)\n square_of_sum = sum(nums)**2\n return abs(sum_of_squares - square_of_sum)\n\nprint(solution1())\n \n","sub_path":"Python/p006.py","file_name":"p006.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"346149007","text":"# if\r\nscore = int(input(\"得点: \"))\r\n#printでなくinputをつけると、ユーザーの入力した値を返してくれる\r\n\"\"\"\r\n# Pythonの複数行コメントアウト状態\r\nif score >= 80:\r\n print(\"Good!\")\r\nelif score > 60:\r\n print(\"Ok\")\r\nelse:\r\n print(\"Oops...\")\r\n\"\"\"\r\n# 以下は上の条件分岐とだいたい同じことを行っている\r\nprint(\"Good!\" if score >= 80 else \"It's Ok...\")\r\nprint(\"\\n\")\r\n# while\r\n\r\ni = 0\r\nwhile i < 10:\r\n print(i)\r\n i += 1\r\nelse:\r\n print(\"complete!\\n\")\r\n\r\n# for\r\n# 「for 変数 in データの集合:」という記述をする\r\n# for h in range(0, 10):\r\nfor h in range(10): # 整数10までの範囲、ということ\r\n if h == 5:\r\n break #breakは、その数字の時に処理を打ち切るもの\r\n print(h)\r\nelse:\r\n print(\"complete\\n\")\r\n\r\nfor g in range(10):\r\n if g == 3:\r\n continue #continueは、その数字の時だけ処理を飛ばすもの\r\n print(g)\r\nelse:\r\n print(\"complete!\\n\")","sub_path":"dotInstall02.py","file_name":"dotInstall02.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"565276510","text":"# -*- coding: utf-8 -*-\n\nfrom orgmode import ORGMODE, repeat\nfrom orgmode.menu import Submenu, ActionEntry\nfrom orgmode.keybinding import Keybinding, Plug, Command\nfrom orgmode.heading import Heading\nfrom orgmode import settings\n\nimport vim\n\nclass HeadingTags(Heading):\n\t\"\"\" Heading with Tags functionality \"\"\"\n\n\tdef __init__(self, *args, **kwargs):\n\t\tHeading.__init__(self, *args, **kwargs)\n\t\tself._tags = None\n\n\tdef tags():\n\t\t\"\"\" Tags \"\"\"\n\t\tdef fget(self):\n\t\t\tif self._tags == None:\n\t\t\t\ttext = self.text.split()\n\t\t\t\tif not text or len(text[-1]) <= 2 or text[-1][0] != ':' or text[-1][-1] != ':':\n\t\t\t\t\tself._tags = []\n\t\t\t\telse:\n\t\t\t\t\tself._tags = [ x for x in text[-1].split(':') if x ]\n\t\t\treturn self._tags\n\n\t\tdef fset(self, value):\n\t\t\t\"\"\"\n\t\t\t:value:\tlist of tags, the empty list deletes all tags\n\t\t\t\"\"\"\n\t\t\t# find beginning of tags\n\t\t\ttext = self.text.decode('utf-8')\n\t\t\tidx = text.rfind(' ')\n\t\t\tidx2 = text.rfind('\\t')\n\t\t\tidx = idx if idx > idx2 else idx2\n\n\t\t\tif not value:\n\t\t\t\tif self.tags:\n\t\t\t\t\t# remove tags\n\t\t\t\t\tvim.current.buffer[self.start] = '%s %s' % ('*'*self.level, text[:idx].strip().encode('utf-8'))\n\t\t\telse:\n\t\t\t\tif self.tags:\n\t\t\t\t\ttext = text[:idx]\n\t\t\t\ttext = text.strip()\n\n\t\t\t\ttabs = 0\n\t\t\t\tspaces = 2\n\t\t\t\ttags = ':%s:' % (':'.join(value))\n\n\t\t\t\ttag_column = int(settings.get('org_tags_column', '77'))\n\n\t\t\t\tlen_heading = self.level + 1 + len(text)\n\t\t\t\tif len_heading + spaces + len(tags) < tag_column:\n\t\t\t\t\tts = int(vim.eval('&ts'))\n\t\t\t\t\ttmp_spaces = ts - divmod(len_heading, ts)[1]\n\n\t\t\t\t\tif len_heading + tmp_spaces + len(tags) < tag_column:\n\t\t\t\t\t\ttabs, 
spaces = divmod(tag_column - (len_heading + tmp_spaces + len(tags)), ts)\n\n\t\t\t\t\t\tif tmp_spaces:\n\t\t\t\t\t\t\ttabs += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tspaces = tag_column - (len_heading + len(tags))\n\n\t\t\t\t# add tags\n\t\t\t\tvim.current.buffer[self.start] = '%s %s%s%s%s' % ('*'*self.level, text.encode('utf-8'), '\\t'*tabs, ' '*spaces, tags)\n\n\t\t\tself._tags = value\n\t\treturn locals()\n\ttags = property(**tags())\n\n\t@classmethod\n\tdef complete_tags(cls):\n\t\t\"\"\" build a list of tags and store it in variable b:org_tag_completion\n\t\t\"\"\"\n\t\theading = cls.current_heading()\n\t\tif not heading:\n\t\t\treturn\n\n\t\tleading_portion = vim.eval('a:ArgLead')\n\t\tcursor = int(vim.eval('a:CursorPos'))\n\n\t\t# extract currently completed tag\n\t\tidx_orig = leading_portion.rfind(':', 0, cursor)\n\t\tif idx_orig == -1:\n\t\t\tidx = 0\n\t\telse:\n\t\t\tidx = idx_orig\n\n\t\tcurrent_tag = leading_portion[idx: cursor].lstrip(':')\n\t\thead = leading_portion[:idx + 1]\n\t\tif idx_orig == -1:\n\t\t\thead = ''\n\t\ttail = leading_portion[cursor:]\n\n\t\t# extract all tags of the current file\n\t\tall_tags = set()\n\t\tfor h in cls.all_headings():\n\t\t\tfor t in h.tags:\n\t\t\t\tall_tags.add(t)\n\n\t\tignorecase = bool(int(settings.get('org_tags_completion_ignorecase', '0')))\n\t\tpossible_tags = []\n\t\tfor t in all_tags:\n\t\t\tif ignorecase:\n\t\t\t\tif t.lower().startswith(current_tag.lower()):\n\t\t\t\t\tpossible_tags.append(t)\n\t\t\telif t.startswith(current_tag):\n\t\t\t\tpossible_tags.append(t)\n\n\t\tvim.command('let b:org_complete_tags = [%s]' % ', '.join(['\"%s%s:%s\"' % (head, i, tail) for i in possible_tags]))\n\nclass TagsProperties(object):\n\t\"\"\" TagsProperties plugin \"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Initialize plugin \"\"\"\n\t\tobject.__init__(self)\n\t\t# menu entries this plugin should create\n\t\tself.menu = ORGMODE.orgmenu + Submenu('&TAGS and Properties')\n\n\t\t# key bindings for this plugin\n\t\t# key bindings are also registered through the menu so only additional\n\t\t# bindings should be put in this variable\n\t\tself.keybindings = []\n\n\t\t# commands for this plugin\n\t\tself.commands = []\n\n\t@classmethod\n\t@repeat\n\tdef set_tags(cls):\n\t\t\"\"\" Set tags for current heading\n\t\t\"\"\"\n\t\theading = HeadingTags.current_heading()\n\t\tif not heading:\n\t\t\treturn\n\n\t\t# retrieve tags\n\t\tres = None\n\t\tif heading.tags:\n\t\t\tres = vim.eval('input(\"Tags: \", \":%s:\", \"customlist,Org_complete_tags\")' % ':'.join(heading.tags))\n\t\telse:\n\t\t\tres = vim.eval('input(\"Tags: \", \"\", \"customlist,Org_complete_tags\")')\n\n\t\tif res == None:\n\t\t\t# user pressed abort any further processing\n\t\t\treturn\n\n\t\t# remove empty tags\n\t\theading.tags = filter(lambda x: x.strip() != '', res.strip().strip(':').split(':'))\n\n\t\treturn 'OrgSetTags'\n\n\t@classmethod\n\tdef update_tags(cls):\n\t\t\"\"\"\n\t\tUpdates tags when user finishes editing a heading\n\t\t\"\"\"\n\t\theading = HeadingTags.current_heading()\n\t\tif not heading:\n\t\t\treturn\n\n\t\tif vim.current.window.cursor[0] == heading.start_vim:\n\t\t\theading.tags = heading.tags\n\n\t@classmethod\n\tdef realign_tags(cls):\n\t\t\"\"\"\n\t\tUpdates tags when user finishes editing a heading\n\t\t\"\"\"\n\t\tfor h in HeadingTags.all_headings():\n\t\t\tif h.tags:\n\t\t\t\th.tags = h.tags\n\n\tdef register(self):\n\t\t\"\"\"\n\t\tRegistration of plugin. 
Key bindings and other initialization should be done.\n\t\t\"\"\"\n\t\t# an Action menu entry which binds \"keybinding\" to action \":action\"\n\t\tsettings.set('org_tags_column', '77')\n\n\t\tsettings.set('org_tags_completion_ignorecase', '0')\n\n\t\tsettings.set('org_leader', ',')\n\t\tleader = settings.get('org_leader', ',')\n\n\t\tself.keybindings.append(Keybinding('%st' % leader, Plug('OrgSetTags', ':py ORGMODE.plugins[\"TagsProperties\"].set_tags()')))\n\t\tself.menu + ActionEntry('Set &Tags', self.keybindings[-1])\n\n\t\tself.commands.append(Command('OrgTagsRealign', \":py ORGMODE.plugins['TagsProperties'].realign_tags()\"))\n\n\t\t# workaround to align tags when user is leaving insert mode\n\t\tvim.command(\"\"\"function Org_complete_tags(ArgLead, CmdLine, CursorPos)\npython << EOF\nfrom orgmode.plugins.TagsProperties import HeadingTags\nHeadingTags.complete_tags()\nEOF\nif exists('b:org_complete_tags')\n\tlet tmp = b:org_complete_tags\n\tunlet b:org_complete_tags\n\treturn tmp\nelse\n\treturn []\nendif\nendfunction\"\"\")\n\n\t\t# this is for all org files opened after this file\n\t\tvim.command(\"au FileType org :au InsertLeave :silent! py ORGMODE.plugins['TagsProperties'].update_tags()\")\n\n\t\t# this is for the current file\n\t\tvim.command(\"au InsertLeave :silent! py ORGMODE.plugins['TagsProperties'].update_tags()\")\n","sub_path":".vim/ftplugin/orgmode/plugins/TagsProperties.py","file_name":"TagsProperties.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499460487","text":"'''\n使用 canvas 顯示圖片 gif\n'''\nimport tkinter as tk\n\nwindow = tk.Tk()\n\n# 設定主視窗大小\nW = 800\nH = 800\nx_st = 100\ny_st = 100\n#size = str(W) + 'x' + str(H)\n#size = str(W) + 'x' + str(H) + '+' + str(x_st) + '+' + str(y_st)\n#window.geometry(size)\nwindow.geometry(\"{0:d}x{1:d}+{2:d}+{3:d}\".format(W, H, x_st, y_st))\n#print(\"{0:d}x{1:d}+{2:d}+{3:d}\".format(W, H, x_st, y_st))\n\n# 設定主視窗標題\ntitle = 'Display Image'\nwindow.title(title)\n\nfilename = 'C:/_git/vcs/_1.data/______test_files1/__pic/_gif/dragon-boat-festival.gif'\n\nphoto = tk.PhotoImage(file = filename)\ntk.Label(window, text = \"Blue\", image = photo, bg = \"gray\").pack(fill = tk.BOTH, expand = 1)\n\n\nseparator = tk.Frame(height = 2, bd = 1, relief = tk.SUNKEN).pack(fill = tk.X, padx = 5, pady = 5) #分隔線\n\nseparator = tk.Frame(height = 2, bd = 1, relief = tk.SUNKEN).pack(fill = tk.X, padx = 5, pady = 5) #分隔線\nseparator = tk.Frame(height = 2, bd = 1, relief = tk.SUNKEN).pack(fill = tk.X, padx = 5, pady = 5) #分隔線\n\n\nfilename = 'C:/_git/vcs/_1.data/______test_files1/__pic/_gif/dragon-boat-festival.gif'\ncaImage = tk.PhotoImage(file = filename)\n\nx = 90\ny = 50\nwidth = 400\nheight = 200\n\ncanvas = tk.Canvas(window, width = width, height = height)\ncanvas.pack()\ncanvas.create_image(x, y, image = caImage)\n\nwindow.mainloop()\n","sub_path":"_4.python/tkinter/tk_show_image2_canvas.py","file_name":"tk_show_image2_canvas.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"572217748","text":"import time\nimport random\nimport datetime\nimport telepot\nfrom khayyam import JalaliDatetime\n\n\ndef handle(msg):\n chat_id = msg['chat']['id']\n command = msg['text']\n print ('Got command: %s' % command)\n \n if command == 'roll':\n bot.sendMessage(chat_id, random.randint(1,6))\n elif command == 'time':\n bot.sendMessage(chat_id, 
str(datetime.datetime.now()))\n\n\n mss = command\n \n mss=mss.replace('/', ',')\n mss=mss.replace('.', ',')\n mss=mss.replace('-', ',')\n mss=mss.split(',')\n now =JalaliDatetime.now()\n old=JalaliDatetime(int(mss[0]),int(mss[1]),int(mss[2])).todate() \n dif = [100,200,222,300,333,400,444,500,555,600,666,700,777,800,888,900,999,1000,1111,2000,2222,3000,3333,4000,4444,5000,5555,6000,6666,7000,7777,8000,8228,8888,9999,10000,11111]\n i=0\n ss=''\n for x in dif:\n dif1=datetime.timedelta(x)\n date=old+dif1\n jdate=JalaliDatetime(date)\n if (jdate>now and i<5):\n ss= ss + '\\n' + str(jdate.strftime('%A %D %B %N')) +'میشی '+str(dif1.days) +\"روزه \"\n i=i+1\n\n print(ss) \n bot.sendMessage(chat_id, text= ss )\n\n\n\n\nbot = telepot.Bot('375977039:AAEGag8W43sQmo61KmBnvtVXFOsVAP7PIwk')\nbot.message_loop(handle)\nprint ('I am listening ...')\n\nwhile 1:\n time.sleep(10)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"425544002","text":"import os\n\n# Crawl to a website and create a file for it\ndef create_project_dir(directory):\n dir = './Websites/' + directory\n if not os.path.exists(dir):\n print('Creating a new project for ' + dir)\n os.makedirs(dir)\n\n\n# Create queue and crawled file for each website inside the Websites dir\ndef create_data_file(project_name, base_url):\n dir = './Websites/'\n queue = dir + project_name + '/queue.txt'\n crawled = dir + project_name + '/crawled.txt'\n if not os.path.isfile(queue):\n print('Creating queue file ', queue)\n write_data(queue, base_url)\n if not os.path.isfile(crawled):\n print('Creating crawled file ', crawled)\n write_data(crawled, '')\n\n# Writes data into a queue and crawled file\ndef write_data(path, data):\n with open(path, 'w') as data_file:\n data_file.write(data)\n\n\n# Add links to the queue and crawled data_file\ndef append_to_file(path, data):\n with open(path, 'a') as file:\n file.write(data + '\\n')\n\n# Delete the contents of a file\ndef delete_data(path):\n with open(path, 'w'):\n pass\n\n# Converts the file to set\ndef file_to_set(file_name):\n results = set()\n with open(file_name, 'rt') as file:\n for line in file:\n results.add(line.strip('\\n'))\n return results\n\n# Convert the set to file\ndef set_to_file(links, file):\n delete_data(file)\n for link in sorted(links):\n append_to_file(file, link)\n","sub_path":"Crawlers/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"551960256","text":"#!/usr/bin/env python3\n\nimport collections\nimport itertools\nimport os\nimport subprocess\nimport sys\nimport zipfile\n\nSubItem = collections.namedtuple(\"SubItem\", \"tfrom tto text\")\n\n# how much time is ok between items divergence in subtitle adjustment\nMAX_SUB_SEPARATION = .5\n\n\ndef _time_sub2stamp(subinfo):\n \"\"\"Convert time from sub style to timestamp.\"\"\"\n if \",\" in subinfo:\n hms, msec = subinfo.split(\",\")\n elif \".\" in subinfo:\n hms, msec = subinfo.split(\".\")\n else:\n hms = subinfo\n msec = \"0\"\n\n parts = hms.split(\":\")\n if len(parts) == 1:\n s = int(parts[0])\n h, m = 0, 0\n elif len(parts) == 2:\n m, s = int(parts[0]), int(parts[1])\n h = 0\n elif len(parts) == 3:\n h, m, s = int(parts[0]), int(parts[1]), int(parts[2])\n elif len(parts) == 4:\n h, m, s = int(parts[0]), int(parts[1]), int(parts[2])\n msec = parts[3]\n else:\n raise 
ValueError(\"Time not understood: {!r}\".format(subinfo))\n tstamp = h * 3600 + m * 60 + s + int(msec.ljust(3, '0')) / 1000\n return tstamp\n\n\ndef _time_stamp2sub(tstamp):\n \"\"\"Convert time from timestamp to sub style.\"\"\"\n msec = int(round(1000 * (tstamp % 1)))\n x, s = divmod(int(tstamp), 60)\n h, m = divmod(x, 60)\n subinfo = \"{:02}:{:02}:{:02},{:03}\".format(h, m, s, msec)\n return subinfo\n\n\ndef _build_item(pack):\n \"\"\"Build an item from the lines.\"\"\"\n times = pack[1].split()\n assert times[1] == '-->'\n tfrom = _time_sub2stamp(times[0])\n tto = _time_sub2stamp(times[2])\n text = '\\n'.join(pack[2:])\n return SubItem(tfrom=tfrom, tto=tto, text=text)\n\n\ndef _load_srt(content):\n \"\"\"Parse the subtitle file in a SRT format.\"\"\"\n results = []\n pack = []\n errors = False\n prevempty = False\n for i, line in enumerate(content.splitlines(), 1):\n line = line.strip()\n if not line:\n prevempty = True\n continue\n\n if prevempty and line.isdigit() and pack:\n try:\n results.append(_build_item(pack))\n except Exception as err:\n errors = True\n print(\"ERROR parsing the subtitle:\", err)\n print(\n \"The problem is in this block (line={}): {!r}\".format(i, pack))\n pack = []\n prevempty = False\n pack.append(line)\n\n if pack:\n try:\n results.append(_build_item(pack))\n except Exception as err:\n errors = True\n print(\"ERROR parsing the subtitle:\", err)\n print(\"The problem is in this block:\", repr(pack))\n\n if errors:\n exit()\n else:\n print(\"File parsed ok\")\n return results\n\n\ndef _load_ssa(content):\n \"\"\"Parse the subtitle file in a SSA format.\"\"\"\n fields_names = None\n items = []\n for i, line in enumerate(content.splitlines(), 1):\n line = line.strip()\n\n if line.startswith('Format:'):\n # store the format to use in Dialogue lines\n fields_names = [x.strip().lower() for x in line[7:].split(',')]\n\n if line.startswith('Dialogue:'):\n if fields_names is None:\n raise ValueError(\"Found a Dialogue line before having Format\")\n parts = [x.strip()\n for x in line[9:].split(',', maxsplit=len(fields_names) - 1)]\n fields = dict(zip(fields_names, parts))\n\n tfrom = _time_sub2stamp(fields['start'])\n tto = _time_sub2stamp(fields['end'])\n text = fields['text'].replace('\\\\N', '\\n')\n si = SubItem(tfrom=tfrom, tto=tto, text=text)\n items.append(si)\n\n return items\n\n\ndef _load_subtitle(content):\n \"\"\"Load the subtitle in any of the supported formats.\"\"\"\n if content.startswith('[Script Info]'):\n loader = _load_ssa\n else:\n loader = _load_srt\n return loader(content)\n\n\ndef _save_srt(subitems, outfile):\n \"\"\"Save the items to a srt file.\"\"\"\n with open(outfile, 'wt', encoding='utf8') as fh:\n for i, item in enumerate(subitems, 1):\n tfrom = _time_stamp2sub(item.tfrom)\n tto = _time_stamp2sub(item.tto)\n tline = \"{} --> {}\".format(tfrom, tto)\n fh.write('\\n'.join((str(i), tline, item.text)) + '\\n\\n')\n\n\ndef rescale(inpfile, num1, time1, num2, time2):\n \"\"\"Rescaling main process.\"\"\"\n with open(inpfile, 'rt', encoding='utf8') as fh:\n subitems = _load_subtitle(fh.read())\n\n item1 = subitems[int(num1) - 1]\n item2 = subitems[int(num2) - 1]\n tstamp1 = _time_sub2stamp(time1)\n tstamp2 = _time_sub2stamp(time2)\n print(\"Found 1st item {:>4} with time {:>4}, should be {} ({!r})\".format(\n num1, _time_stamp2sub(item1.tfrom), _time_stamp2sub(tstamp1), item1.text))\n print(\"Found 2nd item {:>4} with time {:>4}, should be {} ({!r})\".format(\n num2, _time_stamp2sub(item2.tfrom), _time_stamp2sub(tstamp2), item2.text))\n\n s1 
= item1.tfrom\n s2 = item2.tfrom\n r1 = tstamp1\n r2 = tstamp2\n\n a = (r1 - r2) / (s1 - s2)\n b = (s1 * r2 - s2 * r1) / (s1 - s2)\n\n newitems = []\n for item in subitems:\n newfrom = item.tfrom * a + b\n newto = item.tfrom * a + b\n newitems.append(SubItem(newfrom, newto, item.text))\n\n outfile = inpfile[:-4] + \"-fixed\" + inpfile[-4:]\n _save_srt(newitems, outfile)\n print(\"Done\")\n\n\ndef _fix_times(subitems):\n \"\"\"Really fix subitems times.\"\"\"\n newitems = []\n for i, item in enumerate(subitems, 1):\n # check if something needs to be fixed\n if item.tfrom == item.tto:\n print(\" fixing sub {} (same times)\".format(i))\n elif i < len(subitems) and item.tto > subitems[i].tfrom:\n print(\n \" fixing cross timings between {} and {}\".format(i, i + 1))\n else:\n newitems.append(item)\n continue\n\n # fix it! a priori, the length should be 70ms per char, with 2s min\n fixed_len = len(item.text) * .07\n if fixed_len < 2:\n fixed_len = 2\n\n # check that it doesn't overlap to the next one\n if i + 1 < len(subitems):\n next_from = subitems[i].tfrom\n if item.tfrom + fixed_len > next_from:\n fixed_len = next_from - item.tfrom\n\n new_to = item.tfrom + fixed_len\n newitems.append(SubItem(item.tfrom, new_to, item.text))\n return newitems\n\n\ndef fix_times(inpfile):\n \"\"\"Fix permanency in screen of each text.\"\"\"\n with open(inpfile, 'rt', encoding='utf8') as fh:\n subitems = _load_subtitle(fh.read())\n\n newitems = _fix_times(subitems)\n\n outfile = inpfile[:-4] + \"-fixed\" + inpfile[-4:]\n _save_srt(newitems, outfile)\n print(\"Done\")\n\n\ndef adjust(inpfile_srt, inpfile_idx):\n \"\"\"Fix permanency in screen of each text.\"\"\"\n with open(inpfile_srt, 'rt', encoding='utf8') as fh:\n srt_items = _load_subtitle(fh.read())\n\n with open(inpfile_idx, 'rt', encoding='ascii') as fh:\n idx_tstamps = []\n for line in fh:\n if line.startswith('timestamp'):\n time_string = line[11:23]\n tstamp = _time_sub2stamp(time_string)\n idx_tstamps.append(tstamp)\n\n def _find_matching_pair(srt_pos, idx_pos):\n \"\"\"Find a match between next five items.\"\"\"\n min_delta = 999999999999999999\n delta_items = None\n pairs = itertools.chain(zip(range(5), [0] * 5),\n zip([0] * 4, range(1, 5)))\n for si, ii in pairs:\n new_si = srt_pos + si\n new_ii = idx_pos + ii\n if new_si >= len(srt_items) or new_ii >= len(idx_tstamps):\n continue\n srt_t = srt_items[new_si].tfrom\n idx_t = idx_tstamps[new_ii]\n delta = abs(srt_t - idx_t)\n if delta < min_delta:\n min_delta = delta\n delta_items = new_si, new_ii\n return delta_items\n\n newitems = []\n srt_pos = idx_pos = 0\n while srt_pos < len(srt_items) and idx_pos < len(idx_tstamps):\n srt_item = srt_items[srt_pos]\n idx_tstamp = idx_tstamps[idx_pos]\n\n sub_len = srt_item.tto - srt_item.tfrom\n delta = abs(idx_tstamp - srt_item.tfrom)\n if delta > MAX_SUB_SEPARATION:\n # too much of a difference, let's find a better match\n new_srt_pos, new_idx_pos = _find_matching_pair(srt_pos, idx_pos)\n if new_srt_pos != srt_pos or new_idx_pos != idx_pos:\n for i in range(srt_pos, new_srt_pos):\n newitems.append(srt_items[i])\n srt_pos = new_srt_pos\n idx_pos = new_idx_pos\n continue\n else:\n print(\"WARNING: big delta: {:.3f} (srt={} idx={}) {!r}\".format(\n delta, _time_stamp2sub(srt_item.tfrom),\n _time_stamp2sub(idx_tstamp), srt_item.text))\n\n new_from = idx_tstamp\n new_to = idx_tstamp + sub_len\n\n # check that it doesn't overlap to the next one\n if srt_pos + 1 < len(srt_items):\n next_from = srt_items[srt_pos + 1].tfrom\n if new_to > next_from:\n new_to = 
next_from - 0.01\n\n newitems.append(SubItem(new_from, new_to, srt_item.text))\n srt_pos += 1\n idx_pos += 1\n\n # check outliers\n if idx_pos < len(idx_tstamps):\n print(\"WARNING: timestamps missing at the end!\",\n idx_pos, len(idx_tstamps))\n for i in range(srt_pos, len(srt_items)):\n print(\"WARNING: missing outlier sub:\", srt_items[i])\n\n outfile = inpfile_srt[:-4] + \"-fixed\" + inpfile_srt[-4:]\n _save_srt(newitems, outfile)\n print(\"Done\")\n\n\ndef check(inpfile):\n \"\"\"Check subtitles sanity.\"\"\"\n if inpfile[-4:] == '.zip':\n print(\"Opening the ZIP file: {!r}\".format(inpfile))\n zf = zipfile.ZipFile(inpfile)\n to_process = zf.namelist()\n for fname in to_process:\n zf.extract(fname)\n os.remove(inpfile)\n elif inpfile[-4:] == '.rar':\n print(\"Opening the RAR file: {!r}\".format(inpfile))\n cmd = [\"/usr/bin/unrar\", \"-y\", \"x\", inpfile]\n out = subprocess.check_output(cmd)\n lines = out.decode(\"utf8\").split(\"\\n\")\n inside = [l.split('\\x08') for l in lines\n if l.startswith(\"Extracting \")]\n if not all(x[-1].strip() == \"OK\" for x in inside):\n print(\" ERROR opening the .rar:\\n\", out)\n exit()\n os.remove(inpfile)\n to_process = [x[0].split(maxsplit=1)[1].strip() for x in inside]\n elif inpfile[-4:] not in (\".srt\", \".ssa\"):\n raise ValueError(\"Unknown file extension.\")\n else:\n to_process = [inpfile]\n\n for inpfile in to_process:\n print(\"\\nFound: {!r}\".format(inpfile))\n\n # encoding, fix it if needed\n print(\"Test encoding...\")\n try:\n with open(inpfile, 'rt', encoding='utf8') as fh:\n content = fh.read()\n except UnicodeDecodeError:\n print(\" not utf8! try latin1\")\n with open(inpfile, 'rt', encoding='latin1') as fh:\n content = fh.read()\n with open(inpfile, 'wt', encoding='utf8') as fh:\n fh.write(content)\n print(\" fixed\")\n else:\n print(\" was ok\")\n subitems = _load_subtitle(content)\n\n # times sanity\n print(\"Test times sanity...\")\n newitems = _fix_times(subitems)\n if newitems == subitems:\n print(\" was ok\")\n subitems = newitems\n\n # clean spam\n print(\"Checking for spam...\")\n for item in subitems[:]:\n if \"OpenSubtitles\" in item.text:\n print(\"Removing:\", repr(item.text))\n subitems.remove(item)\n\n _save_srt(subitems, inpfile)\n print(\"Done\")\n\n\ndef die(error=None):\n if error:\n print(\"ERROR:\", error)\n print(\"\"\"\nUsage: sub_rescaler.py cmd \n\n rescale subfile.srt id1 time1 id2 time2\n Rescale the subtitles using two points (id1 and id2) traslating\n them to the new times (time1 and time2)\n example: sub_rescaler.py Movie.srt 4 43,5 168 1:02:15\n\n fix-times subfile.srt\n Fix the times of each phrase in the subtitle, using arbitrarious rules\n\n check subfile [subfile2 [...]]\n Do several checks on the subfile; decompress and extract if needed\n\n adjust subfile.srt subfile.idx\n Adjust the .srt phrase times, using the timepoints from the .idx one\n\"\"\")\n exit()\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n die()\n cmd = sys.argv[1]\n params = sys.argv[2:]\n if cmd == 'rescale':\n if len(params) != 5:\n die(\"Need 5 parameters for rescale, got %d\" % len(params))\n rescale(*params)\n elif cmd == 'fix-times':\n if len(params) != 1:\n die()\n fix_times(*params)\n elif cmd == 'adjust':\n if len(params) != 2:\n die()\n adjust(*params)\n elif cmd == 'check':\n if len(params) < 1:\n die()\n for inpfile in params:\n check(inpfile)\n else:\n 
die()\n","sub_path":"subtitleschecker.py","file_name":"subtitleschecker.py","file_ext":"py","file_size_in_byte":13130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"148286889","text":"\nimport numpy as np\nfrom matplotlib import style\nimport math\nimport random\nfrom scipy import spatial\nfrom scipy.spatial import distance\nfrom metrics import Metrics\n\nstyle.use('ggplot')\n\n\nclass K_Means:\n def __init__(self, k):\n self.k_clusters = k\n\n # Calcule os centróides para os clusters, calculando a média de todos os pontos de dados que pertencem a cada cluster.\n def compute_centroids(self, lista_centroids):\n new_centroids = []\n\n for k, centroid in lista_centroids:\n\n total_distance_cossine = 0\n count_doc = 0\n distance_cossines = []\n for doc in centroid:\n if doc.distance_cosine == 0:\n continue\n count_doc += 1\n distance_cossines.append(doc.distance_cosine)\n total_distance_cossine += doc.distance_cosine\n print(count_doc)\n mean = (total_distance_cossine / count_doc)\n\n print(\"Centroid {0}\".format(len(centroid)))\n print(\"total_distance_cossine {0}\".format(total_distance_cossine))\n print(\"Mean {0}\".format(mean))\n print(\"Distances {0}\".format(distance_cossines))\n\n distance_mean = distance_cossines.index(\n min(distance_cossines, key=lambda x: abs(x - mean)))\n\n new_centroids.append(\n (len(distance_cossines), total_distance_cossine))\n\n return new_centroids\n\n # Retona o index documento com o valor mas proximo do centroid\n def closest_document(self, documents, centroids):\n\n metric = Metrics()\n list_centroid = []\n list_closest = []\n best_document = \"\"\n best_index = 0\n\n documents_k = documents[:]\n\n # posso clusterizar\n for centroid in range(len(centroids)):\n list_closest = []\n lista_best_document = []\n for k, document in documents_k:\n distance_cosine = metric.get_cosine_distance(\n centroids[centroid][1], document)\n\n list_closest.append(DocumentKmean(\n k, distance_cosine, document))\n\n list_centroid.append((centroids[centroid][0], list_closest))\n\n for index_lista in range(len(list_centroid)):\n if len(lista_best_document) == 0:\n lista_best_document = [\n doc.distance_cosine for doc in list_centroid[index_lista][1]]\n else:\n lista_best_document = [max(value) for value in np.array(list(zip([doc.distance_cosine for doc in list_centroid[index_lista][1]], [\n distance_cosine for distance_cosine in lista_best_document])))]\n\n for k in range(len(list_centroid)):\n array_remove = []\n for index in range(len(lista_best_document)):\n if lista_best_document[index] > list_centroid[k][1][index].distance_cosine:\n array_remove.append(index)\n for index in sorted(array_remove, reverse=True):\n del list_centroid[k][1][index]\n\n # remove o elementos que não apresentaram nenhum valor de coseno para naão enviesar a media\n for k in range(len(list_centroid)):\n for doc in list_centroid[k][1]:\n if doc.distance_cosine == 0:\n list_centroid[k][1].remove(doc)\n\n # posso clusterizar\n\n return list_centroid\n\n def execute(self, matriz_t_idf):\n self.clusters = {}\n document_vectors_list = []\n document_id = 0\n\n for line in range(len(matriz_t_idf)):\n document_vector = []\n for column in matriz_t_idf[line]:\n document_vector.append(column)\n\n document_vectors_list.append((document_id, document_vector))\n document_id += 1\n\n initial_centroids = random.sample(document_vectors_list, k=3)\n\n temp_dist = 1.0\n metrics = Metrics()\n cluster_stats = []\n\n init_centroid_eu = []\n for ik, d in 
initial_centroids:\n init_centroid_eu.append((ik, (1, 1)))\n\n while temp_dist > 0.01:\n # Calcule os centróides para os clusters, calculando a média de todos os pontos de dados que pertencem a cada cluster.\n cluster_stats = self.closest_document(\n document_vectors_list, initial_centroids)\n\n # Returns the centroid cluster from the sum and count of documents in the cluster\n new_clusters = self.compute_centroids(cluster_stats)\n results = []\n\n # some as distancias euclidianas dos novos clusters com os iniciais esse resultado tende a zero quando não ouver mas mudanças\n for index_cluster in range(len(new_clusters)):\n results.append(metrics.get_eculedian_distance(\n init_centroid_eu[index_cluster][1], new_clusters[index_cluster]))\n\n temp_dist = sum(results)\n\n for ik in range(len(new_clusters)):\n init_centroid_eu[ik] = (0, new_clusters[ik])\n\n print(\"Temp = {0}\".format(temp_dist))\n\n return cluster_stats\n\n\nclass DocumentKmean:\n def __init__(self, id, distance_cosine, document):\n self.distance_cosine = distance_cosine\n self.document = document\n self.id = id\n","sub_path":"kmeans_sequencial.py","file_name":"kmeans_sequencial.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"311847982","text":"# disasters\r\n# DisasterModel.py\r\n\r\n\"\"\"\r\nA model for coal mining disaster time series with a changepoint\r\n\r\nswitchpoint: s ~ U(0,111)\r\nearly_mean: e ~ Exp(1.)\r\nlate_mean: l ~ Exp(1.)\r\ndisasters: D[t] ~ Poisson(early if t <= s, l otherwise)\r\n\"\"\"\r\n\r\nimport pymc \r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n#load in data\r\nD = pd.DataFrame.from_csv('laa_2011_april.txt', sep='\t').sort('Player')\r\nX = D['AVG']\r\nnum_hits = D['H']\r\nN = D['AB']\r\n\r\n\r\nmus = dict() #priors\r\nxs = dict() #liklihood\r\n\r\n\r\n#mus = pymc.Beta('mus', alpha=10, beta = 40)\r\n#xs = pymc.Binomial('xs' , n=N, p=mus, value=num_hits, observed=True) \r\n\r\nfor i in np.arange(len(X)):\r\n # prior on mu_i\r\n mus['mu'+ str(i)] = pymc.Beta('mu%i' %i, alpha=43.5, beta = 127)\r\n\r\n # likelihood\r\n xs['x' + str(i)] = pymc.Binomial('x%i' %i , n=N[i], p=mus['mu'+str(i)], value=num_hits[i], observed=True) \r\n\r\n\r\n\r\n\r\n\r\n#@pymc.deterministic(plot=False)\r\n#def r(s=s, e=e, l=l):\r\n# \"\"\"Concatenate Poisson means\"\"\"\r\n# out = np.empty(len(disasters_array))\r\n# out[:s] = e\r\n# out[s:] = l\r\n# return out\r\n\r\n\r\n# likelihood\r\n#D = pymc.Poisson('D', mu=r, value=disasters_array, observed=True)\r\n#liklihood\r\n#hood\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"hw11/hw11_model.py","file_name":"hw11_model.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"24803947","text":"#!/usr/bin/python3\r\n#############################################################################\r\n# Author: Priyank Jain\r\n# Email: jain206@purdue.edu\r\n# Description: Compare cross-phase similarity between two different\r\n# participants in subsequent phases using MOSS\r\n# Pre-requisite: The same-phase-similarity script should have ran sucessfully\r\n# before this script is called\r\n#############################################################################\r\nimport os\r\nimport config\r\nimport subprocess\r\nimport shutil\r\nimport pprint\r\nfrom unidiff import PatchSet\r\nfrom time import strftime\r\nimport sys\r\nimport shutil\r\nimport traceback\r\nimport requests\r\nimport 
bs4\r\nimport collections\r\nimport csv\r\n\r\ndef initializeMetricDictionaries():\r\n\tcrossPhaseSimilarityDict = collections.OrderedDict()\r\n\tprevPhaseProjects = list()\r\n\tfor filename in os.listdir():\r\n\t\tif os.path.isdir(filename) and not filename.startswith(\".\"):\r\n\t\t\tif filename.endswith(\"-cp\"):\r\n\t\t\t\tprojectName = filename.split(\"-\")[-2] + '-cp'\r\n\t\t\t\tcrossPhaseSimilarityDict.setdefault(projectName,collections.OrderedDict())\r\n\t\t\telse:\r\n\t\t\t\tprojectName = filename.split(\"-\")[-1]\r\n\t\t\t\tprevPhaseProjects.append(projectName)\r\n\tfor innerKey in prevPhaseProjects:\r\n\t\tfor outerKey in crossPhaseSimilarityDict:\r\n\t\t\tcrossPhaseSimilarityDict[outerKey][innerKey] = 0\r\n\treturn crossPhaseSimilarityDict\r\n\r\ndef interpretMOSSResults(soup, similarityDict):\r\n\ttable = soup.select('table')[0]\r\n\tfor tr in table.select('tr'):\r\n\t\tcount = 0\r\n\t\tfirstProjectName = None\r\n\t\tsecondProjectName = None\r\n\t\tfirstProjectPercentage = None\r\n\t\tsecondProjectPercentage = None\r\n\t\tlinesMatched = None\r\n\t\tfor td in tr.select('td'):\r\n\t\t\ttxt = td.get_text().strip()\r\n\t\t\tif count==0:\t\t\t\r\n\t\t\t\tbits = txt.split(\" \")\t\r\n\t\t\t\tif bits[0].endswith(\"-cp/\"):\r\n\t\t\t\t\tfirstProjectName = bits[0].replace(\"/\",\"\").split(\"-\")[-2].strip() + '-cp'\r\n\t\t\t\telse:\r\n\t\t\t\t\tfirstProjectName = bits[0].replace(\"/\",\"\").split(\"-\")[-1].strip()\r\n\t\t\t\tfirstProjectPercentage = int(bits[1].replace(\"(\",\"\").replace(\"%\",\"\").replace(\")\",\"\").strip())\r\n\t\t\telif count==1:\r\n\t\t\t\tbits = txt.split(\" \")\r\n\t\t\t\tif bits[0].endswith(\"-cp/\"):\r\n\t\t\t\t\tsecondProjectName = bits[0].replace(\"/\",\"\").split(\"-\")[-2].strip() + '-cp'\r\n\t\t\t\telse:\r\n\t\t\t\t\tsecondProjectName = bits[0].replace(\"/\",\"\").split(\"-\")[-1].strip()\r\n\t\t\t\tsecondProjectPercentage = int(bits[1].replace(\"(\",\"\").replace(\"%\",\"\").replace(\")\",\"\").strip())\r\n\t\t\telif count==2:\r\n\t\t\t\tlinesMatched = int(txt)\r\n\t\t\t\tif (not firstProjectName.endswith(\"-cp\") and secondProjectName.endswith(\"-cp\")) \\\r\n\t\t\t\t\tor (not secondProjectName.endswith(\"-cp\") and firstProjectName.endswith(\"-cp\")):\r\n\t\t\t\t\tsimilarity_score = None\r\n\t\t\t\t\tif firstProjectPercentage == 0 or secondProjectPercentage == 0:\r\n\t\t\t\t\t\tsimilarity_score = 0\r\n\t\t\t\t\telse: \r\n\t\t\t\t\t\tsimilarity_score = 2 * firstProjectPercentage * secondProjectPercentage / 100 / (firstProjectPercentage + secondProjectPercentage)\r\n\t\t\t\t\tif firstProjectName.endswith(\"-cp\"):\r\n\t\t\t\t\t\tsimilarityDict[firstProjectName][secondProjectName] = similarity_score\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsimilarityDict[secondProjectName][firstProjectName] = similarity_score\r\n\t\t\tcount = (count + 1) % 3\r\n\r\ndef dumpSimilarityMetric(similarityDict, fileName):\r\n\tsimilarity_matrix_file = open(fileName, 'w')\r\n\tcsvwriter = csv.writer(similarity_matrix_file, delimiter=\",\", quotechar='\"')\r\n\tcount = 0\r\n\tfor firstName, internalDict in similarityDict.items():\r\n\t\tif count == 0:\r\n\t\t\tcsvwriter.writerow([\"\"] + list(internalDict.keys()))\r\n\t\t\tcount = 1\r\n\t\tcsvwriter.writerow([firstName] + [round(x, 5) for x in list(internalDict.values())])\t\t\r\n\tsimilarity_matrix_file.close()\r\n\r\nhomedir = os.getcwd()\r\nlogFile = open('cross-phase-similarity-{0}.log'.format(strftime('%Y-%m-%d-%H-%M-%S')), 'w')\r\nsys.stdout = logFile\r\nprint(\"Cmd arguments are \", sys.argv)\r\nif not(len(sys.argv)>1 and 
sys.argv[1]=='skipMoss'):\r\n\tif os.path.exists(\"cross-phase\"):\r\n\t\tshutil.rmtree(\"cross-phase\")\r\n\tos.mkdir(\"cross-phase\")\r\nfor phase in range(2, 4):\r\n\tos.chdir(os.path.join(homedir, \"cross-phase\"))\r\n\tcurPhase = phase\r\n\tprevPhase = phase - 1\r\n\tprevCrossPhaseDirectory = None\r\n\tprevCrossPhaseDirectory = \"phase-{1}-{0}\".format(prevPhase-1, curPhase-1)\r\n\tcurrentCrossPhaseDirectory = \"phase-{1}-{0}\".format(prevPhase, curPhase)\r\n\tif not os.path.exists(currentCrossPhaseDirectory):\r\n\t\tos.mkdir(currentCrossPhaseDirectory)\r\n\ttry:\r\n\t\tprint(\"Calculating cross-phase similarity for phases {0} and {1}\".format(prevPhase, curPhase))\r\n\t\tconfig.updatePhase(prevPhase)\r\n\t\tprevPhaseDirectory = os.path.join(homedir, config.PHASE_DIRECTORY)\r\n\t\tconfig.updatePhase(curPhase)\r\n\t\tcurPhaseDirectory = os.path.join(homedir, config.PHASE_DIRECTORY)\r\n\t\tif not (len(sys.argv)>1 and sys.argv[1]=='skipMoss'):\t\r\n\t\t\tfor curPhaseProject in os.listdir(curPhaseDirectory):\t\t\t\r\n\t\t\t\tif os.path.isdir(os.path.join(homedir, curPhaseDirectory, curPhaseProject)) and not curPhaseProject.startswith(r'.'):\r\n\t\t\t\t\tos.chdir(os.path.join(homedir, \"cross-phase\", currentCrossPhaseDirectory))\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tprint(\"Performing git diff for {0} across phase {1} and {2}\".format(curPhaseProject, prevPhase, curPhase))\r\n\t\t\t\t\t\tsubprocess.check_output(\"git diff --no-index -w --ignore-space-at-eol -b \\\"{0}\\\" \\\"{1}\\\" >> git_diff_{2}.txt\".format( \\\r\n\t\t\t\t\t\t\tos.path.join(prevPhaseDirectory, curPhaseProject), os.path.join(curPhaseDirectory, curPhaseProject), curPhaseProject), shell=True)\r\n\t\t\t\t\texcept subprocess.CalledProcessError:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t\t\tprint(\"Creating directory {0} in {1}\".format(curPhaseProject, os.getcwd()))\r\n\t\t\t\t\tif not os.path.exists(curPhaseProject):\r\n\t\t\t\t\t\tos.mkdir(curPhaseProject)\t\t\r\n\t\t\t\t\twith open(\"git_diff_{0}.txt\".format(curPhaseProject), 'r', encoding='utf8', errors='ignore') as diff_file:\r\n\t\t\t\t\t\tos.chdir(os.path.join(homedir, \"cross-phase\", currentCrossPhaseDirectory, curPhaseProject))\r\n\t\t\t\t\t\tprint(\"Inside project {0}\".format(curPhaseProject))\r\n\t\t\t\t\t\tpatch = PatchSet(diff_file)\r\n\t\t\t\t\t\tfor changedFile in patch:\r\n\t\t\t\t\t\t\tprint(\"Processing file {0}\".format(changedFile.path))\r\n\t\t\t\t\t\t\tif changedFile.is_removed_file:\r\n\t\t\t\t\t\t\t\tcontinue\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tfileBaseName = os.path.basename(changedFile.path)\r\n\t\t\t\t\t\t\tbits = changedFile.path.split(curPhaseProject)\r\n\t\t\t\t\t\t\tbaseDir = bits[-1].replace(fileBaseName, '')[1:]\r\n\t\t\t\t\t\t\tif baseDir and not os.path.exists(baseDir):\r\n\t\t\t\t\t\t\t\tos.makedirs(baseDir)\r\n\t\t\t\t\t\t\twriter = open(os.path.join(baseDir, fileBaseName), 'w')\r\n\t\t\t\t\t\t\tcount = 0;\r\n\t\t\t\t\t\t\tflag = 1;\r\n\t\t\t\t\t\t\tfor line in str(changedFile).splitlines():\r\n\t\t\t\t\t\t\t\tif(count < 3):\r\n\t\t\t\t\t\t\t\t\tcount = count + 1\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\t\tif(line.startswith('@@ ')):\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\t\tif line[0] == '+':\r\n\t\t\t\t\t\t\t\t\tflag = 1\r\n\t\t\t\t\t\t\t\t\tline = line[1:]\r\n\t\t\t\t\t\t\t\telif line[0] == '-':\r\n\t\t\t\t\t\t\t\t\tflag = 0\r\n\t\t\t\t\t\t\t\t\tline = line[1:]\r\n\t\t\t\t\t\t\t\tif flag==1:\r\n\t\t\t\t\t\t\t\t\twriter.write(line + '\\n')\r\n\t\t\t\t\t\t\twriter.close()\r\n\t\t\t\t\tprint(\"Completed processing {0} across phase {1} and 
{2}\".format(curPhaseProject, prevPhase, curPhase))\r\n\t\t\t# Now you have code for {newPhase - oldPhase}\r\n\t\t\t# Use get MOSS script data by comparing this difference to the prevPhase code\r\n\t\t\tos.chdir(os.path.join(homedir, \"cross-phase\", currentCrossPhaseDirectory))\r\n\t\t\t# Add -cp to names of all existing directories\r\n\t\t\tfor cpDirectory in os.listdir(os.getcwd()):\r\n\t\t\t\tif os.path.isdir(cpDirectory) and not cpDirectory.startswith('.'):\r\n\t\t\t\t\tos.rename(cpDirectory, cpDirectory+'-cp')\r\n\t\t\t# Get directories from previous phase to this directory\r\n\t\t\t# This is for MOSS calculations\r\n\t\t\t# For phase 1-2, get code from phase 1\r\n\t\t\t# For phase 2-3, 3-4 get code from previous cross-phase\r\n\t\t\tif curPhase==2:\r\n\t\t\t\tfor prevPhaseProject in os.listdir(os.path.join(homedir, prevPhaseDirectory)):\r\n\t\t\t\t\tif os.path.isdir(os.path.join(homedir, prevPhaseDirectory, prevPhaseProject)) \\\r\n\t\t\t\t\t\tand not prevPhaseProject.startswith('.'):\r\n\t\t\t\t\t\tsrc = os.path.join(homedir, prevPhaseDirectory, prevPhaseProject)\r\n\t\t\t\t\t\tdest = os.path.join(os.getcwd(), prevPhaseProject)\r\n\t\t\t\t\t\tprint(\"Copying directory {0} to {1}\".format(src, dest))\r\n\t\t\t\t\t\tshutil.copytree(src, dest, ignore = shutil.ignore_patterns('.*'))\r\n\t\t\telse:\r\n\t\t\t\tprevPhaseDirectoryComplete = os.path.join(homedir, \"cross-phase\", prevCrossPhaseDirectory)\r\n\t\t\t\tprint(prevPhaseDirectoryComplete)\r\n\t\t\t\tfor prevPhaseProject in os.listdir(prevPhaseDirectoryComplete):\r\n\t\t\t\t\tif os.path.isdir(os.path.join(prevPhaseDirectoryComplete, prevPhaseProject))\\\r\n\t\t\t\t\t\tand not prevPhaseProject.startswith('.') and prevPhaseProject.endswith('-cp'):\r\n\t\t\t\t\t\tsrc = os.path.join(prevPhaseDirectoryComplete,\tprevPhaseProject)\r\n\t\t\t\t\t\tdest = os.path.join(os.getcwd(), prevPhaseProject[:-3])\r\n\t\t\t\t\t\tprint(\"Copying directory {0} to {1}\".format(src, dest))\r\n\t\t\t\t\t\tshutil.copytree(src, dest, ignore = shutil.ignore_patterns('.*'))\r\n\t\t\t\r\n\t\t\toutput = subprocess.check_output(\"perl ../../moss.pl -d */*.js */*.html\", shell = True) \r\n\t\t\toutput = output.decode(\"utf-8\")\r\n\t\t\tprint(output)\r\n\t\t\toutput = output.splitlines()\r\n\t\t\tsys.stdout.flush()\r\n\t\t\tresult_url = output[-1]\r\n\t\t\t# Get the MOSS result HTML file\r\n\t\t\tres = requests.get(result_url)\r\n\t\t\tres.raise_for_status()\r\n\t\t\tresult_file = open(\"moss-result-cross-phase-{0}-{1}.html\".format(curPhase, prevPhase), \"wb\")\r\n\t\t\tfor chunk in res.iter_content(100000):\r\n\t\t\t\tresult_file.write(chunk)\r\n\t\t\tresult_file.close()\t\t\r\n\t\tos.chdir(os.path.join(homedir, \"cross-phase\", currentCrossPhaseDirectory))\r\n\t\tresult_file = open(\"moss-result-cross-phase-{0}-{1}.html\".format(curPhase, prevPhase), \"r\")\r\n\t\tsoup = bs4.BeautifulSoup(result_file)\r\n\t\t\r\n\t\t#Initialize data structures to store different metrics for a phase\r\n\t\tsimilarityDict = initializeMetricDictionaries()\r\n\r\n\t\t#Interpret MOSS results and populate required data structures\r\n\t\tinterpretMOSSResults(soup, similarityDict)\r\n\t\tpprint.pprint(similarityDict)\r\n\t\t\r\n\t\t#Dump cross-phase similarity metric\r\n\t\tdumpSimilarityMetric(similarityDict, 'cross-phase-similarity-metric-phase-{0}-{1}.csv'.format(curPhase, prevPhase))\r\n\texcept Exception:\r\n\t\tprint(\"Exception occurred while processing phase {0}: {1}\".format(phase, sys.exc_info()[0]))\r\n\t\tprint(\"Stack trace: 
\")\r\n\t\ttraceback.print_exc()","sub_path":"black/cross-phase-similarity.py","file_name":"cross-phase-similarity.py","file_ext":"py","file_size_in_byte":10006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496686795","text":"from five import grok\nfrom plone.directives import dexterity, form\n\nfrom plone.namedfile.interfaces import IImageScaleTraversable\nfrom plone.namedfile.field import NamedBlobImage\n\nfrom plone.formwidget.autocomplete import AutocompleteFieldWidget\nfrom z3c.relationfield.schema import RelationChoice\nfrom plone.formwidget.contenttree import ObjPathSourceBinder\n\nfrom isps.sitecontent.contentpage import IContentPage\nfrom isps.sitecontent.project import IProject\n\nfrom isps.sitecontent import MessageFactory as _\n\n\n# Interface class; used to define content-type schema.\n\nclass IBanner(form.Schema, IImageScaleTraversable):\n \"\"\"\n Animated banner content\n \"\"\"\n image = NamedBlobImage(\n title=_(u\"Banner Image\"),\n description=_(u\"Upload banner image ideally already resized to the \"\n u\"correct dimensions\"),\n required=True,\n )\n form.widget(project=AutocompleteFieldWidget)\n project = RelationChoice(\n title=_(u\"Project\"),\n description=_(u\"Select related project for the banner link\"),\n source=ObjPathSourceBinder(object_provides=IProject.__identifier__),\n required=False,\n )\n form.widget(information=AutocompleteFieldWidget)\n information = RelationChoice(\n title=_(u\"Related content\"),\n description=_(u\"Select related content. When a link to related \"\n u\"content is available any selection for related \"\n u\"projects will be ignored\"),\n source=ObjPathSourceBinder(\n object_provides=IContentPage.__identifier__),\n required=False,\n )\n\n\nclass Banner(dexterity.Item):\n grok.implements(IBanner)\n\n\nclass View(grok.View):\n grok.context(IBanner)\n grok.require('zope2.View')\n grok.name('view')\n","sub_path":"src/isps.sitecontent/isps/sitecontent/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"500886761","text":"# coding=utf-8\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, render_to_response, redirect\nfrom production.models import Product\nfrom django.template import RequestContext\nfrom models import Order, QuantityProduction, PromoCodePercents, PromoCodeCurrency\nfrom users.models import UserProfile\nfrom production.forms import QuantityForm\nfrom forms import InputPromoCode\nfrom shop.helpers import send_mail_to_admin\n\n\ndef add_product(request, product_id):\n form = QuantityForm(request.POST)\n if form.is_valid():\n try:\n a = request.session['products']\n quantity = form.cleaned_data[\"quantity\"]\n for i in xrange(0, int(quantity)):\n a.append(product_id)\n request.session['products'] = a\n # На случай, если добавляем первый товар в корзину\n except KeyError:\n a = []\n quantity = form.cleaned_data[\"quantity\"]\n for i in xrange(0, int(quantity)):\n a.append(product_id)\n request.session['products'] = a\n return redirect('production.views.added_to_checkout', product_id)\n\n\ndef view_order(request):\n ctx = {}\n try:\n a = request.session['products']\n form = InputPromoCode()\n products = []\n # Получаем объекты, используя айдишники\n for i in a:\n products.append(Product.objects.get(pk=i))\n ctx['products'] = products\n\n ctx['form'] = form\n for product in products:\n if 
product.discount and product.discount.status == 1:\n product.price -= product.discount.discount\n\n total_cost = 0\n for i in products:\n total_cost += i.price\n ctx['total_cost'] = total_cost\n #пытаемся получить ошибки валидации формы из сессии.Если ошибки есть - передаем и обнуляем\n try:\n ctx['errors'] = request.session['errors']\n request.session['errors'] = {}\n # Если их нет - передаем пустой словарик\n except KeyError:\n ctx['errors'] = {}\n # На случай, если товаров нет\n except KeyError:\n pass\n return render_to_response('orders/view_order.html', ctx, context_instance=RequestContext(request))\n\n\n@login_required\ndef confirm_order(request):\n percent = 0.0\n currency = 0.0\n if request.POST:\n # проверяем промокод\n form = InputPromoCode(request.POST)\n if form.is_valid():\n #... если этот промокод вообще есть\n try:\n promo = form.cleaned_data['promocode']\n perc_obj = PromoCodePercents.objects.filter(code=promo).first()\n curr_obj = PromoCodeCurrency.objects.filter(code=promo).first()\n if perc_obj and perc_obj.number_of_usage > 0:\n percent += perc_obj.discount_in_percents\n perc_obj.number_of_usage -= 1\n perc_obj.save()\n if curr_obj and curr_obj.number_of_usage > 0:\n currency += curr_obj.discount_in_currency\n curr_obj.number_of_usage -= 1\n curr_obj.save()\n #... если нет, то\n except TypeError:\n pass\n # Есть ошибки? Закинем их в сессию и перекинем обратно на страницу подтверждения\n else:\n request.session['errors'] = form.errors\n return redirect('view_order')\n\n try:\n # Получаем список айдишников\n a = request.session['products']\n products = []\n product_quantity = {}\n for i in a:\n products.append(Product.objects.get(pk=i))\n\n #формируем общую сумму заказа и считаем кол-во заказанного товара\n total_cost = 0\n for i in products:\n quantity = products.count(i)\n product_quantity[str(i.id)] = quantity\n if i.discount and i.discount.status == 1:\n i.price -= i.discount.discount\n total_cost += i.price\n\n\n #отнимаем процент или сумму по промокоду\n total_cost -= (total_cost * percent / 100)\n total_cost -= currency\n\n # Связываем заказ с юзером\n current_user = request.user\n current_user_profile = UserProfile.objects.get(user=current_user)\n order = Order(customer=current_user_profile, total_cost=total_cost)\n order.save()\n\n # Создаем Продукт-Количество и связываем с заказом\n for key, value in product_quantity.iteritems():\n quantity_product = QuantityProduction(order=order, quantity=value,\n product=Product.objects.get(pk=int(key)))\n quantity_product.save()\n order.products.add(quantity_product)\n # Добавляем заказ в историю заказов юзера\n current_user_profile.orders_history.add(order)\n\n # Отправляем письмо\n data_dict = {\n 'products': products,\n 'total_cost': total_cost,\n 'user': current_user_profile\n }\n send_mail_to_admin('New Order', 'mails/mail_to_admin.html', 'noreply@orders.com', data_dict)\n\n #Удаляем энное кол-во товара со склада\n for key, value in product_quantity.iteritems():\n prod = Product.objects.get(pk=int(key))\n prod.quantity -= value\n if prod.quantity < 0: # так как в модели - PositiveInteger\n prod.quantity = 0\n prod.save()\n\n # Очищаем корзину\n request.session['products'] = []\n\n except KeyError:\n pass\n return render_to_response('orders/success.html', {}, context_instance=RequestContext(request))\n\n\ndef delete_from_order(request, product_id):\n a = request.session['products']\n ind = a.index(product_id)\n b = a.pop(ind)\n request.session['products'] = a\n return redirect('view_order')\n\n\ndef 
clear_order(request):\n request.session['products'] = []\n return redirect('view_order')\n\n\ndef compare_products(request):\n ctx = {}\n try:\n compare_id = request.session['compare']\n compare_list = []\n for id in compare_id:\n compare_list.append(Product.objects.get(pk=id))\n ctx['compare'] = compare_list\n\n except KeyError:\n pass\n return render_to_response('orders/compare.html', ctx, context_instance=RequestContext(request))\n\n\ndef delete_from_compare(request, product_id):\n a = request.session['compare']\n ind = a.index(product_id)\n b = a.pop(ind)\n request.session['compare'] = a\n return redirect('view_compare')\n","sub_path":"shop/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42807570","text":"__author__ = 'Ruian'\n\nimport re\n\nclass Parser:\n \"\"\"\n The Parser class parses text files with to retrieve JASPAR names and their\n targets which are in an AGI format. Information is stored in a dictionary,\n with the JASPAR names as keys and AGI targets as values.\n\n Attributes:\n text_file (File): The file which is opened for further reading and\n parsing of information\n interaction_dictionary (Dict): The dictionary of interactions with\n JASPAR names as keys, and AGI targets as values.\n \"\"\"\n\n def __init__(self, file):\n \"\"\"(Parser, file) -> None\n\n Initializes the parser module by opening the file.\n\n Args:\n text_file (File): A text file, with JASPAR names and AGI targets\n \"\"\"\n # Open file for reading\n self.text_file = open(file, \"r\")\n # Initialize interaction dictionary\n self.interaction_dictionary = self.parse_data_set()\n # Close reading file\n self.text_file.close()\n\n def parse_data_set(self):\n \"\"\"(Parser) -> Dictionary\n\n Return a dictionary of JASPAR names and AGI targets from a parsed\n data file.\n \"\"\"\n # Dictionary to return\n new_dictionary = {}\n # Regex to find JASPAR and AGI\n regex = re.compile(\"(MA\\d\\d\\d\\d\\.1).(AT\\dG\\d\\d\\d\\d\\d)\")\n # Reads all lines\n for line in self.text_file:\n # Find JASPAR and AGI target in current line\n result = regex.match(line)\n\n # Get JASPAR and AGI if match found\n if result:\n name_jas = result.group(1)\n name_agi = result.group(2)\n\n # Check if JASPAR name key exists in dictionary\n if name_jas in new_dictionary:\n # Appends agi target to list\n new_dictionary[name_jas].append(name_agi)\n else:\n # Creates a new list of agi targets\n new_dictionary[name_jas] = [name_agi]\n return new_dictionary\n\nif __name__ == \"__main__\":\n user_input = input(\"Select file to parse:\\n\")\n\n parser = Parser(user_input)\n\n file_name = input(\"Select output file name:\\n\")\n\n output = open(file_name, \"w\")\n\n for key in parser.interaction_dictionary.keys():\n for value in parser.interaction_dictionary.get(key):\n output.write(key + \"\\t\" + value + \"\\n\")\n output.close()","sub_path":"JasAgiParser.py","file_name":"JasAgiParser.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533888986","text":"import pandas as pd\nimport numpy as np\nimport sqlite3\nimport datetime as dt\nimport os\n\ndef create_tec_map_table(sdate, edate, tec_resolution=5,\n file_dir=\"../data/tec_map/filled/\",\n table_name=\"tec_map_filled\", \n db_name=\"tec_map.sqlite\", \n db_dir=\"../data/sqlite3/\"):\n \"\"\"Creats a table in SQLite db to store datetimes of tec maps\n and their 
file paths\"\"\"\n\n # Make a db connection\n conn = sqlite3.connect(db_dir + db_name)\n\n # Create a table\n schema = \"Create Table IF NOT EXISTS {tbl} (\" +\\\n\t \"datetime TIMESTAMP, \"+\\\n \"file_path TEXT, \" +\\\n \"PRIMARY KEY datetime)\"\n schema = schema.format(tbl=table_name)\n\n # Create a dataframe\n edate = edate + dt.timedelta(days=1) # Make the end date inclusive\n nmaps = int(round((edate - sdate).total_seconds() / 60. / tec_resolution))\n dtms = [sdate + dt.timedelta(minutes=tec_resolution*i) for i in range(nmaps)] \n files_all = [file_dir + dtm.strftime(\"%Y%m%d\") + \"/\" +\\\n dtm.strftime(\"%Y%m%d.%H%M\") + \".npy\" for dtm in dtms]\n files = [f.replace(file_dir, \"\") if os.path.isfile(f) else \"NaN\" for f in files_all]\n df = pd.DataFrame(data={\"datetime\":dtms, \"file_path\":files})\n\n # Write data to db\n df.to_sql(table_name, conn, schema=schema, if_exists=\"append\", index=False)\n\n return\n\nif __name__ == \"__main__\":\n\n # initialize parameters\n sdate = dt.datetime(2011, 1, 1)\n edate = dt.datetime(2016, 12, 31) # Includes the edate\n tec_resolution = 5\n file_dir=\"/home/sd-guest/Documents/data/tec_filled/\"#\"../data/tec_map/filled/\"\n db_dir=\"/home/sd-guest/Documents/data/sqlite3/\"\n # Create a table for storing tec map datetimes and file paths\n create_tec_map_table(sdate, edate, tec_resolution=tec_resolution,\n file_dir=file_dir,\n table_name=\"tec_map_filled\", \n db_name=\"tec_map.sqlite\", \n db_dir=db_dir)\n\n","sub_path":"data_pre_processing/create_tec_map_table.py","file_name":"create_tec_map_table.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"643807701","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport numpy as np\nfrom util import AA, AA_IDX, BLOSUM\n\n\n\"\"\"\nThis module contains coding for training a Gaussian Process Regression\nmodel on the Sarkisyan (2016) data set.\n\"\"\"\n\nclass SequenceGP(object):\n \n def __init__(self, load=False, X_train=None, y_train=None, \n length_scale=1, homo_noise=0.1, load_prefix=\"gfp_gp\", \n k_beta=0.1, c=1, d=2):\n if load:\n self.load(prefix=load_prefix)\n else:\n assert X_train is not None and y_train is not None\n self.X_ = np.copy(X_train)\n self.y_ = np.copy(y_train).reshape((y_train.shape[0], 1))\n self.N_ = self.X_.shape[0]\n self.params_ = np.array([homo_noise, k_beta, c, d])\n self.K_ = None\n self.Kinv_ = None\n\n def _kernel(self, Xi, Xj):\n beta = self.params_[1]\n c = self.params_[2]\n d = self.params_[3]\n kij = np.prod(BLOSUM[[Xi, Xj]]**beta)\n kii = np.prod(BLOSUM[[Xi, Xi]]**beta)\n kjj = np.prod(BLOSUM[[Xj, Xj]]**beta)\n k = kij / (np.sqrt(kii*kjj))\n k = np.exp(c*k)\n# k = (k+c)**d\n return k\n \n def _fill_K(self, print_every=100):\n self.K_ = np.zeros((self.N_, self.N_))\n total = self.N_ * (self.N_+1) / 2\n m = 0\n homo_noise = self.params_[0]\n for i in range(self.N_):\n for j in range(i, self.N_):\n kij = self._kernel(self.X_[i], self.X_[j])\n if i == j:\n kij += homo_noise\n self.K_[i, j] = kij\n self.K_[j, i] = kij\n \n m += 1\n if m % print_every == 0:\n print(\"Number of K elements filled: %i / %i\" % (m, total))\n \n def _invert_K(self):\n print(\"Inverting K...\")\n self.Kinv_ = np.linalg.inv(self.K_)\n print(\"Done inverting K.\")\n \n def build(self, print_every=100):\n self._fill_K(print_every=print_every)\n self._invert_K()\n \n def predict(self, Xstar, print_every=None, predict_variance=False):\n M = len(Xstar)\n Kstar = np.zeros((M, 
self.N_))\n total = M * self.N_\n m = 0\n for i in range(M):\n for j in range(self.N_):\n kij = self._kernel(Xstar[i], self.X_[j])\n Kstar[i, j] = kij\n m += 1\n if print_every is not None:\n if m % print_every == 0:\n print(\"Number of Kstar elements filled: %i / %i\" % (m, total))\n mu_star = np.matmul(Kstar, np.matmul(self.Kinv_, self.y_))\n return mu_star\n \n def save(self, prefix = \"gfp_gp\"):\n np.save(prefix + \"X.npy\", self.X_)\n np.save(prefix + \"y.npy\", self.y_)\n np.save(prefix + \"K.npy\", self.K_)\n np.save(prefix + \"Kinv.npy\", self.Kinv_)\n np.save(prefix + \"params.npy\", self.params_)\n \n def load(self, prefix=\"gfp_gp\"):\n self.X_ = np.load(prefix + \"X.npy\")\n self.y_ = np.load(prefix + \"y.npy\")\n self.K_ = np.load(prefix + \"K.npy\")\n self.Kinv_ = np.load(prefix + \"Kinv.npy\")\n self.params_ = np.load(prefix + \"params.npy\")\n self.N_ = self.X_.shape[0]\n ","sub_path":"src/gfp_gp.py","file_name":"gfp_gp.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"206024140","text":"import numpy as np\nimport math\n\ndef sum__(x, f=lambda x: x):\n\tacc = 0.\n\tfor item in x:\n\t\tacc += f(item) \n\treturn acc\ndef mean(x):\n\tn = x.shape[0]\n\tacc = 0.\n\tfor item in x:\n\t\tacc += item\n\treturn acc/n\n\ndef\tvariance(x):\n\tu = mean(x)\n\treturn sum__(x,lambda x: (x - u)**2) / x.shape[0]\n\ndef std(x):\n\treturn math.sqrt(variance(x))\n\ndef dot(x, y):\n\tif (x is None or y is None\n\tor x.shape[0] != y.shape[0]):\n\t\treturn None\n\treturn sum__(x * y, lambda x:x)\n\ndef mat_vec_prod(x, y):\n\tif (x.shape[1], 1) != y.shape:\n\t\treturn None\n\tres = np.zeros((x.shape[0],1))\n\tf = 0\n\tfor row in x:\n\t\trow = row.reshape((row.shape[0],1))\n\t\tres[f] = dot(row, y)\n\t\tf += 1\n\treturn res\n\t\ndef mat_mat_prod(x, y):\n\tif (x.shape[0] != y.shape[1]):\n\t\treturn None\n\tres = np.zeros((x.shape[0],y.shape[1]))\n\tprint(x.shape,\" \",y.shape)\n\tfor i in range(x.shape[0]):\n\t\tfor j in range(y.shape[1]):\n\t\t\tres[i][j] = dot(x[i],y[:,j])\n\treturn res\n\ndef mse(y, y_hat):\n\tif y.shape != y_hat.shape:\n\t\treturn None\n\t#return sum__(y_hat - y,lambda x: x**2)\n\tacc = 0.\n\tfor elem1,elem2 in zip(y,y_hat):\n\t\tacc += (elem1 - elem2)**2\n\treturn acc / y.shape[0]\n\ndef vec_mse(y, y_hat):\n\ty = y.reshape((y.shape[0],1))\n\ty_hat = y_hat.reshape((y_hat.shape[0],1))\n\t# print(y.shape)\n\treturn float(dot(y_hat - y,y_hat - y) / y_hat.shape[0])\n\ndef reshape(x):\n\tx = x.reshape(x.shape[0], 1)\n\treturn x\ndef linear_mse(x, y, theta):\n\ttheta = reshape(theta)\n\ty = reshape(y)\n\thypothes = mat_vec_prod(x,theta)\n\treturn float(mse(hypothes, y))\n\n\ndef\tvec_linear_mse(x, y, theta):\n\ty = reshape(y)\n\ttheta = reshape(theta)\n\thypothes = mat_vec_prod(x, theta)\n\treturn float(dot(hypothes - y,hypothes - y) / x.shape[0])\n# print(vec_linear_mse(X, Y, W))\n\n\n# def\tgradient(x, y, theta):\n# \ty = reshape(y)\n# \ttheta = reshape(theta)\n# \thypothes = mat_vec_prod(x, theta)\n# \t# scalar = float(sum__(hypothes - y) / x.shape[0])\n# \tgrad = np.zeros(theta.shape)\n# \tfor j in range(theta.shape[0]):\n# \t\tfor i in range(x.shape[0]):\n# \t\t\tgrad[j] = sum__((hypothes[i] - y[i]) * x[i])\n# \treturn grad\n\n\n\ndef vec_gradient(x, y, theta):\n\tM = x.shape[0]\n\tN = x.shape[1]\n\tprint(theta.shape)\n\tprint(x.shape)\n\tprint(y.shape)\n\n\tif (M,1) != y.shape or (N,1) != theta.shape:\n\t\tprint(\"incompatible\")\n\t\treturn None\n\ty = reshape(y)\n\ttheta = 
reshape(theta)\n\treturn dot(x, mat_vec_prod(x, theta) - y) / x.shape[0]\n\n# print(gradient(X, Y, Z))\n# print(vec_gradient(X, Y, Z))","sub_path":"day01/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"482920458","text":"#Sanjay Gurung session 2\n\ndef list_prime(n):\n \n for i in range(2,n+1):\n for j in range(2,i):\n if i%j==0:\n break\n else:\n print(i)\n break\n \n\n\nimport math\nn= int(input(\"Enter a number to get a prime factor of: \"))\nprint(\"Prime factors are:\")\nwhile(n%2)==0:\n print(2)\n n=n//2\n\nfor i in range(3,n+1):\n while(n%i)==0:\n print(i)\n n=n//i\n\nif n>2:\n print(n)\n\n\n\ndef convert_grade(n):\n if n>=0 and n<40:\n print(\"F\")\n elif n>=40 and n<60:\n print(\"D\")\n elif n>=60 and n<80:\n print(\"C\")\n elif n>=80 and n<90:\n print(\"B\")\n else:\n print(\"A\")\n\n\n\n\nimport math\nn= int(input(\"Enter a number for The Collatz Conjeture Sequence of:\"))\nwhile (True):\n if n % 2==0:\n n=n//2\n print(n)\n else :\n n=3*n+1\n print(n)\n if(n==1):\n break\n\n","sub_path":"Gurunglab3.py","file_name":"Gurunglab3.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"299237131","text":"# Copyright (C) 2001-2015 Andreas Lang-Nevyjel, init.at\n#\n# this file is part of md-config-server\n#\n# Send feedback to: \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License Version 2 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\"\"\" build process for md-config-server \"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import connection\nfrom django.db.models import Q\nfrom initat.cluster.backbone.models import device, device_group, device_variable, mon_ext_host, \\\n mon_contactgroup, netdevice, network_type, user, config, \\\n mon_host_dependency_templ, mon_host_dependency, mon_service_dependency, net_ip, \\\n mon_check_command_special, mon_check_command\nfrom initat.md_config_server import special_commands, constants\nfrom initat.md_config_server.config import global_config, main_config, all_commands, \\\n all_service_groups, time_periods, all_contacts, all_contact_groups, all_host_groups, all_hosts, \\\n all_services, config_dir, device_templates, service_templates, mon_config, \\\n all_host_dependencies, build_cache, build_safe_name, SimpleCounter\nfrom initat.md_config_server.constants import CACHE_MODES, DEFAULT_CACHE_MODE\nfrom initat.md_config_server.mixins import version_check_mixin\nfrom initat.md_config_server.icinga_log_reader.log_reader import host_service_id_util\nfrom lxml.builder import E # @UnresolvedImport\nimport codecs\nimport commands\nimport config_tools\nimport configfile\nimport logging_tools\nimport net_tools\nimport networkx\nimport operator\nimport json\nimport time\nimport os\nimport os.path\nimport process_tools\nimport server_command\nimport signal\nimport stat\nimport threading_tools\nimport time\n\n\nclass build_process(threading_tools.process_obj, version_check_mixin):\n def process_init(self):\n self.__log_template = logging_tools.get_logger(\n global_config[\"LOG_NAME\"],\n global_config[\"LOG_DESTINATION\"],\n zmq=True,\n context=self.zmq_context,\n init_logger=True\n )\n self.__hosts_pending, self.__hosts_waiting = (set(), set())\n self.__icinga_lock_file_name = os.path.join(global_config[\"MD_BASEDIR\"], \"var\", global_config[\"MD_LOCK_FILE\"])\n connection.close()\n self.__mach_loggers = {}\n self.__num_mach_logs = {}\n self.version = int(time.time())\n self.log(\"initial config_version is %d\" % (self.version))\n self.router_obj = config_tools.router_object(self.log)\n self.register_func(\"check_for_slaves\", self._check_for_slaves)\n\n self.register_func(\"build_host_config\", self._check_call)\n self.register_func(\"sync_http_users\", self._check_call)\n self.register_func(\"rebuild_config\", self._check_call)\n self.register_func(\"reload_md_daemon\", self._check_call)\n # store pending commands\n self.__pending_commands = []\n # ready (check_for_slaves called)\n self.__ready = False\n\n # self.__host_service_map = host_service_map(self.log)\n\n def log(self, what, log_level=logging_tools.LOG_LEVEL_OK):\n self.__log_template.log(log_level, what)\n\n def loop_post(self):\n for mach_logger in self.__mach_loggers.itervalues():\n mach_logger.close()\n self.__log_template.close()\n\n def _check_for_slaves(self, **kwargs):\n master_server = device.objects.get(Q(pk=global_config[\"SERVER_IDX\"]))\n slave_servers = device.objects.filter(Q(device_config__config__name=\"monitor_slave\")).select_related(\"domain_tree_node\")\n # slave configs\n self.__gen_config = main_config(self, master_server, distributed=True if len(slave_servers) else False)\n self.send_pool_message(\"external_cmd_file\", 
self.__gen_config.get_command_name())\n self.__gen_config_built = False\n self.__slave_configs, self.__slave_lut = ({}, {})\n if len(slave_servers):\n self.log(\n \"found {}: {}\".format(\n logging_tools.get_plural(\"slave_server\", len(slave_servers)),\n \", \".join(sorted([cur_dev.full_name for cur_dev in slave_servers]))))\n for cur_dev in slave_servers:\n _slave_c = main_config(\n self,\n cur_dev,\n slave_name=cur_dev.full_name,\n master_server=master_server,\n )\n self.__slave_configs[cur_dev.pk] = _slave_c\n self.__slave_lut[cur_dev.full_name] = cur_dev.pk\n else:\n self.log(\"no slave-servers found\")\n self.__ready = True\n if self.__pending_commands:\n self.log(\"processing {}\".format(logging_tools.get_plural(\"pending command\", len(self.__pending_commands))))\n while self.__pending_commands:\n _pc = self.__pending_commands.pop(0)\n self._check_call(*_pc[\"args\"], **_pc[\"kwargs\"])\n\n def send_command(self, src_id, srv_com):\n self.send_pool_message(\"send_command\", \"urn:uuid:{}:relayer\".format(src_id), srv_com)\n\n def mach_log(self, what, lev=logging_tools.LOG_LEVEL_OK, mach_name=None, **kwargs):\n if \"single_build\" in kwargs:\n self.__write_logs = kwargs[\"single_build\"]\n if mach_name is None:\n mach_name = self.__cached_mach_name\n else:\n self.__cached_mach_name = mach_name\n if mach_name not in self.__mach_loggers:\n self.__num_mach_logs[mach_name] = 0\n if self.__write_logs:\n self.__mach_loggers[mach_name] = self._get_mach_logger(mach_name)\n else:\n self.__mach_loggers[mach_name] = []\n self.__num_mach_logs[mach_name] += 1\n if self.__write_logs:\n self.__mach_loggers[mach_name].log(lev, what)\n else:\n self.__mach_loggers[mach_name].append((lev, what))\n if kwargs.get(\"global_flag\", False):\n self.log(what, lev)\n\n def get_num_mach_logs(self):\n return self.__num_mach_logs.get(self.__cached_mach_name, 0)\n\n def _get_mach_logger(self, mach_name):\n return logging_tools.get_logger(\n \"{}.{}\".format(\n global_config[\"LOG_NAME\"],\n mach_name.replace(\".\", r\"\\.\"),\n ),\n global_config[\"LOG_DESTINATION\"],\n zmq=True,\n context=self.zmq_context,\n init_logger=True,\n )\n\n def close_mach_log(self, mach_name=None, **kwargs):\n if mach_name is not None:\n self.__cached_mach_name = mach_name\n if self.__cached_mach_name:\n mach_name = self.__cached_mach_name\n del self.__num_mach_logs[mach_name]\n if self.__write_logs:\n self.__mach_loggers[mach_name].close()\n else:\n if kwargs.get(\"write_logs\", False):\n # write logs because of flag (errors ?)\n _logger = self._get_mach_logger(mach_name)\n for _lev, _what in self.__mach_loggers[mach_name]:\n _logger.log(_lev, _what)\n _logger.close()\n del _logger\n del self.__mach_loggers[mach_name]\n\n def _check_md_config(self):\n c_stat, out = commands.getstatusoutput(\"{}/bin/{} -v {}/etc/{}.cfg\".format(\n global_config[\"MD_BASEDIR\"],\n global_config[\"MD_TYPE\"],\n global_config[\"MD_BASEDIR\"],\n global_config[\"MD_TYPE\"]))\n if c_stat:\n self.log(\n \"Checking the {}-configuration resulted in an error ({:d})\".format(\n global_config[\"MD_TYPE\"],\n c_stat,\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n ret_stat = False\n else:\n self.log(\"Checking the {}-configuration returned no error\".format(global_config[\"MD_TYPE\"]))\n ret_stat = True\n return ret_stat, out\n\n def _check_call(self, *args, **kwargs):\n if self.__ready:\n getattr(self, \"_{}\".format(kwargs[\"func_name\"]))(*args, **kwargs)\n else:\n self.__pending_commands.append(\n {\n \"args\": args,\n \"kwargs\": kwargs,\n }\n )\n\n def 
_reload_md_daemon(self, **kwargs):\n start_daemon, restart_daemon = (False, False)\n cs_stat, cs_out = self._check_md_config()\n if not cs_stat:\n self.log(\"Checking the {}-config resulted in an error, not trying to (re)start\".format(global_config[\"MD_TYPE\"]), logging_tools.LOG_LEVEL_ERROR)\n self.log(\"error_output has {}\".format(logging_tools.get_plural(\"line\", cs_out.split(\"\\n\"))),\n logging_tools.LOG_LEVEL_ERROR)\n for line in cs_out.split(\"\\n\"):\n if line.strip().lower().startswith(\"error\"):\n self.log(\" - {}\".format(line), logging_tools.LOG_LEVEL_ERROR)\n else:\n if os.path.isfile(self.__icinga_lock_file_name):\n try:\n pid = file(self.__icinga_lock_file_name, \"r\").read().strip()\n except:\n self.log(\n \"Cannot read {} LockFile named '{}', trying to start {}\".format(\n global_config[\"MD_TYPE\"],\n self.__icinga_lock_file_name,\n global_config[\"MD_TYPE\"],\n ),\n logging_tools.LOG_LEVEL_WARN\n )\n start_daemon = True\n else:\n pid = file(self.__icinga_lock_file_name).read().strip()\n try:\n pid = int(pid)\n except:\n self.log(\n \"PID read from '{}' is not an integer ({}, {}), trying to restart {}\".format(\n self.__icinga_lock_file_name,\n str(pid),\n process_tools.get_except_info(),\n global_config[\"MD_TYPE\"],\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n restart_daemon = True\n else:\n try:\n os.kill(pid, signal.SIGHUP)\n except OSError:\n self.log(\n \"Error signaling pid {:d} with SIGHUP ({:d}), trying to restart {} ({})\".format(\n pid,\n signal.SIGHUP,\n global_config[\"MD_TYPE\"],\n process_tools.get_except_info(),\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n restart_daemon = True\n else:\n self.log(\"Successfully signaled pid {:d} with SIGHUP ({:d})\".format(pid, signal.SIGHUP))\n else:\n self.log(\n \"{} LockFile '{}' not found, trying to start {}\".format(\n global_config[\"MD_TYPE\"],\n self.__icinga_lock_file_name,\n global_config[\"MD_TYPE\"]),\n logging_tools.LOG_LEVEL_WARN)\n start_daemon = True\n if start_daemon:\n _cmd = \"start\"\n elif restart_daemon:\n _cmd = \"restart\"\n else:\n _cmd = None\n if _cmd:\n self.log(\"Trying to {} {} via collserver-call_script\".format(_cmd, global_config[\"MD_TYPE\"]))\n reply = net_tools.zmq_connection(\"md_config_server\", timeout=10).add_connection(\n \"tcp://localhost:2001\",\n server_command.srv_command(\n command=\"call_script\",\n **{\n \"arguments:arg0\": \"/etc/init.d/{}\".format(global_config[\"MD_TYPE\"]),\n \"arguments:arg1\": _cmd,\n }\n )\n )\n if reply is None:\n self.log(\"got no reply\", logging_tools.LOG_LEVEL_ERROR)\n else:\n self.log(*reply.get_log_tuple())\n\n def _sync_http_users(self, *args, **kwargs):\n self.log(\"syncing http-users\")\n self.__gen_config._create_access_entries()\n\n def _build_host_config(self, *args, **kwargs):\n src_id, srv_com = (args[0], server_command.srv_command(source=args[1]))\n dev_pks = srv_com.xpath(\".//device_list/device/@pk\", smart_strings=False)\n dev_cache_modes = list(set(srv_com.xpath(\".//device_list/device/@mode\", smart_strings=False)))\n if dev_cache_modes:\n dev_cache_mode = dev_cache_modes[0]\n dev_names = [cur_dev.full_name for cur_dev in device.objects.filter(Q(pk__in=dev_pks)).select_related(\"domain_tree_node\")]\n self.log(\"starting single build with {}, cache mode is {}: {}\".format(\n logging_tools.get_plural(\"device\", len(dev_names)),\n dev_cache_mode,\n \", \".join(sorted(dev_names))))\n srv_com[\"result\"] = self._rebuild_config(*dev_names, cache_mode=dev_cache_mode)\n srv_com.set_result(\"rebuilt config for {}\".format(\", 
\".join(dev_names)), server_command.SRV_REPLY_STATE_OK)\n else:\n srv_com.set_result(\"no devices gaven\", server_command.SRV_REPLY_STATE_ERROR)\n self.send_pool_message(\"send_command\", src_id, unicode(srv_com))\n\n def _cleanup_db(self):\n # cleanup tasks for the database\n num_empty_mhd = mon_host_dependency.objects.filter(Q(devices=None) & Q(dependent_devices=None)).count()\n num_empty_msd = mon_service_dependency.objects.filter(Q(devices=None) & Q(dependent_devices=None)).count()\n if num_empty_mhd:\n self.log(\"removing {} empty mon_host_dependencies\".format(num_empty_mhd))\n mon_host_dependency.objects.filter(Q(devices=None) & Q(dependent_devices=None)).delete()\n if num_empty_msd:\n self.log(\"removing {} empty mon_service_dependencies\".format(num_empty_msd))\n mon_service_dependency.objects.filter(Q(devices=None) & Q(dependent_devices=None)).delete()\n\n def _check_for_snmp_container(self):\n _co = config.objects # @UndefinedVariable\n try:\n _container = _co.get(Q(name=\"SNMP container\")) # @UndefinedVariable\n except config.DoesNotExist: # @UndefinedVariable\n self.log(\"created SNMP container class\")\n _container = _co.create(\n name=\"SNMP container\",\n system_config=True,\n server_config=True,\n enabled=False,\n description=\"container for all SNMP checks\",\n )\n _present_coms = set(_container.mon_check_command_set.all().values_list(\"name\", flat=True))\n _specials = {\"snmp {}\".format(_special.name): _special for _special in mon_check_command_special.objects.all()}\n _new = set([\"snmp {}\".format(_com.Meta.name) for _com in special_commands.special_snmp_general.special_snmp_general(self.log).get_commands()])\n _to_create = set(_specials.keys()) & (_new - _present_coms)\n for _name in _to_create:\n _new_mcc = mon_check_command.objects.create(\n name=_name,\n description=\"auto created SNMP check entry\",\n config=_container,\n mon_check_command_special=_specials[_name],\n command_line=\"/bin/true\",\n )\n\n def _rebuild_config(self, *args, **kwargs):\n # self.__host_service_map.start_collecting()\n\n single_build = True if len(args) > 0 else False\n if not single_build:\n # from mixin\n self._check_md_version()\n self._check_relay_version()\n self._cleanup_db()\n # check for SNMP container config\n self._check_for_snmp_container()\n # copy from global_config (speedup)\n self.gc = configfile.gc_proxy(global_config)\n hdep_from_topo = self.gc[\"USE_HOST_DEPENDENCIES\"] and self.gc[\"HOST_DEPENDENCIES_FROM_TOPOLOGY\"]\n if hdep_from_topo:\n host_deps = mon_host_dependency_templ.objects.all().order_by(\"-priority\")\n if len(host_deps):\n self.mon_host_dep = host_deps[0]\n else:\n self.log(\"no mon_host_dependencies found\", logging_tools.LOG_LEVEL_ERROR)\n hdep_from_topo = False\n h_list = list(args)\n cache_mode = kwargs.get(\"cache_mode\", \"???\")\n if cache_mode not in CACHE_MODES:\n # take first cache mode\n cache_mode = DEFAULT_CACHE_MODE\n self.log(\n \"rebuild_config called, single_build is {}, cache_mode is {}, hdep_from_topo is {}\".format(\n str(single_build),\n cache_mode,\n str(hdep_from_topo),\n )\n )\n if self.gc[\"DEBUG\"]:\n cur_query_count = len(connection.queries)\n cdg = device.objects.get(Q(device_group__cluster_device_group=True))\n if single_build:\n build_dv = None\n else:\n # delete old gauge variables\n device_variable.objects.filter(Q(name=\"_SYS_GAUGE_\") & Q(is_public=False) & Q(device=cdg)).delete()\n # init build variable\n build_dv = device_variable(\n device=cdg,\n is_public=False,\n name=\"_SYS_GAUGE_\",\n description=\"mon config 
rebuild on {}\".format(self.__gen_config.monitor_server.full_name if self.__gen_config else \"unknown\"),\n var_type=\"i\")\n # bump version\n if int(time.time()) > self.version:\n self.version = int(time.time())\n else:\n self.version += 1\n self.log(\"config_version for full build is %d\" % (self.version))\n self.send_pool_message(\"build_info\", \"start_build\", self.version, target=\"syncer\")\n # fetch SNMP-stuff from cluster and initialise var cache\n rebuild_gen_config = False\n if not h_list:\n self.log(\n \"rebuilding complete config (for master and {})\".format(\n logging_tools.get_plural(\"slave\", len(self.__slave_configs))\n )\n )\n rebuild_gen_config = True\n else:\n # FIXME, handle host-related config for only specified slaves\n self.log(\n \"rebuilding config for {}: {}\".format(\n logging_tools.get_plural(\"host\", len(h_list)),\n logging_tools.compress_list(h_list)\n )\n )\n if not self.__gen_config:\n rebuild_gen_config = True\n if rebuild_gen_config:\n self._create_general_config()\n # h_list = []\n bc_valid = self.__gen_config.is_valid()\n if bc_valid:\n # get device templates\n dev_templates = device_templates(self)\n # get serivce templates\n serv_templates = service_templates(self)\n if dev_templates.is_valid() and serv_templates.is_valid():\n pass\n else:\n if not dev_templates.is_valid():\n self.log(\"device templates are not valid\", logging_tools.LOG_LEVEL_ERROR)\n if not serv_templates.is_valid():\n self.log(\"service templates are not valid\", logging_tools.LOG_LEVEL_ERROR)\n bc_valid = False\n if bc_valid:\n if single_build:\n if not self.__gen_config_built:\n self._create_general_config(write_entries=False)\n # clean device and service entries\n for key in constants.SINGLE_BUILD_MAPS:\n if key in self.__gen_config:\n self.__gen_config[key].refresh(self.__gen_config)\n self.router_obj.check_for_update()\n total_hosts = sum([self._get_number_of_hosts(cur_gc, h_list) for cur_gc in [self.__gen_config] + self.__slave_configs.values()])\n if build_dv:\n self.log(\"init gauge with max={:d}\".format(total_hosts))\n build_dv.init_as_gauge(total_hosts)\n if not single_build:\n # build distance map\n cur_dmap, unreachable_pks = self._build_distance_map(self.__gen_config.monitor_server, show_unroutable=not single_build)\n self.send_pool_message(\"build_info\", \"unreachable_devices\", len(unreachable_pks), target=\"syncer\")\n if unreachable_pks:\n for _urd in device.objects.filter(Q(pk__in=unreachable_pks)).select_related(\"domain_tree_node\"):\n self.send_pool_message(\"build_info\", \"unreachable_device\", _urd.pk, unicode(_urd), unicode(_urd.device_group), target=\"syncer\")\n else:\n cur_dmap = {}\n unreachable_pks = []\n # todo, move to separate processes\n gc_list = [self.__gen_config]\n if not single_build:\n gc_list.extend(self.__slave_configs.values())\n for cur_gc in gc_list:\n cur_gc.cache_mode = cache_mode\n if cur_gc.master and not single_build:\n # recreate access files\n cur_gc._create_access_entries()\n\n _bc = build_cache(self.log, cdg, full_build=not single_build, unreachable_pks=unreachable_pks)\n _bc.cache_mode = cache_mode\n _bc.build_dv = build_dv\n _bc.host_list = h_list\n _bc.dev_templates = dev_templates\n _bc.serv_templates = serv_templates\n _bc.single_build = single_build\n _bc.debug = self.gc[\"DEBUG\"]\n self.send_pool_message(\"build_info\", \"start_config_build\", cur_gc.monitor_server.full_name, target=\"syncer\")\n self.log(\"create host\")\n self.log(\"bc {}\".format(_bc))\n self.log(\"cur gc {}\".format(cur_gc))\n self.log(\"cur_dmap 
{}\".format(cur_dmap))\n self._create_host_config_files(_bc, cur_gc, cur_dmap, hdep_from_topo)\n self.send_pool_message(\"build_info\", \"end_config_build\", cur_gc.monitor_server.full_name, target=\"syncer\")\n if not single_build:\n # refresh implies _write_entries\n cur_gc.refresh()\n if not cur_gc.master:\n # write config to disk\n cur_gc._write_entries()\n # start syncing\n self.send_pool_message(\"build_info\", \"sync_slave\", cur_gc.monitor_server.full_name, target=\"syncer\")\n del _bc\n if build_dv:\n build_dv.delete()\n if not single_build:\n cfgs_written = self.__gen_config._write_entries()\n if bc_valid and (cfgs_written or rebuild_gen_config):\n # send reload to remote instance ?\n self._reload_md_daemon()\n self.send_pool_message(\"build_info\", \"end_build\", self.version, target=\"syncer\")\n else:\n cur_gc = self.__gen_config\n res_node = E.config(\n *sum([cur_gc[key].get_xml() for key in constants.SINGLE_BUILD_MAPS], [])\n )\n if self.gc[\"DEBUG\"]:\n tot_query_count = len(connection.queries) - cur_query_count\n self.log(\"queries issued: {:d}\".format(tot_query_count))\n for q_idx, act_sql in enumerate(connection.queries[cur_query_count:], 1):\n self.log(\"{:5d} {}\".format(q_idx, act_sql[\"sql\"][:180]))\n # self.__host_service_map.end_collecting()\n del self.gc\n if single_build:\n return res_node\n\n def _build_distance_map(self, root_node, show_unroutable=True):\n self.log(\"building distance map, root node is '{}'\".format(root_node))\n # exclude all without attached netdevices\n dm_dict = {\n cur_dev.pk: cur_dev for cur_dev in device.objects.filter(\n Q(enabled=True) & Q(device_group__enabled=True)\n ).exclude(netdevice=None).select_related(\"domain_tree_node\").prefetch_related(\"netdevice_set\")\n }\n nd_dict = {}\n for dev_pk, nd_pk in netdevice.objects.filter(Q(enabled=True)).values_list(\"device\", \"pk\"):\n nd_dict.setdefault(dev_pk, set()).add(nd_pk)\n nd_lut = {value[0]: value[1] for value in netdevice.objects.filter(Q(enabled=True)).values_list(\"pk\", \"device\") if value[1] in dm_dict.keys()}\n for cur_dev in dm_dict.itervalues():\n # set 0 for root_node, -1 for all other devices\n cur_dev.md_dist_level = 0 if cur_dev.pk == root_node.pk else -1\n all_pks = set(dm_dict.keys())\n all_nd_pks = set(nd_lut.keys())\n max_level = 0\n # limit for loop\n for cur_iter in xrange(128):\n run_again = False\n # iterate until all nodes have a valid dist_level set\n src_nodes = set([key for key, value in dm_dict.iteritems() if value.md_dist_level >= 0])\n dst_nodes = all_pks - src_nodes\n self.log(\"dm_run {:3d}, {}, {}\".format(\n cur_iter,\n logging_tools.get_plural(\"source node\", len(src_nodes)),\n logging_tools.get_plural(\"dest node\", len(dst_nodes))))\n src_nds = reduce(operator.ior, [nd_dict[key] for key in src_nodes if key in nd_dict], set())\n # dst_nds = reduce(operator.ior, [nd_dict[key] for key in dst_nodes], set())\n # build list of src_nd, dst_nd tuples\n nb_list = []\n for src_nd in src_nds:\n try:\n for dst_nd in networkx.all_neighbors(self.router_obj.nx, src_nd):\n if dst_nd not in src_nds:\n nb_list.append((src_nd, dst_nd))\n except networkx.exception.NetworkXError:\n self.log(\n \"netdevice {} is not in graph: {}\".format(\n src_nd,\n process_tools.get_except_info(),\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n for src_nd, dst_nd, in nb_list:\n if src_nd in all_nd_pks and dst_nd in all_nd_pks:\n src_dev, dst_dev = (dm_dict[nd_lut[src_nd]], dm_dict[nd_lut[dst_nd]])\n new_level = src_dev.md_dist_level + 1\n if dst_dev.md_dist_level >= 0 and new_level 
> dst_dev.md_dist_level:\n self.log(\n \"pushing node {} farther away from root ({:d} => {:d})\".format(\n unicode(dst_dev),\n dst_dev.md_dist_level,\n new_level,\n )\n )\n dst_dev.md_dist_level = max(dst_dev.md_dist_level, new_level)\n max_level = max(max_level, dst_dev.md_dist_level)\n run_again = True\n else:\n self.log(\"dropping link ({:d}, {:d}), devices disabled?\".format(src_nd, dst_nd), logging_tools.LOG_LEVEL_WARN)\n if not run_again:\n break\n self.log(\"max distance level: {:d}\".format(max_level))\n nodes_ur = [unicode(value) for value in dm_dict.itervalues() if value.md_dist_level < 0]\n ur_pks = [_entry.pk for _entry in dm_dict.itervalues() if _entry.md_dist_level < 0]\n if nodes_ur and show_unroutable:\n self.log(\n u\"{}: {}\".format(\n logging_tools.get_plural(\"unroutable node\", len(nodes_ur)),\n u\", \".join(sorted(nodes_ur)),\n )\n )\n for level in xrange(max_level + 1):\n self.log(\n \"nodes in level {:d}: {}\".format(\n level,\n len([True for value in dm_dict.itervalues() if value.md_dist_level == level]),\n )\n )\n return {key: value.md_dist_level for key, value in dm_dict.iteritems()}, ur_pks\n\n def _create_general_config(self, write_entries=None):\n self.__gen_config_built = True\n config_list = [self.__gen_config] + self.__slave_configs.values()\n if write_entries is not None:\n prev_awc = self.__gen_config.allow_write_entries\n for cur_conf in config_list:\n # set actual value\n cur_conf.allow_write_entries = write_entries\n start_time = time.time()\n self._check_image_maps()\n self._create_gen_config_files(config_list)\n end_time = time.time()\n if write_entries is not None:\n for cur_conf in config_list:\n # restore to previous value\n cur_conf.allow_write_entries = prev_awc\n self.log(\"creating the total general config took {}\".format(logging_tools.get_diff_time_str(end_time - start_time)))\n\n def _create_gen_config_files(self, gc_list):\n for cur_gc in gc_list:\n start_time = time.time()\n # misc commands (sending of mails)\n cur_gc.add_config(all_commands(cur_gc, self))\n # servicegroups\n cur_gc.add_config(all_service_groups(cur_gc, self))\n # timeperiods\n cur_gc.add_config(time_periods(cur_gc, self))\n # contacts\n cur_gc.add_config(all_contacts(cur_gc, self))\n # contactgroups\n cur_gc.add_config(all_contact_groups(cur_gc, self))\n # hostgroups\n cur_gc.add_config(all_host_groups(cur_gc, self))\n # hosts\n cur_gc.add_config(all_hosts(cur_gc, self))\n # services\n cur_gc.add_config(all_services(cur_gc, self))\n # device dir\n cur_gc.add_config_dir(config_dir(\"device\", cur_gc, self))\n # host_dependencies\n cur_gc.add_config(all_host_dependencies(cur_gc, self))\n end_time = time.time()\n cur_gc.log(\"created host_configs in {}\".format(logging_tools.get_diff_time_str(end_time - start_time)))\n\n def _get_mon_ext_hosts(self):\n return {cur_ext.pk: cur_ext for cur_ext in mon_ext_host.objects.all()}\n\n def _check_image_maps(self):\n min_width, max_width, min_height, max_height = (16, 64, 16, 64)\n all_image_stuff = self._get_mon_ext_hosts()\n self.log(\"Found {}\".format(logging_tools.get_plural(\"ext_host entry\", len(all_image_stuff.keys()))))\n logos_dir = \"{}/share/images/logos\".format(self.gc[\"MD_BASEDIR\"])\n base_names = set()\n if os.path.isdir(logos_dir):\n logo_files = os.listdir(logos_dir)\n for log_line in [entry.split(\".\")[0] for entry in logo_files]:\n if log_line not in base_names:\n if \"{}.png\".format(log_line) in logo_files and \"{}.gd2\".format(log_line) in logo_files:\n base_names.add(log_line)\n name_case_lut = {}\n if 
base_names:\n stat, out = commands.getstatusoutput(\"file {}\".format(\" \".join([os.path.join(logos_dir, \"{}.png\".format(entry)) for entry in base_names])))\n if stat:\n self.log(\"error getting filetype of {}\".format(logging_tools.get_plural(\"logo\", len(base_names))), logging_tools.LOG_LEVEL_ERROR)\n else:\n base_names = set()\n for logo_name, logo_data in [\n (os.path.basename(y[0].strip()), [z.strip() for z in y[1].split(\",\") if z.strip()]) for y in [\n line.strip().split(\":\", 1) for line in out.split(\"\\n\")] if len(y) == 2]:\n if len(logo_data) == 4:\n width, height = [int(value.strip()) for value in logo_data[1].split(\"x\")]\n if min_width <= width and width <= max_width and min_height <= height and height <= max_height:\n base_name = logo_name[:-4]\n base_names.add(base_name)\n name_case_lut[base_name.lower()] = base_name\n else:\n self.log(\n \"width or height ({:d} x {:d}) not in range ([{:d} - {:d}] x [{:d} - {:d}])\".format(\n width,\n height,\n min_width,\n max_width,\n min_height,\n max_height,\n )\n )\n name_lut = {eh.name.lower(): pk for pk, eh in all_image_stuff.iteritems()}\n all_images_present = set([eh.name for eh in all_image_stuff.values()])\n all_images_present_lower = set([name.lower() for name in all_images_present])\n base_names_lower = set([name.lower() for name in base_names])\n new_images = base_names_lower - all_images_present_lower\n del_images = all_images_present_lower - base_names_lower\n present_images = base_names_lower & all_images_present_lower\n for new_image in new_images:\n mon_ext_host(\n name=new_image,\n icon_image=\"{}.png\".format(new_image),\n statusmap_image=\"%s.gd2\" % (new_image)\n ).save()\n for p_i in present_images:\n img_stuff = all_image_stuff[name_lut[p_i]]\n # check for wrong case\n if img_stuff.icon_image != \"{}.png\".format(name_case_lut[img_stuff.name]):\n # correct case\n img_stuff.icon_image = \"{}.png\".format(name_case_lut[img_stuff.name])\n img_stuff.statusmap_image = \"{}.gd2\".format(name_case_lut[img_stuff.name])\n img_stuff.save()\n if del_images:\n mon_ext_host.objects.filter(Q(name__in=del_images)).delete()\n self.log(\"Inserted {}, deleted {}\".format(\n logging_tools.get_plural(\"new ext_host_entry\", len(new_images)),\n logging_tools.get_plural(\"ext_host_entry\", len(del_images))))\n\n def _create_single_host_config(\n self,\n _bc,\n cur_gc,\n host,\n d_map,\n my_net_idxs,\n all_access,\n contact_group_dict,\n ng_ext_hosts,\n all_configs,\n nagvis_maps,\n mccs_dict,\n ):\n # optimize\n self.__safe_cc_name = global_config[\"SAFE_CC_NAME\"]\n start_time = time.time()\n # set some vars\n host_nc = cur_gc[\"device.d\"]\n # we always check for passive checks\n # if cur_gc.master:\n # check_for_passive_checks = True\n # else:\n # check_for_passive_checks = False\n # checks_are_active = True\n # if check_for_passive_checks:\n # if host.monitor_server_id and host.monitor_server_id != cur_gc.monitor_server.pk:\n # checks_are_active = False\n # check if host is actively checked via current server\n if cur_gc.master:\n if host.monitor_server_id and host.monitor_server_id != cur_gc.monitor_server.pk:\n host_is_actively_checked = False\n else:\n host_is_actively_checked = True\n else:\n host_is_actively_checked = True\n # h_filter &= (Q(monitor_server=cur_gc.monitor_server) | Q(monitor_server=None))\n self.__cached_mach_name = host.full_name\n # cache logs\n _write_logs = False\n self.mach_log(\"-------- {} ---------\".format(\"master\" if cur_gc.master else \"slave {}\".format(cur_gc.slave_name)), 
single_build=_bc.single_build)\n glob_log_str = \"device {:<48s}{} ({}), d={:>3s}\".format(\n host.full_name[:48],\n \"*\" if len(host.name) > 48 else \" \",\n \"a\" if host_is_actively_checked else \"p\",\n \"{:3d}\".format(d_map[host.pk]) if d_map.get(host.pk) >= 0 else \"---\",\n )\n self.mach_log(\"Starting build of config\", logging_tools.LOG_LEVEL_OK, host.full_name)\n _counter = SimpleCounter()\n if host.valid_ips:\n net_devices = host.valid_ips\n elif host.invalid_ips:\n self.mach_log(\n \"Device {} has no valid netdevices associated, using invalid ones...\".format(\n host.full_name\n ),\n logging_tools.LOG_LEVEL_WARN\n )\n net_devices = host.invalid_ips\n else:\n self.mach_log(\n \"Device {} has no netdevices associated, skipping...\".format(\n host.full_name\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n _counter.error()\n net_devices = {}\n use_host_deps, use_service_deps = (\n self.gc[\"USE_HOST_DEPENDENCIES\"],\n self.gc[\"USE_SERVICE_DEPENDENCIES\"],\n )\n if net_devices:\n # print mni_str_s, mni_str_d, dev_str_s, dev_str_d\n # get correct netdevice for host\n if host.name == self.gc[\"SERVER_SHORT_NAME\"]:\n valid_ips, traces = ([(net_ip(ip=\"127.0.0.1\"), \"localdomain\")], [(1, 0, [host.pk])])\n else:\n valid_ips, traces = self._get_target_ip_info(_bc, my_net_idxs, net_devices, _bc.get_host(host.pk))\n if not valid_ips:\n _counter.error()\n act_def_dev = _bc.dev_templates[host.mon_device_templ_id or 0]\n if _bc.single_build:\n if not valid_ips:\n valid_ips = [(net_ip(ip=\"0.0.0.0\"), host.full_name)]\n self.mach_log(\"no ips found using {} as dummy IP\".format(str(valid_ips)))\n else:\n if (len(valid_ips) > 0) != host.reachable:\n self.log(\n \"reachable flag {} for host {} differs from valid_ips {}\".format(\n str(host.reachable),\n unicode(host),\n str(valid_ips),\n ),\n logging_tools.LOG_LEVEL_CRITICAL,\n )\n if valid_ips and act_def_dev:\n host.domain_names = [cur_ip[1] for cur_ip in valid_ips if cur_ip[1]]\n valid_ip = valid_ips[0][0]\n host.valid_ip = valid_ip\n self.mach_log(\n \"Found {} for host {} : {}, mon_resolve_name is {}, using {}\".format(\n logging_tools.get_plural(\"target ip\", len(valid_ips)),\n host.full_name,\n \", \".join([\"{}{}\".format(cur_ip, \" (.{})\".format(dom_name) if dom_name else \"\") for cur_ip, dom_name in valid_ips]),\n str(host.mon_resolve_name),\n unicode(host.valid_ip)\n )\n )\n if act_def_dev.mon_service_templ_id not in _bc.serv_templates:\n self.log(\"Default service_template not found in service_templates\", logging_tools.LOG_LEVEL_WARN)\n else:\n act_def_serv = _bc.serv_templates[act_def_dev.mon_service_templ_id]\n # tricky part: check the actual service_template for the various services\n self.mach_log(\n \"Using default device_template '{}' and service_template '{}' for host {}\".format(\n act_def_dev.name,\n act_def_serv.name,\n host.full_name,\n )\n )\n # get device variables\n dev_variables, var_info = _bc.get_vars(host)\n # store\n host.dev_variables = dev_variables\n self.mach_log(\n \"device has {} ({})\".format(\n logging_tools.get_plural(\"device_variable\", len(host.dev_variables.keys())),\n \", \".join([\"{}: {:d}\".format(key, var_info[key]) for key in [\"d\", \"g\", \"c\"]]),\n )\n )\n # now we have the device- and service template\n host_config_list = []\n act_host = mon_config(\"host\", host.full_name)\n host_config_list.append(act_host)\n act_host[\"host_name\"] = host.full_name\n act_host[\"display_name\"] = host.full_name\n # action url\n if self.gc[\"ENABLE_COLLECTD\"]:\n act_host[\"process_perf_data\"] = 1 if 
host.enable_perfdata else 0\n # always set action_url\n act_host[\"action_url\"] = reverse(\"device:device_info\", kwargs={\"device_pk\": host.pk, \"mode\": \"rrd\"})\n act_host[\"_device_pk\"] = host.pk\n if global_config[\"USE_ONLY_ALIAS_FOR_ALIAS\"]:\n act_host[\"alias\"] = host.alias or host.name\n else:\n act_host[\"alias\"] = sorted(\n list(\n set(\n [\n entry for entry in [\n host.alias, host.name, host.full_name\n ] + [\n u\"{}.{}\".format(\n host.name, dom_name\n ) for dom_name in host.domain_names\n ] if entry.strip()\n ]\n )\n )\n )\n if host.mon_resolve_name:\n act_host[\"address\"] = host.valid_ip.ip\n else:\n v_ip = host.valid_ip\n if v_ip.alias and v_ip.alias_excl:\n act_host[\"address\"] = \"{}.{}\".format(v_ip.alias, v_ip.domain_tree_node.full_name)\n else:\n act_host[\"address\"] = host.full_name\n if traces and len(traces[0][2]) > 1:\n act_host[\"possible_parents\"] = traces\n act_host[\"retain_status_information\"] = 1 if self.gc[\"RETAIN_HOST_STATUS\"] else 0\n act_host[\"max_check_attempts\"] = act_def_dev.max_attempts\n act_host[\"retry_interval\"] = act_def_dev.retry_interval\n act_host[\"check_interval\"] = act_def_dev.check_interval\n act_host[\"notification_interval\"] = act_def_dev.ninterval\n act_host[\"_uuid\"] = host.uuid\n act_host[\"check_period\"] = cur_gc[\"timeperiod\"][act_def_dev.mon_period_id].name\n act_host[\"notification_period\"] = cur_gc[\"timeperiod\"][act_def_dev.not_period_id].name\n # removed because this line screws active / passive checks\n # act_host[\"checks_enabled\"] = 1\n # only allow active checks if this the active monitor master, very important for anovis\n act_host[\"active_checks_enabled\"] = 1 if host_is_actively_checked else 0\n # we always allow passive checks\n act_host[\"passive_checks_enabled\"] = 1\n # act_host[\"{}_checks_enabled\".format(\"active\" if checks_are_active else \"passive\")] = 1\n # act_host[\"{}_checks_enabled\".format(\"passive\" if checks_are_active else \"active\")] = 0\n act_host[\"flap_detection_enabled\"] = 1 if (host.flap_detection_enabled and act_def_dev.flap_detection_enabled) else 0\n if host.flap_detection_enabled and act_def_dev.flap_detection_enabled:\n # add flap fields\n act_host[\"low_flap_threshold\"] = act_def_dev.low_flap_threshold\n act_host[\"high_flap_threshold\"] = act_def_dev.high_flap_threshold\n n_field = []\n for short, f_name in [(\"o\", \"up\"), (\"d\", \"down\"), (\"u\", \"unreachable\")]:\n if getattr(act_def_dev, \"flap_detect_{}\".format(f_name)):\n n_field.append(short)\n if not n_field:\n n_field.append(\"o\")\n act_host[\"flap_detection_options\"] = n_field\n # if checks_are_active and not cur_gc.master:\n # # trace changes\n # always enable obsess_over_host\n if True:\n act_host[\"obsess_over_host\"] = 1\n host_groups = set(contact_group_dict.get(host.full_name, []))\n act_host[\"contact_groups\"] = list(host_groups) if host_groups else self.gc[\"NONE_CONTACT_GROUP\"]\n c_list = [entry for entry in all_access] + _bc.get_device_group_users(host.device_group_id)\n if c_list:\n act_host[\"contacts\"] = c_list\n self.mach_log(\"contact groups for host: {}\".format(\n \", \".join(sorted(host_groups)) or \"none\"))\n if host.monitor_checks or _bc.single_build:\n if host.valid_ip.ip == \"0.0.0.0\":\n self.mach_log(\"IP address is '{}', host is assumed to be always up\".format(unicode(host.valid_ip)))\n act_host[\"check_command\"] = \"check-host-ok\"\n else:\n if act_def_dev.host_check_command:\n if host_is_actively_checked:\n act_host[\"check_command\"] = 
act_def_dev.host_check_command.name\n else:\n self.mach_log(\"disabling host check_command (passive)\")\n else:\n self.log(\"dev_template has no host_check_command set\", logging_tools.LOG_LEVEL_ERROR)\n # check for nagvis map\n if host.automap_root_nagvis and cur_gc.master:\n # with or without .cfg ? full path ?\n act_host[\"_nagvis_map\"] = \"{}\".format(host.full_name.encode(\"ascii\", errors=\"ignore\"))\n map_file = os.path.join(self.gc[\"NAGVIS_DIR\"], \"etc\", \"maps\", \"{}.cfg\".format(host.full_name.encode(\"ascii\", errors=\"ignore\")))\n map_dict = {\n \"sources\": \"automap\",\n \"alias\": host.comment or host.full_name,\n \"parent_map\": \"\",\n \"iconset\": \"std_big\",\n \"child_layers\": 10,\n \"backend_id\": \"live_1\",\n \"root\": host.full_name,\n \"label_show\": \"1\",\n \"label_border\": \"transparent\",\n \"render_mode\": \"directed\",\n \"rankdir\": \"TB\",\n \"width\": 800,\n \"height\": 600,\n \"header_menu\": True,\n \"hover_menu\": True,\n \"context_menu\": True,\n # parent map\n \"parent_map\": host.device_group.name.replace(\" \", \"_\"),\n # special flag for anovis\n \"use_childs_for_overview_icon\": False,\n }\n try:\n map_h = codecs.open(map_file, \"w\", \"utf-8\")\n except:\n self.mach_log(\n u\"cannot open {}: {}\".format(\n map_file,\n process_tools.get_except_info()\n ),\n logging_tools.LOG_LEVEL_CRITICAL\n )\n else:\n nagvis_maps.add(map_file)\n map_h.write(\"define global {\\n\")\n for key in sorted(map_dict.iterkeys()):\n value = map_dict[key]\n if type(value) == bool:\n value = \"1\" if value else \"0\"\n elif type(value) in [int, long]:\n value = \"%d\" % (value)\n map_h.write(u\" {}={}\\n\".format(key, value))\n map_h.write(\"}\\n\")\n map_h.close()\n # check for notification options\n not_a = []\n for what, shortcut in [\n (\"nrecovery\", \"r\"),\n (\"ndown\", \"d\"),\n (\"nunreachable\", \"u\"),\n (\"nflapping\", \"f\"),\n (\"nplanned_downtime\", \"s\")\n ]:\n if getattr(act_def_dev, what):\n not_a.append(shortcut)\n if not not_a:\n not_a.append(\"n\")\n act_host[\"notification_options\"] = not_a\n # check for hostextinfo\n if host.mon_ext_host_id and host.mon_ext_host_id in ng_ext_hosts:\n if self.gc[\"MD_TYPE\"] == \"icinga\":\n # handle for nagios 2, icinga\n # act_hostext_info = mon_config(\"hostextinfo\", host.full_name)\n # act_hostext_info[\"host_name\"] = host.full_name\n for key in [\"icon_image\", \"statusmap_image\"]:\n act_host[key] = getattr(ng_ext_hosts[host.mon_ext_host_id], key)\n # FIXME, not working for nagios2\n # host_config_list.append(act_hostext_info)\n # hostext_nc[host.full_name] = act_hostext_info\n else:\n self.log(\n \"don't know how to handle hostextinfo for {}_version {:d}\".format(\n self.gc[\"MD_TYPE\"],\n self.gc[\"MD_VERSION\"]\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n # clear host from servicegroups\n cur_gc[\"servicegroup\"].clear_host(host.full_name)\n # get check_commands and templates\n conf_names = set(all_configs.get(host.full_name, []))\n # cluster config names\n cconf_names = set([_sc.mon_check_command.name for _sc in _bc.get_cluster(\"sc\", host.pk)])\n # build lut\n conf_names = sorted(\n [\n cur_c[\"command_name\"] for cur_c in cur_gc[\"command\"].values() if not cur_c.is_event_handler and (\n (\n (cur_c.get_config() in conf_names) and (host.pk not in cur_c.exclude_devices)\n ) or cur_c[\"command_name\"] in cconf_names\n )\n ]\n )\n # list of already used checks\n used_checks = set()\n # print \"*\", conf_names\n # print _bc.get_vars(host)\n for conf_name in conf_names:\n self._add_config(\n host, 
act_host, conf_name, used_checks, _counter, _bc,\n mccs_dict, cur_gc, act_def_serv, host_groups, host_is_actively_checked, host_config_list\n )\n # add cluster checks\n mhc_checks = _bc.get_cluster(\"hc\", host.pk)\n if len(mhc_checks):\n self.mach_log(\"adding {}\".format(logging_tools.get_plural(\"host_cluster check\", len(mhc_checks))))\n for mhc_check in mhc_checks:\n dev_names = [_bc.get_host(cur_dev).full_name for cur_dev in mhc_check.devices_list]\n if len(dev_names):\n s_check = cur_gc[\"command\"][\"check_host_cluster\"]\n serv_temp = _bc.serv_templates[mhc_check.mon_service_templ_id]\n serv_cgs = list(set(serv_temp.contact_groups).intersection(host_groups))\n sub_list = self.get_service(\n host,\n act_host,\n s_check,\n [special_commands.ArgTemplate(\n s_check,\n self._get_cc_name(\"{}{}{}\".format(s_check.get_description(), _bc.join_char, mhc_check.description)),\n arg1=mhc_check.description,\n # arg2=\"@{:d}:\".format(mhc_check.warn_value),\n # arg3=\"@{:d}:\".format(mhc_check.error_value),\n arg2=mhc_check.warn_value,\n arg3=mhc_check.error_value,\n arg4=\",\".join([\"$HOSTSTATEID:{}$\".format(_dev_name) for _dev_name in dev_names]),\n arg5=\",\".join(dev_names),\n )\n ],\n act_def_serv,\n serv_cgs,\n host_is_actively_checked,\n serv_temp,\n cur_gc)\n host_config_list.extend(sub_list)\n _counter.ok(len(sub_list))\n else:\n self.mach_log(\"ignoring empty host_cluster\", logging_tools.LOG_LEVEL_WARN)\n # add cluster service checks\n msc_checks = _bc.get_cluster(\"sc\", host.pk)\n if len(msc_checks):\n self.mach_log(\"adding {}\".format(logging_tools.get_plural(\"service_cluster check\", len(msc_checks))))\n for msc_check in msc_checks:\n if msc_check.mon_check_command.name in cur_gc[\"command\"]:\n c_com = cur_gc[\"command\"][msc_check.mon_check_command.name]\n dev_names = [(_bc.get_host(cur_dev).full_name, c_com.get_description()) for cur_dev in msc_check.devices_list]\n if len(dev_names):\n s_check = cur_gc[\"command\"][\"check_service_cluster\"]\n serv_temp = _bc.serv_templates[msc_check.mon_service_templ_id]\n serv_cgs = list(set(serv_temp.contact_groups).intersection(host_groups))\n sub_list = self.get_service(\n host,\n act_host,\n s_check,\n [\n special_commands.ArgTemplate(\n s_check,\n self._get_cc_name(\"{} / {}\".format(s_check.get_description(), c_com.get_description())),\n arg1=msc_check.description,\n # arg2=\"@{:d}:\".format(msc_check.warn_value),\n # arg3=\"@{:d}:\".format(msc_check.error_value),\n arg2=msc_check.warn_value,\n arg3=msc_check.error_value,\n arg4=\",\".join(\n [\n \"$SERVICESTATEID:{}:{}$\".format(\n _dev_name, _srv_name\n ) for _dev_name, _srv_name in dev_names\n ]\n ),\n arg5=\",\".join(\n [\n \"{}{}{}\".format(\n _dev_name, _bc.join_char, _srv_name\n ).replace(\",\", \" \") for _dev_name, _srv_name in dev_names\n ]\n ),\n )\n ],\n act_def_serv,\n serv_cgs,\n host_is_actively_checked,\n serv_temp,\n cur_gc,\n )\n host_config_list.extend(sub_list)\n _counter.ok(len(sub_list))\n else:\n self.mach_log(\"ignoring empty service_cluster\", logging_tools.LOG_LEVEL_WARN)\n else:\n self.mach_log(\n \"check command '{}' not present in list of commands {}\".format(\n msc_check.mon_check_command.name,\n \", \".join(sorted(cur_gc[\"command\"].keys()))\n ),\n logging_tools.LOG_LEVEL_ERROR,\n )\n # add host dependencies\n if use_host_deps:\n for h_dep in _bc.get_dependencies(\"hd\", host.pk):\n # check reachability\n _unreachable = [\n _bc.get_host(_dev_pk) for _dev_pk in h_dep.devices_list + h_dep.master_list if not _bc.get_host(_dev_pk).reachable\n ]\n if 
_unreachable:\n self.mach_log(\n \"cannot create host dependency, {} unreachable: {}\".format(\n logging_tools.get_plural(\"device\", len(_unreachable)),\n \", \".join(sorted([unicode(_dev) for _dev in _unreachable])),\n ),\n logging_tools.LOG_LEVEL_ERROR,\n )\n else:\n act_host_dep = mon_config(\"hostdependency\", \"\")\n _list = [_bc.get_host(dev_pk).full_name for dev_pk in h_dep.devices_list]\n _dep_list = [_bc.get_host(dev_pk).full_name for dev_pk in h_dep.master_list]\n if _list and _dep_list:\n if set(_list) & set(_dep_list):\n self.mach_log(\n \"host_name and dependent_host_name share some hosts: {}\".format(\n \", \".join(sorted(list(set(_list) & set(_dep_list))))\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n else:\n act_host_dep[\"host_name\"] = _list\n act_host_dep[\"dependent_host_name\"] = _dep_list\n h_dep.feed_config(act_host_dep)\n host_config_list.append(act_host_dep)\n else:\n self.mach_log(\n \"empty list or dependency_list for hostdependency.(host_name|dependency_name)\",\n logging_tools.LOG_LEVEL_ERROR\n )\n # add service dependencies\n if use_service_deps:\n for s_dep in _bc.get_dependencies(\"sd\", host.pk):\n act_service_dep = mon_config(\"servicedependency\", \"\")\n if s_dep.mon_service_cluster_id:\n # check reachability\n _unreachable = [_bc.get_host(_dev_pk) for _dev_pk in s_dep.master_list if not _bc.get_host(_dev_pk).reachable]\n if _unreachable:\n self.mach_log(\n \"cannot create host dependency, {} unreachable: {}\".format(\n logging_tools.get_plural(\"device\", len(_unreachable)),\n \", \".join(sorted([unicode(_dev) for _dev in _unreachable])),\n ),\n logging_tools.LOG_LEVEL_ERROR,\n )\n else:\n all_ok = True\n for d_host in s_dep.master_list:\n all_ok &= self._check_for_config(\n \"child\",\n all_configs,\n _bc.mcc_lut,\n _bc.mcc_lut_2,\n _bc.get_host(d_host),\n s_dep.dependent_mon_check_command_id\n )\n if all_ok:\n act_service_dep[\"dependent_service_description\"] = host_service_id_util.create_host_service_description(\n dev_pk,\n _bc.mcc_lut_3[s_dep.dependent_mon_check_command_id],\n _bc.mcc_lut[s_dep.dependent_mon_check_command_id][1],\n )\n sc_check = cur_gc[\"command\"][\"check_service_cluster\"]\n # FIXME, my_co.mcc_lut[...][1] should be mapped to check_command().get_description()\n act_service_dep[\"service_description\"] = \"{} / {}\".format(\n sc_check.get_description(),\n _bc.mcc_lut[s_dep.mon_service_cluster.mon_check_command_id][1]\n )\n act_service_dep[\"host_name\"] = _bc.get_host(s_dep.mon_service_cluster.main_device_id).full_name\n act_service_dep[\"dependent_host_name\"] = [_bc.get_host(dev_pk).full_name for cur_dev in s_dep.master_list]\n s_dep.feed_config(act_service_dep)\n host_config_list.append(act_service_dep)\n else:\n self.mach_log(\"cannot add cluster_service_dependency\", logging_tools.LOG_LEVEL_ERROR)\n else:\n # check reachability\n _unreachable = [\n _bc.get_host(_dev_pk) for _dev_pk in s_dep.master_list + s_dep.devices_list if not _bc.get_host(_dev_pk).reachable\n ]\n if _unreachable:\n self.mach_log(\n \"cannot create host dependency, {} unrechable: {}\".format(\n logging_tools.get_plural(\"device\", len(_unreachable)),\n \", \".join(sorted([unicode(_dev) for _dev in _unreachable])),\n ),\n logging_tools.LOG_LEVEL_ERROR,\n )\n else:\n all_ok = True\n for p_host in s_dep.devices_list:\n all_ok &= self._check_for_config(\n \"parent\",\n all_configs,\n _bc.mcc_lut,\n _bc.mcc_lut_2,\n _bc.get_host(p_host),\n s_dep.mon_check_command_id\n )\n for d_host in s_dep.master_list:\n all_ok &= self._check_for_config(\n \"child\",\n 
all_configs,\n _bc.mcc_lut,\n _bc.mcc_lut_2,\n _bc.get_host(d_host),\n s_dep.dependent_mon_check_command_id\n )\n if all_ok:\n # FIXME, TODO, must unroll loops\n # act_service_dep[\"dependent_service_description\"] = _bc.mcc_lut[s_dep.dependent_mon_check_command_id][1]\n act_service_dep[\"dependent_service_description\"] = [\n host_service_id_util.create_host_service_description(\n dev_pk,\n _bc.mcc_lut_3[s_dep.dependent_mon_check_command_id],\n _bc.mcc_lut[s_dep.dependent_mon_check_command_id][1],\n ) for dev_pk in s_dep.master_list\n ]\n # act_service_dep[\"service_description\"] = _bc.mcc_lut[s_dep.mon_check_command_id][1]\n act_service_dep[\"service_description\"] = [\n host_service_id_util.create_host_service_description(\n dev_pk,\n _bc.mcc_lut_3[s_dep.mon_check_command_id],\n _bc.mcc_lut[s_dep.mon_check_command_id][1],\n ) for dev_pk in s_dep.devices_list\n ]\n act_service_dep[\"host_name\"] = [_bc.get_host(dev_pk).full_name for dev_pk in s_dep.devices_list]\n act_service_dep[\"dependent_host_name\"] = [_bc.get_host(dev_pk).full_name for dev_pk in s_dep.master_list]\n s_dep.feed_config(act_service_dep)\n host_config_list.append(act_service_dep)\n else:\n self.mach_log(\"cannot add service_dependency\", logging_tools.LOG_LEVEL_ERROR)\n host_nc.add_device(host_config_list, host)\n else:\n self.mach_log(\"Host {} is disabled\".format(host.full_name))\n else:\n self.mach_log(\"No valid IPs found or no default_device_template found\", logging_tools.LOG_LEVEL_ERROR)\n info_str = \"{:3d} ok, {:3d} w, {:3d} e ({:3d} {}) in {}\".format(\n _counter.num_ok,\n _counter.num_warning,\n _counter.num_error,\n self.get_num_mach_logs(),\n \"l \" if _counter.num_error == 0 else \"lw\",\n logging_tools.get_diff_time_str(time.time() - start_time))\n glob_log_str = \"{}, {}\".format(glob_log_str, info_str)\n self.log(glob_log_str)\n self.mach_log(info_str)\n if _counter.num_error > 0 or self.gc[\"DEBUG\"]:\n _write_logs = True\n self.close_mach_log(write_logs=_write_logs)\n\n def _add_config(\n self, host, act_host, conf_name, used_checks, _counter, _bc, mccs_dict, cur_gc,\n act_def_serv, host_groups, host_is_actively_checked, host_config_list\n ):\n s_check = cur_gc[\"command\"][conf_name]\n if s_check.name in used_checks:\n self.mach_log(\n \"{} ({}) already used, ignoring .... 
(CHECK CONFIG !)\".format(\n s_check.get_description(),\n s_check[\"command_name\"],\n ),\n logging_tools.LOG_LEVEL_WARN\n )\n _counter.warning()\n else:\n used_checks.add(s_check.name)\n # s_check: instance of check_command\n if s_check.mccs_id:\n # map to mccs (mon_check_command_special instance from backbone)\n mccs = mccs_dict[s_check.mccs_id] #\n # store name of mccs (for parenting)\n mccs_name = mccs.name\n if mccs.parent_id:\n # to get the correct command_line\n com_mccs = mccs\n # link to parent\n mccs = mccs_dict[mccs.parent_id]\n else:\n com_mccs = mccs\n # create lut entry to rewrite command name to mccs\n _rewrite_lut = {\"check_command\": mccs.md_name}\n sc_array = []\n try:\n cur_special = special_commands.SPECIAL_DICT[\"special_{}\".format(mccs.name)](\n self.mach_log,\n self,\n # get mon_check_command (we need arg_ll)\n s_check=cur_gc[\"command\"][com_mccs.md_name],\n parent_check=s_check,\n host=host,\n global_config=self.gc,\n build_cache=_bc,\n cache_mode=cur_gc.cache_mode,\n )\n except:\n self.log(\n \"unable to initialize special '{}': {}\".format(\n mccs.name,\n process_tools.get_except_info()\n ),\n logging_tools.LOG_LEVEL_CRITICAL\n )\n else:\n # calling handle to return a list of checks with format\n # [(description, [ARG1, ARG2, ARG3, ...]), (...)]\n try:\n if mccs_name != mccs.name:\n # for meta specials\n sc_array = cur_special(instance=mccs_name)\n else:\n sc_array = cur_special()\n except:\n exc_info = process_tools.exception_info()\n self.log(\"error calling special {}:\".format(mccs.name),\n logging_tools.LOG_LEVEL_CRITICAL)\n for line in exc_info.log_lines:\n self.log(\" - {}\".format(line), logging_tools.LOG_LEVEL_CRITICAL)\n sc_array = []\n finally:\n cur_special.cleanup()\n if cur_special.Meta.meta and sc_array and mccs_name == mccs.name:\n # dive in subcommands, for instance 'all SNMP checks'\n # check for configs not really configured\n _dead_coms = [_entry for _entry in sc_array if not hasattr(mccs_dict[_entry], \"check_command_name\")]\n if _dead_coms:\n self.log(\"unconfigured checks: {}\".format(\", \".join(sorted(_dead_coms))), logging_tools.LOG_LEVEL_CRITICAL)\n _com_names = [mccs_dict[_entry].check_command_name for _entry in sc_array if _entry not in _dead_coms]\n for _com_name in _com_names:\n self._add_config(\n host, act_host, _com_name, used_checks, _counter, _bc,\n mccs_dict, cur_gc, act_def_serv, host_groups, host_is_actively_checked, host_config_list\n )\n sc_array = []\n else:\n # no special command, empty rewrite_lut, simple templating\n _rewrite_lut = {}\n sc_array = [special_commands.ArgTemplate(s_check, s_check.get_description(), check_active=False if not s_check.is_active else None)]\n # contact_group is only written if contact_group is responsible for the host and the service_template\n if sc_array:\n serv_temp = _bc.serv_templates[s_check.get_template(act_def_serv.name)]\n serv_cgs = list(set(serv_temp.contact_groups).intersection(host_groups))\n sc_list = self.get_service(\n host, act_host, s_check, sc_array, act_def_serv, serv_cgs, host_is_actively_checked, serv_temp, cur_gc, **_rewrite_lut\n )\n host_config_list.extend(sc_list)\n _counter.ok(len(sc_list))\n\n def _get_cc_name(self, in_str):\n if self.__safe_cc_name:\n return build_safe_name(in_str)\n else:\n return in_str\n\n def _check_for_config(self, c_type, all_configs, mcc_lut, mcc_lut_2, device, moncc_id):\n # configure mon check commands\n # import pprint\n # pprint.pprint(all_configs.get(device.full_name, []))\n ccoms = sum([mcc_lut_2.get(key, []) for key in 
all_configs.get(device.full_name, [])], [])\n # needed checkcommand\n nccom = mcc_lut[moncc_id]\n if nccom[0] in ccoms:\n return True\n else:\n self.mach_log(\n \"Checkcommand '{}' config ({}) not found in configs ({}) for {} '{}'\".format(\n nccom[0],\n nccom[2],\n \", \".join(sorted(ccoms)) or \"none defined\",\n c_type,\n unicode(device),\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n return False\n\n def _get_number_of_hosts(self, cur_gc, hosts):\n if hosts:\n h_filter = Q(name__in=hosts)\n else:\n h_filter = Q()\n # add master/slave related filters\n if cur_gc.master:\n pass\n # h_filter &= (Q(monitor_server=cur_gc.monitor_server) | Q(monitor_server=None))\n else:\n h_filter &= Q(monitor_server=cur_gc.monitor_server)\n h_filter &= Q(enabled=True) & Q(device_group__enabled=True)\n return device.objects.exclude(Q(device_type__identifier=\"MD\")).filter(h_filter).count()\n\n def _create_host_config_files(self, _bc, cur_gc, d_map, hdep_from_topo):\n \"\"\"\n d_map : distance map\n \"\"\"\n start_time = time.time()\n # get contacts with access to all devices\n _uo = user.objects # @UndefinedVariable\n all_access = list(\n [\n cur_u.login for cur_u in _uo.filter(\n Q(active=True) & Q(group__active=True) & Q(mon_contact__pk__gt=0)\n ) if cur_u.has_perm(\"backbone.device.all_devices\")\n ]\n )\n self.log(\"users with access to all devices: {}\".format(\", \".join(sorted(all_access))))\n server_idxs = [cur_gc.monitor_server.pk]\n # get netip-idxs of own host\n my_net_idxs = set(netdevice.objects.filter(Q(device__in=server_idxs)).filter(Q(enabled=True)).values_list(\"pk\", flat=True))\n # get ext_hosts stuff\n ng_ext_hosts = self._get_mon_ext_hosts()\n # check_hosts\n if _bc.host_list:\n # not beautiful but working\n pk_list = []\n for full_h_name in _bc.host_list:\n try:\n if full_h_name.count(\".\"):\n found_dev = device.objects.get(Q(name=full_h_name.split(\".\")[0]) & Q(domain_tree_node__full_name=full_h_name.split(\".\", 1)[1]))\n else:\n found_dev = device.objects.get(Q(name=full_h_name))\n except device.DoesNotExist:\n pass\n else:\n pk_list.append(found_dev.pk)\n h_filter = Q(pk__in=pk_list)\n else:\n h_filter = Q()\n # filter for all configs, wider than the h_filter\n ac_filter = Q()\n # add master/slave related filters\n if cur_gc.master:\n # need all devices for master\n pass\n else:\n h_filter &= Q(monitor_server=cur_gc.monitor_server)\n ac_filter &= Q(monitor_server=cur_gc.monitor_server)\n if not _bc.single_build:\n h_filter &= Q(enabled=True) & Q(device_group__enabled=True)\n ac_filter &= Q(enabled=True) & Q(device_group__enabled=True)\n # dictionary with all parent / slave relations\n ps_dict = {}\n for ps_config in config.objects.exclude(Q(parent_config=None)).select_related(\"parent_config\"): # @UndefinedVariable\n ps_dict[ps_config.name] = ps_config.parent_config.name\n _bc.set_host_list(device.objects.exclude(Q(device_type__identifier='MD')).filter(h_filter).values_list(\"pk\", flat=True))\n meta_devices = {\n md.device_group.pk: md for md in device.objects.filter(\n Q(device_type__identifier='MD')\n ).prefetch_related(\n \"device_config_set\",\n \"device_config_set__config\"\n ).select_related(\"device_group\")}\n all_configs = {}\n for cur_dev in device.objects.filter(ac_filter).select_related(\"domain_tree_node\").prefetch_related(\"device_config_set\", \"device_config_set__config\"):\n loc_config = [cur_dc.config.name for cur_dc in cur_dev.device_config_set.all()]\n if cur_dev.device_group_id in meta_devices:\n loc_config.extend([cur_dc.config.name for cur_dc in 
meta_devices[cur_dev.device_group_id].device_config_set.all()])\n # expand with parent\n while True:\n new_confs = set([ps_dict[cur_name] for cur_name in loc_config if cur_name in ps_dict]) - set(loc_config)\n if new_confs:\n loc_config.extend(list(new_confs))\n else:\n break\n all_configs[cur_dev.full_name] = loc_config\n # get config variables\n first_contactgroup_name = cur_gc[\"contactgroup\"][cur_gc[\"contactgroup\"].keys()[0]].name\n contact_group_dict = {}\n # get contact groups\n if _bc.host_list:\n host_info_str = logging_tools.get_plural(\"host\", len(_bc.host_list))\n ct_groups = mon_contactgroup.objects.filter(Q(device_groups__device__name__in=_bc.host_list))\n else:\n host_info_str = \"all\"\n ct_groups = mon_contactgroup.objects.all()\n ct_group = ct_groups.prefetch_related(\"device_groups\", \"device_groups__device\")\n for ct_group in ct_groups:\n if ct_group.pk in cur_gc[\"contactgroup\"]:\n pass # cg_name = cur_gc[\"contactgroup\"][ct_group.pk].name\n else:\n self.log(\n \"contagroup_idx {} for device {} not found, using first from contactgroups ({})\".format(\n unicode(ct_group),\n ct_group.name,\n first_contactgroup_name,\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n # cg_name = first_contactgroup_name\n for g_devg in ct_group.device_groups.all().prefetch_related(\"device_group\", \"device_group__domain_tree_node\"):\n for g_dev in g_devg.device_group.all():\n contact_group_dict.setdefault(g_dev.full_name, []).append(ct_group.name)\n # get valid and invalid network types\n valid_nwt_list = set(network_type.objects.filter(Q(identifier__in=[\"p\", \"o\"])).values_list(\"identifier\", flat=True))\n _invalid_nwt_list = set(network_type.objects.exclude(Q(identifier__in=[\"p\", \"o\"])).values_list(\"identifier\", flat=True))\n for n_i in net_ip.objects.all().select_related(\"network__network_type\", \"netdevice\", \"domain_tree_node\"):\n n_t = n_i.network.network_type.identifier\n n_d = n_i.netdevice.pk\n d_pk = n_i.netdevice.device_id\n if n_i.domain_tree_node_id:\n dom_name = n_i.domain_tree_node.full_name\n else:\n dom_name = \"\"\n # print n_i, n_t, n_d, d_pk, dom_name\n if d_pk in _bc.host_pks:\n cur_host = _bc.get_host(d_pk)\n # populate valid_ips and invalid_ips\n getattr(cur_host, \"valid_ips\" if n_t in valid_nwt_list else \"invalid_ips\").setdefault(n_d, []).append((n_i, dom_name))\n host_nc = cur_gc[\"device.d\"]\n # delete host if already present in host_table\n host_names = []\n for host_pk in _bc.host_pks:\n host = _bc.get_host(host_pk) # , host in check_hosts.iteritems():\n host_names.append((host.full_name, host))\n if host.full_name in host_nc:\n # now very simple\n del host_nc[host.full_name]\n # mccs dict\n mccs_dict = {mccs.pk: mccs for mccs in mon_check_command_special.objects.all()}\n for _value in list(mccs_dict.values()):\n mccs_dict[_value.name] = _value\n for value in cur_gc[\"command\"].values():\n if value.mccs_id:\n # add links back to check_command_names\n mccs_dict[value.mccs_id].check_command_name = value.name\n # caching object\n # build lookup-table\n self.send_pool_message(\"build_info\", \"device_count\", cur_gc.monitor_server.full_name, len(host_names), target=\"syncer\")\n nagvis_maps = set()\n for host_name, host in sorted(host_names):\n if _bc.build_dv:\n _bc.build_dv.count()\n self._create_single_host_config(\n _bc,\n cur_gc,\n host,\n d_map,\n my_net_idxs,\n all_access,\n # all_ms_connections,\n # all_ib_connections,\n # all_dev_relationships,\n contact_group_dict,\n ng_ext_hosts,\n all_configs,\n nagvis_maps,\n mccs_dict,\n )\n 
host_names = host_nc.keys()\n self.log(\"start parenting run\")\n p_dict = {}\n # host_uuids = set([host_val.uuid for host_val in all_hosts_dict.itervalues() if host_val.full_name in host_names])\n _p_ok, _p_failed = (0, 0)\n for host_name in sorted(host_names):\n host = host_nc[host_name][0]\n if \"possible_parents\" in host and not _bc.single_build:\n # parent list\n parent_list = set()\n # check for nagvis_maps\n local_nagvis_maps = []\n p_parents = host[\"possible_parents\"]\n for _p_val, _nd_val, p_list in p_parents:\n # skip first host (is self)\n host_pk = p_list[0]\n for parent_idx in p_list[1:]:\n if parent_idx in d_map:\n if d_map[host_pk] > d_map[parent_idx]:\n parent = _bc.get_host(parent_idx).full_name\n if parent in host_names and parent != host.name:\n parent_list.add(parent)\n # exit inner loop\n break\n else:\n # exit inner loop\n break\n else:\n self.log(\"parent_idx {:d} not in distance map, routing cache too old?\".format(parent_idx), logging_tools.LOG_LEVEL_ERROR)\n if \"_nagvis_map\" not in host:\n # loop again to scan for nagvis_map\n for parent_idx in p_list[1:]:\n if parent_idx in d_map:\n if d_map[host_pk] > d_map[parent_idx]:\n parent = _bc.get_host(parent_idx).full_name\n if parent in host_names and parent != host.name:\n if \"_nagvis_map\" in host_nc[parent][0]:\n local_nagvis_maps.append(host_nc[parent][0][\"_nagvis_map\"])\n else:\n self.log(\"parent_idx {:d} not in distance map, routing cache too old?\".format(parent_idx), logging_tools.LOG_LEVEL_ERROR)\n if \"_nagvis_map\" not in host and local_nagvis_maps:\n host[\"_nagvis_map\"] = local_nagvis_maps[0]\n if parent_list:\n host[\"parents\"] = list(parent_list)\n for cur_parent in parent_list:\n p_dict.setdefault(cur_parent, []).append(host_name)\n _p_ok += 1\n if _bc.debug:\n self.log(\"Setting parent of '{}' to {}\".format(host_name, \", \".join(parent_list)), logging_tools.LOG_LEVEL_OK)\n else:\n _p_failed += 1\n self.log(\"Parenting problem for '{}', {:d} traces found\".format(host_name, len(p_parents)), logging_tools.LOG_LEVEL_WARN)\n if _bc.debug:\n p_parents = host[\"possible_parents\"]\n for t_num, (_p_val, _nd_val, p_list) in enumerate(p_parents):\n host_pk = p_list[0]\n self.log(\n \" trace {:3d}, distance is {:3d}, {}\".format(\n t_num + 1,\n d_map[host_pk],\n logging_tools.get_plural(\"entry\", len(p_list) - 1),\n )\n )\n for parent_idx in p_list[1:]:\n parent = _bc.get_host(parent_idx).full_name\n self.log(\n \" {:>30s} (distance is {:3d}, in config: {})\".format(\n unicode(parent),\n d_map[parent_idx],\n parent in host_names,\n )\n )\n if \"possible_parents\" in host:\n del host[\"possible_parents\"]\n self.log(\"end parenting run, {:d} ok, {:d} failed\".format(_p_ok, _p_failed))\n if cur_gc.master and not _bc.single_build:\n if hdep_from_topo:\n # import pprint\n # pprint.pprint(p_dict)\n for parent, clients in p_dict.iteritems():\n new_hd = mon_config(\"hostdependency\", \"\")\n new_hd[\"dependent_host_name\"] = clients\n new_hd[\"host_name\"] = parent\n new_hd[\"dependency_period\"] = self.mon_host_dep.dependency_period.name\n new_hd[\"execution_failure_criteria\"] = self.mon_host_dep.execution_failure_criteria\n new_hd[\"notification_failure_criteria\"] = self.mon_host_dep.notification_failure_criteria\n new_hd[\"inherits_parent\"] = \"1\" if self.mon_host_dep.inherits_parent else \"0\"\n cur_gc[\"hostdependency\"].add_host_dependency(new_hd)\n self.log(\"created {}\".format(logging_tools.get_plural(\"nagvis map\", len(nagvis_maps))))\n # remove old nagvis maps\n nagvis_map_dir = 
os.path.join(self.gc[\"NAGVIS_DIR\"], \"etc\", \"maps\")\n if os.path.isdir(nagvis_map_dir):\n skipped_customs = 0\n for entry in os.listdir(nagvis_map_dir):\n if entry.startswith(\"custom_\"):\n skipped_customs += 1\n else:\n full_name = os.path.join(nagvis_map_dir, entry)\n if full_name not in nagvis_maps:\n self.log(\"removing old nagvis mapfile {}\".format(full_name))\n try:\n os.unlink(full_name)\n except:\n self.log(\n \"error removing {}: {}\".format(\n full_name,\n process_tools.get_except_info()),\n logging_tools.LOG_LEVEL_ERROR\n )\n if skipped_customs:\n self.log(\"skipped removing of {}\".format(logging_tools.get_plural(\"custom map\", skipped_customs)))\n # create group maps\n dev_groups = device_group.objects.filter(\n Q(enabled=True) &\n Q(device_group__name__in=[os.path.basename(entry).split(\".\")[0] for entry in nagvis_maps])).distinct()\n self.log(\"creating maps for {}\".format(logging_tools.get_plural(\"device group\", len(dev_groups))))\n for dev_group in dev_groups:\n map_name = os.path.join(nagvis_map_dir, \"{}.cfg\".format(dev_group.name.replace(\" \", \"_\")))\n file(map_name, \"w\").write(\"\\n\".join([\n \"define global {\",\n \" alias=Group {}\".format(dev_group.name),\n \"}\",\n ]))\n cache_dir = os.path.join(self.gc[\"NAGVIS_DIR\"], \"var\")\n if os.path.isdir(cache_dir):\n rem_ok, rem_failed = (0, 0)\n for entry in os.listdir(cache_dir):\n try:\n full_name = os.path.join(cache_dir, entry)\n except:\n self.log(\"error building full_name from entry '{}'\".format(entry), logging_tools.LOG_LEVEL_CRITICAL)\n rem_failed += 1\n else:\n if os.path.isfile(full_name):\n try:\n os.unlink(full_name)\n except:\n rem_failed += 1\n else:\n rem_ok += 1\n self.log(\n \"cleaned cache_dir {} ({:d} ok, {:d} failed)\".format(\n cache_dir,\n rem_ok,\n rem_failed,\n ),\n logging_tools.LOG_LEVEL_ERROR if rem_failed else logging_tools.LOG_LEVEL_OK\n )\n end_time = time.time()\n self.log(\n \"created configs for {} hosts in {}\".format(\n host_info_str,\n logging_tools.get_diff_time_str(end_time - start_time),\n )\n )\n\n def get_service(self, host, act_host, s_check, sc_array, act_def_serv, serv_cgs, host_is_actively_checked, serv_temp, cur_gc, **kwargs):\n ev_defined = True if s_check.event_handler else False\n self.mach_log(\n \" adding check {:<30s} ({:2d} p), template {}, {}, {}\".format(\n s_check[\"command_name\"],\n len(sc_array),\n s_check.get_template(act_def_serv.name),\n \"cg: {}\".format(\", \".join(sorted(serv_cgs))) if serv_cgs else \"no cgs\",\n \"no evh\" if not ev_defined else \"evh is {} ({})\".format(\n s_check.event_handler.name,\n \"enabled\" if (s_check.event_handler_enabled and host_is_actively_checked) else \"disabled\",\n ),\n )\n )\n ret_field = []\n\n # self.__host_service_map.add_host(host.full_name, host.pk)\n\n # for sc_name, sc in sc_array:\n for arg_temp in sc_array:\n # self.__host_service_map.add_service(arg_temp.info, s_check.check_command_pk)\n act_serv = mon_config(\"service\", arg_temp.info)\n # event handlers\n if s_check.event_handler:\n act_serv[\"event_handler\"] = s_check.event_handler.name\n act_serv[\"event_handler_enabled\"] = \"1\" if (s_check.event_handler_enabled and host_is_actively_checked) else \"0\"\n if arg_temp.check_active is not None:\n # check flag overrides device specific setting\n act_serv[\"{}_checks_enabled\".format(\"active\" if arg_temp.check_active else \"passive\")] = 1\n act_serv[\"{}_checks_enabled\".format(\"passive\" if arg_temp.check_active else \"active\")] = 0\n else:\n if arg_temp.is_active:\n 
act_serv[\"{}_checks_enabled\".format(\"active\" if host_is_actively_checked else \"passive\")] = 1\n act_serv[\"{}_checks_enabled\".format(\"passive\" if host_is_actively_checked else \"active\")] = 0\n else:\n act_serv[\"passive_checks_enabled\"] = 1\n act_serv[\"active_checks_enabled\"] = 0\n # display this in icinga webfrontend\n info = arg_temp.info.replace(\"(\", \"[\").replace(\")\", \"]\")\n act_serv[\"display_name\"] = info\n # create identifying string for log\n # print \"::\", s_check.check_command_pk, s_check.special_command_pk, s_check.mccs_id\n act_serv[\"service_description\"] = host_service_id_util.create_host_service_description(host.pk, s_check, info)\n act_serv[\"host_name\"] = host.full_name\n # volatile\n act_serv[\"is_volatile\"] = \"1\" if serv_temp.volatile else \"0\"\n act_serv[\"check_period\"] = cur_gc[\"timeperiod\"][serv_temp.nsc_period_id].name\n act_serv[\"max_check_attempts\"] = serv_temp.max_attempts\n act_serv[\"check_interval\"] = serv_temp.check_interval\n act_serv[\"retry_interval\"] = serv_temp.retry_interval\n act_serv[\"notification_interval\"] = serv_temp.ninterval\n act_serv[\"notification_options\"] = serv_temp.notification_options\n act_serv[\"notification_period\"] = cur_gc[\"timeperiod\"][serv_temp.nsn_period_id].name\n if serv_cgs:\n act_serv[\"contact_groups\"] = serv_cgs\n else:\n act_serv[\"contact_groups\"] = self.gc[\"NONE_CONTACT_GROUP\"]\n if not host_is_actively_checked:\n act_serv[\"check_freshness\"] = 0\n act_serv[\"freshness_threshold\"] = 3600\n if host_is_actively_checked and not cur_gc.master:\n # trace\n act_serv[\"obsess_over_service\"] = 1\n act_serv[\"flap_detection_enabled\"] = 1 if (host.flap_detection_enabled and serv_temp.flap_detection_enabled) else 0\n if serv_temp.flap_detection_enabled and host.flap_detection_enabled:\n act_serv[\"low_flap_threshold\"] = serv_temp.low_flap_threshold\n act_serv[\"high_flap_threshold\"] = serv_temp.high_flap_threshold\n n_field = []\n for short, f_name in [(\"o\", \"ok\"), (\"w\", \"warn\"), (\"c\", \"critical\"), (\"u\", \"unknown\")]:\n if getattr(serv_temp, \"flap_detect_%s\" % (f_name)):\n n_field.append(short)\n if not n_field:\n n_field.append(\"o\")\n act_serv[\"flap_detection_options\"] = n_field\n if self.gc[\"ENABLE_COLLECTD\"]:\n act_serv[\"process_perf_data\"] = 1 if (host.enable_perfdata and s_check.enable_perfdata) else 0\n # TODO: POSSIBLY remove this in favor of service_description\n act_serv[\"_device_pk\"] = host.pk\n if s_check.servicegroup_names:\n act_serv[\"_cat_pks\"] = s_check.servicegroup_pks\n act_serv[\"servicegroups\"] = s_check.servicegroup_names\n cur_gc[\"servicegroup\"].add_host(host.name, act_serv[\"servicegroups\"])\n # command_name may be altered when using a special-command\n _com_parts = [\n kwargs.get(\"command_name\", s_check[\"command_name\"])\n ] + s_check.correct_argument_list(arg_temp, host.dev_variables)\n if any([_part is None for _part in _com_parts]) and self.gc[\"DEBUG\"]:\n self.log(\"none found: {}\".format(str(_com_parts)), logging_tools.LOG_LEVEL_CRITICAL)\n else:\n act_serv[\"check_command\"] = \"!\".join(_com_parts)\n # add addon vars\n for key, value in arg_temp.addon_dict.iteritems():\n act_serv[key] = value\n # if act_host[\"check_command\"] == \"check-host-alive-2\" and s_check[\"command_name\"].startswith(\"check_ping\"):\n # self.mach_log(\n # \" removing command %s because of %s\" % (\n # s_check[\"command_name\"],\n # act_host[\"check_command\"]))\n # else:\n ret_field.append(act_serv)\n return ret_field\n\n def 
_get_target_ip_info(self, _bc, srv_net_idxs, net_devices, host):\n if _bc.cache_mode in [\"ALWAYS\"]:\n # use stored traces in mode ALWAYS\n traces = _bc.get_mon_trace(host, net_devices, srv_net_idxs)\n else:\n traces = []\n if not traces:\n pathes = self.router_obj.get_ndl_ndl_pathes(srv_net_idxs, net_devices.keys(), add_penalty=True)\n traces = []\n for penalty, cur_path in sorted(pathes):\n if cur_path[-1] in net_devices:\n dev_path = self.router_obj.map_path_to_device(cur_path)\n dev_path.reverse()\n traces.append((penalty, cur_path[-1], dev_path))\n traces = sorted(traces)\n _bc.set_mon_trace(host, net_devices, srv_net_idxs, traces)\n if not traces:\n self.mach_log(\n \"Cannot reach device {} (check peer_information)\".format(\n host.full_name\n ),\n logging_tools.LOG_LEVEL_ERROR\n )\n valid_ips = []\n else:\n valid_ips = []\n _nd_added = set()\n for _val, nd_pk, _loc_trace in traces:\n if nd_pk not in _nd_added:\n _nd_added.add(nd_pk)\n valid_ips.extend(net_devices[nd_pk])\n # old code, produces a lot of dups\n # valid_ips = sum([net_devices[nd_pk] for _val, nd_pk, _loc_trace in traces], [])\n return valid_ips, traces\n\n\nclass host_service_map(object):\n \"\"\"\n UNUSED\n\n here, we save the host and services we tell icinga\n then we can later resolve it when parsing the logs\n \"\"\"\n class host_service_data(object):\n def __init__(self, hosts, services, timestamp):\n self.hosts = hosts\n self.services = services\n self.timestamp = timestamp\n\n @classmethod\n def get_mapping(cls, log):\n '''\n :return host_service_map.host_service_data:\n '''\n retval = None\n try:\n data = json.load(open(host_service_map._get_filepath()))\n retval = cls.host_service_data(data['hosts'], data['services'], data['timestamp'])\n except Exception as e:\n log(\"no host service map available: {}\".format(e), logging_tools.LOG_LEVEL_WARN)\n return retval\n\n def __init__(self, log):\n self.clear()\n self.log = log\n self._collecting = False\n\n def clear(self):\n self._services = {}\n self._hosts = {}\n\n def start_collecting(self):\n self.clear()\n self._collecting = True\n\n def end_collecting(self):\n self._collecting = False\n\n data = {\n 'hosts': self._hosts,\n 'services': self._services,\n 'timestamp': int(time.time())\n }\n\n self.log(\"writing host service mapping to {}\".format(self._get_filepath()))\n with open(self._get_filepath(), \"w\") as mapping_file:\n json.dump(data, mapping_file)\n mapping_file.flush()\n\n def add_service(self, service, pk):\n if not self._collecting:\n self.log(\"collecting service mapping for {} outside of rebuild\".format(service), logging_tools.LOG_LEVEL_WARN)\n\n if service in self._services and self._services[service] != pk:\n self.log(\"multiple definitions of service {}: {} and {}\".format(service, self._services[service], pk), logging_tools.LOG_LEVEL_WARN)\n else:\n self._services[service] = pk\n\n def add_host(self, host, pk):\n if not self._collecting:\n self.log(\"collecting host mapping for {} outside of rebuild\".format(host), logging_tools.LOG_LEVEL_WARN)\n\n if host in self._hosts and self._hosts[host] != pk:\n self.log(\"multiple definitions of host {}: {} and {}\".format(host, self._hosts[host], pk), logging_tools.LOG_LEVEL_WARN)\n else:\n self._hosts[host] = pk\n\n @staticmethod\n def _get_filepath():\n return os.path.join(global_config['MD_BASEDIR'], 'var', 
'host_service_map')\n","sub_path":"initat/md_config_server/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":105087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"104825311","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 15 13:46:29 2020\r\n\r\n@author: de''\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 15 10:40:57 2020\r\n\r\n@author: de''\r\n\"\"\"\r\n\r\nfrom sklearn.datasets import make_blobs\r\nimport json\r\nimport numpy as np\r\nimport math\r\nfrom tqdm import tqdm\r\nfrom scipy import sparse\r\nfrom sklearn.metrics import roc_auc_score,roc_curve,auc\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom rdkit import Chem\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import SVC\r\nfrom tensorflow.keras.models import Model, load_model\r\nfrom tensorflow.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\r\nfrom tensorflow.keras import metrics, optimizers\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\r\n\r\ndef split_smiles(smiles, kekuleSmiles=True):\r\n try:\r\n mol = Chem.MolFromSmiles(smiles)\r\n smiles = Chem.MolToSmiles(mol, kekuleSmiles=kekuleSmiles)\r\n except:\r\n pass\r\n splitted_smiles = []\r\n for j, k in enumerate(smiles):\r\n if len(smiles) == 1:\r\n return [smiles]\r\n if j == 0:\r\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\r\n splitted_smiles.append(k + smiles[j + 1])\r\n else:\r\n splitted_smiles.append(k)\r\n elif j != 0 and j < len(smiles) - 1:\r\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\r\n splitted_smiles.append(k + smiles[j + 1])\r\n elif k.islower() and smiles[j - 1].isupper() and k != \"c\":\r\n pass\r\n else:\r\n splitted_smiles.append(k)\r\n\r\n elif j == len(smiles) - 1:\r\n if k.islower() and smiles[j - 1].isupper() and k != \"c\":\r\n pass\r\n else:\r\n splitted_smiles.append(k)\r\n return splitted_smiles\r\n\r\ndef get_maxlen(all_smiles, kekuleSmiles=True):\r\n maxlen = 0\r\n for smi in tqdm(all_smiles):\r\n spt = split_smiles(smi, kekuleSmiles=kekuleSmiles)\r\n if spt is None:\r\n continue\r\n maxlen = max(maxlen, len(spt))\r\n return maxlen\r\ndef get_dict(all_smiles, save_path, kekuleSmiles=True):\r\n words = [' ']\r\n for smi in tqdm(all_smiles):\r\n spt = split_smiles(smi, kekuleSmiles=kekuleSmiles)\r\n if spt is None:\r\n continue\r\n for w in spt:\r\n if w in words:\r\n continue\r\n else:\r\n words.append(w)\r\n with open(save_path, 'w') as js:\r\n json.dump(words, js)\r\n return words\r\n\r\ndef one_hot_coding(smi, words, kekuleSmiles=True, max_len=1000):\r\n coord_j = []\r\n coord_k = []\r\n spt = split_smiles(smi, kekuleSmiles=kekuleSmiles)\r\n if spt is None:\r\n return None\r\n for j,w in enumerate(spt):\r\n if j >= max_len:\r\n break\r\n try:\r\n k = words.index(w)\r\n except:\r\n continue\r\n coord_j.append(j)\r\n coord_k.append(k)\r\n data = np.repeat(1, len(coord_j))\r\n output = sparse.csr_matrix((data, (coord_j, coord_k)), shape=(max_len, len(words)))\r\n return output\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n data_train= pd.read_csv('E:/code/drug/drugnn/data_train.csv')\r\n data_test=pd.read_csv('E:/code/drug/drugnn/bro5.csv')\r\n 
inchis = list(data_train['SMILES'])\r\n rts = list(data_train['type'])\r\n \r\n smiles, targets = [], []\r\n for i, inc in enumerate(tqdm(inchis)):\r\n mol = Chem.MolFromSmiles(inc)\r\n if mol is None:\r\n continue\r\n else:\r\n smi = Chem.MolToSmiles(mol)\r\n smiles.append(smi)\r\n targets.append(rts[i])\r\n \r\n words = get_dict(smiles, save_path='E:\\code\\FingerID Reference\\drug-likeness/dict.json')\r\n \r\n features = []\r\n for i, smi in enumerate(tqdm(smiles)):\r\n xi = one_hot_coding(smi, words, max_len=2000)\r\n if xi is not None:\r\n features.append(xi.todense())\r\n features = np.asarray(features)\r\n targets = np.asarray(targets)\r\n X_train=features\r\n Y_train=targets\r\n \r\n\r\n # physical_devices = tf.config.experimental.list_physical_devices('CPU') \r\n # assert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\r\n # tf.config.experimental.set_memory_growth(physical_devices[0], True)\r\n \r\n \r\n \r\n inchis = list(data_test['SMILES'])\r\n rts = list(data_test['type'])\r\n \r\n smiles, targets = [], []\r\n for i, inc in enumerate(tqdm(inchis)):\r\n mol = Chem.MolFromSmiles(inc)\r\n if mol is None:\r\n continue\r\n else:\r\n smi = Chem.MolToSmiles(mol)\r\n smiles.append(smi)\r\n targets.append(rts[i])\r\n \r\n # words = get_dict(smiles, save_path='D:/工作文件/work.Data/CNN/dict.json')\r\n \r\n features = []\r\n for i, smi in enumerate(tqdm(smiles)):\r\n xi = one_hot_coding(smi, words, max_len=2000)\r\n if xi is not None:\r\n features.append(xi.todense())\r\n features = np.asarray(features)\r\n targets = np.asarray(targets)\r\n X_test=features\r\n Y_test=targets\r\n \r\n \r\n #model = RandomForestClassifier(n_estimators=10,max_features='auto', max_depth=None,min_samples_split=2, bootstrap=True)\r\n #model = MLPClassifier(rangdom_state=1,max_iter=300)\r\n model = SVC(C=500, kernel='rbf', gamma='auto',\r\n coef0=0.0, shrinking=True,probability=False, tol=0.0001, cache_size=200, class_weight=None, verbose=False, max_iter=- 1, decision_function_shape='ovr', break_ties=False, random_state=None)\r\n \r\n # earlyStopping = EarlyStopping(monitor='val_loss', patience=0.05, verbose=0, mode='min')\r\n #mcp_save = ModelCheckpoint('C:/Users/sunjinyu/Desktop/FingerID Reference/drug-likeness/CNN/single_model.h5', save_best_only=True, monitor='accuracy', mode='auto')\r\n # reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='min')\r\n from tensorflow.keras import backend as K\r\n X_train = K.cast_to_floatx(X_train).reshape((np.size(X_train,0),np.size(X_train,1)*np.size(X_train,2)))\r\n\r\n Y_train = K.cast_to_floatx(Y_train)\r\n \r\n# X_train,Y_train = make_blobs(n_samples=300, n_features=n_features, centers=6)\r\n model.fit(X_train, Y_train)\r\n\r\n\r\n # model = load_model('C:/Users/sunjinyu/Desktop/FingerID Reference/drug-likeness/CNN/single_model.h5')\r\n Y_predict = model.predict(K.cast_to_floatx(X_test).reshape((np.size(X_test,0),np.size(X_test,1)*np.size(X_test,2))))\r\n #Y_predict = model.predict(X_test)#训练数据\r\n x = list(Y_test)\r\n y = list(Y_predict)\r\n from pandas.core.frame import DataFrame \r\n x=DataFrame(x)\r\n y=DataFrame(y)\r\n # X= pd.concat([x,y], axis=1)\r\n #X.to_csv('C:/Users/sunjinyu/Desktop/FingerID Reference/drug-likeness/CNN/molecularGNN_smiles-master/0825/single-CNN-seed444.csv')\r\n Y_predict = [1 if i >0.5 else 0 for i in Y_predict]\r\n\r\n cnf_matrix=confusion_matrix(Y_test, Y_predict)\r\n cnf_matrix\r\n \r\n tn = cnf_matrix[0,0]\r\n tp = cnf_matrix[1,1]\r\n fn = 
cnf_matrix[1,0]\r\n fp = cnf_matrix[0,1]\r\n \r\n bacc = ((tp/(tp+fn))+(tn/(tn+fp)))/2#balance accurance\r\n pre = tp/(tp+fp)#precision/q+\r\n rec = tp/(tp+fn)#recall/se\r\n sp=tn/(tn+fp)\r\n q_=tn/(tn+fn)\r\n f1 = 2*pre*rec/(pre+rec)#f1score\r\n mcc = ((tp*tn) - (fp*fn))/math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))#Matthews correlation coefficient\r\n acc=(tp+tn)/(tp+fp+fn+tn)#accurancy\r\n fpr, tpr, thresholds =roc_curve(Y_test, Y_predict)\r\n AUC = auc(fpr, tpr)\r\n print('bacc:',bacc)\r\n print('pre:',pre)\r\n print('rec:',rec)\r\n print('f1:',f1)\r\n print('mcc:',mcc)\r\n print('sp:',sp)\r\n print('q_:',q_)\r\n print('acc:',acc)\r\n print('auc:',AUC)","sub_path":"Discussion/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"417131554","text":"\n##################################################################################################################\n\"\"\"\nUsed for computing the EOM indicator. The Ease of Movement indicator is a technical study that attempts\nto quantify a mix of momentum and volume information into one value.\n\"\"\"\n\n# Libs\nimport numpy as np\n\n# Own modules\nfrom PhyTrade.Economic_model.Technical_Analysis.Technical_Indicators.ABSTRACT_indicator import ABSTRACT_indicator\n\n__version__ = '1.1.1'\n__author__ = 'Victor Guillet'\n__date__ = '10/09/2019'\n\n##################################################################################################################\n\n\nclass EOM(ABSTRACT_indicator):\n def __init__(self, big_data, timeperiod=14):\n # --> EMV initialisation\n self.timeperiod = timeperiod\n\n # -------------------------- CCI CALCULATION ---------------------------\n # --> Slice data to obtain Data falling in data slice + timeframe\n eom_df = big_data.data_slice.data[big_data.data_slice.start_index-self.timeperiod:big_data.data_slice.stop_index]\n\n dm = ((eom_df['High'] + eom_df['Low'])/2) - ((eom_df['High'].shift(1) + eom_df['Low'].shift(1))/2)\n br = (eom_df['Volume'] / 100000000) / (eom_df['High'] - eom_df['Low'])\n eom = dm / br\n eom_values = eom.rolling(self.timeperiod, center=False).mean()\n\n self.eom_values = np.array(eom_values.values[self.timeperiod:])\n\n \"\"\"\n\n\n\n\n \"\"\"\n # ===================== INDICATOR OUTPUT DETERMINATION ==============\n def get_output(self, big_data, include_triggers_in_bb_signal=False):\n from PhyTrade.Tools.MATH_tools import MATH_tools\n\n # ----------------- Bear/Bullish continuous signal\n self.bb_signal = self.eom_values\n\n # --> Normalising eom bb signal values between -1 and 1\n # self.bb_signal = MATH_tools().normalise_minus_one_one(self.eom_values)\n self.bb_signal = MATH_tools().alignator_minus_one_one(self.bb_signal, signal_max=5, signal_min=-5)\n\n","sub_path":"PhyTrade/Economic_model/Technical_Analysis/Technical_Indicators/EOM_gen.py","file_name":"EOM_gen.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"100595799","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport json\nfrom tqdm import tqdm\nfrom random import shuffle\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport tensorflow.keras.backend as K\nfrom collections import Counter\nimport re\nimport numpy as np\nfrom tensorflow.keras import preprocessing\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras import backend\n\nbase_path = 
'../data/Raw_Claim/'\nexam_param_file = 'exam_param.txt'\nresult_file = 'validation.txt'\n# EXAM_NUM = '09'\n# train_batch_size = 100\n# test_batch_size = 100\n# EPOCHS = 6\n# MAX_WORD_LENGTH = 100\n# EMB_DIM = 256\n\ndata_in_path = base_path + 'input_data/'\ntest_data_path = base_path + 'test_data/'\nmeta_data_path = base_path + 'meta_data/'\nvocab_file = meta_data_path + 'vocab.voc'\nlabel_file = meta_data_path + 'labels_section.pickle'\nfreq_file = meta_data_path + 'word_freq.pickle'\n\n\ndef get_file_n_folder():\n # if not os.path.exists(data_in_path):\n # os.makedirs(data_in_path)\n # if not os.path.exists(test_data_path):\n # os.makedirs(test_data_path)\n if not os.path.exists(data_out_path):\n os.makedirs(data_out_path)\n if not os.path.exists(meta_data_path):\n os.makedirs(meta_data_path)\n\n tr = os.listdir(data_in_path)\n tr = [file for file in tr if file.endswith(\".txt\")]\n # tr_file_list = [file for file in tr_file_list if file.startswith(\"cpc\")]\n\n te = os.listdir(test_data_path)\n te = [file for file in te if file.endswith(\".txt\")]\n return tr, te\n\n\nclass Dataset:\n\n def __init__(self, train_path, test_path, is_shuffle, train_bs, test_bs, epoch, max_length):\n self.train_path = train_path\n self.test_path = test_path\n self.is_shuffle = is_shuffle\n self.train_bs = train_bs\n self.test_bs = test_bs\n self.epoch = epoch\n self.max_length = max_length\n self.special_tokens = ['', '', '', '']\n\n if not os.path.exists(vocab_file):\n print('No vocabulary.')\n print('Making vocabulary.')\n self.build_vocab_by_patent(vocab_file)\n print('Complete build vocabulary!')\n\n if not os.path.exists(label_file):\n print('No labels.')\n print('Making labels.')\n self.build_labels()\n print('Complete build labels!')\n\n # print('Loading vocabulary...')\n self.idx2word, self.word2idx = pickle.load(open(vocab_file, 'rb'))\n print('Successfully load %d vocabulary!' 
% (len(self.idx2word)))\n self.idx2label, self.label2idx = pickle.load(open(label_file, 'rb'))\n print('Successfully load %d labels' % (len(self.idx2label)))\n\n def build_labels(self):\n error_cnt = 0\n label_list = []\n for file in self.train_path:\n with open(data_in_path + file, encoding='utf-8') as f:\n for line in f:\n try:\n patent = json.loads(line)\n labels = patent['cpc'].split('|')\n labels = [label[0] for label in labels]\n for label in labels:\n if label not in label_list:\n label_list.append(label)\n except:\n error_cnt += 1\n label2idx = {label: idx for idx, label in enumerate(label_list)}\n label = (label_list, label2idx)\n pickle.dump(label, open(label_file, 'wb'))\n\n def build_freq(self, word_list):\n word_counts = Counter(word_list)\n freq = Counter()\n if os.path.exists(freq_file):\n with open(freq_file, 'rb') as freq_dist_f:\n freq = pickle.load(freq_dist_f)\n print('frequency distribution loaded', len(freq))\n for word, cnt in word_counts.items():\n freq[word] += cnt\n print('freq len: ', len(freq))\n with open(freq_file, 'wb') as freq_dist_f:\n pickle.dump(freq, freq_dist_f)\n return freq\n\n def build_vocab_by_patent(self, vocab_file):\n error_cnt = 0\n label_list = []\n for file in self.train_path:\n word_list = []\n with open(data_in_path + file, encoding='utf-8') as f:\n for line in tqdm(f):\n try:\n # print('line: ', line)\n patent = json.loads(line)\n text = re.sub('[-=.#/?:$}(){,]', ' ', patent['title'] + patent['ab'] + patent['cl'])\n token = text.split()\n # token = tokenizer(patent['title'])\n # print('token: ', token)\n # doc = en.tokenizer(patent['title']+patent['ab']+patent['cl'])\n labels = patent['cpc'].split('|')\n for tok in token:\n word_list.append(tok.lower())\n labels = [label[0] for label in labels]\n for label in labels:\n if label not in label_list:\n label_list.append(label)\n except:\n error_cnt += 1\n # print('error: ', line)\n # print('\\nIn \"%s\" word_list: %d, error_cnt: %d\\n' % (file, len(word_list), error_cnt))\n idx2word = self.build_freq(word_list)\n idx2word = self.special_tokens + [word for word, _ in idx2word.most_common(99996)]\n print('idx2word: ', len(idx2word), idx2word[:10])\n print('idx2label: ', len(label_list), label_list)\n word2idx = {word: idx for idx, word in enumerate(idx2word)}\n label2idx = {label: idx for idx, label in enumerate(label_list)}\n vocab = (idx2word, word2idx)\n label = (label_list, label2idx)\n pickle.dump(vocab, open(vocab_file, 'wb'))\n pickle.dump(label, open(label_file, 'wb'))\n\n def text_to_sequence(self, text_list):\n sequences = []\n for text in text_list:\n sequences.append([self.word2idx[word] for word in text if word in self.word2idx.keys()])\n return sequences\n\n def sequence_to_text(self, sequence):\n return [self.idx2word[idx] for idx in sequence if idx != 0]\n\n def read_lines(self, indices, path):\n line_count = 0\n texts = []\n labels = []\n # print('indices: ', indices)\n with open(path, encoding='utf-8') as f:\n for line in f:\n if line_count in indices:\n try:\n patent = json.loads(line)\n # text = re.sub('[-=.#/?:$}(){,]', ' ', patent['title'] + patent['ab'])\n text = re.sub('[-=.#/?:$}(){,]', ' ', patent['title'])\n label = patent['cpc'].split('|')\n texts.append(text.lower().split())\n labels.append(list(set([cpc[0] for cpc in label])))\n except:\n pass\n line_count += 1\n return texts, labels\n\n def create_multiplehot_labels(self, labels_index):\n labels = []\n # print(len(label))\n for batch in labels_index:\n label = [0] * len(self.label2idx)\n # print(item)\n for cpc in 
batch:\n label[self.label2idx[cpc]] = 1\n labels.append(label)\n # print('label_repr: ', labels)\n return labels\n\n def data_generator(self, is_train):\n if is_train:\n batch_size = self.train_bs\n is_shuffle = self.is_shuffle # 셔플을 여기서 해줘야해. 밖에서는 느려\n file_list = tr_file_list\n path = data_in_path\n else:\n batch_size = self.test_bs\n is_shuffle = False\n file_list = test_file_list\n path = test_data_path\n # print(file_list)\n for file in tqdm(file_list):\n cur_file = path + file\n # print(file)\n with open(cur_file, encoding='utf-8') as f: # 일단 읽어서 길이는 알아둔다.\n data_length = len(f.readlines())\n # print('Num of pat: ', data_length)\n\n indices = list(range(data_length)) # 인덱스를 미리 만들어주는게 제너레이터 사용의 핵심.\n if is_shuffle:\n shuffle(indices) # 셔플할꺼라면 이걸... 내장 라이브러리 random에 있는 함수.\n # print('suffled indices: ', indices)\n current_count = 0\n # while True:\n # if current_count >= data_length:\n # return\n # else:\n while current_count < data_length:\n target_indices = indices[current_count:current_count + batch_size]\n texts, labels = self.read_lines(target_indices, cur_file)\n tokenized_title = texts\n labels = self.create_multiplehot_labels(labels)\n indexed_encoder_inputs = self.text_to_sequence(tokenized_title)\n padded_encoder_inputs = pad_sequences(indexed_encoder_inputs,\n maxlen=self.max_length,\n padding='pre')\n # print(padded_encoder_inputs, labels)\n current_count += batch_size\n yield padded_encoder_inputs, labels\n\n def mapping_fn(self, x, y=None):\n inputs, label = {'x': x}, y\n return inputs, label\n\n def train_input_fn(self):\n dataset = tf.data.Dataset.from_generator(generator=lambda: self.data_generator(is_train=True),\n output_types=(tf.int64, tf.int64),\n output_shapes=(\n (None, self.max_length), # 넣어주면 graph그릴때 잘못 들어온 입력을 잡아줄 수 있다.\n (None, None))) # labels count: unknown\n dataset = dataset.map(self.mapping_fn)\n dataset = dataset.repeat(count=self.epoch)\n return dataset\n\n def test_input_fn(self):\n dataset = tf.data.Dataset.from_generator(generator=lambda: self.data_generator(is_train=False),\n output_types=(tf.int64, tf.int64),\n output_shapes=((None, self.max_length),\n (None, None)))\n dataset = dataset.map(self.mapping_fn)\n return dataset\n\n def eval_input_fn(self):\n dataset = tf.data.Dataset.from_generator(\n generator=lambda: self.data_generator(is_train=False),\n output_types=(tf.int64, tf.int64),\n output_shapes=((None, self.max_length), (None, None)))\n dataset = dataset.map(self.mapping_fn)\n return dataset\n\n\ndef model_fn(features, labels, mode, params):\n TRAIN = mode == tf.estimator.ModeKeys.TRAIN\n EVAL = mode == tf.estimator.ModeKeys.EVAL\n PREDICT = mode == tf.estimator.ModeKeys.PREDICT\n # feature['x'] => (bs, 20)\n\n train_op = features\n loss = features\n predicted_token = features\n embedding_layer = tf.keras.layers.Embedding(params['vocab_size'],\n params['EMB_DIM'])(features['x']) # (bs, 20, EMD_SIZE)\n\n dropout_emb = tf.keras.layers.Dropout(rate=0.5)(embedding_layer) # (bs, 20, EMD_SIZE)\n\n filter_sizes = [3, 4, 5]\n pooled_outputs = []\n for filter_size in filter_sizes:\n conv = tf.keras.layers.Conv1D(\n filters=100,\n kernel_size=filter_size,\n padding='valid',\n activation=tf.nn.relu,\n kernel_constraint=tf.keras.constraints.max_norm(3.))(dropout_emb) # (bs, 20, 100)\n # 최대 norm 지정, weight clipping이 바로 이 부분\n\n pool = tf.keras.layers.GlobalMaxPool1D()(conv) # [(bs, 100), (bs, 100), (bs, 100)]\n pooled_outputs.append(pool)\n\n h_pool = tf.concat(pooled_outputs, axis=1) # (bs, 300)\n\n hidden = tf.keras.layers.Dense(units=250, 
activation=tf.nn.relu,\n kernel_constraint=tf.keras.constraints.max_norm(3.))(h_pool) # (bs, 200)\n dropout_hidden = tf.keras.layers.Dropout(rate=0.5)(hidden, training=TRAIN)\n logits = tf.keras.layers.Dense(units=params['label_size'])(dropout_hidden) # 이렇게하��� one-hot 필요\n\n if TRAIN:\n global_step = tf.train.get_global_step()\n if params['smoothing'] == 0:\n loss = tf.losses.sigmoid_cross_entropy(labels, logits)\n else:\n loss = tf.losses.sigmoid_cross_entropy(labels, logits, weights=1.0, label_smoothing=params['smoothing'])\n # loss = tf.losses.softmax_cross_entropy(labels, logits)\n train_op = tf.train.AdamOptimizer(0.001).minimize(loss, global_step)\n pred = tf.nn.sigmoid(logits)\n accuracy = tf.metrics.accuracy(labels, tf.round(pred))\n precision = tf.metrics.precision(labels, tf.round(pred))\n recall = tf.metrics.recall(labels, tf.round(pred))\n return tf.estimator.EstimatorSpec(mode=mode,\n train_op=train_op,\n loss=loss,\n eval_metric_ops={'acc': accuracy,\n 'prec': precision,\n 'recall': recall})\n\n elif EVAL:\n loss = tf.losses.sigmoid_cross_entropy(labels, logits)\n pred = tf.nn.sigmoid(logits)\n accuracy = tf.metrics.accuracy(labels, tf.round(pred))\n precision = tf.metrics.precision(labels, tf.round(pred))\n recall = tf.metrics.recall(labels, tf.round(pred))\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops={'acc': accuracy,\n 'prec': precision,\n 'recall': recall})\n\n elif PREDICT:\n pred = tf.nn.sigmoid(logits)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n # 'prob': tf.nn.sigmoid(logits),\n # tf.to_int32(a > 0.5)\n 'prob': tf.round(pred)})\n\n plot_model(model_fn(), to_file=data_out_path + 'model.png')\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n train_op=train_op,\n loss=loss,\n predictions={'prediction': predicted_token})\n\n\ntf.enable_eager_execution()\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n\n# valid = {}\n\nexams = open(exam_param_file, encoding='utf-8')\n\n# for line in exams:\n# valid = {}\n# ld_params = json.loads(line)\n# print('Exam No: %d\\n' % ld_params['EXAM_NUM'])\n# #, params[\"train_batch_size\"], params[\"test_batch_size\"], params[\"EPOCHS\"], params[\"MAX_WORD_LENGTH\"])\n# data_out_path = base_path + 'result_' + str(ld_params['EXAM_NUM']) + '/'\n# tr_file_list, test_file_list = get_file_n_folder()\n# print('\\nFile List for Training: ', tr_file_list, '\\n', 'File List for Testing: ', test_file_list)\n#\n# dataset = Dataset(train_path=tr_file_list,\n# test_path=test_file_list,\n# is_shuffle=True,\n# train_bs=ld_params[\"train_batch_size\"],\n# test_bs=ld_params[\"test_batch_size\"],\n# epoch=ld_params[\"EPOCHS\"],\n# max_length=ld_params[\"MAX_WORD_LENGTH\"])\n#\n# hyper_params = {'vocab_size': len(dataset.word2idx),\n# 'label_size': len(dataset.label2idx),\n# 'embedding_dimension': EMB_DIM,\n# 'smoothing': ld_params[\"smoothing\"]}\n#\n# est = tf.estimator.Estimator(model_fn=model_fn,\n# params=hyper_params,\n# model_dir=data_out_path)\n#\n# est.train(dataset.train_input_fn)\n# valid[ld_params['EXAM_NUM']] = est.evaluate(dataset.eval_input_fn, steps=10)\n# print(valid)\n# with open(data_out_path + str(ld_params['EXAM_NUM']) + '_' + result_file, 'w+') as res:\n# res.write(json.dumps(str(valid)))\n\n\nvalid = {}\nhyper_params = {\"EXAM_NUM\": 59, \"train_batch_size\": 500, \"test_batch_size\": 100, \"EPOCHS\": 10,\n \"MAX_WORD_LENGTH\": 15, \"EMB_DIM\": 256, \"smoothing\": 
0.1}\n\n\"\"\"\n{\"EXAM_NUM\":59,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.1}\n{\"EXAM_NUM\":60,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.2}\n{\"EXAM_NUM\":61,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.3}\n{\"EXAM_NUM\":62,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.4}\n{\"EXAM_NUM\":57,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":10,\"EMB_DIM\":256,\"smoothing\":0}\n{\"EXAM_NUM\":58,\"train_batch_size\":500,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0}\n{\"EXAM_NUM\":55,\"train_batch_size\":100,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.3}\n{\"EXAM_NUM\":56,\"train_batch_size\":100,\"test_batch_size\":100,\"EPOCHS\":10,\"MAX_WORD_LENGTH\":15,\"EMB_DIM\":256,\"smoothing\":0.4}\n\"\"\"\n\nprint('Exam No: %d\\n' % hyper_params['EXAM_NUM'])\n# , params[\"train_batch_size\"], params[\"test_batch_size\"], params[\"EPOCHS\"], params[\"MAX_WORD_LENGTH\"])\ndata_out_path = base_path + 'result_' + str(hyper_params['EXAM_NUM']) + '/'\n\ntr_file_list, test_file_list = get_file_n_folder()\nprint('\\nFile List for Training: ', tr_file_list, '\\n', 'File List for Testing: ', test_file_list)\n\ndataset = Dataset(train_path=tr_file_list,\n test_path=test_file_list,\n is_shuffle=True,\n train_bs=hyper_params[\"train_batch_size\"],\n test_bs=hyper_params[\"test_batch_size\"],\n epoch=hyper_params[\"EPOCHS\"],\n max_length=hyper_params[\"MAX_WORD_LENGTH\"])\n\nhyper_params['vocab_size'] = len(dataset.word2idx)\nhyper_params['label_size'] = len(dataset.label2idx)\n\nest = tf.estimator.Estimator(model_fn=model_fn,\n params=hyper_params,\n model_dir=data_out_path)\n\n# 성공은 했는데, 파일을 통째로 돌려서 오래걸림\n# pred = est.predict(input_fn=dataset.eval_input_fn)\n# pred = [str(logits) for logits in pred]\n# print(pred[:10])\n\n# 좀 잘라서 돌릴 방법을 찾아보쟈\npred = est.predict(input_fn=dataset.eval_input_fn)\npred = [str(logits) for logits in pred]\nprint(pred[:10])\n\n\nprint('\\n')\nprint('true label: ', dataset.eval_input_fn())\n# (x, label) = dataset.eval_input_fn\n# print('true label: ', label)\n# print('pred: ', pred)\n# for item in pred:\n# print(item)\n\n# for texts, label_origin in dataset.eval_input_fn().take(1):\n# print(texts['x'], label_origin)\n# pred = est.predict(texts['x'][0])\n# print(pred)\n# for item in pred:\n# print(item)\n# # pred = [str(logits) for logits in pred]\n# # print('pred: ', pred)\n# # valid[ld_params['EXAM_NUM']] = est.predict(dataset.eval_input_fn, steps=10)\n# # print(valid)\n","sub_path":"CNN_20191011/test_cnn_20191105.py","file_name":"test_cnn_20191105.py","file_ext":"py","file_size_in_byte":19550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"373930220","text":"#!D:\\Program Files\\Anaconda3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/21 21:39\n# @Author : 老萝卜\n# @File : 集合的运算-times_2.py\n# @Software: PyCharm Community Edition\n\n# & 交集运算\ns1 = {1,2,3,4,5}\ns2 = {3,4,5,6,7}\n\nresult = s1 & s2 # {3,4,5}\n\n\n# | 并集运算\nresult = s1 | s2 # {1, 2, 3, 4, 5, 6, 7}\n\n# - 差集运算\nresult = s1 - s2 # {1, 2}\nresult = s2 - s1 # {6, 7}\n\n\n# ^ 异或集\nresult = s1 ^ s2 # {1, 2, 6, 7}\nprint(s1,s2,result)\n\n# <= 
检查一个集合是否是另一个集合的子集\n# < 检查一个集合是否是另一个集合的真子集\n# a = {1,2,3,4,5}\na = {1,2,3}\n\nb = {1,2,3,4,5}\n\nresult =a <= b\nprint(result)\n\nresult =a < b\nprint(result)\n\n# >=检查一个集合是否是另一个集合的超集\n# >检查一个集合是否是另一个集合的真超集\n\na = {1,2,3}\n\nb = {1,2,3,4,5}\nresult =b >= a\nprint(result)\n\nresult =b > a\nprint(result)\n","sub_path":"homework_zero_class/lesson8/集合的运算-times_2.py","file_name":"集合的运算-times_2.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"385864217","text":"import math\n\nprint(\"calculadora de equações de 2o grau no formato ax2 + bx + c\")\na = int(input(\"digite a: \"))\n\nif (a == 0):\n print(\"não é equação do 2o grau\")\nelse:\n b = int(input(\"digite b: \"))\n c = int(input(\"digite c: \"))\n\n delta = ((b*b) - 4*a*c)\n\n if (delta < 0):\n print(\"não possui raízes reais\")\n else:\n x1 = (-b + math.sqrt(delta)) / (2*a)\n x2 = (-b - math.sqrt(delta)) / (2*a)\n\n print(\"x1 = {}, x2 = {}\".format(x1,x2))\n \n","sub_path":"lista2/11_equacao_2o_grau.py","file_name":"11_equacao_2o_grau.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"96917739","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Bullet(Sprite):\n\n def __init__(self, settings, screen, boat):\n super(Bullet, self).__init__()\n\n self.screen = screen\n\n self.rect = pygame.Rect(0, 0, settings.bullet_width, settings.bullet_height)\n self.rect.centerx = boat.rect.right\n # self.rect.top = boat.rect.top\n self.rect.centery = boat.rect.centery\n\n self.x = float(self.rect.x)\n\n self.colour = settings.bullet_colour\n self.speed_factor = settings.bullet_speed_factor\n\n def update(self):\n self.x += self.speed_factor\n self.rect.x = self.x\n\n def draw_bullet(self):\n pygame.draw.rect(self.screen, self.colour, self.rect)","sub_path":"Chapter 12 - Pygame/Chapter 12 - Exercises/12-5 Sideways Shooter/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"322715030","text":"import random\n\ndef integrate(fun, a=0, b=1, n_iter=100):\n count = 0.0\n length = b - a\n for i in range(n_iter):\n x = random.random() * length + a\n y = fun(x)\n count += y\n return count / n_iter * length\n","sub_path":"mc.py","file_name":"mc.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453910380","text":"from blocket_scraper.managers import page_manager, file_manager\n\n\n#search_url = \"https://www.blocket.se/annonser/hela_sverige/fordon/motorcyklar?cg=1140&f=c\"\n#search_url = \"https://www.blocket.se/annonser/hela_sverige/fordon/motorcyklar/ovrigt?cg=1148&plo=1&q=motorcykel\"\n#search_url = \"https://www.blocket.se/annonser/hela_sverige\"\n#search_url = \"https://www.blocket.se/annonser/hela_sverige/for_hemmet?cg=2000\"\n#search_url = \"https://www.blocket.se/annonser/hela_sverige/fordon/motorcyklar/custom?cg=1142&q=yamaha%20virago%201100\"\nsearch_url = \"https://www.blocket.se/annonser/hela_sverige/fordon/motorcyklar/scooter?cg=1144&q=yamaha\"\n\ndef start():\n\n price_list = []\n full_blocket_base_webpage = page_manager.get_web_page(search_url)\n filename = page_manager.get_search_word(full_blocket_base_webpage, search_url)\n saved_products = file_manager.load_products_from_file(filename)\n found_products = 
page_manager.get_all_products(saved_products, full_blocket_base_webpage, search_url, filename)\n\n for price in found_products:\n price_list.append(int(price.price))\n\n found_products.sort(key=lambda x: x.price)\n\n return found_products, price_list","sub_path":"CSharp/azureFunction/blocket_scraper/blocket_scraper.py","file_name":"blocket_scraper.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"67774177","text":"from setuptools import setup, find_packages\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetup(\n name='cf_submit',\n version='1.4.0',\n scripts=['cf'],\n author='Nasreddine Bac Ali',\n author_email='nasreddine.bacali95@gmail.com',\n description='Submit Codeforces codes via terminal and other coll stuff',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/bacali95/cf_submit',\n packages=find_packages(),\n package_data={\n 'cf_submit': [\n 'bin/cf_checker',\n 'bash_completion/cf'\n ]\n },\n install_requires=[\n 'lxml',\n 'robobrowser',\n 'prettytable',\n 'requests',\n 'Werkzeug>=0.16,<1.0'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"255328153","text":"#from keras.datasets import mnist\nimport matplotlib.pyplot as plt\nimport csv\nimport datetime\nimport pandas as pd\nfrom ast import literal_eval\n \ndata = pd.read_csv('dataset3.csv', names=['b','c','d','label'])\ndata.dropna(inplace=True)\nrvvv=data.d.tolist()\nb=literal_eval(rvvv[15])\na = []\nc = []\nfor i in range(len(b)):\n\tif (i%2) is 0:\n\t\ta.append(b[i])\n\telse:\n\t\tc.append(b[i])\n# plot 4 images as gray scale\nx=[0,1,2,3,4,5,6,7]\n\nplt.plot(b)\n\n# show the plot\nplt.show()","sub_path":"handwritten recogfnizer/neurosky.py","file_name":"neurosky.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"550553824","text":"from flask import (\n render_template,\n Blueprint,\n)\nfrom flask_login import login_required, current_user\n\nfrom app.models.bcs import Service, WorkerService, ClientService\n\nblueprint = Blueprint('index', __name__)\n\n\n@blueprint.route('/', methods=['GET', 'POST'])\n@login_required\ndef index():\n if current_user.is_admin:\n services = Service.query.all()\n assigned_to_self = [ws.service_id for ws in WorkerService.query.filter_by(user_id=current_user.id).all()]\n else:\n services = [service for service in Service.query.all() if service.is_available]\n assigned_to_self = [cs.service_id for cs in ClientService.query.filter_by(user_id=current_user.id).all()]\n\n return render_template(\"index.jinja.html\",\n services=services,\n assigned_to_self=assigned_to_self,\n is_admin=current_user.is_admin)\n","sub_path":"app/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"555297664","text":"import sys\r\n\r\nclass Dequeue:\r\n def __init__(self):\r\n self.items = []\r\n def push_back(self, v):\r\n self.items.append(v)\r\n def push_front(self, v):\r\n self.items.insert(0, v)\r\n def pop_back(self):\r\n 
try:\r\n print(self.items.pop())\r\n except IndexError:\r\n print(-1)\r\n def pop_front(self):\r\n try:\r\n print(self.items.pop(0))\r\n except IndexError:\r\n print(-1)\r\n def back(self):\r\n try:\r\n print(self.items[-1])\r\n except IndexError:\r\n print(-1)\r\n def front(self):\r\n try:\r\n print(self.items[0])\r\n except IndexError:\r\n print(-1)\r\n def size(self):\r\n print(len(self.items))\r\n def empty(self):\r\n if len(self.items) == 0:\r\n print(1)\r\n else:\r\n print(0)\r\n\r\n\r\nmy_Queue = Dequeue()\r\nn = int(input())\r\n\r\nfor i in range(n):\r\n order = sys.stdin.readline().split()\r\n\r\n if order[0] == \"push_front\":\r\n value = int(order[1])\r\n my_Queue.push_front(value)\r\n elif order[0] == \"push_back\":\r\n value = int(order[1])\r\n my_Queue.push_back(value) \r\n elif order[0] == \"pop_front\":\r\n my_Queue.pop_front()\r\n elif order[0] == \"pop_back\":\r\n my_Queue.pop_back()\r\n elif order[0] == \"size\":\r\n my_Queue.size()\r\n elif order[0] == \"empty\":\r\n my_Queue.empty()\r\n elif order[0] == \"front\":\r\n my_Queue.front()\r\n elif order[0] == \"back\":\r\n my_Queue.back()","sub_path":"Baekjoon/Queue/10866(덱).py","file_name":"10866(덱).py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"352693220","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom .budget_plan_template import BudgetPlanCommon\nfrom openerp.addons.account_budget_activity.models.account_activity \\\n import ActivityCommon\n\n\nclass BudgetPlanPersonnel(BudgetPlanCommon, models.Model):\n _name = 'budget.plan.personnel'\n _inherits = {'budget.plan.template': 'template_id'}\n _inherit = ['mail.thread']\n _description = \"Personnel Budget - Budget Plan\"\n\n template_id = fields.Many2one(\n 'budget.plan.template',\n required=True,\n ondelete='cascade',\n )\n # TEMP\n plan_line_ids = fields.One2many(\n 'budget.plan.personnel.line',\n 'plan_id',\n string='Budget Plan Lines',\n copy=True,\n readonly=True,\n states={'draft': [('readonly', False)],\n 'submit': [('readonly', False)]},\n track_visibility='onchange',\n )\n plan_revenue_line_ids = fields.One2many(\n 'budget.plan.personnel.line',\n 'plan_id',\n string='Revenue Plan Lines',\n copy=True,\n readonly=True,\n domain=[('budget_method', '=', 'revenue')], # Have domain\n states={'draft': [('readonly', False)],\n 'submit': [('readonly', False)]},\n track_visibility='onchange',\n )\n plan_expense_line_ids = fields.One2many(\n 'budget.plan.personnel.line',\n 'plan_id',\n string='Expense Plan Lines',\n copy=True,\n readonly=True,\n domain=[('budget_method', '=', 'expense')], # Have domain\n states={'draft': [('readonly', False)],\n 'submit': [('readonly', False)]},\n track_visibility='onchange',\n )\n # plan_summary_revenue_line_ids = fields.One2many(\n # 'budget.plan.personnel.summary',\n # 'plan_id',\n # string='Summary by Activity Group',\n # domain=[('budget_method', '=', 'revenue')],\n # readonly=True,\n # help=\"Summary by Activity Group View\",\n # )\n # plan_summary_expense_line_ids = fields.One2many(\n # 'budget.plan.personnel.summary',\n # 'plan_id',\n # string='Summary by Activity Group',\n # domain=[('budget_method', '=', 'expense')],\n # readonly=True,\n # help=\"Summary by Activity Group View\",\n # )\n planned_revenue = fields.Float(\n string='Total Revenue Plan',\n compute='_compute_planned_overall',\n store=True,\n help=\"All Revenue\",\n )\n planned_expense = fields.Float(\n string='Total Expense Plan',\n 
compute='_compute_planned_overall',\n store=True,\n help=\"All Expense\",\n )\n planned_overall = fields.Float(\n string='Total Planned',\n compute='_compute_planned_overall',\n store=True,\n help=\"All Revenue - All Expense\",\n )\n\n @api.multi\n @api.depends('plan_line_ids',\n 'plan_revenue_line_ids',\n 'plan_expense_line_ids')\n def _compute_planned_overall(self):\n for rec in self:\n amounts = rec.plan_revenue_line_ids.mapped('planned_amount')\n rec.planned_revenue = sum(amounts)\n amounts = rec.plan_expense_line_ids.mapped('planned_amount')\n rec.planned_expense = sum(amounts)\n rec.planned_overall = rec.planned_revenue - rec.planned_expense\n\n @api.multi\n @api.depends('plan_line_ids',\n 'plan_revenue_line_ids',\n 'plan_expense_line_ids')\n def _compute_planned_overall(self):\n for rec in self:\n amounts = rec.plan_revenue_line_ids.mapped('planned_amount')\n rec.planned_revenue = sum(amounts)\n amounts = rec.plan_expense_line_ids.mapped('planned_amount')\n rec.planned_expense = sum(amounts)\n rec.planned_overall = rec.planned_revenue - rec.planned_expense\n\n @api.onchange('personnel_costcenter_id')\n def _onchange_personnel_costcenter_id(self):\n self.section_id = self.personnel_costcenter_id.section_id\n\n # Call inherited methods\n @api.multi\n def unlink(self):\n for rec in self:\n rec.plan_line_ids.mapped('template_id').unlink()\n self.mapped('template_id').unlink()\n return super(BudgetPlanPersonnel, self).unlink()\n\n @api.model\n def convert_plan_to_budget_control(self, active_id):\n head_src_model = self.env['budget.plan.personnel']\n line_src_model = self.env['budget.plan.personnel.line']\n return self._convert_plan_to_budget_control(active_id,\n head_src_model,\n line_src_model)\n\n\nclass BudgetPlanPersonnelLine(ActivityCommon, models.Model):\n _name = 'budget.plan.personnel.line'\n _inherits = {'budget.plan.line.template': 'template_id'}\n _description = \"Personnel Budget - Budget Plan Line\"\n\n plan_id = fields.Many2one(\n 'budget.plan.personnel',\n string='Budget Plan',\n ondelete='cascade',\n index=True,\n required=True,\n )\n template_id = fields.Many2one(\n 'budget.plan.line.template',\n required=True,\n ondelete='cascade',\n )\n\n @api.model\n def create(self, vals):\n res = super(BudgetPlanPersonnelLine, self).create(vals)\n res.write({'chart_view': res.plan_id.chart_view,\n 'fiscalyear_id': res.plan_id.fiscalyear_id.id})\n return res\n\n @api.multi\n def unlink(self):\n self.mapped('template_id').unlink()\n return super(BudgetPlanPersonnelLine, self).unlink()\n","sub_path":"pabi_budget_plan/models/budget_plan_personnel.py","file_name":"budget_plan_personnel.py","file_ext":"py","file_size_in_byte":5555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"225966621","text":"import sys\nimport os\nimport csv\nimport shutil\n\nimport modularitysearch.utils.namegenerators as namegenerators\n\ndef giNumber2String(giNumber):\n \"\"\"Convert gi int to string with appropriate prefix, spacers.\"\"\"\n numStr = str(int(giNumber))\n pad = '0'*(9 - len(numStr))\n return 'gi|' + pad + numStr\n\n\ndef cleanedProteome(sourceDirectory, source):\n gffFile = namegenerators.getGenomicGFF(sourceDirectory, source)\n with open(gffFile, 'r') as gffOpen:\n gff = gffOpen.readlines()\n\n regiontext = '##sequence-region'\n fileend = '###'\n # NOT gene, which isn't keyed to protein_id that shows up in protein.faa\n proteinmarker = 'CDS'\n firstSection = True\n giNumber = 0\n geneHeaders = []\n cleanedGffFile = 
namegenerators.getGenomicGFF(sourceDirectory, source)\n with open(cleanedGffFile, 'w') as cleanedGffOpen:\n for lineNum, line in enumerate(gff):\n # remove newline character\n line = line.replace('\\n', '')\n\n # Header section\n if line[0] == '#':\n items = line.split(' ')\n # New chromosome\n if line[:len(regiontext)] == regiontext:\n chromosome = items[1]\n chromosomeLength = int(items[3])\n else:\n # Feature section\n items = line.split('\\t')\n featureStop = int(items[4])\n # drop feature if it overlaps origin. antiSMASH can't handle.\n if featureStop > chromosomeLength:\n continue\n\n geneHeader = ''\n if items[2] == 'CDS':\n giNumber += 1\n\n properties = {prop.split('=')[0]: prop.split('=')[1]\n for prop in items[-1].split(';')}\n if 'pseudo' in properties:\n continue\n protein_id = properties['protein_id']\n\n geneHeader += '>' + giNumber2String(giNumber) + ' '\n geneHeader += '[protein_id={}]'.format(\n properties['protein_id'])\n geneHeader += '[description={}]'.format(\n properties['product'].replace('[', '').replace(']', ''))\n geneHeader += '[chromosome={}]'.format(chromosome)\n geneHeader += '[location=({},{})]'.format(\n items[3], items[4])\n geneHeader += '[direction={}]'.format(items[6])\n geneHeader += '[cdsID={}]'.format(properties['ID'])\n\n geneHeaders.append((properties['protein_id'], geneHeader))\n cleanedGffOpen.write(line)\n\n # Load protein sequences\n sequences = dict()\n proteomeNCBI_file = namegenerators.getProteomeNCBI(sourceDirectory, source)\n with open(proteomeNCBI_file, 'r') as proteomeNCBI_read:\n for lineNum, line in enumerate(proteomeNCBI_read):\n if line[0] == '>':\n protein_id = line.replace('>', '').split(' ')[0]\n sequences[protein_id] = ''\n else:\n sequences[protein_id] += line\n\n # Write new proteome file\n headeredSequences = [(geneHeader, sequences[protein_id])\n for protein_id, geneHeader in geneHeaders]\n proteomeFile = namegenerators.getProteome(sourceDirectory, source)\n with open(proteomeFile, 'w') as proteomeWrite:\n for geneHeader, sequence in headeredSequences:\n proteomeWrite.write(geneHeader + '\\n')\n proteomeWrite.write(sequence)\n\nif __name__ == \"__main__\":\n # Copy downloaded data into new clean_batch folder\n oldSourceDirectory = sys.argv[1]\n #('/Users/eliweinstein/Documents/ResearchProjects/LogicalHomology/'\n # 'data/NCBIrefseq/genomes_batch8-7-17-14h43m/sources_list.csv')\n batch = namegenerators.getBatchName()\n oldFolder = namegenerators.getHomeLocation(oldSourceDirectory)\n newFolder = namegenerators.getGenomesCleanDirName(\n oldSourceDirectory, batch)\n shutil.copytree(oldFolder, newFolder)\n sourceDirectory = os.path.join(newFolder,\n os.path.split(oldSourceDirectory)[1])\n\n with open(sourceDirectory, 'rU') as sourceCSV:\n sourceCSVreader = csv.reader(sourceCSV)\n for row in sourceCSVreader:\n source = row[0]\n cleanedProteome(sourceDirectory, source)\n","sub_path":"modularitysearch/datasets/NCBIrefseq_Clean.py","file_name":"NCBIrefseq_Clean.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629163142","text":"import pyrebase\n\nfirebaseConfig = {\n 'apiKey': \"AIzaSyAIuua7NGadNu4dHChW0hYGHLApMW_XVOE\",\n 'authDomain': \"pbhustle-702d9.firebaseapp.com\",\n 'databaseURL': \"https://pbhustle-702d9.firebaseio.com\",\n 'projectId': \"pbhustle-702d9\",\n 'storageBucket': \"pbhustle-702d9.appspot.com\",\n 'messagingSenderId': \"63903745303\",\n 'appId': \"1:63903745303:web:1f610bfc6f8df057e52352\",\n 'measurementId': 
\"G-YKZE3DKT4Z\"\n }\ndb=pyrebase.initialize_app(firebaseConfig).database()\n\n\n\nclass standings:\n\n def __init__(self):\n\n self.CF_Standings=[]\n self.CC_Standings=[]\n self.PB_Standings=[]\n self.ratings=[]\n\n\n\n def codeforces(self):\n\n self.ratings=db.child('Ratings').get().val()\n self.CF_Standings.clear()\n for id in self.ratings:\n \n name=db.child('users').child(id).child('name').get().val()\n handle=db.child('users').child(id).child('CF_id').get().val()\n rating=self.ratings[id]['CF']\n\n self.CF_Standings.append([name,handle,rating,'https://codeforces.com/profile/'+handle])\n\n self.CF_Standings.sort(key= lambda x: int(x[2][:x[2].index(' ')]),reverse=True)\n\n j=0\n while( j=0 : \n sample=sound[int((instant-(width/2))):int(instant+(width/2))]\n# print(len(sample))\n cut = w*sample\n return cut\n\n#s=sampling(data,2*fe,20000)\n#ts=np.arange(len(s)) \n#plt.plot(ts,s)\nsamples=[]\nwidth=44100\n#print((len(data)/width)-2)\n\n\n#Fonction qui parcourt le morceau et le découpe.\n#arguments, morceau et nombre d'echantillons\n#samples number = (len/largeur_des_echantillons)-2\ndef stocking_samples(sound, samples_number):\n for i in range(1,samples_number+1):\n samp=sampling(sound,(i/samples_number)*fe,int(len(sound)/samples_number))\n samples.append(samp)\n \n return samples\n#--------------------------------------------------------\n#test \nsample=stocking_samples(data,2041)\n#print(len(sample))\n#---------------------------------------------------------------\n#samp=sampling(data,1*fe,1000)\ndef short_term_transform(samples):\n transform=[]\n for i in range(len(samples)):\n #tr= np.asarray(samples[i])\n transform.append(np.fft.fft(samples[i],2048))\n \n return transform\n \ntransf= short_term_transform(sample)","sub_path":"decoupe.py","file_name":"decoupe.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"53159484","text":"\nimport numpy as np\n\n\ndef get_ion_state(line):\n \"\"\"\n Get the ionization state of a `VoigtFit.Line` instance or of `line_tag` string:\n ex: Line<'FeII_2374'> --> II\n ex: Line<'CIa_1656'> --> I\n ex: 'CIV_1550' --> IV\n \"\"\"\n if isinstance(line, str):\n ion = line.split('_')[0]\n else:\n ion = line.ion\n\n if 'H2' in ion:\n return ''\n elif 'CO' in ion:\n return ''\n else:\n pass\n\n element = ion[:2] if ion[1].islower() else ion[0]\n length = len(element)\n ion_state = ion[length:]\n if ion_state[-1].islower():\n ion_state = ion_state[:-1]\n return ion_state\n\n\ndef match_ion_state(line, all_lines):\n \"\"\"\n Find a line that matches the ionization state of the input `line`.\n If more lines match, then choose the strongest line.\n \"\"\"\n matches = match_ion_state_all(line, all_lines)\n\n N_matches = len(matches)\n if N_matches == 0:\n msg = \"No matches found!\"\n line_match = None\n\n elif N_matches == 1:\n line_match = matches[0]\n msg = \"Found 1 match: %s\" % line_match.tag\n\n else:\n line_strength = [ll.l0 * ll.f for ll in matches]\n idx = np.argmax(line_strength)\n line_match = matches[idx]\n msg = \"Found %i matches. 
Strongest line: %s\" % (N_matches, line_match.tag)\n\n return line_match, msg\n\n\ndef match_ion_state_all(line, all_lines):\n \"\"\"\n Find all lines that match the ionization state of the input `line`.\n \"\"\"\n if isinstance(line, str):\n line_tag = line\n else:\n line_tag = line.tag\n\n ion_state = get_ion_state(line)\n matches = list()\n for this_line in all_lines:\n if this_line.tag == line_tag:\n continue\n\n this_state = get_ion_state(this_line)\n if this_state == ion_state:\n matches.append(this_line)\n\n return matches\n\n\ndef tau_percentile(x, tau, a=0.997):\n \"\"\"\n Determine the range of x that encompasses the fraction `a`\n of the total apparent optical depth of the absorption profile, `tau`\n \"\"\"\n y = np.cumsum(tau)\n y = y/y.max()\n\n a_low = (1 - a) / 2\n a_high = (1 + a) / 2\n\n x_range = list()\n for p in [a_low, a_high]:\n i1 = max((y < p).nonzero()[0])\n i2 = i1 + 1\n slope = (y[i2] - y[i1]) / (x[i2] - x[i1])\n x_int = x[i2] + (p - y[i2])/slope\n x_range.append(x_int)\n\n return x_range\n\n# vmin, vmax = tau_noise_range(vel_ref[mask], tau, tau_err, threshold=threshold)\ndef tau_noise_range(x, tau, tau_err, threshold=1.5):\n \"\"\"\n Determine the range of x for which the cumulative `tau` is significantly\n above the noise-level determined from the cumulative error.\n \"\"\"\n y = np.cumsum(tau)\n y_err = np.sqrt(np.cumsum(tau_err**2))\n N_pix = len(tau)\n\n low_noise = np.median(y_err[:N_pix//2])\n upper_noise = np.median(y_err[N_pix//2:])\n y_low = threshold * low_noise\n y_high = max(y) - threshold * upper_noise\n\n # For the upper range:\n imax = min((y > y_high).nonzero()[0])\n xmax = x[imax]\n\n # For the lower range:\n imin = max((y < y_low).nonzero()[0])\n xmin = x[imin]\n\n return (xmin, xmax)\n\n\ndef equivalent_width(wl, flux, err, *, aper, z_sys=0.):\n \"\"\"\n Measure equivalent width in a region determined by the aperture `aper`,\n given as a boolean array with `True` where the equivalent width should\n be evaluated.\n\n Returns rest-frame equivalent width and its uncertainty in same units as `wl`,\n by default in Å.\n \"\"\"\n assert np.sum(aper) > 0, \"Must have a non-zero number of pixels in the aperture\"\n assert len(aper) == len(wl), \"Aperture must have same number of elements as wl, flux, err\"\n\n nan_values = np.isnan(flux[aper])\n if nan_values.any():\n # Skip NaN values by removing them from the aperture:\n aper = aper & ~nan_values\n W_rest = np.trapz(1. - flux[aper], wl[aper]) / (z_sys + 1)\n W_err = np.sqrt(np.nansum(err[aper]**2)) * np.mean(np.diff(wl)) / (z_sys + 1)\n\n return W_rest, W_err\n","sub_path":"VoigtFit/funcs/limits.py","file_name":"limits.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"51150797","text":"\"\"\"\nscrub.py\nv1.0 2015-05-02\nv1.1 2015-11-21 active property and stop word handling added\nv1.2 2016-07-14 Unicode error handling added\nv1.3 2016-07-19 Added functions to remove accents and replace curly quotes. There are two \n options for curly quotes. Using ftfy will also normalise line breaks.\nscott.kleinman@csun.edu\n\n1. Requires configuration in config.py file in same folder as scrub.py\n2. 
Stop word files must be .txt files with comma, space, or line-separated words.\n\"\"\"\n\n__author__ = \"Scott Kleinman\"\n__copyright__ = \"copyright 2015-, The WE1S Project\"\n__license__ = \"GPL\"\n__version__ = \"1.3\"\n__email__ = \"scott.kleinman@csun.edu\"\n\nimport os, re, codecs, ftfy\nimport unicodedata as ud\nfrom config import *\niterations = len(options)\n\n# Read and scrub files in input directory\t\ndef readFiles(input_file_path, output_file_path):\n ## Defaults here for now\n encoding = 'utf-8'\n error_handling = 'strict'\n fileList = os.listdir(input_file_path)\n\t# Open individual files\n for file in fileList:\n file_path = os.path.join(input_file_path, file)\n with codecs.open(file_path,'r',encoding=encoding,errors=error_handling) as f:\n text = f.read()\n # Call the scrub function\n output = scrub(text)\n # Write the scrubbed text to a new file\n output_path = os.path.join(output_file_path, file)\n with codecs.open(output_path,'w',encoding='utf8',errors=error_handling) as fh:\n fh.write(output)\n fh.close\n\ndef remove_accents(input_str):\n nfkd_form = ud.normalize('NFKD', input_str)\n return u\"\".join([c for c in nfkd_form if not ud.combining(c)])\n\ndef scrub(text):\n # Cycle iterations\n for i in range(0,iterations):\n values = options[i][\"values\"]\n # If the iteration is for stop words...\n if values == \"stopwords\":\n fh = open(stopwords_location, 'r')\n stoplist = fh.read()\n fh.close()\n stoplist = re.sub(\"\\s+\", \",\", stoplist)\n stoplist = stoplist.split(\",\")\n replace = \"\"\n print(stoplist)\n for item in stoplist:\n find = r\"\\b(?=\\w)\" + re.escape(item) + r\"\\b(?!\\w)\"\n text = re.sub(find, \"\", text)\n # Otherwise...\n else:\n # Delete inactive values\n for item in values:\n if \"active\" in item.keys() and item[\"active\"] == False:\n values.remove(item)\n num_values = len(values)\n # Apply each value in iteration\n for j in range(0, num_values):\n find = values[j][\"find\"]\n replace = values[j][\"replace\"]\n text = re.sub(find, replace, text)\n\n # Remove left and right (curly) quotation marks\n #text = text.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\").replace(u\"\\u201c\",'\"').replace(u\"\\u201d\", '\"')\n # ftfy may be more comprehensive than the above. 
It also normalises line breaks to \"\\n\"\n text = ftfy.fix_text(text, normalization='NFKC')\n\n # Remove accents\n text = remove_accents(text)\n\n return text\n\n# Initiate\nprint(\"Processing...\\n\")\nprint(\"Reading \"+input_file_path+\"\\n\")\n\n# Read and Scrub Files\nreadFiles(input_file_path, output_file_path)\n\n# Generate Log String\nout = \"Number of iterations: \" + str(iterations) + \"\\n\\n\"\nfor i, item in enumerate(options):\n values = options[i][\"values\"]\n if values == \"stopwords\":\n fh = open(stopwords_location, 'r')\n stoplist = fh.read()\n fh.close()\n stoplist = re.sub(\"\\s+\", \", \", stoplist)\n out += \"Stopwords Removed: \" + stoplist + \"\\n\"\n else:\n out += \"Iteration: \" + str(i+1) + \"\\n\"\n out += \"Find\\tReplace\\n\"\n for j, value in enumerate(values):\n out += value[\"find\"] + \"\\t-->\\t\" + value[\"replace\"] + \"\\n\"\n \t# Retrieve metadata\n if len(item) > 1:\n out += \"Metadata:\\n\"\t\n for k in item:\n if k != \"values\":\n out += str(k) + \": \" + str(options[i][k]) + \"\\n\"\n out += \"\\n\"\n\n out += \"Curly quotes removed.\\n\"\n out += \"Accents removed.\\n\"\n\n# Print Log\nprint(out)\n\n# Save Log File\nif save_log == True:\n log_file = os.path.join(output_file_path, \"log.txt\")\n f = open(log_file,'w')\n out = out\n f.write(out)\n f.close\n\n# Success\nprint(\"Done!\\n\")","sub_path":"we1s-scripts/Scrub/scrub.py","file_name":"scrub.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"645222555","text":"import json\nimport boto3\n\ndef lambda_handler(event, context):\n \n event = event[\"body-json\"]\n \n # check if voter has already voted, if yes, deny vote\n # check if voter is in the table with valid voters\n dynamoDB = boto3.resource('dynamodb')\n \n dynamoVotanti = dynamoDB.Table('Votanti')\n dynamoCandidati = dynamoDB.Table('Candidati')\n \n # table with voters that have the right to vote\n dynamoCNPDB = dynamoDB.Table('CNPDataBase')\n\n cnp = event[\"CNP\"]\n \n partid = event[\"Partid\"]\n nume_candidat = event[\"NumeCandidat\"]\n\n max_cifre_cnp = 13\n\n cnp_gresit = {\n 'message':'false'\n }\n \n if len(cnp) is not max_cifre_cnp or not cnp.isdigit():\n return {\n 'statusCode': 402,\n 'body': json.dumps(cnp_gresit)\n }\n \n # get the amount of votes the candidate that votes has\n response = dynamoCandidati.get_item(\n Key = {\n \"Partid\": partid,\n \"NumeCandidat\": nume_candidat\n }\n )\n \n candidat_inexistent = {\n 'message': \"false\"\n }\n \n if \"Item\" not in response:\n return {\n 'statusCode': 406,\n 'body': json.dumps(candidat_inexistent)\n }\n voturi = response[\"Item\"][\"Voturi\"]\n \n voturi = int(voturi) + 1\n \n dynamoCandidati.update_item(\n Key={\n \"Partid\": partid,\n \"NumeCandidat\": nume_candidat\n },\n UpdateExpression=\"set Voturi = :t\",\n ExpressionAttributeValues={\n ':t': str(voturi)\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n \n dynamoVotanti.put_item(\n Item = {\n 'CNP': cnp\n }\n )\n \n message = {\n 'message':'true'\n }\n \n return {\n 'statusCode': 200,\n 'body': json.dumps(message)\n }\n\n","sub_path":"Cloud/Lambdas/DynamoDB/insert_voters_in_dynamodb.py","file_name":"insert_voters_in_dynamodb.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"15483435","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ncreated by me for task2 to read nexus files and convert each, individual file\nto the 
PHYLIP file format.\n\"\"\"\nimport argparse\nimport glob\nimport os\nfrom Bio import AlignIO\n\n\ndef get_parser():\n \"\"\"\n using argparse to takes the list as input\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--indirectory\", help='directory to use', type=str)\n parser.add_argument(\"--outputfile1\", help='outputfiles_name', type=str)\n parser.add_argument(\"--outputfile2\", help='outputfiles_name', type=str)\n parser.add_argument(\"--outputfile3\", help='outputfiles_name', type=str)\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_parser()\n outfile1 = open(args.outputfile1, \"w\")\n file1 = glob.glob(os.path.join(args.indirectory, \"*.nexus\"))\n with open(file1[0], \"r\") as infile1:\n aln = AlignIO.read(infile1, 'nexus')\n AlignIO.write(aln, outfile1, 'phylip-relaxed')\n outputfile2 = open(args.outputfile2, \"w\")\n with open(file1[1], \"r\") as infile2:\n aln = AlignIO.read(infile2, 'nexus')\n AlignIO.write(aln, outputfile2, 'phylip-relaxed')\n outputfile3 = open(args.outputfile3, \"w\")\n with open(file1[2], \"r\") as infile3:\n aln = AlignIO.read(infile3, 'nexus')\n AlignIO.write(aln, outputfile3, 'phylip-relaxed')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"answers/mforoozani1/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"239123404","text":"y=int(input())\np=list(map(int,input().split()))\ncount1=1\ncount2=1\nfor i in range(0,y-1):\n if(p[i]<(p[i+1])):\n count1=count1+1\n count2=1\n else:\n for j in range(i+1,y-1):\n if(p[j]<(p[j+1])):\n count2=count2+1\n count1=1\n else:\n break\nif(count1>count2):\n print(count1)\nelse:\n print(count2)\n \n","sub_path":"lenmax.py","file_name":"lenmax.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"211344607","text":"GITHUB_USERNAME = \"github-user\"\nGITHUB_PERSONAL_TOKEN = \"github-user-personal-token\"\nGITHUB_REPOSITORY = \"github-repo\"\n\nGITHUB_REPOSITORIES = {\n GITHUB_USERNAME: {\n GITHUB_REPOSITORY: {\n \"uid\": \"ac711a94-adfa-43ff-8f11-195d78c6f57d\",\n \"owner\": GITHUB_USERNAME,\n \"name\": GITHUB_REPOSITORY,\n \"jobs\": [\"test\", \"deploy\"],\n \"status\": \"ready\",\n }\n }\n}\n","sub_path":"examples/hobotnica/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"736317","text":"import os\nimport copy\nimport subprocess\nimport __builtin__\nfrom mock import patch\nimport mock\nfrom config_server import NoResponseException\nfrom config_server import check_if_oprp_started\nfrom config_server import kill_existing_process_on_port\nfrom config_server import get_oprp_pid\nfrom config_server import identify_existing_config_file\nfrom config_server import get_config_file_path\nfrom config_server import create_module_string\nfrom config_server import NoMatchException\nfrom config_server import convert_to_simple_list\nfrom config_server import convert_to_gui_drop_down\nfrom config_server import create_new_configuration_dict\nfrom config_server import get_issuer_from_gui_config\nfrom config_server import write_config_file\nfrom config_server import convert_config_gui_structure\nfrom config_server import create_key_dict_pair_if_non_exist\nfrom config_server import _generate_static_input_fields\nfrom config_server import 
convert_instance\nfrom config_server import convert_to_value_list\nfrom config_server import set_dynamic_discovery_issuer_config_gui_structure\nfrom config_server import load_config_module\n\n__author__ = 'danielevertsson'\n\nimport unittest\n\nclass TestConfigServer(unittest.TestCase):\n\n def test_check_if_oprp_started_raises_NoResponseException(self):\n with self.assertRaises(NoResponseException):\n check_if_oprp_started(None, oprp_url=\"http://1234.1234.1234.1234:8000\", timeout=1)\n\n def test_returns_correct_pid(self):\n p = subprocess.Popen(['grep', 'rp_conf_0.py'], stdout=subprocess.PIPE)\n pid = get_oprp_pid(0)\n self.assertEqual(p.pid, pid)\n\n def test_killing_existing_process(self):\n _port = 0\n _filename = \"rp_conf_%s.py\" % _port\n\n #Process which simulate a running OPRP instance\n subprocess.Popen(['grep', _filename], stdout=subprocess.PIPE)\n kill_existing_process_on_port(_port)\n _pid = get_oprp_pid(_port)\n self.assertEqual(_pid, None)\n\n def test_convert_to_simple_list(self):\n gui_list = [{\"name\": \"test1\"}, {\"name\": \"test2\"}]\n simple_list = convert_to_simple_list(gui_list)\n self.assertEqual([\"test1\", \"test2\"], simple_list)\n\n def test_convert_to_gui_list(self):\n gui_list = convert_to_gui_drop_down([\"test1\", \"test2\"])\n expected_list = [{\"type\": \"test1\", \"name\": \"test1\"},\n {\"type\": \"test2\", \"name\": \"test2\"}]\n self.assertEqual(gui_list, expected_list)\n\n @patch('config_server._generate_static_input_fields')\n def test_get_static_disco_issuer_from_gui_config(self, mock_generate_static_input_fields):\n issuer = 'issuer_test'\n mock_generate_static_input_fields.return_value = _generate_static_input_fields(issuer)\n gui_config = create_new_configuration_dict()\n returned_issuer = get_issuer_from_gui_config(gui_config)\n self.assertEqual(issuer, returned_issuer)\n\n def test_get_dynamic_disco_issuer_from_gui_config(self):\n issuer = 'issuer_test'\n gui_config = create_new_configuration_dict()\n gui_config = set_dynamic_discovery_issuer_config_gui_structure(issuer, gui_config)\n returned_issuer = get_issuer_from_gui_config(gui_config)\n self.assertEqual(issuer, returned_issuer)\n\n @patch('config_server._generate_static_input_fields')\n def test_get_static_disco_issuer_from_gui_config_containing_both_dynamic_and_static_issuer(self, mock_generate_static_input_fields):\n static_issuer = 'static_issuer'\n mock_generate_static_input_fields.return_value = _generate_static_input_fields(static_issuer)\n gui_config = create_new_configuration_dict()\n gui_config = set_dynamic_discovery_issuer_config_gui_structure(\"dynamic_issuer\", gui_config, show_field=False)\n returned_issuer = get_issuer_from_gui_config(gui_config)\n self.assertEqual(static_issuer, returned_issuer)\n\n def _setup_config_server_mock(self, mock_server_config):\n mock_server_config.OPRP_DIR_PATH = '../rp/'\n mock_server_config.OPRP_SSL_MODULE = \"sslconf\"\n mock_server_config.HOST = \"localhost\"\n\n @patch('config_server.CONF')\n def test_identify_existing_config_file(self, mock_server_config):\n self._setup_config_server_mock(mock_server_config)\n\n _port = 0\n config_file = get_config_file_path(_port, mock_server_config.OPRP_DIR_PATH)\n configuration = create_module_string({}, _port)\n write_config_file(config_file, configuration, _port)\n\n config_client_dict = identify_existing_config_file(_port)\n self.assertTrue(isinstance(config_client_dict, dict))\n os.remove(config_file)\n\n @patch('config_server.CONF')\n def test_identify_existing_missing_client_attribute(self, 
mock_server_config):\n self._setup_config_server_mock(mock_server_config)\n _port = 1\n config_file = get_config_file_path(_port, mock_server_config.OPRP_DIR_PATH)\n write_config_file(config_file, \"\", _port)\n with self.assertRaises(AttributeError):\n identify_existing_config_file(_port)\n os.remove(config_file)\n\n @patch('config_server.CONF')\n def test_identify_config_file_which_does_not_exist(self, mock_server_config):\n self._setup_config_server_mock(mock_server_config)\n with self.assertRaises(NoMatchException):\n identify_existing_config_file(-1)\n\n def test_create_key_if_non_exist(self):\n dict = {}\n dict = create_key_dict_pair_if_non_exist(\"key\", dict)\n dict['key']['sub_key'] = \"value\"\n self.assertEqual(dict['key']['sub_key'], \"value\")\n\n @patch('config_server.identify_existing_config_file')\n def test_overwrite_static_with_dynamic_discovery(self, mock_identify_existing_config_file):\n static_client_discovery_info = {\"provider_info\": {\"jwks_uri\": \"example.com/jwks\",\n \"authorization_endpoint\": \"example.com/auth\",\n \"response_types_supported\": \"response_types_supported\",\n \"id_token_signing_alg_values_supported\": ['alg'],\n \"subject_types_supported\": ['subject_type'],\n \"issuer\": \"example.com\"}}\n mock_identify_existing_config_file.return_value = copy.deepcopy(static_client_discovery_info)\n\n dynamic_discovery_issuer = \"example2.com\"\n new_gui_config = create_new_configuration_dict()\n new_gui_config = set_dynamic_discovery_issuer_config_gui_structure(dynamic_discovery_issuer, new_gui_config)\n config_file_dict = convert_config_gui_structure(new_gui_config, 0, \"id\")\n\n self.assertDictContainsSubset({\"srv_discovery_url\": dynamic_discovery_issuer}, config_file_dict)\n with self.assertRaises(KeyError):\n config_file_dict['provider_info']\n\n @patch('config_server._generate_static_input_fields')\n @patch('config_server.identify_existing_config_file')\n def test_overwrite_dynamic_with_static_discovery(self, mock_identify_existing_config_file, mock_generate_static_input_fields):\n dynamic_discovery_issuer = \"example2.com\"\n static_client_discovery_info = {\"srv_discovery_url\": dynamic_discovery_issuer}\n mock_identify_existing_config_file.return_value = copy.deepcopy(static_client_discovery_info)\n\n default_static_discovery_value = \"example\"\n mock_generate_static_input_fields.return_value = _generate_static_input_fields(default_static_discovery_value)\n new_gui_config = create_new_configuration_dict()\n new_gui_config['fetchStaticProviderInfo']['showInputFields'] = True\n config_file_dict = convert_config_gui_structure(new_gui_config, 0, \"id\")\n\n self.assertTrue(config_file_dict['provider_info'])\n with self.assertRaises(KeyError):\n config_file_dict['srv_discovery_url']\n\n @patch('config_server.identify_existing_config_file')\n def test_do_not_overwrite_custom_value_config_file(self, mock_identify_existing_config_file):\n custom_info = {\"custom_key\": \"custom_value\"}\n mock_identify_existing_config_file.return_value = copy.deepcopy(custom_info)\n new_gui_config = create_new_configuration_dict()\n config_dict = convert_config_gui_structure(new_gui_config, 0, \"id\")\n self.assertDictContainsSubset(custom_info, config_dict)\n\n def test_convert_list_instance_to_list_should_be_untouched(self):\n to_list = True\n value = [\"test\"]\n field_value = convert_instance(to_list, value)\n self.assertEqual(field_value, convert_to_value_list(value))\n\n def test_convert_string_to_non_list_instance_should_untouched(self):\n to_list = False\n 
value = \"test\"\n field_value = convert_instance(to_list, value)\n self.assertEqual(field_value, value)\n\n def test_convert_string_to_list_instance(self):\n to_list = True\n value = \"test\"\n field_value = convert_instance(to_list, value)\n self.assertEqual(field_value, convert_to_value_list([value]))\n\n @patch('config_server.identify_existing_config_file')\n def test_if_instance_id_is_save_to_config_file(self, mock_identify_existing_config_file):\n new_gui_config = create_new_configuration_dict()\n instance_id = \"my_instance_id\"\n mock_identify_existing_config_file.side_effect = NoMatchException()\n config_dict = convert_config_gui_structure(new_gui_config, 0, instance_id)\n self.assertDictContainsSubset({\"instance_id\": instance_id}, config_dict)\n\n def test_import_non_existing_module(self):\n with self.assertRaises(ImportError):\n load_config_module(\"non_existing_module\")\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/oic_op/config_server/test_config_server.py","file_name":"test_config_server.py","file_ext":"py","file_size_in_byte":9841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"506100474","text":"from __future__ import annotations\n\nimport ctypes\nfrom dataclasses import dataclass\nimport numpy as np\nfrom inspect import signature\nimport subprocess\nimport tempfile\nimport os\nimport typing\nimport traceback\nfrom typing import List\nimport codegen.control as control\n\n_debug = False\n\n\ndef logical_and(a, b):\n if isinstance(a, CTypeWrapper):\n return a.logical_and(b)\n else:\n return a and b\n\n\ndef logical_or(a, b):\n if isinstance(a, CTypeWrapper):\n return a.logical_or(b)\n else:\n return a or b\n\n\n@dataclass\nclass _TypeInfo:\n codegen_type: str\n np_type: typing.Type[np.number]\n ctype: object\n py_type: typing.Union[typing.Type[int], typing.Type[float]]\n\n\n_known_types = [\n _TypeInfo('int32_t', np.int32, ctypes.c_int, int),\n _TypeInfo('int64_t', np.int64, ctypes.c_long, int),\n _TypeInfo('uint32_t', np.uint32, ctypes.c_uint, int),\n _TypeInfo('uint64_t', np.uint64, ctypes.c_ulong, int),\n _TypeInfo('float', np.float32, ctypes.c_float, float),\n _TypeInfo('double', np.float64, ctypes.c_double, float),\n]\n\n\ndef type_info(*,\n codegen_type: str = None,\n np_type: typing.Type[np.number] = None,\n ctype: object = None,\n py_type: typing.Union[typing.Type[int], typing.Type[float]] = None) -> _TypeInfo:\n # must specify exactly one query parameter\n assert sum([int(codegen_type is not None),\n int(np_type is not None),\n int(ctype is not None),\n int(py_type is not None)]) == 1\n # python type overrides\n if py_type is not None:\n if py_type == int:\n np_type = np.int64\n elif py_type == float:\n np_type = np.float64\n else:\n raise ValueError(f'bad py_type: {py_type}')\n # TODO use dicts for lookup maybe\n for t in _known_types:\n if t.codegen_type == codegen_type: return t\n if t.np_type == np_type: return t\n if t.ctype == ctype: return t\n else:\n raise ValueError(f'no match found for inputs {codegen_type} {np_type} {ctype} {py_type}')\n\n\ndef normalize_to_type_info(t) -> _TypeInfo:\n if t == 'int':\n return type_info(np_type=np.int64)\n if t == 'uint':\n return type_info(np_type=np.uint64)\n if t == int or t == float:\n return type_info(py_type=t)\n if isinstance(t, str):\n return type_info(codegen_type=t)\n if issubclass(t, np.number):\n return type_info(np_type=t)\n return type_info(ctype=t)\n\n\nclass Match:\n def __init__(self, context: Context, outvar):\n self.context = 
context\n self.outvar = outvar\n if hasattr(outvar, 'to_atoms'):\n outvars: List[CTypeWrapper] = outvar.to_atoms()\n self.outvars = [CTypeWrapper(context, context.get_varname(v.type), v.type) for v in outvars]\n else:\n assert isinstance(outvar, CTypeWrapper)\n self.outvars = [CTypeWrapper(context, context.get_varname(outvar.type), outvar.type)]\n # list of (condition variable, func) to be run\n # resulting code looks like:\n # v1 = \n # v2 = \n # v3;\n # ...\n # if (v1) {stuff; v3 = result}\n # else if (v2) {stuff; v3 = result}\n self.cases = []\n\n def case(self, condition: str):\n # no parameters, I guess?\n def foo(func):\n self.cases.append((condition, func))\n\n return foo\n\n def default(self):\n def foo(func):\n self.cases.append((None, func))\n\n return foo\n\n def get_result(self):\n self.context.label('condition')\n self.context.code_direct(\"if (0) {}\\n\")\n for cond, func in self.cases:\n if cond is not None:\n assert isinstance(cond, CTypeWrapper)\n self.context.code_direct(f\"else if ({cond.var}) {{\\n\")\n else:\n self.context.code_direct(f\"else {{\\n\")\n res_var: CTypeWrapper = func()\n\n if hasattr(res_var, 'to_atoms'):\n assert len(res_var.to_atoms()) == len(self.outvars)\n for lvar, rvar in zip(self.outvars, res_var.to_atoms()):\n assert isinstance(lvar, CTypeWrapper)\n assert isinstance(rvar, CTypeWrapper)\n self.context.code_line(f\"{lvar.var} = {rvar.var}\")\n self.outvar = self.outvar.from_atoms(self.outvars)\n\n else:\n assert len(self.outvars) == 1\n assert isinstance(self.outvars[0], CTypeWrapper)\n self.context.code_line(f\"{self.outvars[0].var} = {res_var.var}\")\n self.outvar = self.outvars[0]\n self.context.code_direct(\"}\\n\")\n return self.outvar\n\n\nclass Context(control.Context):\n\n def __init__(self):\n self.last_var = 0\n self.last_tag = 0\n self._code = ''\n self._debug_filename_log = False\n # self._debug_filename_log = True\n\n def label(self, tag: str = ''):\n self._code += self._label(tag)\n\n def _label(self, tag: str = '') -> str:\n \"\"\"Search up the stack to find where we are being called from\"\"\"\n for frame in traceback.extract_stack()[::-1]:\n # find the first frame outside of this file\n if frame.filename != __file__:\n self.last_tag += 1\n if self._debug_filename_log:\n return f'puts(\"// {frame.filename}:{frame.lineno} {tag} {self.last_tag}\");\\n'\n else:\n return f'#line {frame.lineno} \"{frame.filename}\"\\n'\n else:\n return \"\"\n\n def match(self, var):\n return Match(self, var)\n\n def code_line(self, line: str):\n self._code += self._label()\n self._code += line + ';\\n'\n\n def code_direct(self, code):\n self._code += code\n\n def literal(self, x, type):\n self.label('literal')\n # type = _py_codegen_type_map[type]\n type = normalize_to_type_info(type)\n if isinstance(x, CTypeWrapper) and x.type == type:\n return x\n elif isinstance(x, CTypeWrapper):\n x = x.var\n var = self.get_varname(type)\n # TODO: smarter casts?\n self._code += f'{var} = ({type.codegen_type})({x});\\n'\n return CTypeWrapper(self, var, type)\n\n def cast(self, x, type):\n self.label('cast')\n return self.literal(x, type)\n\n def get_varname(self, t: _TypeInfo) -> str:\n if not isinstance(t, _TypeInfo):\n t = normalize_to_type_info(t)\n self.label('get_var')\n var = f'v{self.last_var}'\n self._code += f'{t.codegen_type} {var};\\n'\n self.last_var += 1\n return var\n\n def get_var_wrapper(self, t: _TypeInfo) -> CTypeWrapper:\n varname = self.get_varname(t)\n wrapper = CTypeWrapper(self, varname, t)\n return wrapper\n\n def logical_and(self, a, b):\n if 
hasattr(a, 'logical_and'):\n return a.logical_and(b)\n else:\n return a and b\n\n def logical_or(self, a, b):\n if hasattr(a, 'logical_or'):\n return a.logical_or(b)\n else:\n return a or b\n\n\nclass CTypeWrapper:\n\n def __init__(self, context: Context, var: str, type: _TypeInfo):\n self.context = context\n self.var = var\n # TODO changet type to TypeInfo\n self.type = type\n\n def general_arithmetic(self, other: CTypeWrapper, op):\n if not isinstance(other, CTypeWrapper):\n # TODO: smarter casts\n other_var = f'({self.type.codegen_type})({other})'\n else:\n other_var = other.var\n new_var = self.context.get_varname(self.type)\n self.context.code_line(f'{new_var} = {self.var} {op} {other_var}')\n return CTypeWrapper(self.context, new_var, self.type)\n\n def __add__(self, other: CTypeWrapper):\n return self.general_arithmetic(other, '+')\n\n def __sub__(self, other):\n return self.general_arithmetic(other, '-')\n\n def __mul__(self, other):\n return self.general_arithmetic(other, '*')\n\n def __floordiv__(self, other):\n return self.general_arithmetic(other, '/')\n\n # TODO division\n # def __truediv__(self, other):\n # if not isinstance(other, CTypeWrapper):\n # # TODO: smarter casts\n # other_var = f'(double)({other})'\n # else:\n # other_var = self.context.cast(other, 'double').var\n # double_self = self.context.cast(self, 'double')\n # new_var = self.context.get_var(self.type)\n # self.context.code_line(f'{new_var} = {double_self.var} / {other_var}')\n # return CTypeWrapper(self.context, new_var, self.type)\n\n def __gt__(self, other):\n return self.general_arithmetic(other, '>')\n\n def __ge__(self, other):\n return self.general_arithmetic(other, '>=')\n\n def __lt__(self, other):\n return self.general_arithmetic(other, '<')\n\n def __le__(self, other):\n return self.general_arithmetic(other, '<=')\n\n def __eq__(self, other):\n return self.general_arithmetic(other, '==')\n\n def __lshift__(self, other):\n return self.general_arithmetic(other, '<<')\n\n def __rshift__(self, other):\n return self.general_arithmetic(other, '>>')\n\n def __and__(self, other):\n return self.general_arithmetic(other, '&')\n\n def __or__(self, other):\n return self.general_arithmetic(other, '|')\n\n def __xor__(self, other):\n return self.general_arithmetic(other, '^')\n\n def logical_and(self, other):\n return self.general_arithmetic(other, '&&')\n\n def logical_or(self, other):\n return self.general_arithmetic(other, '||')\n\n def __neg__(self):\n new_var = self.context.get_varname(self.type)\n self.context.code_line(f'{new_var} = -{self.var}')\n return CTypeWrapper(self.context, new_var, self.type)\n\n def __str__(self):\n return f'{self.type} {self.var}'\n\n\ndef codegen_compile(func, return_type, *arg_types):\n \"\"\"\n :param func:\n :param datatype: either 'float', 'double' or 'int'\n :return:\n \"\"\"\n func_name = func.__name__\n sig = signature(func)\n assert len(arg_types) + 1 == len(sig.parameters)\n\n return_typeinfo = normalize_to_type_info(return_type)\n if return_typeinfo is None:\n return None\n return_codegen_type = return_typeinfo.codegen_type\n\n context = Context()\n codegen_params = [context.get_var_wrapper(normalize_to_type_info(t)) for t in arg_types]\n header_params = [p.type.codegen_type + ' ' + p.var for p in codegen_params]\n\n # header\n context._code = f\"\"\"\n#include \n{return_codegen_type} {func_name}({','.join(header_params)}){{\n\"\"\"\n\n ret = func(context, *codegen_params)\n code = context._code\n code += f'return {ret.var};\\n}}\\n'\n\n if not _debug:\n fd, 
code_file_path = tempfile.mkstemp(prefix='codegen_', suffix='.c', text=True)\n os.close(fd)\n else:\n code_file_path = '/tmp/testfile.c'\n\n with open(code_file_path, 'w') as f:\n f.write(code)\n\n lib_file_path = code_file_path + '.so'\n\n try:\n subprocess.check_call([\n 'gcc',\n code_file_path,\n '-o', lib_file_path,\n '-fPIC',\n '-shared',\n '-O3',\n ])\n lib = ctypes.CDLL(lib_file_path)\n cfunc = lib[func_name]\n cfunc.restype = return_typeinfo.ctype\n cfunc.argtypes = [normalize_to_type_info(t).ctype for t in arg_types]\n\n finally:\n if not _debug:\n os.unlink(code_file_path)\n os.unlink(lib_file_path)\n\n setattr(cfunc, 'source', code)\n\n return cfunc\n","sub_path":"codegen/codegen_c.py","file_name":"codegen_c.py","file_ext":"py","file_size_in_byte":11679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"400117232","text":"class Matrix:\n def __init__(self, matrixVal):\n self.matrix = matrixVal\n\n\nclass Solution:\n def removeIslands(self, matrix):\n # Time O(n*m) || Space O(n*m)\n if not matrix:\n return matrix\n\n n = len(matrix)\n m = len(matrix[0])\n rangeVal = [0, m - 1, n - 1]\n resultMatrix = Matrix(matrix)\n for i in range(0, n): # Time O(n*m)\n for j in range(0, m):\n if i in rangeVal or j in rangeVal:\n if resultMatrix.matrix[i][j] == 1:\n self.identifyBoundryIsland(i, j, resultMatrix, n, m)\n\n for i in range(0, n): # Time O(n*m)\n for j in range(0, m):\n if resultMatrix.matrix[i][j] == 2:\n resultMatrix.matrix[i][j] = 1\n elif resultMatrix.matrix[i][j] == 1:\n resultMatrix.matrix[i][j] = 0\n return resultMatrix.matrix\n\n def identifyBoundryIsland(self, row, col, resultMatrix, n, m):\n # Space O(n*m)[recursive Call]\n\n if 0 <= row < n and 0 <= col < m and resultMatrix.matrix[row][col] == 1:\n resultMatrix.matrix[row][col] = 2\n\n self.identifyBoundryIsland(row + 1, col, resultMatrix, n, m)\n self.identifyBoundryIsland(row, col + 1, resultMatrix, n, m)\n self.identifyBoundryIsland(row - 1, col, resultMatrix, n, m)\n self.identifyBoundryIsland(row, col - 1, resultMatrix, n, m)\n\n return\n\n\nif __name__ == \"__main__\":\n\n print(\n Solution().removeIslands(\n [\n [1, 0, 0, 0, 1, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 1, 0],\n [1, 1, 0, 1, 0, 0, 1, 0],\n [1, 1, 0, 1, 1, 0, 1, 0],\n [1, 0, 0, 0, 1, 0, 0, 0],\n ]\n )\n )\n","sub_path":"Medium/removeIslands.py","file_name":"removeIslands.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"171575705","text":"\"\"\" Benchmarks for finite_range routine\n\nRun benchmarks with::\n\n import nibabel as nib\n nib.bench()\n\nIf you have doctests enabled by default in nose (with a noserc file or\nenvironment variable), and you have a numpy version <= 1.6.1, this will also\nrun the doctests, let's hope they pass.\n\nRun this benchmark with:\n\n nosetests -s --match '(?:^|[\\\\b_\\\\.//-])[Bb]ench' /path/to/bench_finite_range\n\"\"\"\nfrom __future__ import division, print_function\n\nimport sys\n\nimport numpy as np\n\n\nfrom .butils import print_git_title\n\nfrom numpy.testing import measure\n\nfrom nibabel.volumeutils import finite_range # NOQA\n\n\ndef bench_finite_range():\n rng = np.random.RandomState(20111001)\n repeat = 10\n img_shape = (128, 128, 64, 10)\n arr = rng.normal(size=img_shape)\n sys.stdout.flush()\n print_git_title(\"\\nFinite range\")\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 all finite', mtime))\n arr[:, :, :, 1] = np.nan\n mtime 
= measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 many NaNs', mtime))\n arr[:, :, :, 1] = np.inf\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('float64 many infs', mtime))\n # Int16 input, float output\n arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)\n arr = arr.astype(np.int16)\n mtime = measure('finite_range(arr)', repeat)\n print('%30s %6.2f' % ('int16', mtime))\n sys.stdout.flush()\n","sub_path":"env/lib/python3.6/site-packages/nibabel/benchmarks/bench_finite_range.py","file_name":"bench_finite_range.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"183327086","text":"#/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n @File : wordcloud-1.py \n @Contact : guoxin@126.com\n @License : (C)Copyright 2018-2019, xguo\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2020/5/26 10:13 xguo 1.0 None\n\n'''\n\nfrom wordcloud import WordCloud\nimport jieba\nfrom imageio import imread\n\ndef main():\n mk=imread('C:\\project\\python_study\\china.jpg')\n wc=WordCloud(width=800,height=600,background_color=\"white\",font_path=\"C:\\Windows\\Fonts\\simfang.ttf\",mask=mk)\n # wc.generate('Do not go gentle into that good night')\n with open('C:\\project\\python_study\\现代日本.txt',encoding='utf-8') as f:\n t=f.read()\n txt=\" \".join(jieba.lcut(t))\n wc.generate(txt)\n wc.to_file('wchy.jpg')\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"py/wordcloud-1.py","file_name":"wordcloud-1.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"88968571","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport time\nfrom multiprocessing import Process\n\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy.orm import sessionmaker\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nfrom oe_daemonutils.dossierservice.commands.security import RetrieveSystemTokenCommand\nfrom oe_daemonutils.processor import EntryProcessor, FeedProcessor\n\n\nclass DaemonController(object):\n def __init__(self, settings, oauth_helper, daemon_manager_class, service_cls, notifier_class=None,\n feed_endpoint=None, failure_threshold=5, timeout_default=60, max_timeout=300,\n invocation_timeout=60):\n \"\"\"\n Initialize the daemon feed controller given a daemon manager and a daemon processor\n\n :param settings: general configuration settings\n :param oauth_helper: authorization helper to get the system token\n :param daemon_manager_class: data manager class to get and update the latest feed entry id\n :param service_cls: implementation of the dossier service\n :param notifier_class: implementation of a notifier\n :param feed_endpoint: provide a custom feed endpoint (not from teh standard 'daemon.feed.endpoint' setting)\n :param failure_threshold: the couples of times the daemon should failure before opening the circuit\n :param timeout_default: default sleep time while circuit is open\n :param max_timeout: max sleep time while circuit is open\n :param invocation_timeout: max time span an operation should take, before timing out\n \"\"\"\n self.feed_endpoint = feed_endpoint if feed_endpoint else settings['daemon.feed.endpoint']\n self.failure_threshold = failure_threshold\n self.timeout_default = timeout_default\n self.max_timeout = max_timeout\n self.invocation_timeout = invocation_timeout\n # logging\n self.logger = 
logging.getLogger(settings['daemon.logger.name'])\n\n engine = engine_from_config(settings, 'sqlalchemy.')\n self.session_maker = sessionmaker(\n bind=engine,\n extension=ZopeTransactionExtension()\n )\n self.daemon_manager_class = daemon_manager_class\n self.notifier_class = notifier_class\n self.service_cls = service_cls\n self.settings = settings\n self.retrieve_system_token_command = RetrieveSystemTokenCommand(self.logger, oauth_helper=oauth_helper)\n\n def _check_entries(self):\n \"\"\"\n check the feed and process new items\n \"\"\"\n notifier = self.notifier_class(self.settings, self.logger) if self.notifier_class else None\n entry_processor = EntryProcessor(self.settings, self.logger, self.retrieve_system_token_command,\n self.service_cls)\n feed_processor = FeedProcessor(self.logger, self.feed_endpoint,\n self.failure_threshold,\n self.timeout_default,\n self.max_timeout,\n self.invocation_timeout,\n self.retrieve_system_token_command\n )\n session = self.session_maker()\n notifications_dict = None\n entries_to_process = None\n last_entry_ts = None\n last_entry_ts_datetime = None\n daemon_manager = None\n try:\n daemon_manager = self.daemon_manager_class(session)\n last_entry_ts = daemon_manager.retrieve_last_entry_id()\n last_entry_ts_datetime = feed_processor.date_from_string(last_entry_ts) if last_entry_ts else None\n entries_to_process = feed_processor.process_feed(last_entry_ts_datetime)\n notifications_dict = entry_processor.process_entries(entries_to_process, last_entry_ts, daemon_manager)\n\n finally:\n if notifier and notifications_dict and len(notifications_dict) > 0:\n notifier.notify(notifications_dict)\n session.close()\n\n del entries_to_process\n del last_entry_ts\n del last_entry_ts_datetime\n del daemon_manager\n del session\n del notifications_dict\n del notifier\n del entry_processor\n del feed_processor\n\n def run_daemon(self):\n \"\"\"\n check the feed and process new items\n handle errors\n \"\"\"\n\n try:\n self._check_entries()\n except Exception as ex:\n self.logger.error('error')\n self.logger.exception(ex)\n raise ex\n\n def _subprocess_daemon(self):\n p = Process(target=self.run_daemon, args=())\n p.start()\n p.join() # this blocks until the process terminates\n exitcode = None\n while exitcode is None: # make sure there is an exitcode\n exitcode = p.exitcode\n if exitcode != 0:\n raise Exception('processing failed')\n\n def run(self): # pragma: no cover\n \"\"\"\n run the daemon indefinitely\n \"\"\"\n self.logger.info('daemon started')\n try:\n while True:\n self._subprocess_daemon()\n time.sleep(1)\n except (KeyboardInterrupt, SystemExit):\n self._handle_manual_stop()\n except Exception as e:\n self._handle_unrecoverable_error(e)\n\n def _handle_manual_stop(self):\n self.logger.warn('manual stop')\n self.logger.warn('daemon stopped')\n\n def _handle_unrecoverable_error(self, ex):\n self.logger.error('unrecoverable error')\n self.logger.exception(ex)\n self.logger.warn('daemon stopped')\n raise ex\n","sub_path":"oe_daemonutils/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"129457517","text":"#\n# @lc app=leetcode.cn id=49 lang=python3\n#\n# [49] 字母异位词分组\n#\nimport collections\nfrom typing import List\n\n# @lc code=start\n\n\n# class Solution:\n# def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n# mp = collections.defaultdict(list)\n\n# for st in strs:\n# counts = [0] * 26\n# for ch in st:\n# 
counts[ord(ch) - ord(\"a\")] += 1\n# # 需要将 list 转换成 tuple 才能进行哈希\n# mp[tuple(counts)].append(st)\n\n# return list(mp.values())\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n ans = []\n group_map = {}\n\n for s in strs:\n found = False\n for group in ans:\n if self.is_anagram(s, group[0]):\n group.append(s)\n found = True\n break\n if not found:\n ans.append([s])\n\n return ans\n\n def is_anagram(self, s1: str, s2: str) -> bool:\n if len(s1) != len(s2):\n return False\n count = {}\n for c in s1:\n count[c] = count.get(c, 0) + 1\n for c in s2:\n if c not in count:\n return False\n count[c] -= 1\n if count[c] == 0:\n del count[c]\n return len(count) == 0\n\n# @lc code=end\n\n\ns = Solution()\nprint(\n s.groupAnagrams(\n [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\n\n\n'''\n这段代码实现了将给定字符串列表中的异位词进行分组。具体来说,它使用一个 defaultdict 对象 mp 来记录每个异位词所在的组。对于每个字符串 st,它首先计算出 st 中每个字符出现的次数,然后将这些次数组成一个元组作为 key,并将 st 添加到 mp[key] 中。最后,它返回 mp 中所有的值,即每个组中的字符串列表。\n\n这段代码依赖于 Python 的 collections 模块中的 defaultdict 类来创建默认值为列表的字典对象。它还使用了 ord() 函数将字符转换为 ASCII 码,并使用了 tuple() 函数将列表转换为元组。\n\n需要注意的是,这段代码的时间复杂度为 O(nk),其中 n 是字符串列表的长度,k 是字符串的平均长度。虽然它的时间复杂度比一般的暴力算法要好,但如果字符串非常长,仍然可能会超时。\n'''\n\n'''\n\n使用 `dict` 对象也可以实现这个算法,不过需要在添加每个字符串时都要检查它是否属于某个已经存在的异位词组。具体来说,可以遍历已有的异位词组列表,对于每个组,将当前字符串与组中的任意一个字符串进行比较,如果它们是异位词,则将当前字符串添加到该组中,并标记已经处理过。如果当前字符串不属于任何已有的组,则创建一个新的组并将当前字符串添加到其中。\n\n这个算法的时间复杂度为 $O(n^2 k)$,其中 $n$ 是字符串列表的长度,$k$ 是字符串的平均长度。因为需要在两个字符串之间进行比较,所以时间复杂度是二次的。这个算法的空间复杂度为 $O(n)$。\n\n相比之下,使用 `defaultdict` 可以将时间复杂度降低到 $O(nk)$,并且代码更加简洁易懂。因此,使用 `defaultdict` 是更好的选择。\n'''\n","sub_path":"leetcode/49-hash-table-字母异位词分组/字母异位词分组.py","file_name":"字母异位词分组.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"128113525","text":"# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries\n# SPDX-License-Identifier: MIT\n\nimport json\nimport random\nimport time\n\nimport socketpool\nimport rtc\nimport wifi\n\nimport adafruit_ntp\nfrom adafruit_azureiot import IoTHubDevice\n\n# Get wifi details and more from a secrets.py file\ntry:\n from secrets import secrets\nexcept ImportError:\n print(\"WiFi secrets are kept in secrets.py, please add them there!\")\n raise\n\nprint(\"Connecting to WiFi...\")\nwifi.radio.connect(secrets[\"ssid\"], secrets[\"password\"])\n\nprint(\"Connected to WiFi!\")\n\nif time.localtime().tm_year < 2022:\n print(\"Setting System Time in UTC\")\n pool = socketpool.SocketPool(wifi.radio)\n ntp = adafruit_ntp.NTP(pool, tz_offset=0)\n\n # NOTE: This changes the system time so make sure you aren't assuming that time\n # doesn't jump.\n rtc.RTC().datetime = ntp.datetime\nelse:\n print(\"Year seems good, skipping set time.\")\n\n# You will need an Azure subscription to create an Azure IoT Hub resource\n#\n# If you don't have an Azure subscription:\n#\n# If you are a student, head to https://aka.ms/FreeStudentAzure and sign up, validating with your\n# student email address. 
This will give you $100 of Azure credit and free tiers of a load of\n# service, renewable each year you are a student\n#\n# If you are not a student, head to https://aka.ms/FreeAz and sign up to get $200 of credit for 30\n# days, as well as free tiers of a load of services\n#\n# Create an Azure IoT Hub and an IoT device in the Azure portal here:\n# https://aka.ms/AzurePortalHome.\n# Instructions to create an IoT Hub and device are here: https://aka.ms/CreateIoTHub\n#\n# The free tier of IoT Hub allows up to 8,000 messages a day, so try not to send messages too often\n# if you are using the free tier\n#\n# Once you have a hub and a device, copy the device primary connection string.\n# Add it to the secrets.py file in an entry called device_connection_string\n#\n# The adafruit-circuitpython-azureiot library depends on the following libraries:\n#\n# From the Adafruit CircuitPython Bundle https://github.com/adafruit/Adafruit_CircuitPython_Bundle:\n# * adafruit-circuitpython-minimqtt\n# * adafruit-circuitpython-requests\n\n\nesp = None\npool = socketpool.SocketPool(wifi.radio)\n# Create an IoT Hub device client and connect\ndevice = IoTHubDevice(pool, esp, secrets[\"device_connection_string\"])\n\nprint(\"Connecting to Azure IoT Hub...\")\n\n# Connect to IoT Central\ndevice.connect()\n\nprint(\"Connected to Azure IoT Hub!\")\n\nmessage_counter = 60\n\nwhile True:\n try:\n # Send a device to cloud message every minute\n # You can see the overview of messages sent from the device in the Overview tab\n # of the IoT Hub in the Azure Portal\n if message_counter >= 60:\n message = {\"Temperature\": random.randint(0, 50)}\n device.send_device_to_cloud_message(json.dumps(message))\n message_counter = 0\n else:\n message_counter += 1\n\n # Poll every second for messages from the cloud\n device.loop()\n except (ValueError, RuntimeError) as e:\n print(\"Connection error, reconnecting\\n\", str(e))\n # If we lose connectivity, reset the wifi and reconnect\n wifi.radio.enabled = False\n wifi.radio.enabled = True\n wifi.radio.connect(secrets[\"ssid\"], secrets[\"password\"])\n device.reconnect()\n continue\n time.sleep(1)\n","sub_path":"examples/azureiot_native_networking/azureiot_hub_simpletest.py","file_name":"azureiot_hub_simpletest.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"630789895","text":"import zipfile\n\nfrom zavod import Context\nfrom zavod.shed.bods import parse_bods_fh\n\n\ndef crawl(context: Context) -> None:\n fn = context.fetch_resource(\"source.zip\", context.data_url)\n with zipfile.ZipFile(fn, \"r\") as zf:\n for name in zf.namelist():\n if not name.endswith(\".json\"):\n continue\n with zf.open(name, \"r\") as fh:\n parse_bods_fh(context, fh)\n","sub_path":"datasets/dk/cvr/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259986869","text":"from django.utils.translation import ugettext as _\nfrom django.conf import settings\n\nfrom django.contrib.gis.db import models\n\nimport datetime\nfrom time import strftime, gmtime\nfrom django.utils import timezone\n\n\n############\n# Response options for CharField data types\n#\nINCIDENT_CHOICES = (\n ('Collision', (\n ('Collision with stationary object or vehicle', 'Collision with a stationary object or vehicle'),\n ('Collision with moving object or vehicle', 'Collision with a moving object or vehicle'),\n )\n 
),\n ('Near miss', (\n ('Near collision with stationary object or vehicle', 'Near miss with a stationary object or vehicle'),\n ('Near collision with moving object or vehicle', 'Near miss with a moving object or vehicle'),\n )\n ),\n ('Fall', (\n ('Fall', 'Lost control and fell'),\n )\n )\n)\nINCIDENT_WITH_CHOICES = (\n ('Vehicle', (\n ('Vehicle, head on', 'Head on'),\n ('Vehicle, side', 'Side impact'),\n ('Vehicle, angle', 'Angle impact'),\n ('Vehicle, rear end', 'Rear end'),\n ('Vehicle, open door', 'Open vehicle door'),\n )\n ),\n ('Person/animal', (\n ('Another cyclist', 'Another cyclist'),\n ('Pedestrian', 'Pedestrian'),\n ('Animal', 'Animal'),\n )\n ),\n ('Infrastructure', (\n ('Curb', 'Curb'),\n ('Train Tracks', 'Train Tracks'),\n ('Pothole', 'Pothole'),\n ('Lane divider', 'Lane divider'),\n ('Sign/Post', 'Sign/Post'),\n ('Roadway', 'Roadway'),\n )\n ),\n ('Other', 'Other (please describe)')\n)\n\nINJURY_CHOICES = (\n ('Yes', (\n ('Injury, no treatment', 'Medical treatment not required'),\n ('Injury, saw family doctor', 'Saw a family doctor'),\n ('Injury, hospital emergency visit', 'Visited the hospital emergency dept.'),\n ('Injury, hospitalized', 'Overnight stay in hospital')\n )\n ),\n ('No', (\n ('No injury', 'No injury'),\n )\n )\n)\n\nPURPOSE_CHOICES = (\n (\"Commute\", \"To/from work or school\"),\n (\"Exercise or recreation\", \"Exercise or recreation\"),\n (\"Social reason\", \"Social reason (e.g., movies, visit friends)\"),\n (\"Personal business\", \"Personal business\"),\n (\"During work\", \"During work\")\n)\nROAD_COND_CHOICES = (\n ('Dry', 'Dry'),\n ('Wet','Wet'),\n ('Loose sand, gravel, or dirt', 'Loose sand, gravel, or dirt'),\n ('Icy','Icy'),\n ('Snowy','Snowy'),\n ('Don\\'t remember', 'I don\\'t remember')\n)\nSIGHTLINES_CHOICES = (\n ('No obstructions', 'No obstructions'),\n ('View obstructed', 'View obstructed'),\n ('Glare or reflection', 'Glare or reflection'),\n ('Obstruction on road', 'Obstruction on road'),\n ('Don\\'t Remember', 'Don\\'t Remember')\n)\nRIDING_ON_CHOICES = (\n ('Busy street', (\n ('Busy street bike lane', 'On a painted bike lane'),\n ('Busy street, no bike facilities', 'On road with no bike facilities')\n )\n ),\n ('Quiet street', (\n ('Quiet street bike lane', 'On a painted bike lane'),\n ('Quiet street, no bike facilities', 'On road with no bike facilities')\n )\n ),\n ('Not on the street', (\n ('Cycle track', 'On a physically separated bike lane (cycle track)'),\n ('Mixed use trail', 'On a mixed use trail'),\n ('Sidewalk', 'On the sidewalk'),\n )\n ),\n ('Don\\'t remember', 'I don\\'t remember')\n)\nLIGHTS_CHOICES = (\n (\"NL\", \"No Lights\"),\n (\"FB\", \"Front and back lights\"),\n (\"F\", \"Front lights only\"),\n (\"B\", \"Back lights only\"),\n ('Don\\'t remember', 'I don\\'t remember')\n\n)\nTERRAIN_CHOICES = (\n ('Uphill', 'Uphill'),\n ('Downhill','Downhill'),\n ('Flat', 'Flat'),\n ('Don\\'t remember', 'I don\\'t remember')\n)\n\n# AGE_CHOICES = ((\"2001\", \"2001\"), (\"2000\", \"2000\") ... 
(\"1915\", \"1915\")) Based on current year minus youngest age a person can report and year for 100-year-old\nYOUNGEST_AGE = 13\nyoungestYear = int(strftime(\"%Y\", gmtime())) - YOUNGEST_AGE\nAGE_CHOICES = []\nfor y in xrange(100):\n AGE_CHOICES.append((str(youngestYear-y), str(youngestYear-y)))\n\nfrom calendar import month_name as month\nMONTH_CHOICES = [(str(i+1), str(month[i+1])) for i in xrange(12)]\n\n\nSEX_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n ('Other', 'Other')\n)\nBOOLEAN_CHOICES = (\n ('Y', 'Yes'),\n ('N', 'No'),\n ('I don\\'t know', 'I don\\'t know')\n)\nCARDINAL_DIRECTIONS_CHOICES = (\n ('N','N'),\n ('NE','NE'),\n ('E','E'),\n ('SE','SE'),\n ('S','S'),\n ('SW','SW'),\n ('W','W'),\n ('NW', 'NW'),\n ('I don\\'t know', 'I don\\'t know')\n)\nTURNING_CHOICES = (\n ('Heading straight','Heading straight'),\n ('Turning left','Turning left'),\n ('Turning right','Turning right'),\n ('I don\\'t remember', 'I don\\'t remember')\n)\n\n\n##########\n# Incident class.\n# Main class for Incident Report. Contains all required, non-required, and spatial fields. Setup to allow easy export to a singular shapefile.\n# Captures all data about the accident and environmental conditions when the bike incident occurred.\nclass Incident(models.Model):\n ########### INCIDENT FIELDS\n date = models.DateTimeField(\n 'Date reported',\n auto_now_add=True # Date is set automatically when object created\n )\n # Spatial fields\n # Default CRS -> WGS84\n geom = models.PointField(\n 'Location'\n )\n objects = models.GeoManager() # Required to conduct geographic queries\n\n incident_date = models.DateTimeField(\n 'When was the incident?'\n )\n\n incident = models.CharField(\n 'What type of incident was it?',\n max_length=150,\n choices=INCIDENT_CHOICES\n )\n\n incident_with = models.CharField(\n 'What sort of object did you collide or nearly collide with?',\n max_length=100,\n choices=INCIDENT_WITH_CHOICES\n )\n\n # Injury details (all optional)\n injury = models.CharField(\n 'Were you injured?',\n max_length=50,\n choices= INJURY_CHOICES # Without this, field has 'Unknown' for None rather than the desired \"---------\"\n )\n\n trip_purpose = models.CharField(\n 'What was the purpose of your trip?',\n max_length=50,\n choices=PURPOSE_CHOICES,\n blank=True,\n null=True\n )\n ###########\n\n ############## PERSONAL DETAILS FIELDS\n # Personal details about the participant (all optional)\n age = models.CharField(\n 'What is your birth year?',\n max_length=15,\n choices=AGE_CHOICES,\n blank=True,\n null=True\n )\n birthmonth = models.CharField(\n 'What is your birth month?',\n max_length=15,\n choices=MONTH_CHOICES,\n blank=True,\n null=True\n )\n sex = models.CharField(\n 'Please select your sex',\n max_length=10,\n choices=SEX_CHOICES,\n blank=True,\n null=True\n )\n regular_cyclist = models.CharField(\n 'Do you bike at least once a week?',\n max_length=50,\n choices=BOOLEAN_CHOICES,\n blank=True,\n null=True\n )\n helmet = models.CharField(\n 'Were you wearing a helmet?',\n max_length=50,\n choices=BOOLEAN_CHOICES,\n blank=True,\n null=True\n )\n intoxicated = models.CharField(\n 'Were you intoxicated?',\n max_length=50,\n choices=BOOLEAN_CHOICES,\n blank=True,\n null=True\n )\n #######################\n\n ############### CONDITIONS FIELDS\n road_conditions = models.CharField(\n 'What were the road conditions?',\n max_length=50,\n choices=ROAD_COND_CHOICES,\n blank=True,\n null=True\n )\n sightlines = models.CharField(\n 'How were the sight lines?',\n max_length=50,\n choices=SIGHTLINES_CHOICES,\n 
blank=True,\n null=True\n )\n cars_on_roadside = models.CharField(\n 'Were there cars parked on the roadside',\n max_length=50,\n choices= BOOLEAN_CHOICES,\n blank=True,\n null=True\n )\n riding_on = models.CharField(\n 'Where were you riding your bike?',\n max_length=50,\n choices=RIDING_ON_CHOICES,\n blank=True,\n null=True\n )\n bike_lights = models.CharField(\n 'Were you using bike lights?',\n max_length=200,\n choices=LIGHTS_CHOICES,\n blank=True,\n null=True\n )\n terrain = models.CharField(\n 'What was the terrain like?',\n max_length=50,\n choices=TERRAIN_CHOICES,\n blank=True,\n null=True\n )\n direction = models.CharField(\n 'What direction were you heading?',\n max_length=50,\n choices=CARDINAL_DIRECTIONS_CHOICES,\n blank=True,\n null=True\n )\n turning = models.CharField(\n 'How were you moving?',\n max_length=50,\n choices=TURNING_CHOICES,\n blank=True,\n null=True\n )\n ########################\n\n ########## DETAILS FIELDS\n incident_detail = models.TextField(\n 'Please give a brief description of the incident',\n max_length=300,\n blank=True,\n null=True\n )\n\n # Placeholder for automatically added weather using an HTTP_GET from rss?\n weather = models.CharField(\n 'What was the weather like?',\n max_length=100,\n blank=True,\n null=True\n )\n ##############\n\n\n # reverses latlngs and turns tuple of tuples into list of lists\n def latlngList(self):\n return list(self.geom)[::-1]\n\n\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(weeks=1) <= self.date < now\n\n @property\n def incident_type(self):\n for (kind, choices) in INCIDENT_CHOICES:\n for c in choices:\n if self.incident in c: return kind\n\n # For admin site\n was_published_recently.admin_order_field = 'date'\n was_published_recently.boolean = True\n was_published_recently.short_description = 'Reported this week?'\n\n # toString()\n def __unicode__(self):\n return unicode(self.incident_date)\n\n class Meta:\n app_label = 'mapApp'\n","sub_path":"mapApp/models/incident.py","file_name":"incident.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"68355433","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\UnityPy\\classes\\PPtr.py\n# Compiled at: 2020-03-30 16:40:06\n# Size of source mod 2**32: 4452 bytes\nfrom ..streams import EndianBinaryReader, EndianBinaryWriter\n\nclass PPtr:\n\n def __init__(self, reader: EndianBinaryReader):\n self.index = -2\n self.file_id = reader.read_int()\n self.path_id = reader.read_int() if reader.version2 < 14 else reader.read_long()\n self.assets_file = reader.assets_file\n\n def __getattr__(self, key):\n manager = None\n if self.file_id == 0:\n manager = self.assets_file\n else:\n if self.file_id > 0:\n if self.file_id - 1 < len(self.assets_file.externals):\n if self.index == -2:\n external_name = self.assets_file.externals[(self.file_id - 1)].name\n files = self.assets_file.parent.files\n if external_name not in files:\n external_name = external_name.upper()\n manager = self.assets_file.parent.files[external_name]\n if manager:\n if self.path_id in manager.objects:\n self = manager.objects[self.path_id]\n return getattr(self, key)\n raise NotImplementedError('PPtr')\n\n def __repr__(self):\n return self.__class__.__name__\n\n def __bool__(self):\n return False\n\n\ndef save_ptr(obj, writer: 
EndianBinaryWriter):\n if isinstance(obj, PPtr):\n writer.write_int(obj.file_id)\n writer.write_int(obj.path_id)\n else:\n writer.write_int(0)\n writer.write_int(obj.path_id)","sub_path":"pycfiles/UnityPy-1.3.0-py3.6/PPtr.cpython-36.py","file_name":"PPtr.cpython-36.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"76643863","text":"from datetime import datetime, timedelta, timezone as _timezone, date as _date, tzinfo\nimport datetime as _datetime\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom . import functions as _func\nfrom pytz import timezone\nimport pytz as _pytz\n\n\n# ---------- Time --------------\nAPI_DATETIME_FORMAT_ISO: str = '%Y-%m-%dT%H:%M:%S'\nDATABASE_DATETIME_FORMAT: str ='%Y-%m-%d %H:%M:%S'\nPRINT_DATETIME_FORMAT: str ='%m/%d %H:%M'\nDEFAULT_TIME_ZONE: timezone = timezone('Asia/Seoul')\n\n\n# ----------- TIME CONSTANT -----------\nFIVE_MINUTES: timedelta = timedelta(minutes=5)\nONE_YEAR: timedelta = timedelta(days=365)\nONE_MONTH: timedelta = timedelta(days=30)\nONE_DAY: timedelta = timedelta(days=1)\nONE_HOUR: timedelta = timedelta(hours=1)\nONE_MINUTE: timedelta = timedelta(minutes=1)\nONE_SECOND: timedelta = timedelta(seconds=1)\nONE_WEEK: timedelta = timedelta(days=7)\n\n\n# ---------- PSS ----------\nPSS_START_DATE: _date = _date(year=2016, month=1, day=6)\nPSS_START_DATETIME: datetime = datetime(year=2016, month=1, day=6)\n\nPSS_TOURNEY_START_DATETIME: datetime = datetime(year=2019, month=10, day=1)\n\n\n# ---------- TIMEOUT-----------\nACCESS_TOKEN_TIMEOUT: timedelta = timedelta(hours=12)\nLOGIN_CHECK_TIMEOUT: timedelta = timedelta(minutes=10)\nSEARCH_IMMUNITY_TIMEOUT: timedelta = timedelta(minutes=10)\nSEARCH_IMMUNITY_PRINT_TIMEOUT: timedelta = timedelta(minutes=180)\nSEARCH_IMMUNITY_SOON_TIMEOUT: timedelta = timedelta(minutes=60)\nBOT_REPLY_TIMEOUT_SEC: int = 10\n\n\n\ndef init():\n global gTimeZone\n\n gTimeZone = DEFAULT_TIME_ZONE\n\n \ndef get_now() -> datetime:\n return datetime.now(gTimeZone)\n\n\ndef get_utc_now() -> datetime:\n return datetime.now(_timezone.utc)\n\n\ndef get_time_zone():\n return gTimeZone\n\n\ndef get_TimeAsTimeZone( aTime: datetime )->datetime:\n sTimeZone = get_time_zone()\n sTime = datetime.strptime( aTime, API_DATETIME_FORMAT_ISO ).astimezone(sTimeZone)\n return sTime\n\n\ndef get_BotTimeUTCFormat( aTime: datetime )->datetime:\n sTimeZone = _timezone.utc\n sTime = datetime.strptime( aTime, API_DATETIME_FORMAT_ISO ).replace(tzinfo=sTimeZone)\n\n return sTime\n\n\ndef get_time_diff( aStart: datetime, aEnd: datetime)->datetime: #str:\n return abs(aEnd- aStart)\n \n \ndef get_next_day( aDate: datetime = None ) -> datetime:\n sDate = aDate or get_utc_now()\n sResult = datetime(sDate.year, sDate.month, sDate.day, tzinfo=_timezone.utc)\n sResult = sResult + ONE_DAY\n return sResult\n\ndef getLastDayOfMonth( aYear:int, aMonth:int ) -> datetime:\n sNextMonth = getFirstDayOfNextMonth( aYear, aMonth )\n\n return sNextMonth - ONE_DAY\n\n\ndef getFirstDayOfNextMonth( aYear:int, aMonth:int ) -> datetime:\n sNextMonth = 0\n sYear = 0\n if ( aMonth == 12 ):\n sNextMonth = 1\n sYear = aYear + 1\n else:\n sNextMonth = aMonth + 1\n sYear = aYear\n\n sFirstDayOfNextMonth = getDateTimeFormatFromDate( sYear, sNextMonth, 1 )\n\n return sFirstDayOfNextMonth\n\n\ndef isTourneyStart() ->bool:\n return True\n sUtcNow = get_utc_now()\n sTourneyFirstDay = getTourneyStartDate( sUtcNow )\n\n return sUtcNow > 
sTourneyFirstDay\n \n \ndef getTourneyStartDate( aUtcNow:datetime ):\n sFirstDayNExtMonth = getFirstDayOfNextMonth( aUtcNow.year, aUtcNow.month )\n result = sFirstDayNExtMonth - ONE_WEEK\n return result\n\n\ndef isStilLogin( aNow:datetime, aLastLoginDate:datetime, aLastHeartBeat:datetime ):\n sIsLogin = True\n \n sLastLoginDate = get_BotTimeUTCFormat( aLastLoginDate )\n sLastHeartBeat = get_BotTimeUTCFormat( aLastHeartBeat )\n \n sIsLogin = True\n sTime = None\n if sLastHeartBeat > sLastLoginDate:\n sTime = sLastHeartBeat\n else:\n sTime = sLastLoginDate\n \n if aNow - sTime > LOGIN_CHECK_TIMEOUT:\n sIsLogin = False\n \n _func.debug_log( \"isStilLogin\", f'NOW : {aNow} HB : {sLastHeartBeat} Login : {sLastLoginDate} sisLogin : {sIsLogin}' )\n return sIsLogin\n\n\ndef getDateTimeFormatFromDate( aYear: int = PSS_START_DATE.year, aMonth: int = PSS_START_DATE.month, aDay: int = 1 ):\n return datetime(year=aYear, month=aMonth, day=aDay, hour=0, minute=0, second=0, microsecond=0, tzinfo= _timezone.utc) \n","sub_path":"utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"442805965","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport yaml\nfrom schema import Schema, And, Use, Optional, Or, Hook\nfrom ..adaptor import FRAMEWORKS\nfrom ..strategy import STRATEGIES\nfrom ..objective import OBJECTIVES\nfrom ..pruners import PRUNERS\nfrom ..utils import logger\nfrom ..version import __version__\nimport re\nimport copy\nimport itertools\nfrom collections import OrderedDict\nfrom .dotdict import DotDict\nimport os, datetime\n\ndef constructor_register(cls):\n yaml_key = \"!{}\".format(cls.__name__)\n\n def constructor(loader, node):\n instance = cls.__new__(cls)\n yield instance\n\n state = loader.construct_mapping(node, deep=True)\n instance.__init__(**state)\n\n yaml.add_constructor(\n yaml_key,\n constructor,\n yaml.SafeLoader,\n )\n return cls\n\n@constructor_register\nclass Pruner():\n def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None,\n target_sparsity=None, update_frequency=1, prune_type='basic_magnitude',\n method='per_tensor', names=[], parameters=None):\n self.start_epoch = start_epoch\n self.end_epoch = end_epoch\n self.update_frequency = update_frequency\n self.target_sparsity = target_sparsity\n self.initial_sparsity = initial_sparsity\n self.update_frequency = update_frequency\n assert prune_type.replace('_', '') in [i.lower() for i in PRUNERS], \\\n 'now only support {}'.format(PRUNERS.keys())\n self.prune_type = prune_type\n self.method = method\n self.names= names\n self.parameters = parameters\n\n# Schema library has different loading sequence priorities for different\n# value types.\n# To make sure the fields under dataloader.transform field of yaml file\n# get loaded with written sequence, this workaround is used to convert\n# None 
to {} in yaml load().\nyaml.SafeLoader.add_constructor('tag:yaml.org,2002:null', lambda loader, node: {})\n# Add python tuple support because best_configure.yaml may contain tuple\nyaml.SafeLoader.add_constructor('tag:yaml.org,2002:python/tuple',\n lambda loader, node: tuple(loader.construct_sequence(node)))\n\ndef _valid_accuracy_field(key, scope, error):\n assert bool(\n 'relative' in scope['accuracy_criterion']) != bool(\n 'absolute' in scope['accuracy_criterion'])\n\ndef _valid_prune_epoch(key, scope, error):\n if \"start_epoch\" in scope and \"end_epoch\" in scope:\n assert scope[\"start_epoch\"] <= scope[\"end_epoch\"]\n\ndef _valid_prune_sparsity(key, scope, error):\n if \"initial_sparsity\" in scope and \"target_sparsity\" in scope:\n assert scope[\"initial_sparsity\"] <= scope[\"target_sparsity\"]\n elif \"initial_sparsity\" in scope:\n assert scope[\"initial_sparsity\"] >= 0\n else:\n assert scope[\"target_sparsity\"] < 1\n\n# used for '123.68 116.78 103.94' style to float list\ndef input_to_list_float(data):\n if isinstance(data, str):\n return [float(s.strip()) for s in data.split()]\n\n if isinstance(data, float):\n return [data]\n\n assert isinstance(data, list)\n return [float(d) for d in data]\n\ndef input_int_to_float(data):\n if isinstance(data, str):\n # used for '123.68, 116.78, 103.94' style\n if ',' in data:\n data = data.split(',')\n # used for '123.68 116.78 103.94' style\n else:\n data = data.split()\n\n if len(data) == 1:\n return float(data[0].strip())\n else:\n return [float(s.strip()) for s in data]\n elif isinstance(data, list):\n return [float(s) for s in data]\n elif isinstance(data, int):\n return float(data)\n\ndef input_to_list_int(data):\n if isinstance(data, str):\n return [int(s.strip()) for s in data.split(',')]\n\n if isinstance(data, int):\n return [data]\n\n assert isinstance(data, list)\n return [int(d) for d in data]\n\ndef input_to_list(data):\n if isinstance(data, str):\n if ',' in data:\n return [s.strip() for s in data.split(',')]\n\n return [s.strip() for s in data.split()]\n\n if isinstance(data, int):\n return [data]\n\n assert isinstance(data, list)\n return data\n\ndef list_to_tuple(data):\n if isinstance(data, str):\n return tuple([int(s.strip()) for s in data.split(',')])\n\n elif isinstance(data, list):\n if isinstance(data[0], list):\n result = []\n for item in data:\n result.append(tuple([int(s) for s in item]))\n return result\n else:\n return tuple([int(s) for s in data])\n\ndef percent_to_float(data):\n if isinstance(data, str) and re.match(r'-?\\d+(\\.\\d+)?%', data):\n data = float(data.strip('%')) / 100\n if isinstance(data, int):\n data = float(data)\n else:\n assert isinstance(data, float), 'This field should be float, int or percent string'\n return data\n\nops_schema = Schema({\n Optional('weight', default=None): {\n Optional('granularity', default=None): And(\n list,\n lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),\n Optional('scheme', default=None): And(\n list,\n # asym_float and placeholder is only for PyTorch framework\n lambda s: all(i in ['asym', 'sym', 'asym_float', 'placeholder'] for i in s)),\n Optional('dtype', default=None): And(\n list,\n lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),\n Optional('algorithm', default=None): And(\n list,\n lambda s: all(i in ['minmax'] for i in s)),\n Optional('bit', default=None): And(\n Or(float, list),\n Use(input_to_list_float),\n lambda s: all(0.0 < i <= 7.0 for i in s))\n },\n Optional('activation', default=None): {\n 
Optional('granularity', default=None): And(\n list,\n lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),\n Optional('scheme', default=None): And(\n list,\n lambda s: all(i in ['asym', 'sym'] for i in s)),\n Optional('dtype', default=None): And(\n list,\n lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),\n Optional('algorithm', default=None): And(\n list,\n lambda s: all(i in ['minmax', 'kl'] for i in s))\n }\n})\n\ngraph_optimization_schema = Schema({\n\n Optional('precisions', default={'precisions': ['fp32']}): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in [ 'fp32', 'bf16'] for i in s)),\n\n Optional('op_wise', default={'weight': {}, 'activation': {}}): {\n Optional('weight', default=None): {\n Optional('dtype', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['fp32', 'bf16'] for i in s)),\n },\n Optional('activation', default=None): {\n Optional('dtype', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['fp32', 'bf16'] for i in s)),\n }\n }\n})\n\nmodel_conversion_schema = Schema({\n 'source': And(str, lambda s: s.lower() == 'qat'),\n 'destination': And(str, lambda s: s.lower() == 'default')\n})\n\nfilter_schema = Schema({\n Optional('LabelBalance'): {\n 'size': And(int, lambda s: s > 0)\n },\n})\n\ntransform_schema = Schema({\n Optional('ResizeWithRatio'):{\n Optional('min_dim'): int,\n Optional('max_dim'): int,\n Optional('padding'): bool,\n Optional('constant_value'): int\n },\n Optional('CropToBoundingBox'): {\n 'offset_height': int,\n 'offset_width': int,\n 'target_height': int,\n 'target_width': int\n },\n Optional('Cast'): {\n Optional('dtype'): str\n },\n Optional('RandomResizedCrop'): {\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0)),\n Optional('scale'): And(list, lambda s: all(isinstance(i, float) for i in s)),\n Optional('ratio'): And(list, lambda s: all(isinstance(i, float) for i in s)),\n Optional('interpolation'): And(\n str,\n lambda s: s in ['nearest', 'bilinear', 'bicubic']),\n },\n Optional('AlignImageChannel'): {\n Optional('dim'): int\n },\n Optional('ToNDArray'): Or({}, None),\n Optional('CropResize'): {\n 'x': int,\n 'y': int,\n 'width': int,\n 'height': int,\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0)),\n Optional('interpolation'): And(\n str,\n lambda s: s in ['nearest', 'bilinear', 'bicubic']),\n },\n Optional('RandomHorizontalFlip'): Or({}, None),\n Optional('RandomVerticalFlip'): Or({}, None),\n Optional('ToTensor'): Or({}, None),\n Optional('ToPILImage'): Or({}, None),\n Optional('Normalize'): {\n Optional('mean'): And(list, lambda s: all(isinstance(i, float) for i in s)),\n Optional('std'): And(list, lambda s: all(isinstance(i, float) for i in s))\n },\n Optional('Resize'): {\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0)),\n Optional('interpolation'): And(\n str,\n lambda s: s in ['nearest', 'bilinear', 'bicubic']),\n },\n Optional('RandomCrop'): {\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0))\n },\n Optional('Rescale'): Or({}, None),\n Optional('CenterCrop'): {\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0))\n },\n Optional('PaddedCenterCrop'): {\n 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)),\n And(int, lambda s: s > 0)),\n Optional('crop_padding'): And(int, lambda s: s 
> 0),\n },\n Optional('ToArray'): Or({}, None),\n Optional('QuantizedInput'): {\n Optional('dtype', default='int8'): And(str, lambda s: s in ['int8', 'uint8']),\n Optional('scale'): And(float, lambda s: s > 0),\n },\n Optional('Transpose'): {\n 'perm': And(list, lambda s: all(isinstance(i, int) for i in s)),\n },\n # THIS API IS TO BE DEPRECATED!\n Optional('ParseDecodeImagenet'): Or({}, None),\n Optional('ParseDecodeCoco'): Or({}, None),\n Optional('ParseDecodeVoc'): Or({}, None),\n Optional('BilinearImagenet'): {\n 'height': And(int, lambda s: s > 0),\n 'width': And(int, lambda s: s > 0),\n Optional('central_fraction'): float,\n Optional('mean_value'): And(Or(str, list), Use(input_to_list_float)),\n Optional('scale'): float,\n },\n Optional('ResizeCropImagenet'): {\n 'height': And(int, lambda s: s > 0),\n 'width': And(int, lambda s: s > 0),\n Optional('random_crop'): bool,\n Optional('slice_crop'): bool,\n Optional('resize_side'): And(int, lambda s: s > 0),\n Optional('random_flip_left_right'): bool,\n Optional('mean_value'): And(Or(str, list), Use(input_to_list_float)),\n Optional('scale'): float\n },\n Optional('ResizeWithAspectRatio'):{\n 'height': And(int, lambda s: s > 0),\n 'width': And(int, lambda s: s > 0),\n },\n Optional('ParseDecodeImagenet'): Or({}, None),\n Optional('ToArray'): Or({}, None),\n Optional('QuantizedInput'): {\n Optional('dtype', default='int8'): And(str, lambda s: s in ['int8', 'uint8']),\n Optional('scale'): And(float, lambda s: s > 0),\n },\n Optional('Transpose'): {\n 'perm': And(list, lambda s: all(isinstance(i, int) for i in s)),\n },\n})\n\npostprocess_schema = Schema({\n Optional('LabelShift'): int,\n Optional('SquadV1'): {\n 'label_file': str,\n 'vocab_file': str\n },\n Optional('Collect'): {\n 'length': int\n },\n})\n\ndataset_schema = Schema({\n Optional('CIFAR10'): {\n 'root': str,\n Optional('train'): bool,\n Optional('download'): bool,\n },\n Optional('CIFAR100'): {\n 'root': str,\n Optional('train'): bool,\n Optional('download'): bool,\n },\n Optional('MNIST'): {\n 'root': str,\n Optional('train'): bool,\n Optional('download'): bool,\n },\n Optional('FashionMNIST'): {\n 'root': str,\n Optional('train'): bool,\n Optional('download'): bool,\n },\n Optional('ImageFolder'): {\n 'root': str,\n },\n Optional('TFRecordDataset'): {\n 'root': str,\n },\n Optional('ImageRecord'): {\n 'root': str,\n },\n Optional('dummy_v2'): {\n 'input_shape': And(Or(str, list), Use(list_to_tuple)),\n Optional('label_shape'): And(Or(str, list), Use(list_to_tuple)),\n Optional('low'): Or(\n float,\n And(int, Use(input_int_to_float)),\n And(list, Use(input_int_to_float)),\n And(str, Use(input_int_to_float))),\n Optional('high'): Or(\n float,\n And(int, Use(input_int_to_float)),\n And(list, Use(input_int_to_float)),\n And(str, Use(input_int_to_float))),\n Optional('dtype'): And(Or(str, list), Use(input_to_list)),\n },\n Optional('dummy'): {\n 'shape': And(Or(str, list), Use(list_to_tuple)),\n Optional('low'): Or(\n float,\n And(int, Use(input_int_to_float)),\n And(list, Use(input_int_to_float)),\n And(str, Use(input_int_to_float))),\n Optional('high'): Or(\n float,\n And(int, Use(input_int_to_float)),\n And(list, Use(input_int_to_float)),\n And(str, Use(input_int_to_float))),\n Optional('dtype'): And(Or(str, list), Use(input_to_list)),\n Optional('label'): bool,\n },\n Optional('bert'): {\n 'root': str,\n 'label_file': str,\n Optional('task'): And(str, lambda s: s in [\"classifier\", \"squad\"]),\n Optional('model_type'): And(str, lambda s: s in ['bert', 'xlnet', 'xlm']),\n 
},\n Optional('VOCRecord'): {\n 'root': str,\n },\n Optional('COCORecord'): {\n 'root': str,\n Optional('num_cores'): int,\n },\n Optional('COCORaw'): {\n 'root': str,\n Optional('img_dir'): str,\n Optional('anno_dir'): str,\n Optional('num_cores'): int,\n },\n Optional('COCONpy'): {\n 'root': str,\n 'npy_dir': str,\n Optional('anno_dir'): str,\n Optional('num_cores'): int,\n },\n Optional('ImagenetRaw'): {\n 'data_path': str,\n Optional('image_list'): str,\n },\n Optional('style_transfer'): {\n 'content_folder': str,\n 'style_folder': str,\n Optional('crop_ratio'): float,\n Optional('resize_shape'): And(Or(str, list), Use(input_to_list_int)),\n Optional('image_format'): str,\n },\n Optional('GLUE'): {\n 'data_dir': str,\n 'model_name_or_path': str,\n Optional('max_seq_length'): int,\n Optional('do_lower_case'): bool,\n Optional('task'): str,\n Optional('model_type'): str,\n Optional('dynamic_length'): bool,\n Optional('evaluate'): bool\n },\n # TO BE DEPRECATED!\n Optional('Imagenet'): {\n 'root': str,\n },\n})\n\ndataloader_schema = Schema({\n Optional('last_batch', default='rollover'): And(str, lambda s: s in ['rollover', 'discard']),\n Optional('batch_size', default=1): And(int, lambda s: s > 0),\n 'dataset': dataset_schema,\n Optional('filter'): filter_schema,\n Optional('transform'): transform_schema,\n Optional('shuffle'): bool,\n Optional('distributed'): bool,\n})\n\nconfigs_schema = Schema({\n Optional('cores_per_instance'): And(int, lambda s: s > 0),\n Optional('num_of_instance', default=1): And(int, lambda s: s > 0),\n Optional('inter_num_of_threads'): And(int, lambda s: s > 0),\n Optional('intra_num_of_threads'): And(int, lambda s: s > 0),\n Optional('kmp_blocktime'): And(int, lambda s: s >= 0),\n Optional('kmp_affinity', default='granularity=fine,verbose,compact,1,0'): str,\n})\n\noptimizer_schema = Schema({\n Optional('SGD'): {\n 'learning_rate': float,\n Optional('momentum'): float,\n Optional('nesterov'): bool,\n Optional('weight_decay'): float\n }\n})\n\ncriterion_schema = Schema({\n Optional('CrossEntropyLoss'): {\n Optional('reduction', default='mean'): And(str, lambda s: s in ['none', 'sum', 'mean'])\n },\n Optional('KnowledgeDistillationLoss'): {\n Optional('temperature'): And(float, lambda s: s > 0),\n Optional('loss_types'): And(list, lambda s: all(i in ['CE', 'KL'] for i in s)),\n Optional('loss_weights'): And(list, lambda s: all(i >= 0 for i in s) and sum(s) == 1.0),\n }\n})\n\ntrain_schema = Schema({\n 'optimizer': optimizer_schema,\n 'criterion': criterion_schema,\n Optional('dataloader'): dataloader_schema,\n Optional('start_epoch', default=0): int,\n Optional('end_epoch'): int,\n Optional('iteration'): int,\n Optional('frequency'): int,\n # TODO reserve for multinode training support\n Optional('hostfile'): str\n})\n\nweight_compression_schema = Schema({\n Optional('initial_sparsity', default=0): And(float, lambda s: s < 1.0 and s >= 0.0),\n Optional('target_sparsity', default=0.97): float,\n Optional('start_epoch', default=0): int,\n Optional('end_epoch', default=4): int,\n Optional('pruners'): And(list, \\\n lambda s: all(isinstance(i, Pruner) for i in s))\n})\n\napproach_schema = Schema({\n Optional('weight_compression'): weight_compression_schema,\n})\n\ndefault_workspace = './nc_workspace/{}/'.format(\n datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\nschema = Schema({\n 'model': {\n 'name': str,\n 'framework': And(str, lambda s: s in FRAMEWORKS),\n Optional('inputs', default=[]): And(Or(str, list), Use(input_to_list)),\n Optional('outputs', 
default=[]): And(Or(str, list), Use(input_to_list)),\n },\n Optional('version', default=float(__version__.split('.')[0])): And(\n Or(float,\n And(int, Use(input_int_to_float)),\n And(str, Use(input_int_to_float))),\n lambda s: s == float(__version__.split('.')[0])),\n Optional('device', default='cpu'): And(str, lambda s: s in ['cpu', 'gpu']),\n Optional('quantization', default={'approach': 'post_training_static_quant', \\\n 'calibration': {'sampling_size': [100]}, \\\n 'recipes': {'scale_propagation_max_pooling': True,\n 'scale_propagation_concat': True,\n 'first_conv_or_matmul_quantization': True},\n 'model_wise': {'weight': {'bit': [7.0]},\n 'activation': {}}}): {\n Optional('approach', default='post_training_static_quant'): And(\n str,\n # TODO check if framework support dynamic quantize\n # Now only onnruntime and pytorch supoort\n lambda s: s in ['post_training_static_quant',\n 'post_training_dynamic_quant',\n 'quant_aware_training']),\n Optional('train', default=None): train_schema,\n Optional('advance', default=None): {\n Optional('bias_correction'): And(str, lambda s: s in ['weight_empirical']),\n },\n Optional('calibration', default={'sampling_size': [100]}): {\n Optional('sampling_size', default=[100]): And(Or(str, int, list), Use(input_to_list)),\n Optional('dataloader', default=None): dataloader_schema\n },\n Optional('recipes', default={'scale_propagation_max_pooling': True,\n 'scale_propagation_concat': True,\n 'first_conv_or_matmul_quantization': True}): {\n Optional('scale_propagation_max_pooling', default=True):\n And(bool, lambda s: s in [True, False]),\n Optional('scale_propagation_concat', default=True):\n And(bool, lambda s: s in [True, False]),\n Optional('first_conv_or_matmul_quantization', default=True):\n And(bool, lambda s: s in [True, False]),\n Optional('fast_bias_correction', default=False):\n And(bool, lambda s: s in [True, False]),\n Optional('weight_correction', default=False):\n And(bool, lambda s: s in [True, False]),\n },\n Optional('model_wise', default={'weight': {'bit': [7.0]}, 'activation': {}}): {\n Optional('weight', default= {'bit': [7.0]}): {\n Optional('granularity', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),\n Optional('scheme', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['asym', 'sym'] for i in s)),\n Optional('dtype', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),\n Optional('algorithm', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['minmax'] for i in s)),\n Optional('bit', default=[7.0]): And(\n Or(float, list),\n Use(input_to_list_float),\n lambda s: all(0.0 < i <= 7.0 for i in s))\n\n },\n Optional('activation', default=None): {\n Optional('granularity', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),\n Optional('scheme', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['asym', 'sym'] for i in s)),\n Optional('dtype', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),\n Optional('algorithm', default=None): And(\n Or(str, list),\n Use(input_to_list),\n lambda s: all(i in ['minmax', 'kl'] for i in s)),\n }\n },\n Optional('op_wise', default=None): {\n str: ops_schema\n },\n },\n\n Optional('graph_optimization'): 
graph_optimization_schema,\n\n Optional('model_conversion'): model_conversion_schema,\n\n Optional('tuning', default={\n 'strategy': {'name': 'basic'},\n 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},\n 'objective': 'performance',\n 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False},\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): {\n Optional('strategy', default={'name': 'basic'}): {\n 'name': And(str, lambda s: s in STRATEGIES), Optional('sigopt_api_token'): str,\n Optional('sigopt_project_id'): str,\n Optional('sigopt_experiment_name', default='nc-tune'): str,\n Optional('accuracy_weight', default=1.0): float,\n Optional('latency_weight', default=1.0): float\n } ,\n Hook('accuracy_criterion', handler=_valid_accuracy_field): object,\n Optional('accuracy_criterion', default={'relative': 0.01}): {\n Optional('relative'): And(Or(str, float), Use(percent_to_float)),\n Optional('absolute'): And(Or(str, int, float), Use(percent_to_float)),\n Optional('higher_is_better', default=True): bool,\n },\n Optional('objective', default='performance'): And(str, lambda s: s in OBJECTIVES),\n Optional('exit_policy', default={'timeout': 0,\n 'max_trials': 100,\n 'performance_only': False}): {\n Optional('timeout', default=0): int,\n Optional('max_trials', default=100): int,\n Optional('performance_only', default=False): bool,\n },\n Optional('random_seed', default=1978): int,\n Optional('tensorboard', default=False): And(bool, lambda s: s in [True, False]),\n Optional('workspace', default={'path': default_workspace}): {\n Optional('path', default=None): str,\n Optional('resume'): str\n }\n },\n Optional('evaluation'): {\n Optional('accuracy'): {\n Optional('metric', default=None): {\n Optional('topk'): And(int, lambda s: s in [1, 5]),\n Optional('mAP'): {\n Optional('anno_path'): str,\n Optional('iou_thrs', default=0.5):\n Or(And(str, lambda s: s in ['0.5:0.05:0.95']),\n And(float, lambda s: s <= 1.0 and s >= 0.0)),\n Optional('map_points', default=0): And(int, lambda s: s in [0, 11, 101])\n },\n Optional('COCOmAP'): {\n Optional('anno_path'): str,\n Optional('map_key', default='DetectionBoxes_Precision/mAP'): str\n },\n Optional('VOCmAP'): {\n Optional('anno_path'): str\n },\n Optional('SquadF1'): Or({}, None),\n Optional('MSE'): {\n Optional('compare_label'): bool\n },\n Optional('RMSE'): {\n Optional('compare_label'): bool\n },\n Optional('MAE'): {\n Optional('compare_label'): bool\n },\n Optional('Accuracy'): Or({}, None),\n Optional('Loss'): Or({}, None),\n Optional('BLEU'): Or({}, None),\n Optional('SquadF1'): Or({}, None),\n Optional('F1'): Or({}, None),\n Optional('mIOU'): {\n Optional('num_classes'): int\n },\n Optional('GLUE'): {\n Optional('task'): str\n },\n },\n Optional('configs'): configs_schema,\n Optional('iteration', default=-1): int,\n Optional('dataloader'): dataloader_schema,\n Optional('postprocess'): {\n Optional('transform'): postprocess_schema\n },\n },\n Optional('performance'): {\n Optional('warmup', default=5): int,\n Optional('iteration', default=-1): int,\n Optional('configs'): configs_schema,\n Optional('dataloader'): dataloader_schema,\n Optional('postprocess'): {\n Optional('transform'): postprocess_schema\n }\n },\n },\n Optional('pruning'): {\n Optional(\"train\"): train_schema,\n Optional(\"approach\"): approach_schema\n },\n\n Optional('distillation'): {\n Optional(\"train\"): train_schema\n },\n\n Optional(\"train\"): train_schema\n})\n\nquantization_default_schema = Schema({\n 
Optional('model', default={'name': 'default_model_name', \\\n 'framework': 'NA', \\\n 'inputs': [], 'outputs': []}): dict,\n\n Optional('version', default=float(__version__.split('.')[0])): str,\n\n Optional('device', default='cpu'): str,\n\n Optional('quantization', default={'approach': 'post_training_static_quant', \\\n 'calibration': {'sampling_size': [100]}, \\\n 'recipes': {'scale_propagation_max_pooling': True,\n 'scale_propagation_concat': True,\n 'first_conv_or_matmul_quantization': True},\n 'model_wise': {'weight': {'bit': [7.0]},\n 'activation': {}}}): dict,\n\n Optional('tuning', default={\n 'strategy': {'name': 'basic'},\n 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},\n 'objective': 'performance',\n 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False},\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): dict,\n\n Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}} }): dict\n})\n\npruning_default_schema = Schema({\n Optional('model', default={'name': 'default_model_name', \\\n 'framework': 'NA', \\\n 'inputs': [], 'outputs': []}): dict,\n\n Optional('version', default=float(__version__.split('.')[0])): str,\n\n Optional('device', default='cpu'): str,\n\n Optional('tuning', default={\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): dict,\n\n Optional('pruning', default={'approach': {'weight_compression':{'initial_sparsity': 0, \\\n 'target_sparsity': 0.97, 'start_epoch': 0, \\\n 'end_epoch': 4}}}): dict\n})\n\ngraph_optimization_default_schema = Schema({\n Optional('model', default={'name': 'resnet50', \\\n 'framework': 'NA', \\\n 'inputs': [], 'outputs': []}): dict,\n\n Optional('version', default=float(__version__.split('.')[0])): str,\n\n Optional('device', default='cpu'): str,\n\n Optional('quantization', default={'approach': 'post_training_static_quant', \\\n 'calibration': {'sampling_size': [100]}, \\\n 'recipes': {'scale_propagation_max_pooling': True,\n 'scale_propagation_concat': True,\n 'first_conv_or_matmul_quantization': True},\n 'model_wise': {'weight': {'bit': [7.0]},\n 'activation': {}}}): dict,\n\n Optional('tuning', default={\n 'strategy': {'name': 'basic'},\n 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},\n 'objective': 'performance',\n 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False},\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): dict,\n\n Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}} }): dict,\n\n Optional('graph_optimization', default={'precisions': ['bf16, fp32']}): dict\n})\n\nbenchmark_default_schema = Schema({\n Optional('model', default={'name': 'resnet50', \\\n 'framework': 'NA', \\\n 'inputs': [], 'outputs': []}): dict,\n\n Optional('version', default=float(__version__.split('.')[0])): str,\n\n Optional('device', default='cpu'): str,\n\n Optional('quantization', default={'approach': 'post_training_static_quant', \\\n 'calibration': {'sampling_size': [100]}, \\\n 'recipes': {'scale_propagation_max_pooling': True,\n 'scale_propagation_concat': True,\n 'first_conv_or_matmul_quantization': True},\n 'model_wise': {'weight': {'bit': [7.0]},\n 'activation': {}}}): dict,\n\n Optional('tuning', default={\n 'strategy': {'name': 'basic'},\n 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},\n 'objective': 'performance',\n 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': 
False},\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): dict,\n\n Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}} }): dict\n})\n\ndistillation_default_schema = Schema({\n Optional('model', default={'name': 'default_model_name', \\\n 'framework': 'NA', \\\n 'inputs': [], 'outputs': []}): dict,\n\n Optional('version', default=float(__version__.split('.')[0])): str,\n\n Optional('device', default='cpu'): str,\n\n Optional('tuning', default={\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': default_workspace}}): dict,\n\n Optional('distillation', default={\n 'train': {'start_epoch': 0, 'end_epoch': 10, \\\n 'iteration': 1000, 'frequency': 1, \\\n 'optimizer': {'SGD': {'learning_rate': 0.001}}, \\\n 'criterion': {'KnowledgeDistillationLoss': \\\n {'temperature': 1.0, \\\n 'loss_types': ['CE', 'KL'], \\\n 'loss_weights': [0.5, 0.5]}}}}): dict,\n\n Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}} }): dict\n})\n\nclass Conf(object):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n def __init__(self, cfg_fname):\n assert cfg_fname is not None\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n\n def _read_cfg(self, cfg_fname):\n \"\"\"Load a config file following yaml syntax.\n\n Args:\n cfg_fname(string): The name of configuration yaml file\n \"\"\"\n try:\n with open(cfg_fname, 'r') as f:\n content = f.read()\n cfg = yaml.safe_load(content)\n validated_cfg = schema.validate(cfg)\n\n # if user yaml doesn't include version field, neural_compressor will write a supported version\n # into it.\n if 'version' not in cfg:\n leading_whitespace = re.search(r\"[ \\t]*model\\s*:\",\n content).group().split(\"model\")[0]\n content = re.sub(r'model\\s*:',\n 'version: {}\\n\\n{}model:'.format(\n float(__version__.split('.')[0]),\n leading_whitespace\n ),\n content)\n with open(cfg_fname, 'w') as f:\n f.write(content)\n\n return validated_cfg\n\n except Exception as e:\n logger.error(\"{}.\".format(e))\n raise RuntimeError(\n \"The yaml file format is not correct. 
Please refer to document.\"\n )\n\nclass Quantization_Conf(Conf):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n\n def __init__(self, cfg_fname):\n if cfg_fname:\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n else:\n self.usr_cfg = DotDict(quantization_default_schema.validate(dict()))\n self._model_wise_tune_space = None\n self._opwise_tune_space = None\n\n def _merge_dicts(self, src, dst):\n \"\"\"Helper function to merge src dict into dst dict.\n\n If the key in src doesn't exist in dst, then add this key and value\n pair to dst.\n If the key in src is in dst and the value intersects with the one in\n dst, then override the value in dst with the intersect value.\n\n Args:\n src (dict): The source dict merged from\n dst (dict): The source dict merged to\n\n Returns:\n dict: The merged dict from src to dst\n \"\"\"\n for key in src:\n if key in dst:\n if isinstance(dst[key], dict) and isinstance(src[key], dict):\n self._merge_dicts(src[key], dst[key])\n elif dst[key] == src[key] or src[key] is None:\n pass # same leaf value\n else:\n value = [value for value in src[key]\n if value in dst[key] or isinstance(value, float)]\n if value != []:\n dst[key] = value\n else:\n if not isinstance(src[key], dict):\n dst[key] = src[key]\n\n return dst\n\n def modelwise_tune_space(self, model_wise_quant):\n cfg = self.usr_cfg\n\n self._model_wise_tune_space = OrderedDict()\n for optype in model_wise_quant.keys():\n self._model_wise_tune_space[optype] = self._merge_dicts(cfg.quantization.model_wise,\n model_wise_quant[optype])\n\n return self._model_wise_tune_space\n\n def _weight_compute(self, combined_cfg):\n temp_set = set()\n for _, config in combined_cfg.items():\n temp_str = ''\n for part, params in config.items():\n temp_str = temp_str + part\n for _, param in params.items():\n temp_str += str(param)\n temp_str += '_'\n temp_set.add(temp_str)\n return len(temp_set)\n\n def _sort_cfgs(self, combined_cfgs):\n cfgs_num = len(combined_cfgs)\n for i in range(cfgs_num):\n for j in range(cfgs_num-i-1):\n weight_a = self._weight_compute(combined_cfgs[j])\n weight_b = self._weight_compute(combined_cfgs[j+1])\n if weight_a > weight_b:\n temp = combined_cfgs[j]\n combined_cfgs[j] = combined_cfgs[j+1]\n combined_cfgs[j+1] = temp\n return combined_cfgs\n\n def _combine_optype_quant_cfgs(self, model_wise_quant_cfgs):\n if len(model_wise_quant_cfgs) == 0:\n return []\n temp_cfgs = OrderedDict()\n\n for optype, cfgs in model_wise_quant_cfgs.items():\n if len(cfgs) > 0:\n temp_cfgs[optype] = copy.deepcopy(cfgs)\n\n if not bool(temp_cfgs):\n return []\n\n keys, values = zip(*temp_cfgs.items())\n\n return self._sort_cfgs([dict(zip(keys, v)) for v in itertools.product(*values)])\n\n def opwise_tune_space(self, opwise_quant):\n def _is_regex(pattern):\n if re.match(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*$\", pattern):\n return False\n return True\n\n opwise = copy.deepcopy(opwise_quant)\n for k, v in opwise.items():\n opwise[k] = self._merge_dicts(self._model_wise_tune_space[k[1]], opwise[k])\n\n cfg = self.usr_cfg\n if cfg.quantization.op_wise:\n for k, v in cfg.quantization.op_wise.items():\n is_regex = _is_regex(k)\n for k_op, _ in opwise.items():\n if not is_regex and k == k_op[0]:\n opwise[k_op] = self._merge_dicts(v, opwise[k_op])\n\n if is_regex and re.match(k, k_op[0]):\n opwise[k_op] = self._merge_dicts(v, opwise[k_op])\n\n self._opwise_tune_space = opwise\n return self._opwise_tune_space\n\n def expand_tune_cfgs(self, tune_space):\n \"\"\"generate 
all possible tuning combinations for each op or model wise tuning.\n\n Args:\n tune_space (dict): The tuning space to be expanded.\n\n Returns:\n dict: The expanded tuning configs\n \"\"\"\n cfg_lists = self._expand_tune_cfgs_recursively(tune_space)\n\n # remove unreasonable tuning combinations\n valid_cfgs = []\n quant_dtype = ['int8', 'uint8', 'int4', 'uint4']\n\n for cfg in cfg_lists:\n cfg = DotDict(cfg)\n dtype = cfg.activation.dtype\n\n if dtype not in quant_dtype:\n cfg.activation.clear()\n cfg.activation.dtype = dtype\n\n if 'weight' in cfg:\n dtype = cfg.weight.dtype\n if dtype not in quant_dtype:\n cfg.weight.clear()\n cfg.weight.dtype = dtype\n if (cfg.weight.dtype != cfg.activation.dtype and\n cfg.weight.dtype not in quant_dtype and\n cfg.activation.dtype not in quant_dtype) or \\\n (cfg.weight.dtype != cfg.activation.dtype and\n cfg.weight.dtype in quant_dtype and\n cfg.activation.dtype not in quant_dtype) or \\\n (cfg.weight.dtype != cfg.activation.dtype and\n cfg.weight.dtype not in quant_dtype and cfg.activation.dtype in quant_dtype):\n continue\n\n valid_cfgs.append(cfg)\n\n # remove duplicated configurations\n valid_cfgs = [cfg[0] for cfg in itertools.groupby(valid_cfgs)]\n return valid_cfgs\n\n def _expand_tune_cfgs_recursively(self, cfg_dict):\n \"\"\"Helper function of recursively generating all combinations.\n\n Args:\n cfg_dict (dict): The dict of conf space.\n\n Returns:\n list: List containing all combinations\n \"\"\"\n assert isinstance(cfg_dict, dict)\n combinations = OrderedDict()\n for key in cfg_dict:\n if isinstance(cfg_dict[key], dict):\n lists = self._expand_tune_cfgs_recursively(cfg_dict[key])\n combinations[key] = lists\n\n if len(combinations) != 0:\n return self._expand_tune_cfgs_recursively(combinations)\n\n keys, values = zip(*cfg_dict.items())\n values = list(filter(None, values))\n lists = [dict(zip(keys, v)) for v in itertools.product(*values)]\n return lists\n\nclass Pruning_Conf(Conf):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n\n def __init__(self, cfg_fname):\n if cfg_fname:\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n else:\n self.usr_cfg = DotDict(pruning_default_schema.validate(dict()))\n\nclass Graph_Optimization_Conf(Quantization_Conf):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n\n def __init__(self, cfg_fname):\n if cfg_fname:\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n else:\n self.usr_cfg = DotDict(graph_optimization_default_schema.validate(dict()))\n\nclass Benchmark_Conf(Conf):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n\n def __init__(self, cfg_fname):\n if cfg_fname:\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n else:\n self.usr_cfg = DotDict(benchmark_default_schema.validate(dict()))\n\nclass Distillation_Conf(Conf):\n \"\"\"config parser.\n\n Args:\n cfg_fname (string): The path to the configuration file.\n\n \"\"\"\n\n def __init__(self, cfg_fname):\n if cfg_fname:\n self.usr_cfg = DotDict(self._read_cfg(cfg_fname))\n else:\n self.usr_cfg = DotDict(distillation_default_schema.validate(dict()))\n","sub_path":"neural_compressor/conf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":44412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"506476259","text":"# -*- coding:utf-8 -*-\r\n#\r\n# Copyright © 2009-2010 Pierre Raybaut\r\n# Licensed under the terms 
of the MIT License\r\n# (see spyderlib/__init__.py for details)\r\n\r\n\"\"\"Pylint Code Analysis Plugin\"\"\"\r\n\r\n# pylint: disable=C0103\r\n# pylint: disable=R0903\r\n# pylint: disable=R0911\r\n# pylint: disable=R0201\r\n\r\nfrom spyderlib.qt.QtGui import QInputDialog, QVBoxLayout, QGroupBox, QLabel\r\nfrom spyderlib.qt.QtCore import SIGNAL, Qt\r\n\r\nimport sys\r\n\r\n# For debugging purpose:\r\nSTDOUT = sys.stdout\r\n\r\n# Local imports\r\nfrom spyderlib.utils.translations import get_translation\r\n_ = get_translation(\"p_pylint\", dirname=\"spyderplugins\")\r\nfrom spyderlib.config import get_icon\r\nfrom spyderlib.utils.qthelpers import create_action\r\nfrom spyderlib.plugins import SpyderPluginMixin, PluginConfigPage\r\n\r\nfrom spyderplugins.widgets.pylintgui import PylintWidget, is_pylint_installed\r\n\r\n\r\nclass PylintConfigPage(PluginConfigPage):\r\n def setup_page(self):\r\n hist_group = QGroupBox(_(\"History\"))\r\n hist_label = QLabel(_(\"Pylint plugin results are stored here:\\n\"\r\n \"%s\\n\\nThe following option \"\r\n \"will be applied at next startup.\\n\"\r\n ) % PylintWidget.DATAPATH)\r\n hist_label.setTextInteractionFlags(Qt.TextSelectableByMouse)\r\n hist_label.setWordWrap(True)\r\n hist_spin = self.create_spinbox(_(\"History: \"),\r\n _(\" results\"), 'max_entries', default=50,\r\n min_=10, max_=1000000, step=10)\r\n\r\n hist_layout = QVBoxLayout()\r\n hist_layout.addWidget(hist_label)\r\n hist_layout.addWidget(hist_spin)\r\n hist_group.setLayout(hist_layout)\r\n\r\n vlayout = QVBoxLayout()\r\n vlayout.addWidget(hist_group)\r\n vlayout.addStretch(1)\r\n self.setLayout(vlayout)\r\n\r\n\r\nclass Pylint(PylintWidget, SpyderPluginMixin):\r\n \"\"\"Python source code analysis based on pylint\"\"\"\r\n CONF_SECTION = 'pylint'\r\n CONFIGWIDGET_CLASS = PylintConfigPage\r\n def __init__(self, parent=None):\r\n PylintWidget.__init__(self, parent=parent,\r\n max_entries=self.get_option('max_entries', 50))\r\n SpyderPluginMixin.__init__(self, parent)\r\n \r\n #------ SpyderPluginWidget API --------------------------------------------- \r\n def get_plugin_title(self):\r\n \"\"\"Return widget title\"\"\"\r\n return _(\"Pylint\")\r\n \r\n def get_plugin_icon(self):\r\n \"\"\"Return widget icon\"\"\"\r\n return get_icon('pylint.png')\r\n \r\n def get_focus_widget(self):\r\n \"\"\"\r\n Return the widget to give focus to when\r\n this plugin's dockwidget is raised on top-level\r\n \"\"\"\r\n return self.treewidget\r\n \r\n def get_plugin_actions(self):\r\n \"\"\"Return a list of actions related to plugin\"\"\"\r\n # Font\r\n history_action = create_action(self, _(\"History...\"),\r\n None, 'history.png',\r\n _(\"Set history maximum entries\"),\r\n triggered=self.change_history_depth)\r\n self.treewidget.common_actions += (None, history_action)\r\n return []\r\n \r\n def register_plugin(self):\r\n \"\"\"Register plugin in Spyder's main window\"\"\"\r\n self.connect(self, SIGNAL(\"edit_goto(QString,int,QString)\"),\r\n self.main.editor.load)\r\n self.connect(self, SIGNAL('redirect_stdio(bool)'),\r\n self.main.redirect_internalshell_stdio)\r\n self.main.add_dockwidget(self)\r\n \r\n pylint_act = create_action(self, _(\"Run pylint code analysis\"),\r\n triggered=self.run_pylint)\r\n pylint_act.setEnabled(is_pylint_installed())\r\n self.register_shortcut(pylint_act, context=\"Pylint\",\r\n name=\"Run analysis\", default=\"F8\")\r\n \r\n self.main.source_menu_actions += [pylint_act]\r\n self.main.editor.pythonfile_dependent_actions += [pylint_act]\r\n \r\n def 
refresh_plugin(self):\r\n \"\"\"Refresh pylint widget\"\"\"\r\n self.remove_obsolete_items()\r\n \r\n def closing_plugin(self, cancelable=False):\r\n \"\"\"Perform actions before parent main window is closed\"\"\"\r\n return True\r\n \r\n def apply_plugin_settings(self, options):\r\n \"\"\"Apply configuration file's plugin settings\"\"\"\r\n # The history depth option will be applied at \r\n # next Spyder startup, which is soon enough\r\n pass\r\n \r\n #------ Public API ---------------------------------------------------------\r\n def change_history_depth(self):\r\n \"Change history max entries\"\"\"\r\n depth, valid = QInputDialog.getInteger(self, _('History'),\r\n _('Maximum entries'),\r\n self.get_option('max_entries'),\r\n 10, 10000)\r\n if valid:\r\n self.set_option('max_entries', depth)\r\n \r\n def run_pylint(self):\r\n \"\"\"Run pylint code analysis\"\"\"\r\n self.analyze( self.main.editor.get_current_filename() )\r\n \r\n def analyze(self, filename):\r\n \"\"\"Reimplement analyze method\"\"\"\r\n if self.dockwidget and not self.ismaximized:\r\n self.dockwidget.setVisible(True)\r\n self.dockwidget.setFocus()\r\n self.dockwidget.raise_()\r\n PylintWidget.analyze(self, filename)\r\n\r\n\r\n#===============================================================================\r\n# The following statements are required to register this 3rd party plugin:\r\n#===============================================================================\r\nPLUGIN_CLASS = Pylint\r\n\r\n","sub_path":"spyderplugins/p_pylint.py","file_name":"p_pylint.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"427261979","text":"import random\n\ndef getDay( s ):\n r=''\n\n if s[0] == '1':\n if s[1] == '0':\n r = 'десятое'\n if s[1] == '1':\n r = 'одинадцатое'\n if s[1] == '2':\n r = 'двенадцатое'\n if s[1] == '3':\n r = 'тринадцатое'\n if s[1] == '4':\n r = 'четырнадцатое'\n if s[1] == '5':\n r = 'пятнадцатое'\n if s[1] == '6':\n r = 'шестнадцатое'\n if s[1] == '7':\n r = 'семнадцатое'\n if s[1] == '8':\n r = 'восемнадцатое'\n if s[1] == '9':\n r = 'девятнадцатое'\n else:\n if s[1] == '1':\n r = 'первое'\n if s[1] == '2':\n r = 'второе'\n if s[1] == '3':\n r = 'третье'\n if s[1] == '4':\n r = 'четвертое'\n if s[1] == '5':\n r = 'пятое'\n if s[1] == '6':\n r = 'шестое'\n if s[1] == '7':\n r = 'седьмое'\n if s[1] == '8':\n r = 'восьмое'\n if s[1] == '9':\n r = 'девятое'\n\n\n if s[0] == '2':\n if s[1] == '0':\n r = 'двадцатое'\n else:\n r = 'двадцать '+r\n\n if s[0] == '3':\n if s[1] == '0':\n r = 'тридцатое'\n else:\n r = 'тридцать '+r\n\n return r\n\ndef getMonth( s ):\n r = ''\n if s == '01':\n r = 'января'\n if s == '02':\n r = 'февраля'\n if s == '03':\n r = 'марта'\n if s == '04':\n r = 'апреля'\n if s == '05':\n r = 'мая'\n if s == '06':\n r = 'июня'\n if s == '07':\n r = 'июля'\n if s == '08':\n r = 'августа'\n if s == '09':\n r = 'сентября'\n if s == '10':\n r = 'октября'\n if s == '11':\n r = 'ноября'\n if s == '12':\n r = 'декабря'\n\n return r\n\ndef bDayOut( s ):\n d,m,y = s.split('.')\n sd = getDay( d )\n sm = getMonth( m )\n\n r = sd + ' ' + sm + ' ' + y +' года'\n\n return r\n\n\n\ndef goGame():\n bDayOut( '10.11.2019')\n\n\n dicGreatMan = {\n 1:['Пётр 1', '09.06.1672'],\n 2:['Иван IV (Грозный)', '25.08.1530'],\n 3:['Суворов Александр Васильевич', '24.11.1730'],\n 4:['Ломоносов Михаил Васильевич', '19.11.1711'],\n 5:['Пушкин Александр Сергеевич', '06.06.1799'],\n 6:['Ленин Владимир Ильич', '22.04.1870'],\n 
7:['Сталин Иосиф Виссарионович', '21.12.1878'],\n 8:['Чайковский Пётр Ильич', '07.05.1840'],\n 9:['Лермонтов Михаил Юрьевич', '15.10.1814'],\n 10:['Есенин Сергей Александрович', '03.10.1895']\n }\n ind = [1,2,3,4,5,6,7,8,9,10]\n\n\n while True:\n nYes = 0\n nNo=0\n indSampl = random.sample( ind, 5 )\n for i in indSampl:\n name = dicGreatMan.get( i )[0]\n bDay = dicGreatMan.get( i )[1]\n dateIn = input( 'Введите дату рождения (дд.мм.гггг) для {}: '.format( name ) )\n if bDay != dateIn:\n nNo += 1\n print( 'Правильная дата рождения: ', bDayOut( bDay ) )\n else:\n nYes += 1\n print( 'Правильно!' )\n print( '\\nПравильных ответов - {}\\nОшибок - {}\\n'.format( nYes, nNo ))\n n = input( 'Продолжить? (Да-1, Нет-0): \\n' )\n if n == '0':\n break\n print( \"Конец игре.\")\n\nif __name__ == '__main__':\n goGame()","sub_path":"victory.py","file_name":"victory.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"252167635","text":"path = \"./input/\"\nwith open(path + \"f_libraries_of_the_world.txt\",\"r\") as fileObj:\n\n B,L,D = fileObj.readline().split()\n B = int(B)\n L = int(L)\n D = int(D)\n #keeps scores of books , index is book ID\n s = fileObj.readline().split()\n #scores[len(scores)-1] = scores[len(scores)-1].rstrip('\\n')\n #stores book ID . each index represnts corresponding library in the library multi list\n print(B,L,D)\n scores = [int(i) for i in s]\n books = []\n #stores number of books , signup process time in days, and the num of books lib can ship per day\n libraries = []\n a = fileObj.readlines()\n x = []\n y =[]\n x1 =[]\n x_int =[]\n for i in range(len(a)):\n if(i%2==0):\n x = a[i].split()\n x1 = [int(x[0]),int(x[1]),int(x[2])]\n x_int = [int(int(i)/2),x1[0],x1[1],x1[2]]\n libraries.append(x_int)\n else:\n x = a[i].split()\n for i in x:\n y.append(int(i))\n books.append(y)\n\ndef takeSecond(elem):\n return elem[2]\n\nlibraries.sort(key=takeSecond)\n\nsum = 0\nfor i in range(len(libraries)):\n sum+=libraries[i][2]\n if(sum > D):\n last_lib_id = i - 1\n break\n last_lib_id = i\n\nlibraries = libraries[:last_lib_id+1:]\nnum_libs = len(libraries)\nbooks_to_scan = []\ncumulative_days = 0\n\ndef bookScore(elem):\n return scores[elem]\n\nfor i in libraries:\n cumulative_days += i[2]\n days_left = D - cumulative_days\n max_books = days_left * i[3]\n books_in_lib = books[i[0]]\n books_in_lib.sort(key=bookScore, reverse=True)\n\n if(len(books_in_lib) <= max_books):\n books_to_scan.append(books_in_lib)\n else:\n temp = []\n for j in range(0,len(books_in_lib)):\n if(j <= max_books):\n temp.append(books_in_lib[j])\n books_to_scan.append(temp)\n\nwith open(path + \"output.txt\",\"w\") as f:\n f.write(str(num_libs)+\"\\n\")\n for i in range(len(libraries)):\n f.write(str(libraries[i][0])+\" \"+str(len(books_to_scan[i]))+\"\\n\")\n s = \"\"\n for j in books_to_scan[i]:\n s = s + str(j) + \" \"\n f.write(s + \"\\n\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"355880484","text":"# -*- coding: utf-8 -*-\n\nfrom BaseDialog import *\nfrom uipy.ui_gas_drilling_radius_dlg import *\n\nfrom rpc import CbmUtil, SQLClientHelper, CbmClientHelper\nfrom cbm.ttypes import *\n\nimport DataHelper\nimport UiHelper\n\nfrom math import sqrt, exp, pow, sin, cos, tan, radians, pi, asin\n\nclass GasDrillingRadiusDlg(BaseDialog):\n\tdef __init__(self, mine_id=-1, 
parent=None):\n\t\tsuper(GasDrillingRadiusDlg, self).__init__(parent)\n\t\tself.ui = Ui_gas_drilling_radius_dlg()\n\t\tself.ui.setupUi(self)\n\t\tself.initUi(self.ui) # 美化ui\n\t\tself.setTitle(u\"瓦斯抽采半径计算\")\n\t\tself.setFixedSize(self.width(), self.height())\n\t\tself.ui.save.clicked.connect(self.onSave)\n\t\tself.ui.cacl.clicked.connect(self.onCacl)\n\t\tself.connect(self.ui.coal, QtCore.SIGNAL('currentIndexChanged(int)'), self.onCoalChanged)\n\t\t# 待设计的矿井\n\t\tself.mine_id = mine_id\n\t\t# 初始化\n\t\tself.init()\n\t\t\n\tdef init(self):\n\t\t# 填充煤层列表\n\t\tself.fillCoalCombox()\n\n\tdef fillCoalCombox(self):\n\t\t# 清空煤层下拉列表\n\t\tself.ui.coal.clear()\n\n\t\t# 查找矿井辖属的煤层\n\t\tcoal_lists = SQLClientHelper.GetCoalListByForeignKey('mine_id', self.mine_id)\n\t\t# 添加到煤层下拉列表\n\t\tUiHelper.AddObjectListToCombobox(self.ui.coal, coal_lists)\n\n\tdef getDrp(self, coal_id, create_new=False):\n\t\tdrp = SQLClientHelper.GetDrillingRadiusParamByForeignKey('coal_id', coal_id)\n\t\tif drp.id <= 0 and create_new:\n\t\t\t# 新建一个瓦斯半径数据对象提交到数据库\n\t\t\tdrp = DrillingRadiusParam()\n\t\t\tdrp.coal_id = coal_id\n\t\t\tdrp_id = SQLClientHelper.AddDrillingRadiusParam(drp)\n\t\t\tif drp_id <= 0:\n\t\t\t\tdrp.id = -1\n\t\t\telse:\n\t\t\t\tdrp = SQLClientHelper.GetDrillingRadiusParamById(drp_id)\n\t\treturn drp\n\n\tdef onCoalChanged(self, index):\n\t\tif index < 0:return\n\t\t# 查询当前煤层\n\t\tcoal_id, ok = self.ui.coal.itemData(index).toInt()\n\t\tcoal = SQLClientHelper.GetCoalById(coal_id)\n\t\tif coal.id <= 0:return\n\n\t\t# 查询��层关联的抽采半径参数对象\n\t\t# 如果没有就内部新建一个\n\t\tdrp = self.getDrp(coal.id, True)\n\t\tif drp.id <= 0:return\n\n\t\t# 填充煤层数据\n\t\tself.ui.hw.setText(u'%.1f' % coal.hw)\n\t\tself.ui.gas_w0.setText(u'%.1f' % coal.gas_w0)\n\t\tself.ui.gas_wc3.setText(u'%.1f' % coal.gas_wc3)\n\n\t\t# 填充瓦斯抽采半径数据\n\t\tself.ui.rho.setText(u'%.1f' % drp.rho)\n\t\tself.ui.a.setText(u'%.1f' % drp.a)\n\t\tself.ui.t.setText(u'%.1f' % drp.t)\n\t\tself.ui.l.setText(u'%.1f' % drp.l)\n\t\tself.ui.q0.setText(u'%.1f' % drp.q0)\n\t\tself.ui.r.setText(u'%.1f' % drp.r)\n\t\tself.ui.qm.setText(u'%.1f' % drp.qm)\n\t\tself.ui.qsum.setText(u'%.1f' % drp.qsum)\n\t\tself.ui.eta.setText(u'%.1f' % drp.eta)\n\t\tself.ui.k1.setText(u'%.1f' % drp.k1)\n\n\tdef onSave(self):\n\t\tindex = self.ui.coal.currentIndex()\n\t\tif index < 0:\n\t\t\tUiHelper.MessageBox(u'sorry, 出了点问题,请联系技术人员(错误码:Y1)')\n\t\t\treturn\n\t\t# 查询当前煤层\n\t\tcoal_id, ok = self.ui.coal.itemData(index).toInt()\n\t\tcoal = SQLClientHelper.GetCoalById(coal_id)\n\t\tif coal.id <= 0:\n\t\t\tUiHelper.MessageBox(u'sorry, 出了点问题,请联系技术人员(错误码:Y2)')\n\t\t\treturn\n\n\t\t# 查询煤层关联的抽采半径参数对象\n\t\t# 如果没有就内部新建一个\n\t\tdrp = self.getDrp(coal.id, False)\n\t\tif drp.id <= 0:\n\t\t\tUiHelper.MessageBox(u'sorry, 出了点问题,请联系技术人员(错误码:Y3)')\n\t\t\treturn\n\n\t\t# 保存煤层\n\t\tcoal.hw, ok = self.ui.hw.text().toDouble()\n\t\tcoal.gas_w0, ok = self.ui.gas_w0.text().toDouble()\n\t\tcoal.gas_wc3, ok = self.ui.gas_wc3.text().toDouble()\n\t\tif not SQLClientHelper.UpdateCoal(coal):\n\t\t\tUiHelper.MessageBox(u'sorry, 出了点问题,请联系技术人员(错误码:Y4)')\n\t\t\treturn\n\n\t\t# 保存瓦斯抽采半径数据\n\t\tdrp.rho, ok = self.ui.rho.text().toDouble()\n\t\tdrp.a, ok = self.ui.a.text().toDouble()\n\t\tdrp.t, ok = self.ui.t.text().toDouble()\n\t\tdrp.l, ok = self.ui.l.text().toDouble()\n\t\tdrp.q0, ok = self.ui.q0.text().toDouble()\n\t\tdrp.r, ok = self.ui.r.text().toDouble()\n\t\tdrp.qm, ok = self.ui.qm.text().toDouble()\n\t\tdrp.qsum, ok = self.ui.qsum.text().toDouble()\n\t\tdrp.eta, ok = self.ui.eta.text().toDouble()\n\t\tdrp.k1, ok = 
self.ui.k1.text().toDouble()\n\n\t\tif not SQLClientHelper.UpdateDrillingRadiusParam(drp):\n\t\t\tUiHelper.MessageBox(u'sorry, 出了点问题,请联系技术人员(错误码:Y5)')\n\t\t\n\t\tUiHelper.MessageBox(u'恭喜您,更新数据成功!')\n\n\t\t# 关闭对话框并返回1\n\t\t# self.accept()\n\n\tdef onCacl(self):\n\t\tDataHelper.drilling_ratio(40,10,1)\n\t\treturn\n\t\t# 从界面中提取数据\n\t\tr, ok = self.ui.r.text().toDouble()\n\t\tl, ok = self.ui.l.text().toDouble()\n\t\tk1, ok = self.ui.k1.text().toDouble()\n\t\trho, ok = self.ui.rho.text().toDouble()\n\t\tq0, ok = self.ui.q0.text().toDouble()\n\t\ta, ok = self.ui.a.text().toDouble()\n\t\tt, ok = self.ui.t.text().toDouble()\n\t\th, ok = self.ui.hw.text().toDouble()\n\t\tw, ok = self.ui.gas_w0.text().toDouble()\n\t\tqm, ok = self.ui.qm.text().toDouble()\n\t\tq, ok = self.ui.qsum.text().toDouble()\n\t\teta, ok = self.ui.eta.text().toDouble()\n\t\t\n\t\t# 计算\n\t\tif r == 0 or h == 0 or w == 0 or l == 0:\n\t\t\tUiHelper.MessageBox(u'所有数据都必须大于0!!!')\n\t\t\treturn\n\t\t# 计算Qm\n\t\tif 2*r < h or abs(2*r - h) < 1e-3:\n\t\t\ts = pi * r * r # 面积\n\t\t\tqm = 1440*k1*q0*(1-exp(-1*a*t)) / (rho * a * s *(r+l))\n\t\telse:\n\t\t\ts = 2 * r * r * asin(0.5*h/r) + h*sqrt(r*r -0.25*h*h)\n\t\t\tqm = 1440*k1*q0*(1-exp(-1*a*t)) / (rho * a * s *(r+l))\n\t\t\n\t\t# 计算eta和wc\n\t\teta = qm / w\n\t\twc = w - q\n\n\t\t# 更新到界面\n\t\tself.ui.qm.setText(u'%.1f' % qm)\n\t\tself.ui.eta.setText(u'%.1f' % eta)\n\t\tself.ui.gas_wc3.setText(u'%.1f' % wc)\n\n\t\tUiHelper.MessageBox(u'更新计算结果成功!')\n\t\t# 关闭对话框并返回1\n\t\t# self.accept()","sub_path":"python/cbm/dialogs/GasDrillingRadiusDlg.py","file_name":"GasDrillingRadiusDlg.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"141943061","text":"from tealight.logo import move, turn\n\n\ndef square(side):\n for i in range(0,3): #repeats this many times, should equal number of edges of decoration\n move(side)\n turn(120)\n\ndef waterwheel(edges, size): #side = sidelength\n angle = 360 / edges #finds angle between each edge\n decoration = size / 2 #makes square side length half size\n for i in range(0, edges):\n move(size)\n square(decoration)\n turn(angle)\n\nturn(-90) #affects starting pitch of \"turtle\"\nwaterwheel(12,75)\n","sub_path":"logo/waterwheel.py","file_name":"waterwheel.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"501651081","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\n This script should parse the pcap and csv files and find\n the required info for each return from the .xyz files.\n Then save it in 1 place...\n\"\"\"\n\nimport sys, os, logging, re, datetime\nimport numpy as np\nimport pandas as pd\nfrom vlp16_tables import *\nfrom scipy.interpolate import interp1d\n\nlog_dir = 'p_logging'\ntxt_dir_in = 'p_01_apx_csv_shapefile'\ntxt_in_base_len = len(txt_dir_in)\npcap_dir_in = 'p_22_likely_gcp_pcap'\npcap_in_base_len = len(pcap_dir_in)\nxyz_dir_in = 'p_20_likely_gcp'\nxyz_dir_in_ref = 'p_21_refined_gcp'\nout_dir_ascii = 'p_24_likely_gcp_infos'\nout_ascii_base_len = len(out_dir_ascii)\nfn_keyword = 'hi_freq_apx'\n\ndef remove_min_sec(ts):\n return (int(ts) // 3600) * 3600\n\n# ### Function to calculate the gaps between given azimuths. 
Needed to interpolate azimuths that are not given.\n\ndef get_azim_gap(azimuths, dual = True, preserve_shape = False):\n \"\"\"\n Only works for dual returns now.\n \n preserve_shape is relevant for dual, where the azimuths repeat.\n if False: return only unique gaps.\n if True: return same shape as azimuths\n \"\"\"\n if dual:\n azimuths_gap_flat = np.zeros_like(azimuths[0::2]).flatten()\n azimuths_gap_flat[:-1] = ((azimuths[0::2].flatten()[1:] - azimuths[0::2].flatten()[:-1]) % 36000)\n azimuths_gap_flat[-1] = azimuths_gap_flat[-2]\n azimuths_gap = azimuths_gap_flat.reshape(azimuths[0::2].shape)\n if preserve_shape:\n #either of the following lines should work the same. only use one.\n #azimuths_gap = np.hstack((azimuths_gap.reshape((azimuths_gap.size,1)), azimuths_gap.reshape((azimuths_gap.size,1)))).flatten().reshape(azimuths.shape)\n azimuths_gap = np.tile(azimuths_gap,2)\n return azimuths_gap\n else:\n raise NotImplementedError\n\ndef get_micros_pulses(micros, dual = True, preserve_shape = False):\n \"\"\"\n preserve_shape is relevant for dual, where the azimuths repeat.\n if False: return only unique gaps.\n if True: return same shape as azimuths\n \"\"\"\n if dual:\n if preserve_shape:\n micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_DUAL.T.flatten() * 1e6\n else:\n micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_DUAL.T[0::2,:].flatten() * 1e6\n else:\n micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_SINGLE.T.flatten() * 1e6\n \n MICROS_PER_HOUR = np.array([1e6 * 3600]).astype(np.int64)[0]\n micros_pulses %= MICROS_PER_HOUR\n return micros_pulses\n\ndef get_nanos_pulses(micros, dual = True, preserve_shape = False):\n \"\"\"\n Get the nanoseconds from the top of the hour for each laser pulse. Need for boresight!\n preserve_shape is relevant for dual, where the azimuths repeat.\n if False: return only unique gaps.\n if True: return same shape as azimuths\n \"\"\"\n nanos = np.copy(micros) \n nanos *= 1000\n if dual:\n if preserve_shape:\n nanos_pulses = np.expand_dims(nanos, axis=1) + (TIMING_OFFSETS_DUAL_nano.T.flatten()).astype(np.int64)\n else:\n nanos_pulses = np.expand_dims(nanos, axis=1) + (TIMING_OFFSETS_DUAL_nano.T[0::2,:].flatten()).astype(np.int64)\n else:\n nanos_pulses = np.expand_dims(nanos, axis=1) + (TIMING_OFFSETS_SINGLE_nano.T.flatten()).astype(np.int64)\n NANOS_PER_HOUR = np.array([1e9 * 3600]).astype(np.int64)[0]\n nanos_pulses %= NANOS_PER_HOUR\n return nanos_pulses\n\ndef get_precision_azimuth(az_simple, azimuths_gap, dual = True, minimal_shape = True):\n az_simple = az_simple[np.newaxis, :]\n azimuths_gap = azimuths_gap[np.newaxis,:]\n if dual:\n timing_offsets_within_block = TIMING_OFFSETS_DUAL[:,0]\n az_pulses = np.tile(az_simple,(LASERS_PER_DATA_BLOCK)).reshape(az_simple.shape[0], LASERS_PER_DATA_BLOCK, az_simple.shape[1])\n az_pulses = az_pulses.transpose((0,2,1))\n precision_azimuth = az_pulses[:,:,:] + timing_offsets_within_block / (2 * T_CYCLE) * np.expand_dims(azimuths_gap, axis=2)\n precision_azimuth = precision_azimuth % 36000\n if not minimal_shape:\n precision_azimuth = np.tile(precision_azimuth.transpose((0,2,1)), (1,2,1)).transpose((0,2,1))\n precision_azimuth = precision_azimuth.reshape((precision_azimuth.shape[0], precision_azimuth.shape[1] * precision_azimuth.shape[2]))\n return precision_azimuth[0,:]\n else:\n raise NotImplementedError\n\ndef get_azimuth_of_interest(loc_raw_data, loc_packet_pos, loc_pulse_pos):\n #azimuth\n azimuths = np.zeros((DATA_BLOCKS, 1), dtype = np.int32)\n azim_data = 
azimuths.view(dtype = np.uint8)\n azim_data[:,0:2] = loc_raw_data[loc_packet_pos, :-6].reshape(DATA_BLOCKS, 100)[:, 2:4]\n azim_data = azim_data.reshape((DATA_BLOCKS * 4))\n azimuths = azim_data.view(dtype= np.int32)\n azimuths_gap = get_azim_gap(azimuths)\n # ### Calculate the azimutzhs for each datapoint\n #Use the following simplified array if in dual mode\n #Otherwise can still refer to it, but it's just the original array\n mode_hypothesis = loc_raw_data[0, RETURN_MODE_OFFSET]\n if mode_hypothesis == RETURN_MODE_DUAL:\n # az_simple = azimuths[:,0::2]\n az_simple = azimuths[0::2]\n else:\n az_simple = azimuths\n prec_az = get_precision_azimuth(az_simple, azimuths_gap, True, True)\n the_azimuth_rad = np.deg2rad(prec_az[loc_pulse_pos] / 100)\n return the_azimuth_rad\n\ndef make_puck_ranges_mm(channel_data):\n puck_ranges_mm = np.zeros((DATA_BLOCKS * LASERS_PER_DATA_BLOCK), dtype = np.uint32)\n puck_range_bytes = puck_ranges_mm.view(dtype = np.uint8)\n puck_range_bytes[0::4] = channel_data[0::3]\n puck_range_bytes[1::4] = channel_data[1::3]\n puck_ranges_mm *= 2\n return puck_ranges_mm\n\ndef make_puck_intensities(channel_data):\n puck_intens = np.zeros((DATA_BLOCKS * LASERS_PER_DATA_BLOCK), dtype = np.uint8)\n puck_intens[:] = channel_data[2::3]\n return puck_intens\n\ndef puck_shperical_to_cartesian(puck_range, alpha, omega, vert_corr):\n \"\"\"\n input: range, alpha, omega, vert_corr\n output:\n xyz in puck coordinates\n \"\"\"\n X_puck = puck_range * np.cos(omega) * np.sin(alpha)\n Y_puck = puck_range * np.cos(omega) * np.cos(alpha)\n Z_puck = puck_range * np.sin(omega)\n Z_puck += vert_corr\n return np.vstack((X_puck, Y_puck, Z_puck)).T\n\ndef puck_to_uav_coords(XYZ_puck):\n # first rotate into XYZ of the drone!\n # x_roll = -90 #degrees\n # y_pitch = 0\n # z_yaw = -90\n #rotation from puck to uav coordinates:\n R_01 = np.array([[0., 1., 0.],\n [0., 0., 1.],\n [1., 0., 0.]])\n\n XYZ_uav = np.matmul(XYZ_puck, R_01)\n return XYZ_uav\n\ndef pcap_to_uav_coord(pcap_file_in, pcap_dir_in, xyz_str):\n \"\"\"\n Process a point upto the return coordinates in uav frame\n Input:\n 1 pcap file, where the point of interest is roughly in the middle\n pcap directory name\n string corresponding to the point, coming from the xyz file\n \"\"\"\n with open(os.path.join(pcap_dir_in, pcap_file_in), \"rb\") as fh:\n raw_pcap = fh.read()\n #24 bytes = global header, 16+42 bytes = packet header, 1200 bytes = vlp returns, 2 bytes = vlp factory bytes\n raw_data = np.frombuffer(raw_pcap, dtype = np.uint8)[24:].reshape((len(raw_pcap)//1264,1264))[:,16+42:]\n\n target_nano = (np.array([xyz_str.split()[9]])).astype(np.int64)[0]\n target_channel = int(xyz_str.split()[10])\n target_intensity = int(xyz_str.split()[5])\n\n\n # #### Take Ethernet timestamp of (1st) packet, discard sub-hour info and add replace it with that from GNSS µs timestamp\n # \n # What happens when the capture rolls over a full hour?\n # \n # **Need to deal with this when such data is captured!**\n # \n # # Solution below!\n micros = np.zeros((raw_data.shape[0],), dtype = np.int64)\n micro_bytes = micros.view(dtype = np.uint8)\n micro_bytes[0::8] = raw_data[:, DATA_PACK_TIMESTAMP_OFFSET + 0]\n micro_bytes[1::8] = raw_data[:, DATA_PACK_TIMESTAMP_OFFSET + 1]\n micro_bytes[2::8] = raw_data[:, DATA_PACK_TIMESTAMP_OFFSET + 2]\n micro_bytes[3::8] = raw_data[:, DATA_PACK_TIMESTAMP_OFFSET + 3]\n\n # #### Another problem could be that the UDP packets are not guaranteed to arrive in order.\n # \n # An assumption that is made for the following calculations 
is that this does not happen.\n # \n # **Need to deal with this when such data is captured!**\n while (micros[1:] < micros[:-1]).sum() > 0:\n jump_position = np.where((micros[1:] < micros[:-1]))[0][0] + 1\n micros[jump_position:] += int(3.6e9)\n\n nanos_pulses = get_nanos_pulses(micros)\n\n precise_location = np.where(nanos_pulses == target_nano)\n if precise_location[0].size == 0: #wrong packet?!\n return None\n \n the_packet_pos = precise_location[0][0]\n the_pulse_pos = precise_location[1][0]\n assert target_channel == the_pulse_pos % 16, f'Target channel does not match. xyz_str: {xyz_str}. the_pulse_pos: {the_pulse_pos}'\n\n # the following contains only channel data (i.e. no timestamp, factory bytes or azimuth)\n channel_data = raw_data[the_packet_pos,:-6].reshape((1, DATA_BLOCKS, 100))[:,:,4:]\n # channel_data = channel_data.reshape((1, DATA_BLOCKS * LASERS_PER_DATA_BLOCK * 3))\n channel_data = channel_data.reshape((DATA_BLOCKS * LASERS_PER_DATA_BLOCK * 3))\n\n #puck ranges in mm\n puck_ranges = make_puck_ranges_mm(channel_data)\n\n #intensities as 1 byte\n puck_intens = make_puck_intensities(channel_data)\n\n\n # strongest and last ranges (meters) + intensities\n strongest_return_ranges = puck_ranges.reshape((DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[1::2].flatten() / 1000\n strongest_return_intensities = puck_intens.reshape((DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[1::2].flatten()\n last_return_ranges = puck_ranges.reshape((DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[0::2].flatten() / 1000\n last_return_intensities = puck_intens.reshape((DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[0::2].flatten()\n\n #range of the point of interest\n the_return_id = int(xyz_str.split()[4])\n if the_return_id==1:\n the_precise_range = strongest_return_ranges[the_pulse_pos]\n assert strongest_return_intensities[the_pulse_pos]==target_intensity,\\\n f'Target intensity not matched! xyz_str: {xyz_str}. This intensity: {strongest_return_intensities[the_pulse_pos]}'\n elif the_return_id==2:\n the_precise_range = last_return_ranges[the_pulse_pos]\n assert last_return_intensities[the_pulse_pos]==target_intensity,\\\n f'Target intensity not matched! xyz_str: {xyz_str}. This intensity: {last_return_intensities[the_pulse_pos]}'\n else:\n raise ValueError(f'return ID > 2! ({the_return_id}). xyz_str: {xyz_str}')\n assert the_precise_range > 0, f'Range is not larger than 0! 
xyz_str: {xyz_str}'\n\n the_azimuth = get_azimuth_of_interest(raw_data, the_packet_pos, the_pulse_pos)\n elev_angle_and_corr = elevation_and_vert_corr_by_laser_id[target_channel]\n the_elev_angle = elev_angle_and_corr[0]\n the_vert_corr = elev_angle_and_corr[1]\n\n puck_xyz = puck_shperical_to_cartesian(the_precise_range, the_azimuth, the_elev_angle, the_vert_corr)\n uav_xyz = puck_to_uav_coords(puck_xyz)\n\n return uav_xyz\n\ndef apx_df_to_interp_funcs(apx_df):\n \"\"\"\n Makes 6 functions that are used to interpolate lon, lat, ele,\n rol, pitch and yaw for any time that is covered by the apx dataset.\n \"\"\"\n f_lat = interp1d(apx_df.index, apx_df[\"lat_EPSG32632\"], kind = 'cubic', fill_value=\"extrapolate\")\n f_lon = interp1d(apx_df.index, apx_df[\"lon_EPSG32632\"], kind = 'cubic', fill_value=\"extrapolate\")\n f_ele = interp1d(apx_df.index, apx_df[\"elevation\"], kind = 'cubic', fill_value=\"extrapolate\")\n f_yaw = interp1d(apx_df.index, apx_df[\"heading_continuous\"], kind = 'cubic', fill_value=\"extrapolate\")\n f_rol = interp1d(apx_df.index, apx_df[\"roll\"], kind = 'cubic', fill_value=\"extrapolate\")\n f_pit = interp1d(apx_df.index, apx_df[\"pitch\"], kind = 'cubic', fill_value=\"extrapolate\")\n return f_lat, f_lon, f_ele, f_yaw, f_rol, f_pit\n\ndef apx_funcs_to_interp_values(xyz_str, f_lat, f_lon, f_ele, f_yaw, f_rol, f_pit):\n \"\"\"\n Uses the 6 functions made above to provide lon, lat, ... for a specific timestamp\n \"\"\"\n the_time = float(xyz_str.split()[7]) + 1e9\n lat = f_lat(the_time)\n lon = f_lon(the_time)\n ele = f_ele(the_time)\n yaw = -np.deg2rad(f_yaw(the_time) % 360)\n rol = -np.deg2rad(f_rol(the_time))\n pit = -np.deg2rad(f_pit(the_time))\n return lat, lon, ele, yaw, rol, pit\n\ndef yaw_correction_to_interp_func():\n \"\"\"\n Makes 1 function that is used to interpolate yaw correction is present.\n 'yaw_correction.csv' must be present in p_03_pcap!\n \"\"\"\n if 'yaw_correction.csv' in os.listdir('p_03_pcap'):\n yaw_df = pd.read_csv(os.path.join('p_03_pcap', 'yaw_correction.csv'), index_col=0)\n else:\n return None\n\n f_yaw_corr = interp1d(yaw_df.index, yaw_df[\"smooth_yaw_err\"], kind = 'cubic', fill_value=\"extrapolate\")\n\n return f_yaw_corr\n\ndef yaw_corr_func_to_interp_value(xyz_str, f_yaw_corr):\n \"\"\"\n Uses the function made above to provide yaw correction for a specific timestamp\n \"\"\"\n if f_yaw_corr is None:\n return 0\n \n the_time = float(xyz_str.split()[7]) + 1e9\n yaw_corr = np.deg2rad((f_yaw_corr(the_time) +360) % 360)\n \n return yaw_corr\n\nif __name__=='__main__':\n this_script = sys.argv[0]\n\n if len(sys.argv)<=1:\n print(\"ERROR\\n\\nPlease provide the absolute path of the campaign's top directory.\\n\\nEXITING\")\n sys.exit(0)\n else:\n try: top_dir = sys.argv[1].rstrip(os.path.sep)\n except Exception as e: print(e); sys.exit(0)\n\n if sys.byteorder == 'little':\n print(f\"System endianness: {sys.byteorder}.\\nGood to go :)\")\n else:\n print(f\"System endianness: {sys.byteorder}.\\nSTOP right now!\")\n sys.exit(0)\n\n os.chdir(top_dir)\n if os.getcwd() != top_dir:\n print(\"Something went wrong.\")\n print(f\"cwd: {os.getcwd()}\")\n sys.exit(0)\n \n # if refined files are available\n if xyz_dir_in_ref in os.listdir():\n xyz_dir_in = xyz_dir_in_ref\n \n if log_dir not in os.listdir():\n os.mkdir(log_dir)\n\n if out_dir_ascii not in os.listdir():\n os.mkdir(out_dir_ascii)\n\n logging.basicConfig(format='<%(asctime)s> <%(levelname)-8s> <%(message)s>',\n level=logging.ERROR,\n filename=os.path.join(top_dir, log_dir, 
f'{datetime.date.today().strftime(\"%Y.%m.%d\")}_{os.path.split(this_script)[-1]}.log'),\n datefmt='%Y-%m-%d %H:%M:%S')\n logging.info(f\"{this_script} has started and received a valid top directory name\")\n\n potential_dirs_in = len(re.findall(txt_dir_in, \" \".join(os.listdir())))\n if potential_dirs_in == 0:\n print(\"No valid input directories found. Make sure to run script 00_join_pcap_.. first\\nEXITING!\")\n sys.exit(0)\n elif potential_dirs_in > 1:\n print(\"Multiple input directories found. Choose one:\")\n valid_choices = dict()\n for i, d_name in enumerate(os.listdir()):\n if os.path.isfile(d_name):\n continue\n elif txt_dir_in not in d_name:\n continue\n else:\n print(f\"{i}: {d_name}\")\n valid_choices[i] = d_name\n\n dir_in_selected = False\n while not dir_in_selected:\n try:\n dir_in_id = int(input(\"Which directory do we use? Type a number and press enter.\\n\"))\n if dir_in_id in valid_choices.keys():\n txt_dir_in = valid_choices[dir_in_id]\n dir_in_selected = True\n except:\n print(\"Invalid choice!\")\n potential_pcap_dirs_in = len(re.findall(pcap_dir_in, \" \".join(os.listdir())))\n if potential_pcap_dirs_in == 0:\n print(\"No valid PCAP input directories found. Make sure to run the other scripts first\\nEXITING!\")\n sys.exit(0)\n elif potential_pcap_dirs_in > 1:\n print(\"Multiple input directories found. Choose one:\")\n valid_choices = dict()\n for i, d_name in enumerate(os.listdir()):\n if os.path.isfile(d_name):\n continue\n elif pcap_dir_in not in d_name:\n continue\n else:\n print(f\"{i}: {d_name}\")\n valid_choices[i] = d_name\n\n dir_in_selected = False\n while not dir_in_selected:\n try:\n dir_in_id = int(input(\"Which directory do we use? Type a number and press enter.\\n\"))\n if dir_in_id in valid_choices.keys():\n pcap_dir_in = valid_choices[dir_in_id]\n dir_in_selected = True\n except:\n print(\"Invalid choice!\")\n\n print(f\"Using {txt_dir_in} and {pcap_dir_in} as input\")\n logging.info(f\"Using {txt_dir_in} and {pcap_dir_in} as input\")\n\n print(\"Making APX dataframe...\")\n csv_files = [f for f in sorted(os.listdir(txt_dir_in)) if fn_keyword in f]\n print(f\"Found {len(csv_files)} CSV files.\")\n big_apx_df = pd.concat((pd.read_csv(os.path.join(txt_dir_in, f), sep = \";\", index_col = 0, na_values = \"NAN\") for f in csv_files))\n big_apx_df = big_apx_df[[\"lon_EPSG32632\", \"lat_EPSG32632\", \"elevation\", \"heading_continuous\", \"roll\", \"pitch\"]]\n \n # print(big_apx_df.head())\n # somehow there might be duplicate times??\n big_apx_df = big_apx_df[~big_apx_df.index.duplicated(keep='first')]\n bad_rows = np.isnan(big_apx_df).sum(axis=1) > 0\n big_apx_df = big_apx_df[~bad_rows]\n\n f_lat, f_lon, f_ele, f_yaw, f_rol, f_pit = apx_df_to_interp_funcs(big_apx_df)\n f_yaw_corr = yaw_correction_to_interp_func() # returns interpolation function or None\n if f_yaw_corr is None:\n print('NO yaw correction')\n else:\n print ('USING yaw correction')\n\n print(f\"Done concatenating {len(csv_files)} CSV files into dataframe.\")\n print(\"\\n\\n\\n\")\n logging.info(f\"Used {csv_files} from {txt_dir_in}\")\n\n\n\n # pcap_to_uav_coord('line_001_point_0000.pcap', pcap_dir_in, '343040.693 5709133.784 28.559 1 1 171 1 614249080.422688961 -18.780 -12.399 8 28.559')\n # curr_str = '343040.693 5709133.784 28.559 1 1 171 1 614249080.422688961 -18.780 1880422689024 8 28.559'\n # uav_coords = pcap_to_uav_coord('line_001_point_0000.pcap', pcap_dir_in, curr_str)\n # print(uav_coords)\n\n # lat, lon, ele, yaw, rol, pit = apx_funcs_to_interp_values(curr_str, f_lat, 
f_lon, f_ele, f_yaw, f_rol, f_pit)\n # print(lat, lon, ele, yaw, rol, pit)\n # pcap_to_uav_coord('line_001_point_0001.pcap', pcap_dir_in, '343040.717 5709133.886 28.578 1 1 177 1 614249080.122606993 -18.267 1880122606904 12 28.578')\n\n pcap_files_to_process = sorted([fn for fn in os.listdir(pcap_dir_in) \\\n if len(fn)==24 and fn.endswith('.pcap')])\n xyz_lines_to_process = []\n xyz_files_to_process = sorted([fn for fn in os.listdir(xyz_dir_in)\\\n if len(fn)==12 and fn.endswith('.xyz')])\n for xyz_file in xyz_files_to_process:\n with open(os.path.join(xyz_dir_in, xyz_file), 'r') as fh:\n curr_lines = fh.readlines()\n xyz_lines_to_process.extend(curr_lines)\n\n assert len(pcap_files_to_process)==len(xyz_lines_to_process), f'nr of PCAP files and strings do not match ({len(pcap_files_to_process)} vs {len(xyz_lines_to_process)})'\n n_rows = len(pcap_files_to_process)\n n_cols = 3 + 3 + 6 + 1 #xyz pulse, xyz drone, sin/cos rotation, yaw correction\n output = np.ones((n_rows, n_cols), dtype = np.float64) * np.nan\n temp_angles = np.ones((n_rows, 3), dtype = np.float64) * np.nan\n for pcap_file, xyz_line, row_number in zip(pcap_files_to_process, xyz_lines_to_process, np.arange(n_rows)):\n print(row_number)\n uav_coords = pcap_to_uav_coord(pcap_file, pcap_dir_in, xyz_line)\n if uav_coords is not None:\n uav_coords = uav_coords.flatten()\n lat, lon, ele, yaw, rol, pit = apx_funcs_to_interp_values(xyz_line, f_lat, f_lon, f_ele, f_yaw, f_rol, f_pit)\n output[row_number][:3] = uav_coords\n output[row_number][3:6] = lat, lon, ele\n temp_angles[row_number][:] = yaw, rol, pit\n yaw_correction = yaw_corr_func_to_interp_value(xyz_line, f_yaw_corr)\n output[row_number][12] = yaw_correction\n for i in range(temp_angles.shape[1]):\n #6 columns with xyz uav and lon, lat, elev\n col0 = 6 + i*2\n col1 = col0 + 1\n print(col0, col1, temp_angles[0,i])\n output[:,col0] = np.sin(temp_angles[:,i])\n output[:,col1] = np.cos(temp_angles[:,i])\n \n if np.isnan(output).any():\n print('WARNING bad output.')\n np.savetxt(os.path.join(out_dir_ascii, 'human_readable.txt'), output)\n np.save(os.path.join(out_dir_ascii, 'point_infos.npy'), output, False)","sub_path":"14_boresight_calib/02.1_extract_point_infos.py","file_name":"02.1_extract_point_infos.py","file_ext":"py","file_size_in_byte":21092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"36137628","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nfrom astropy.visualization import make_lupton_rgb\n#I = fits.getdata(input(\"input file name:\"))\n#I = fits.getdata('Dusty_xy_total.fits')\n#print(np.shape(I))\n\nn = 20\nWavelength = 1e-2*1e5**(n/50)\n\ndef ShowIm(n):\n Data = I[n,:,:]\n mean = np.mean(Data)\n std = np.std(Data)\n plt.imshow(I[n,:,:],vmin = mean-std , vmax = mean+3*std, cmap='gray')\n plt.colorbar()\n plt.title(\"Wavelength = {} $\\mu m$\".format(Wavelength))\n plt.show()\n return \n\ndef RGB():\n Data = fits.getdata(input('input fits file:'))\n B = Data[0,:,:]\n G = Data[1,:,:]\n R = Data[2,:,:]\n image = make_lupton_rgb(R, G, B, stretch=0.01)\n mean = np.mean(image)\n std = np.std(image)\n plt.imshow(image)#,vmin = mean-std , vmax = mean+3*std)\n plt.show()\n\n\nRGB()\n#ShowIm(n)\n","sub_path":"Stellar/Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315238237","text":"\nfrom .network import Network\nfrom .legacy_networks import 
NETWORKS\n\nfrom aiobitcoin.tools.tx.Tx import Tx as BitcoinTx\nfrom aiobitcoin.tools.block import Block as BitcoinBlock\n\nfrom ..serialize import h2b\n\nBUILT_IN_NETWORKS = [\n\n # BTC bitcoin mainnet : xprv/xpub\n Network(\n 'BTC', \"Bitcoin\", \"mainnet\",\n b'\\x80', b'\\0', b'\\5', h2b(\"0488ADE4\"), h2b(\"0488B21E\"),\n BitcoinTx, BitcoinBlock,\n h2b('F9BEB4D9'), 8333, [\n \"seed.bitcoin.sipa.be\", \"dnsseed.bitcoin.dashjr.org\",\n \"bitseed.xf2.org\", \"dnsseed.bluematt.me\",\n ],\n bech32_hrp='bc'\n ),\n\n # BTC bitcoin testnet : tprv/tpub\n Network(\n \"XTN\", \"Bitcoin\", \"testnet3\",\n b'\\xef', b'\\x6f', b'\\xc4', h2b(\"04358394\"), h2b(\"043587CF\"),\n BitcoinTx, BitcoinBlock,\n h2b('0B110907'), 18333, [\n \"bitcoin.petertodd.org\", \"testnet-seed.bitcoin.petertodd.org\",\n \"bluematt.me\", \"testnet-seed.bluematt.me\"\n ],\n bech32_hrp='tb'\n ),\n\n # LTC litecoin mainnet : Ltpv/Ltub\n Network(\n \"LTC\", \"Litecoin\", \"mainnet\",\n b'\\xb0', b'\\x30', b'\\5',\n h2b('019d9cfe'), h2b('019da462'),\n tx=BitcoinTx, block=BitcoinBlock,\n bech32_hrp='lc'\n ),\n\n # LTC litecoin testnet : ttpv/ttub\n Network(\n \"XLT\", \"Litecoin\", \"testnet\",\n b'\\xef', b'\\x6f', b'\\xc4',\n h2b('0436ef7d'), h2b('0436f6e1'),\n tx=BitcoinTx, block=BitcoinBlock,\n bech32_hrp='tl'\n )\n\n]\n\n\ndef _transform_NetworkValues_to_Network(nv):\n defaults = dict(\n tx=None, block=None, magic_header=None, dns_bootstrap=[], default_port=None, bech32_hrp=None)\n defaults.update(nv._asdict())\n\n return Network(**defaults)\n\n\ndef _import_legacy():\n for n in NETWORKS:\n n1 = _transform_NetworkValues_to_Network(n)\n BUILT_IN_NETWORKS.append(n1)\n\n\n_import_legacy()\n","sub_path":"aiobitcoin/tools/networks/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"125484188","text":"import numpy as np\nimport pandas as pd\nimport copt\nimport warnings\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom numpy.linalg import multi_dot\nfrom lifelines.utils import concordance_index as c_index_score\nfrom lights.base.base import Learner, extract_features, normalize, block_diag, \\\n get_xi_from_xi_ext, logistic_grad, get_times_infos, get_ext_from_vect, \\\n get_vect_from_ext\nfrom lights.init.mlmm import MLMM\nfrom lights.init.cox import initialize_asso_params\nfrom lights.model.e_step_functions import EstepFunctions\nfrom lights.model.m_step_functions import MstepFunctions\nfrom lights.model.regularizations import ElasticNet, SparseGroupL1\n\n\nclass QNMCEM(Learner):\n \"\"\"QNMCEM Algorithm for the lights model inference\n\n Parameters\n ----------\n fit_intercept : `bool`, default=True\n If `True`, include an intercept in the model for the time independent\n features\n\n l_pen_EN : `float`, default=0.\n Level of penalization for the ElasticNet\n\n l_pen_SGL_beta : `float`, default=0.\n Level of penalization for the Sparse Group l1 on beta\n\n l_pen_SGL_gamma : `float`, default=0.\n Level of penalization for the Sparse Group l1 on gamma\n\n eta_elastic_net: `float`, default=0.1\n The ElasticNet mixing parameter, with 0 <= eta_elastic_net <= 1.\n For eta_elastic_net = 0 this is ridge (L2) regularization\n For eta_elastic_net = 1 this is lasso (L1) regularization\n For 0 < eta_elastic_net < 1, the regularization is a linear combination\n of L1 and L2\n\n eta_sp_gp_l1: `float`, default=0.1\n The Sparse Group l1 mixing parameter, with 0 <= eta_sp_gp_l1 <= 1\n\n max_iter : `int`, default=100\n Maximum 
number of iterations of the solver\n\n verbose : `bool`, default=True\n If `True`, we verbose things, otherwise the solver does not\n print anything (but records information in history anyway)\n\n print_every : `int`, default=10\n Print history information when ``n_iter`` (iteration number) is\n a multiple of ``print_every``\n\n tol : `float`, default=1e-5\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it). By default the solver does ``max_iter``\n iterations\n\n warm_start : `bool`, default=True\n If true, learning will start from the last reached solution\n\n fixed_effect_time_order : `int`, default=5\n Order of the higher time monomial considered for the representations of\n the time-varying features corresponding to the fixed effect. The\n dimension of the corresponding design matrix is then equal to\n fixed_effect_time_order + 1\n\n asso_functions : `list` or `str`='all', default='all'\n List of association functions wanted or string 'all' to select all\n defined association functions. The available functions are :\n - 'lp' : linear predictor\n - 're' : random effects\n - 'tps' : time dependent slope\n - 'ce' : cumulative effects\n\n initialize : `bool`, default=True\n If `True`, we initialize the parameters using MLMM model, otherwise we\n use arbitrarily chosen fixed initialization\n\n copt_accelerate : `bool`, default=False\n If `True`, we choose copt solver with accelerated proximal\n gradient (FISTA), otherwise we use regular ISTA.\n\n compute_obj : `bool`, default=False\n If `True`, we compute the global objective to be minimized by the QNMCEM\n algorithm and store it in history.\n\n MC_sep: `bool`, default=False\n If `False`, we use the same set of MC samples for all subject,\n otherwise we sample a seperate set of MC samples for each subject\n \"\"\"\n\n def __init__(self, fit_intercept=False, l_pen_EN=0., l_pen_SGL_beta=0.,\n l_pen_SGL_gamma=0., eta_elastic_net=.1, eta_sp_gp_l1=.1,\n max_iter=100, verbose=True, print_every=10, tol=1e-5,\n warm_start=True, fixed_effect_time_order=5,\n asso_functions='all', initialize=True, copt_accelerate=False,\n compute_obj=False, MC_sep=False):\n Learner.__init__(self, verbose=verbose, print_every=print_every)\n self.max_iter = max_iter\n self.tol = tol\n self.warm_start = warm_start\n self.fit_intercept = fit_intercept\n self.fixed_effect_time_order = fixed_effect_time_order\n self.asso_functions = asso_functions\n self.initialize = initialize\n self.copt_accelerate = copt_accelerate\n self.l_pen_EN = l_pen_EN\n self.l_pen_SGL_beta = l_pen_SGL_beta\n self.l_pen_SGL_gamma = l_pen_SGL_gamma\n self.eta_elastic_net = eta_elastic_net\n self.eta_sp_gp_l1 = eta_sp_gp_l1\n self.ENet = ElasticNet(l_pen_EN, eta_elastic_net)\n self._fitted = False\n self.compute_obj = compute_obj\n self.MC_sep = MC_sep\n\n # Attributes that will be instantiated afterwards\n self.n_samples = None\n self.n_time_indep_features = None\n self.n_long_features = None\n self.S = None\n self.T_u = None\n self.theta = {\n \"beta_0\": np.empty(1),\n \"beta_1\": np.empty(1),\n \"long_cov\": np.empty(1),\n \"phi\": np.empty(1),\n \"xi\": np.empty(1),\n \"baseline_hazard\": pd.Series(),\n \"gamma_0\": np.empty(1),\n \"gamma_1\": np.empty(1)\n }\n\n @property\n def asso_functions(self):\n return self._asso_functions\n\n @asso_functions.setter\n def asso_functions(self, val):\n if not (val == 'all' or set(val).issubset({'lp', 're', 'tps', 'ce'})):\n raise ValueError(\"``asso_functions`` must be either 'all', or a \"\n \"`list` in ['lp', 're', 
'tps', 'ce']\")\n self._asso_functions = val\n\n @property\n def copt_accelerate(self):\n return self._copt_accelerate\n\n @copt_accelerate.setter\n def copt_accelerate(self, val):\n if self.warm_start:\n warnings.warn(\"Careful using simultaneously ``warm_start`` and \"\n \"``copt_accelerate``: warmstart can diminish FISTA \"\n \"acceleration effectiveness\")\n self._copt_accelerate = val\n\n @property\n def fitted(self):\n return self._fitted\n\n @staticmethod\n def _rel_theta(theta, prev_theta, eps):\n \"\"\"\n #TODO\n :param theta:\n :param prev_theta:\n :param eps:\n :return:\n \"\"\"\n rel = 0\n for key_ in theta.keys():\n tmp = np.linalg.norm(theta[key_] - prev_theta[key_]) / \\\n (np.linalg.norm(theta[key_]) + eps)\n rel = max(rel, tmp)\n return rel\n\n @staticmethod\n def _log_lik(pi_xi, f_mean):\n \"\"\"Computes the approximation of the likelihood of the lights model\n\n Parameters\n ----------\n pi_xi : `np.ndarray`, shape=(n_samples,)\n Probability estimates for being on the high-risk group given\n time-independent features\n\n f_mean : `np.ndarray`, shape=(n_samples, K)\n The mean value of f(Y, T, delta| S, G ; theta) over S\n\n Returns\n -------\n prb : `float`\n The approximated log-likelihood computed on the given data\n \"\"\"\n pi_xi_ = np.vstack((1 - pi_xi, pi_xi)).T\n prb = np.log((pi_xi_ * f_mean).sum(axis=-1)).mean()\n return prb\n\n def _func_obj(self, pi_xi, f_mean):\n \"\"\"The global objective to be minimized by the QNMCEM algorithm\n (including penalization)\n\n Parameters\n ----------\n pi_xi : `np.ndarray`, shape=(n_samples,)\n Probability estimates for being on the high-risk group given\n time-independent features\n\n f_mean : `np.ndarray`, shape=(n_samples, K)\n The mean value of f(Y, T, delta| S, G ; theta) over S\n\n Returns\n -------\n output : `float`\n The value of the global objective to be minimized\n \"\"\"\n p, L = self.n_time_indep_features, self.n_long_features\n eta_sp_gp_l1 = self.eta_sp_gp_l1\n l_pen_SGL_beta = self.l_pen_SGL_beta\n l_pen_SGL_gamma = self.l_pen_SGL_gamma\n theta = self.theta\n log_lik = self._log_lik(pi_xi, f_mean)\n # xi elastic net penalty\n xi = theta[\"xi\"]\n xi_pen = self.ENet.pen(xi)\n # beta sparse group l1 penalty\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n groups = np.arange(0, len(beta_0)).reshape(L, -1).tolist()\n SGL1 = SparseGroupL1(l_pen_SGL_beta, eta_sp_gp_l1, groups)\n beta_0_pen = SGL1.pen(beta_0)\n beta_1_pen = SGL1.pen(beta_1)\n # gamma sparse group l1 penalty\n gamma_0, gamma_1 = theta[\"gamma_0\"], theta[\"gamma_1\"]\n gamma_0_x, gamma_1_x = theta[\"gamma_0_x\"], theta[\"gamma_1_x\"]\n gamma_0_pen = self.ENet.pen(gamma_0_x)\n groups = np.arange(0, len(gamma_0)).reshape(L, -1).tolist()\n SGL1 = SparseGroupL1(l_pen_SGL_gamma, eta_sp_gp_l1, groups)\n gamma_0_pen += SGL1.pen(gamma_0)\n gamma_1_pen = self.ENet.pen(gamma_1_x)\n gamma_1_pen += SGL1.pen(gamma_1)\n pen = xi_pen + beta_0_pen + beta_1_pen + gamma_0_pen + gamma_1_pen\n return -log_lik + pen\n\n def _get_proba(self, X):\n \"\"\"Probability estimates for being on the high-risk group given\n time-independent features\n\n Parameters\n ----------\n X : `np.ndarray`, shape=(n_samples, n_time_indep_features)\n The time-independent features matrix\n\n Returns\n -------\n output : `np.ndarray`, shape=(n_samples,)\n Returns the probability of the sample for being on the high-risk\n group given time-independent features\n \"\"\"\n xi_0, xi = self.theta[\"xi_0\"], self.theta[\"xi\"]\n u = xi_0 + X.dot(xi)\n return logistic_grad(u)\n\n @staticmethod\n 
def _get_post_proba(pi_xi, Lambda_1):\n \"\"\"Posterior probability estimates for being on the high-risk group\n given all observed data\n\n Parameters\n ----------\n pi_xi : `np.ndarray`, shape=(n_samples,)\n Comes from get_proba function\n\n Lambda_1 : `np.ndarray`, shape=(n_samples, K)\n Approximated integral (see (15) in the lights paper) with\n \\tilde(g)=1\n\n Returns\n -------\n pi_est : `np.ndarray`, shape=(n_samples,)\n Returns the posterior probability of the sample for being on the\n high-risk group given all observed data\n \"\"\"\n tmp = Lambda_1 * np.vstack((1 - pi_xi, pi_xi)).T\n pi_est = tmp[:, 1] / tmp.sum(axis=1)\n return pi_est\n\n @staticmethod\n def intensity(rel_risk, indicator):\n \"\"\"Compute the intensity of f_data_given_latent\n\n Parameters\n ----------\n rel_risk: `np.ndarray`, shape=(N_MC, K, n_samples, J)\n The relative risk\n\n indicator: `np.ndarray`, shape=(n_samples, J)\n The indicator matrix for comparing event times (T == T_u)\n\n Returns\n -------\n intensity : `np.ndarray`, shape=(N_MC, K, n_samples)\n The value of intensity\n \"\"\"\n intensity = (rel_risk * indicator).sum(axis=-1)\n return intensity\n\n @staticmethod\n def survival(rel_risk, indicator):\n \"\"\"Computes the survival function\n\n Parameters\n ----------\n rel_risk: `np.ndarray`, shape=(N_MC, K, n_samples, J)\n The relative risk\n\n indicator: `np.ndarray`, shape=(n_samples, J)\n The indicator matrix for comparing event times (T <= T_u)\n\n Returns\n -------\n survival : `np.ndarray`, shape=(n_samples, K, N_MC)\n The value of the survival function\n \"\"\"\n survival = np.exp(-(rel_risk * indicator).sum(axis=-1).T)\n return survival\n\n def f_y_given_latent(self, extracted_features, g3):\n \"\"\"Computes the density of the longitudinal processes given latent\n variables\n\n Parameters\n ----------\n extracted_features : `tuple, tuple`,\n The extracted features from longitudinal data.\n Each tuple is a combination of fixed-effect design features,\n random-effect design features, outcomes, number of the longitudinal\n measurements for all subject or arranged by l-th order.\n\n g3 : `list` of n_samples `np.array`s with shape=(K, n_i, N_MC)\n The values of g3 function\n\n Returns\n -------\n f_y : `np.ndarray`, shape=(n_samples, K, N_MC)\n The value of the f(Y | S, G ; theta)\n \"\"\"\n (U_list, V_list, y_list, N_list) = extracted_features[0]\n n_samples, n_long_features = self.n_samples, self.n_long_features\n phi = self.theta[\"phi\"]\n N_MC = g3[0].shape[2]\n K = 2 # 2 latent groups\n f_y = np.ones(shape=(n_samples, K, N_MC))\n for i in range(n_samples):\n n_i, y_i, M_iS = sum(N_list[i]), y_list[i], g3[i]\n inv_Phi_i = [[phi[l, 0]] * N_list[i][l] for l in\n range(n_long_features)]\n inv_Phi_i = np.concatenate(inv_Phi_i).reshape(-1, 1)\n f_y[i] = (1 / (np.sqrt(((2 * np.pi) ** n_i) * np.prod(inv_Phi_i)))\n * np.exp(\n np.sum(-0.5 * ((y_i - M_iS) ** 2) / inv_Phi_i, axis=1)))\n return f_y\n\n def mlmm_density(self, extracted_features):\n \"\"\"Computes the log-likelihood of the multivariate linear mixed model\n\n Parameters\n ----------\n extracted_features : `tuple, tuple`,\n The extracted features from longitudinal data.\n Each tuple is a combination of fixed-effect design features,\n random-effect design features, outcomes, number of the longitudinal\n measurements for all subject or arranged by l-th order.\n\n Returns\n -------\n output : `float`\n The value of the log-likelihood\n \"\"\"\n (U_list, V_list, y_list, N), (U_L, V_L, y_L, N_L) = extracted_features\n n_samples, 
n_long_features = len(U_list), len(U_L)\n theta = self.theta\n D, phi = theta[\"long_cov\"], theta[\"phi\"]\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n beta_stack = np.hstack((beta_0, beta_1))\n\n log_lik = np.zeros((n_samples, 2))\n for i in range(n_samples):\n U_i, V_i, y_i, n_i = U_list[i], V_list[i], y_list[i], sum(N[i])\n inv_Phi_i = [[phi[l, 0]] * N[i][l] for l in range(n_long_features)]\n inv_Sigma_i = np.diag(np.concatenate(inv_Phi_i))\n tmp_1 = multi_dot([V_i, D, V_i.T]) + inv_Sigma_i\n tmp_2 = y_i - U_i.dot(beta_stack)\n\n op1 = n_i * np.log(2 * np.pi)\n op2 = np.log(np.linalg.det(tmp_1))\n op3 = np.diag(multi_dot([tmp_2.T, np.linalg.inv(tmp_1), tmp_2]))\n\n log_lik[i] = np.exp(-.5 * (op1 + op2 + op3))\n\n return log_lik\n\n def f_data_given_latent(self, X, extracted_features, T, T_u, delta, S,\n MC_sep):\n \"\"\"Estimates the data density given latent variables\n\n Parameters\n ----------\n X : `np.ndarray`, shape=(n_samples, n_time_indep_features)\n The time-independent features matrix\n\n extracted_features : `tuple, tuple`,\n The extracted features from longitudinal data.\n Each tuple is a combination of fixed-effect design features,\n random-effect design features, outcomes, number of the longitudinal\n measurements for all subject or arranged by l-th order.\n\n T : `np.ndarray`, shape=(n_samples,)\n Censored times of the event of interest\n\n T_u : `np.ndarray`, shape=(J,)\n The J unique training censored times of the event of interest\n\n delta : `np.ndarray`, shape=(n_samples,)\n Censoring indicator\n\n S : `np.ndarray`, shape=(N_MC, r)\n Set of constructed Monte Carlo samples\n\n MC_sep: `bool`, default=False\n If `False`, we use the same set of MC samples for all subject,\n otherwise we sample a seperate set of MC samples for each subject\n\n Returns\n -------\n f : `np.ndarray`, shape=(n_samples, K, N_MC)\n The value of the f(Y, T, delta| S, G ; theta)\n \"\"\"\n theta, alpha = self.theta, self.fixed_effect_time_order\n baseline_hazard, phi = theta[\"baseline_hazard\"], theta[\"phi\"]\n E_func = EstepFunctions(X, T, T_u, delta, extracted_features, alpha,\n self.asso_functions, theta, MC_sep)\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n gamma_0, gamma_1 = theta[\"gamma_0\"], theta[\"gamma_1\"]\n gamma_0_x, gamma_1_x = theta[\"gamma_0_x\"], theta[\"gamma_1_x\"]\n g1 = E_func.g1(S, gamma_0, gamma_0_x, beta_0,\n gamma_1, gamma_1_x, beta_1, False)\n g3 = E_func.g3(S, beta_0, beta_1)\n baseline_val = baseline_hazard.values.flatten()\n rel_risk = g1.swapaxes(0, 2) * baseline_val\n _, ind_1, ind_2 = get_times_infos(T, T_u)\n intensity = self.intensity(rel_risk, ind_1)\n survival = self.survival(rel_risk, ind_2)\n f = (intensity ** delta).T * survival\n if not self.MC_sep:\n f_y = self.f_y_given_latent(extracted_features, g3)\n f *= f_y\n return f\n\n def predict_marker(self, X, Y, prediction_times=None):\n \"\"\"Marker rule of the lights model for being on the high-risk group\n\n Parameters\n ----------\n X : `np.ndarray`, shape=(n_samples, n_time_indep_features)\n The time-independent features matrix\n\n Y : `pandas.DataFrame`, shape=(n_samples, n_long_features)\n The longitudinal data. 
Each element of the dataframe is\n a pandas.Series\n\n prediction_times : `np.ndarray`, shape=(n_samples,), default=None\n Times for prediction, that is up to which one has longitudinal data.\n If `None`, takes the last measurement times in Y\n\n Returns\n -------\n marker : `np.ndarray`, shape=(n_samples,)\n Returns the marker rule of the sample for being on the high-risk\n group\n \"\"\"\n if self._fitted:\n n_samples = X.shape[0]\n theta, alpha = self.theta, self.fixed_effect_time_order\n ext_feat = extract_features(Y, alpha)\n last_measurement = np.array(list(map(max, ext_feat[0][2])))\n if prediction_times is None:\n prediction_times = last_measurement\n else:\n if not (prediction_times > last_measurement).all():\n raise ValueError('Prediction times must be greater than the'\n ' last measurement times for each subject')\n\n # predictions for alive subjects only\n delta_prediction = np.zeros(n_samples)\n T_u = self.T_u\n f = self.f_data_given_latent(X, ext_feat, prediction_times, T_u,\n delta_prediction, self.S, self.MC_sep)\n pi_xi = self._get_proba(X)\n marker = self._get_post_proba(pi_xi, f.mean(axis=-1))\n return marker\n else:\n raise ValueError('You must fit the model first')\n\n def _update_theta(self, **kwargs):\n \"\"\"Update class attributes corresponding to lights model parameters\n \"\"\"\n for key, value in kwargs.items():\n if key in [\"long_cov\", \"phi\", \"baseline_hazard\",\n \"beta_0\", \"beta_1\", \"gamma_0\", \"gamma_1\",\n \"gamma_0_x\", \"gamma_1_x\"]:\n self.theta[key] = value\n elif key == \"xi\":\n xi_0, xi = get_xi_from_xi_ext(value, self.fit_intercept)\n self.theta[\"xi_0\"], self.theta[\"xi\"] = xi_0, xi\n else:\n raise ValueError('Parameter {} is not defined'.format(key))\n\n def fit(self, X, Y, T, delta):\n \"\"\"Fits the lights model\n\n Parameters\n ----------\n X : `np.ndarray`, shape=(n_samples, n_time_indep_features)\n The time-independent features matrix\n\n Y : `pandas.DataFrame`, shape=(n_samples, n_long_features)\n The longitudinal data. 
Each element of the dataframe is\n a pandas.Series\n\n T : `np.ndarray`, shape=(n_samples,)\n Censored times of the event of interest\n\n delta : `np.ndarray`, shape=(n_samples,)\n Censoring indicator\n \"\"\"\n self._start_solve()\n verbose = self.verbose\n max_iter = self.max_iter\n print_every = self.print_every\n tol = self.tol\n warm_start = self.warm_start\n fit_intercept = self.fit_intercept\n alpha = self.fixed_effect_time_order\n n_samples, p = X.shape\n L = Y.shape[1]\n self.n_samples = n_samples\n self.n_time_indep_features = p\n self.n_long_features = L\n q_l = alpha + 1\n r_l = 2 # Affine random effects\n if fit_intercept:\n p += 1\n\n if self.asso_functions == 'all':\n self.asso_functions = ['lp', 're', 'tps', 'ce']\n asso_functions = self.asso_functions\n nb_asso_param = len(asso_functions)\n if 're' in asso_functions:\n nb_asso_param += 1\n N = 10 # Number of initial Monte Carlo sample for S\n\n X = normalize(X) # Normalize time-independent features\n ext_feat = extract_features(Y, alpha) # Features extraction\n T_u = np.unique(T)\n self.T_u = T_u\n J, ind_1, ind_2 = get_times_infos(T, T_u)\n\n # Initialization\n # TODO: for debugging and update hyper-params if not useful\n xi_ext = .5 * np.concatenate((np.ones(p), np.zeros(p)))\n\n if self.initialize:\n # Initialize longitudinal submodels\n mlmm = MLMM(max_iter=max_iter, verbose=verbose, tol=tol,\n print_every=print_every, fixed_effect_time_order=alpha)\n mlmm.fit(ext_feat)\n beta = mlmm.fixed_effect_coeffs\n D = mlmm.long_cov\n phi = mlmm.phi\n est = initialize_asso_params(X, T, delta)\n time_indep_cox_coeffs, baseline_hazard = est\n else:\n # Fixed initialization\n q = q_l * L\n r = r_l * L\n beta = np.zeros((q, 1))\n D = np.diag(np.ones(r))\n phi = np.ones((L, 1))\n time_indep_cox_coeffs = np.zeros(p)\n baseline_hazard = pd.Series(data=.5 * np.ones(J), index=T_u)\n\n # TODO: for debugging and update hyper-params if not useful\n gamma_0_x = time_indep_cox_coeffs.reshape(-1, 1)\n gamma_0_x_ext = get_ext_from_vect(gamma_0_x)\n gamma_0 = 1e-4 * np.ones((L * nb_asso_param, 1))\n gamma_1_x = gamma_0_x.copy()\n gamma_1_x_ext = gamma_0_x_ext.copy()\n gamma_1 = gamma_0.copy()\n beta_0 = beta.reshape(-1, 1)\n beta_1 = beta_0.copy()\n\n self._update_theta(beta_0=beta_0, beta_1=beta_1, xi=xi_ext,\n gamma_0=gamma_0, gamma_1=gamma_1,\n gamma_0_x=gamma_0_x, gamma_1_x=gamma_1_x, long_cov=D,\n phi=phi, baseline_hazard=baseline_hazard)\n\n # Stopping criteria and bounds vector for the L-BGFS-B algorithm\n maxiter, pgtol = 60, 1e-5\n bounds_xi = [(0, None)] * 2 * p\n bounds_gamma_time_indep = [(0, None)] * 2 * p\n\n # Instanciates E-step and M-step functions\n E_func = EstepFunctions(X, T, T_u, delta, ext_feat, alpha,\n asso_functions, self.theta, self.MC_sep)\n F_func = MstepFunctions(fit_intercept, X, T, delta, L, p, self.l_pen_EN,\n self.eta_elastic_net, alpha, asso_functions)\n\n S = E_func.construct_MC_samples(N)\n f = self.f_data_given_latent(X, ext_feat, T, self.T_u, delta, S,\n self.MC_sep)\n Lambda_1 = E_func.Lambda_g(np.ones(shape=(n_samples, 2, 2 * N)), f)\n pi_xi = self._get_proba(X)\n\n # Store init values\n if self.compute_obj:\n f_mean = f.mean(axis=-1)\n if self.MC_sep:\n f_mean *= self.mlmm_density(ext_feat)\n obj = self._func_obj(pi_xi, f_mean)\n self.history.update(n_iter=0, obj=obj,\n rel_obj=np.inf, theta=self.theta)\n else:\n self.history.update(n_iter=0, theta=self.theta)\n\n prev_theta = self.theta.copy()\n rel_theta_list = [0] * 4\n\n if verbose:\n self.history.print_history()\n\n for n_iter in range(1, max_iter + 
1):\n\n # E-Step\n pi_est = self._get_post_proba(pi_xi, Lambda_1)\n E_g4 = E_func.Eg(E_func.g4(S), Lambda_1, pi_xi, f)\n E_g5 = E_func.Eg(E_func.g5(S), Lambda_1, pi_xi, f)\n\n def E_g1(gamma_0_, gamma_0_x_, beta_0_,\n gamma_1_, gamma_1_x_, beta_1_):\n return E_func.Eg(\n E_func.g1(S, gamma_0_, gamma_0_x_, beta_0_, gamma_1_,\n gamma_1_x_, beta_1_), Lambda_1, pi_xi, f)\n\n def E_log_g1(gamma_0_, gamma_0_x_, beta_0_,\n gamma_1_, gamma_1_x_, beta_1_):\n return E_func.Eg(\n np.log(E_func.g1(S, gamma_0_, gamma_0_x_, beta_0_, gamma_1_,\n gamma_1_x_, beta_1_)), Lambda_1, pi_xi, f)\n\n def E_g6(gamma_0_, gamma_0_x_, beta_0_,\n gamma_1_, gamma_1_x_, beta_1_):\n return E_func.Eg(\n E_func.g6(S, gamma_0_, gamma_0_x_, beta_0_, gamma_1_,\n gamma_1_x_, beta_1_),\n Lambda_1, pi_xi, f)\n\n # M-Step\n D = E_g4.sum(axis=0) / n_samples # D update\n\n if warm_start:\n xi_init = xi_ext\n beta_init = [beta_0.flatten(), beta_1.flatten()]\n gamma_x_init = [gamma_0_x_ext.flatten(),\n gamma_1_x_ext.flatten()]\n gamma_init = [gamma_0.flatten(), gamma_1.flatten()]\n else:\n xi_init = np.zeros(2 * p)\n beta_init = [np.zeros(L * q_l),\n np.zeros(L * q_l)]\n gamma_x_init = [np.zeros(2 * p), np.zeros(2 * p)]\n gamma_init = [np.zeros(L * nb_asso_param),\n np.zeros(L * nb_asso_param)]\n\n # xi update\n xi_ext = fmin_l_bfgs_b(\n func=lambda xi_ext_: F_func.P_pen_func(pi_est, xi_ext_),\n x0=xi_init,\n fprime=lambda xi_ext_: F_func.grad_P_pen(pi_est, xi_ext_),\n disp=False, bounds=bounds_xi, maxiter=maxiter, pgtol=pgtol)[0]\n\n # beta_0 update\n eta_sp_gp_l1 = self.eta_sp_gp_l1\n l_pen_SGL_beta = self.l_pen_SGL_beta\n pi_est_K = np.vstack((1 - pi_est, pi_est))\n gamma_K = [gamma_0, gamma_1]\n groups = np.arange(0, len(beta_0)).reshape(L, -1).tolist()\n prox = SparseGroupL1(l_pen_SGL_beta, eta_sp_gp_l1, groups).prox\n args_all = {\"pi_est\": pi_est_K, \"E_g5\": E_g5, \"E_g4\": E_g4,\n \"gamma\": gamma_K, \"baseline_hazard\": baseline_hazard,\n \"extracted_features\": ext_feat, \"phi\": phi,\n \"ind_1\": ind_1, \"ind_2\": ind_2}\n args_0 = {\"E_g1\": lambda v: E_g1(gamma_0, gamma_0_x, v,\n gamma_1, gamma_1_x, beta_1),\n \"group\": 0}\n beta_0_prev = beta_0.copy()\n copt_max_iter = 1\n beta_0 = copt.minimize_proximal_gradient(\n fun=F_func.R_func, x0=beta_init[0], prox=prox,\n max_iter=copt_max_iter,\n args=[{**args_all, **args_0}], jac=F_func.grad_R,\n step=lambda x: 2e-3,\n accelerated=self.copt_accelerate).x.reshape(-1, 1)\n\n # beta_1 update\n args_1 = {\"E_g1\": lambda v: E_g1(gamma_0, gamma_0_x, beta_0_prev,\n gamma_1, gamma_1_x, v),\n \"group\": 1}\n beta_1 = copt.minimize_proximal_gradient(\n fun=F_func.R_func, x0=beta_init[1], prox=prox,\n max_iter=copt_max_iter,\n args=[{**args_all, **args_1}], jac=F_func.grad_R,\n step=lambda x: 2e-3,\n accelerated=self.copt_accelerate).x.reshape(-1, 1)\n\n # gamma_0 update\n beta_K = [beta_0, beta_1]\n gamma = [gamma_0, gamma_1]\n gamma_x = [gamma_0_x, gamma_1_x]\n groups = np.arange(0, len(gamma_0)).reshape(L, -1).tolist()\n l_pen_SGL_gamma = self.l_pen_SGL_gamma\n prox = SparseGroupL1(l_pen_SGL_gamma, eta_sp_gp_l1, groups).prox\n args_all = {\"pi_est\": pi_est_K, \"E_g5\": E_g5,\n \"phi\": phi, \"beta\": beta_K,\n \"baseline_hazard\": baseline_hazard,\n \"extracted_features\": ext_feat,\n \"ind_1\": ind_1, \"ind_2\": ind_2,\n \"gamma\": gamma,\n \"gamma_x\": gamma_x}\n args_0_x = {\"E_g1\": lambda v: E_g1(gamma_0, v, beta_0,\n gamma_1, gamma_1_x, beta_1),\n \"E_log_g1\": lambda v: E_log_g1(gamma_0, v, beta_0,\n gamma_1, gamma_1_x, beta_1),\n \"E_g6\": lambda v: E_g6(gamma_0, v, 
beta_0,\n gamma_1, gamma_1_x, beta_1),\n \"group\": 0}\n gamma_0_prev = gamma_0.copy()\n gamma_0_x_prev = gamma_0_x.copy()\n # time independence part\n gamma_0_x_ext = fmin_l_bfgs_b(\n func=lambda gamma_0_x_ext_: F_func.Q_x_pen_func(\n gamma_0_x_ext_, *[{**args_all, **args_0_x}]),\n x0=gamma_x_init[0],\n fprime=lambda gamma_0_x_ext_: F_func.grad_Q_x_pen(\n gamma_0_x_ext_, *[{**args_all, **args_0_x}]),\n disp=False, bounds=bounds_gamma_time_indep, maxiter=maxiter,\n pgtol=pgtol)[0]\n gamma_0_x = get_vect_from_ext(gamma_0_x_ext).reshape(-1, 1)\n\n # time dependence part\n args_0 = {\"E_g1\": lambda v: E_g1(v, gamma_0_x, beta_0,\n gamma_1, gamma_1_x, beta_1),\n \"E_log_g1\": lambda v: E_log_g1(v, gamma_0_x, beta_0,\n gamma_1, gamma_1_x,\n beta_1),\n \"E_g6\": lambda v: E_g6(v, gamma_0_x, beta_0,\n gamma_1, gamma_1_x, beta_1),\n \"group\": 0}\n gamma_0 = copt.minimize_proximal_gradient(\n fun=F_func.Q_func, x0=gamma_init[0], prox=prox,\n max_iter=copt_max_iter,\n args=[{**args_all, **args_0}], jac=F_func.grad_Q,\n step=lambda x: 2e-3,\n accelerated=self.copt_accelerate).x.reshape(-1, 1)\n\n # gamma_1 update\n gamma_1_x_prev = gamma_1_x.copy()\n args_1_x = {\"E_g1\": lambda v: E_g1(gamma_0_prev, gamma_0_x_prev,\n beta_0, gamma_1, v, beta_1),\n \"E_log_g1\": lambda v: E_log_g1(gamma_0_prev, gamma_0_x_prev,\n beta_0, gamma_1, v, beta_1),\n \"E_g6\": lambda v: E_g6(gamma_0_prev, gamma_0_x_prev,\n beta_0, gamma_1, v, beta_1),\n \"group\": 1}\n # time independence part\n gamma_1_x_ext = fmin_l_bfgs_b(\n func=lambda gamma_1_x_ext_: F_func.Q_x_pen_func(\n gamma_1_x_ext_, *[{**args_all, **args_1_x}]),\n x0=gamma_x_init[1],\n fprime=lambda gamma_1_indep_ext_: F_func.grad_Q_x_pen(\n gamma_1_indep_ext_, *[{**args_all, **args_1_x}]),\n disp=False, bounds=bounds_gamma_time_indep, maxiter=maxiter,\n pgtol=pgtol)[\n 0]\n gamma_1_x = get_vect_from_ext(gamma_1_x_ext).reshape(-1, 1)\n\n # time dependence part\n args_1 = {\"E_g1\": lambda v: E_g1(gamma_0_prev, gamma_0_x_prev,\n beta_0, v, gamma_1_x, beta_1),\n \"E_log_g1\": lambda v: E_log_g1(gamma_0_prev, gamma_0_x_prev,\n beta_0, v, gamma_1_x, beta_1),\n \"E_g6\": lambda v: E_g6(gamma_0_prev, gamma_0_x_prev,\n beta_0, v, gamma_1_x, beta_1),\n \"group\": 1}\n gamma_1 = copt.minimize_proximal_gradient(\n fun=F_func.Q_func, x0=gamma_init[1], prox=prox,\n max_iter=copt_max_iter,\n args=[{**args_all, **args_1}], jac=F_func.grad_Q,\n step=lambda x: 2e-3,\n accelerated=self.copt_accelerate).x.reshape(-1, 1)\n\n # beta, gamma needs to be updated before the baseline\n self._update_theta(beta_0=beta_0, beta_1=beta_1,\n gamma_0=gamma_0, gamma_1=gamma_1,\n gamma_0_x=gamma_0_x, gamma_1_x=gamma_1_x)\n E_func.theta = self.theta\n E_g1 = E_func.Eg(E_func.g1(S, gamma_0, gamma_0_x, beta_0,\n gamma_1, gamma_1_x, beta_1),\n Lambda_1, pi_xi, f)\n\n # baseline hazard update\n baseline_hazard = pd.Series(\n data=((((ind_1 * 1).T * delta).sum(axis=1)) /\n ((E_g1.T * (ind_2 * 1).T).swapaxes(0, 1) * pi_est_K)\n .sum(axis=2).sum(axis=1)), index=T_u)\n\n # phi update\n beta_stack = np.hstack((beta_0, beta_1))\n (U_L, V_L, y_L, N_L) = ext_feat[1]\n phi = np.zeros((L, 1))\n for l in range(L):\n pi_est_ = np.concatenate([[pi_est[i]] * N_L[l][i]\n for i in range(n_samples)])\n pi_est_stack = np.vstack((1 - pi_est_, pi_est_)).T # K = 2\n N_l, y_l, U_l, V_l = sum(N_L[l]), y_L[l], U_L[l], V_L[l]\n beta_l = beta_stack[q_l * l: q_l * (l + 1)]\n E_g5_l = E_g5.reshape(n_samples, L, q_l)[:, l].reshape(-1, 1)\n E_g4_l = block_diag(E_g4[:, r_l * l: r_l * (l + 1),\n r_l * l: r_l * (l + 1)])\n tmp = y_l 
- U_l.dot(beta_l)\n phi_l = (pi_est_stack * (\n tmp * (tmp - 2 * (V_l.dot(E_g5_l))))).sum() \\\n + np.trace(V_l.T.dot(V_l).dot(E_g4_l))\n phi[l] = phi_l / N_l\n\n self._update_theta(phi=phi, baseline_hazard=baseline_hazard,\n long_cov=D, xi=xi_ext)\n pi_xi = self._get_proba(X)\n E_func.theta = self.theta\n S = E_func.construct_MC_samples(N)\n f = self.f_data_given_latent(X, ext_feat, T, T_u, delta, S, self.MC_sep)\n\n rel_theta = self._rel_theta(self.theta, prev_theta, 1e-2)\n rel_theta_list.pop(0)\n rel_theta_list.append(rel_theta)\n prev_theta = self.theta.copy()\n if n_iter % print_every == 0:\n if self.compute_obj:\n prev_obj = obj\n f_mean = f.mean(axis=-1)\n if self.MC_sep:\n f_mean *= self.mlmm_density(ext_feat)\n obj = self._func_obj(pi_xi, f_mean)\n rel_obj = abs(obj - prev_obj) / abs(prev_obj)\n self.history.update(n_iter=n_iter, theta=self.theta,\n obj=obj, rel_obj=rel_obj)\n else:\n self.history.update(n_iter=n_iter, theta=self.theta)\n if verbose:\n self.history.print_history()\n if (n_iter + 1 > max_iter) or (rel_theta < tol):\n self._fitted = True\n self.S = S # useful for predictions\n break\n else:\n # Update for next iteration\n Lambda_1 = E_func.Lambda_g(np.ones((n_samples, 2, 2 * N)), f)\n\n self._end_solve()\n\n def score(self, X, Y, T, delta):\n \"\"\"Computes the C-index score with the trained parameters on the given\n data\n\n Parameters\n ----------\n X : `np.ndarray`, shape=(n_samples, n_time_indep_features)\n The time-independent features matrix\n\n Y : `pandas.DataFrame`, shape=(n_samples, n_long_features)\n The longitudinal data. Each element of the dataframe is\n a pandas.Series\n\n T : `np.ndarray`, shape=(n_samples,)\n Censored times of the event of interest\n\n delta : `np.ndarray`, shape=(n_samples,)\n Censoring indicator\n\n Returns\n -------\n output : `float`\n The C-index score computed on the given data\n \"\"\"\n if self._fitted:\n return c_index_score(T, self.predict_marker(X, Y), delta)\n else:\n raise ValueError('You must fit the model first')\n","sub_path":"lights/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":37756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"423114850","text":"import logging\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import User, Permission\n\nfrom wakawaka.models import Revision\n\nfrom wikiglue.mail import send_wiki_notification\n\n\nlogger = logging.getLogger(__name__)\n\n@receiver(post_save, sender=User)\ndef add_wiki_permissions(sender, instance, created, **kwargs):\n \"\"\"This signal handler makes sure, that every user who registers has the right to CRUD wiki \n pages \n \"\"\"\n logger.info(\"Receiving pre_save for user %s\" % instance)\n if not instance.has_perm('change_wikipage'):\n try:\n wiki_chg = Permission.objects.get(codename=\"change_wikipage\")\n wiki_add = Permission.objects.get(codename=\"add_wikipage\")\n wiki_del = Permission.objects.get(codename=\"delete_wikipage\")\n rev_chg = Permission.objects.get(codename=\"change_revision\")\n rev_add = Permission.objects.get(codename=\"add_revision\")\n rev_del = Permission.objects.get(codename=\"delete_revision\")\n instance.user_permissions.add(wiki_chg, wiki_add, wiki_del, rev_chg, rev_add, rev_del)\n except Permission.DoesNotExist:\n # this may happen during an inital \"createsuperuser\" during \"syncdb\"\n pass\n\n\n@receiver(post_save, sender=Revision)\ndef send_wiki_changes_notifications(sender, 
instance, created, **kwargs):\n \"\"\"\n Sends a mail if a wiki page is edited (which actually means, 'when a revision\n is created'), to all users which have the field 'notify_recent_changes' set to True in their\n user profile. \n \"\"\"\n logging.info(\"Receiving post_save for revision %s\" % instance)\n\n # Revision objects are only relevant to us, when created (as every rev represents a page edit)\n if created:\n # looks sth like this [(u'mail@example.com',), (u'foo@example.com',)]\n emails = User.objects.filter(userprofile__notify_recent_changes=True).values_list('email')\n send_wiki_notification([tupl[0] for tupl in emails], instance)\n","sub_path":"wikiglue/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"615979354","text":"\"\"\"\nClass to read and store all hrts data\n\"\"\"\n\nimport logging\n\nfrom models.signal_base import SignalBase\n\nlogger = logging.getLogger(__name__)\n\n# ----------------------------\n__author__ = \"B. VIOLA\"\n# ----------------------------\n\n\nclass HRTSData:\n\n # ------------------------\n def __init__(self, constants):\n \"\"\"\n Init function\n\n :param constants: instance of Kg1Consts class\n \"\"\"\n self.constants = constants\n # density\n self.density = {}\n\n # ------------------------\n def read_data(self, shot_no, read_uid=\"JETPPF\"):\n \"\"\"\n Read in HRTX data\n\n\n :param shot_no: shot number\n \"\"\"\n\n for hrts_chan in self.constants.hrts.keys():\n node_name = self.constants.hrts[hrts_chan]\n hrts_signal = SignalBase(self.constants)\n dda = node_name[: node_name.find(\"/\")]\n dtype = node_name[node_name.find(\"/\") + 1 :]\n _ = hrts_signal.read_data_ppf(\n dda, dtype, shot_no, read_bad=True, read_uid=read_uid\n )\n # status = hrts_signal.read_data_ppf(self, dda, dtype, int(shot_no), seq=0,read_uid)\n # hrts_signal.read_data_ppf(self, dda, dtype, shot_no, read_uid=\"JETPPF\", seq=0)\n\n if hrts_signal.data is not None:\n\n self.density[hrts_chan] = hrts_signal\n","sub_path":"python/models/hrts_data.py","file_name":"hrts_data.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"109585868","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Regularized Information Maximization.\n\nAs introduced in [1]_.\n\nReferences\n----------\n\n .. [1] Discriminative clustering by regularized information maximization,\n by Gomes, R. and Krause, A. and Perona, P., NIPS 2010\n\"\"\"\n\nimport numpy as np\nimport theano\n\nfrom breze.arch.model.rim import Rim as _Rim\nfrom breze.learn.base import (\n UnsupervisedBrezeWrapperBase, TransformBrezeWrapperMixin)\n\n\nclass Rim(_Rim, UnsupervisedBrezeWrapperBase, TransformBrezeWrapperMixin):\n \"\"\"Class for regularized information maximization.\n\n Attributes\n ----------\n\n parameters : ParamterSet object\n Parameters of the model.\n\n n_inpt : integer\n Input dimensionality of the data.\n\n n_clusters : integer\n Amount of clusters to use.\n\n c_rim : float\n Value indicating the regularization strength.\n\n optimizer : string or pair\n Can be either a string or a pair. In any case,\n ``climin.util.optimizer`` is used to construct an optimizer. In the\n case of a string, the string is used as an identifier for the\n optimizer which is then instantiated with default arguments. 
If a\n pair, expected to be ``(identifier, kwargs)`` for more fine control\n of the optimizer.\n\n max_iter : integer\n Maximum number of optimization iterations to perform.\n\n verbose : boolean\n Flag indicating whether to print out information during fitting.\"\"\"\n\n transform_expr_name = 'output'\n\n def __init__(self, n_inpt, n_clusters, c_rim, optimizer='lbfgs',\n max_iter=1000, verbose=False):\n \"\"\"Create a Rim object.\n\n Paramters\n ---------\n\n n_inpt : integer\n Input dimensionality of the data.\n\n n_clusters : integer\n Amount of clusters to use.\n\n c_rim : float\n Value indicating the regularization strength.\n\n optimizer : string or pair\n Can be either a string or a pair. In any case,\n ``climin.util.optimizer`` is used to construct an optimizer. In the\n case of a string, the string is used as an identifier for the\n optimizer which is then instantiated with default arguments. If a\n pair, expected to be ``(identifier, kwargs)`` for more fine control\n of the optimizer.\n\n max_iter : integer\n Maximum number of optimization iterations to perform.\n\n verbose : boolean\n Flag indicating whether to print out information during fitting.\n \"\"\"\n super(Rim, self).__init__(\n n_inpt, n_clusters, c_rim)\n self.f_transform = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n\n self.optimizer = optimizer\n self.max_iter = max_iter\n self.verbose = verbose\n","sub_path":"breze/learn/rim.py","file_name":"rim.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"516369117","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\n\nsent_len = 4\n\ntokens = [\n 'i like it '.split(' '), #positive\n 'i hate it '.split(' '), #negative\n 'i don\\'t hate it'.split(' '), #positive\n 'i don\\'t like it'.split(' '), #negative\n ]\nsentiments = [\n [1],\n [0],\n [1],\n [0]\n ]\n\nvocab = sorted({ token for sent in tokens for token in sent })\ntoken2index = { token: index for (index, token) in enumerate(vocab) }\nindexes = [ [ token2index[token] for token in sent ] for sent in tokens ]\n\ntoken_prefixes = sorted({ tuple(sent[:i]) for sent in tokens for i in range(sent_len+1) })\nindex_prefixes = [ [ token2index[token] for token in prefix ] for prefix in token_prefixes ]\n\nembedding_size = 2\nstate_size = 2\n\nclass Cell(tf.nn.rnn_cell.RNNCell):\n def __init__(self):\n super(Cell, self).__init__()\n self.W = None\n self.b = None\n\n @property\n def state_size(self):\n return state_size\n\n @property\n def output_size(self):\n return state_size\n\n def build(self, inputs_shape):\n self.W = self.add_variable('W', [state_size+embedding_size, state_size], tf.float32, tf.random_normal_initializer(stddev=0.1, seed=0))\n self.b = self.add_variable('b', [state_size], tf.float32, tf.zeros_initializer())\n self.built = True\n \n def call(self, x, curr_state):\n state_input = tf.concat([ curr_state, x ], axis=1)\n new_state = tf.tanh(tf.matmul(state_input, self.W) + self.b)\n return (new_state, new_state)\n\ng = tf.Graph()\nwith g.as_default():\n sents = tf.placeholder(tf.int32, [None, None], 'sents')\n targets = tf.placeholder(tf.float32, [None, 1], 'targets')\n \n batch_size = tf.shape(sents)[0]\n \n embedding_matrix = tf.get_variable('embedding_matrix', [ len(vocab), embedding_size ], tf.float32, tf.random_normal_initializer(stddev=0.01, seed=0))\n embedded = tf.nn.embedding_lookup(embedding_matrix, 
sents)\n \n init_state = tf.get_variable('init_state', [ state_size ], tf.float32, tf.random_normal_initializer(stddev=0.1, seed=0))\n batch_init = tf.tile(tf.reshape(init_state, [1, state_size]), [batch_size, 1])\n \n cell = Cell()\n (outputs, state) = tf.nn.dynamic_rnn(cell, embedded, initial_state=batch_init)\n \n W = tf.get_variable('W', [state_size, 1], tf.float32, tf.random_normal_initializer(stddev=0.01, seed=0))\n b = tf.get_variable('b', [1], tf.float32, tf.zeros_initializer())\n logits = tf.matmul(state, W) + b\n probs = tf.sigmoid(logits)\n\n error = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))\n \n step = tf.train.GradientDescentOptimizer(1.0).minimize(error)\n\n init = tf.global_variables_initializer()\n\n g.finalize()\n\n s = tf.Session()\n with s.as_default():#tf.Session() as s:\n s.run([ init ], { })\n\n (fig, ax) = plt.subplots(1, 3)\n plt.ion()\n \n train_errors = list()\n print('epoch', 'train error')\n for epoch in range(1, 2000+1):\n s.run([ step ], { sents: indexes, targets: sentiments })\n\n [ train_error ] = s.run([ error ], { sents: indexes, targets: sentiments })\n train_errors.append(train_error)\n \n if epoch%100 == 0:\n print(epoch, train_error)\n\n [ curr_embeddings ] = s.run([ embedding_matrix ], { })\n\n ax[0].cla()\n for (token, token_vec) in zip(vocab, curr_embeddings.tolist()):\n ax[0].plot(token_vec[0], token_vec[1], linestyle='', marker='o', markersize=10)\n ax[0].text(token_vec[0], token_vec[1], token)\n ax[0].set_xlim(-2, 2)\n ax[0].set_xlabel('x0')\n ax[0].set_ylim(-2, 2)\n ax[0].set_ylabel('x1')\n ax[0].grid(True)\n ax[0].set_title('embeddings')\n \n ax[1].cla()\n for (token_prefix, index_prefix) in zip(token_prefixes, index_prefixes):\n [ curr_prefix_vec ] = s.run([ state ], { sents: [index_prefix] })\n ax[1].plot(curr_prefix_vec[0][0], curr_prefix_vec[0][1], linestyle='', marker='o', markersize=10)\n ax[1].text(curr_prefix_vec[0][0], curr_prefix_vec[0][1], ' '.join(token_prefix))\n ax[1].set_xlim(-1, 1)\n ax[1].set_xlabel('x0')\n ax[1].set_ylim(-1, 1)\n ax[1].set_ylabel('x1')\n ax[1].grid(True)\n ax[1].set_title('prefixes')\n \n ax[2].cla()\n ax[2].plot(np.arange(len(train_errors)), train_errors, color='red', linestyle='-', label='train')\n ax[2].set_xlim(0, 2000)\n ax[2].set_xlabel('epoch')\n ax[2].set_ylim(0.0, 1.0)\n ax[2].set_ylabel('XE') #Cross entropy\n ax[2].grid(True)\n ax[2].set_title('Error progress')\n ax[2].legend()\n \n fig.tight_layout()\n plt.draw()\n plt.pause(0.0001)\n\n print()\n \n [ curr_probs ] = s.run([ probs ], { sents: indexes })\n for (sent, prob) in zip(tokens, curr_probs[:,0].tolist()):\n print(' '.join(sent), round(prob, 2))\n \n print()\n for (sent, index) in zip(tokens, indexes):\n [ curr_prefixes_vec ] = s.run([ outputs ], { sents: [index] })\n print(*sent, *np.round(curr_prefixes_vec, 2)[0], sep=' ')\n\n fig.show()\n","sub_path":"tensorflow/07_-_Recurrent_neural_networks/03_-_Simple_RNN.py","file_name":"03_-_Simple_RNN.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"64055833","text":"import conclave.lang as sal\nfrom conclave.codegen.spark import SparkCodeGen\nfrom conclave import CodeGenConfig\nfrom conclave.utils import *\nfrom conclave.comp import dag_only\n\n\ndef setup():\n\n colsIn1 = [\n defCol(\"a\", \"INTEGER\", [1]),\n defCol(\"b\", \"INTEGER\", [1]),\n defCol(\"c\", \"INTEGER\", [1]),\n defCol(\"d\", \"INTEGER\", [1])\n ]\n\n in1 = sal.create(\"in1\", colsIn1, 
set([1]))\n\n in2 = sal.create(\"in2\", colsIn1, set([1]))\n\n return [in1, in2]\n\n\n@dag_only\ndef agg():\n\n in1 = setup()[0]\n\n agg = sal.aggregate(in1, \"agg\", [\"a\", \"b\"], \"c\", \"sum\", \"agg1\")\n\n out = sal.collect(agg, 1)\n\n return set([in1])\n\n\nif __name__ == \"__main__\":\n\n dag_agg = agg()\n cfg_agg = CodeGenConfig('agg')\n cg_agg = SparkCodeGen(cfg_agg, dag_agg)\n cg_agg.generate('agg', '/tmp')\n","sub_path":"examples/deprecated/pspark_test.py","file_name":"pspark_test.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"340140406","text":"# Happy Holidays fellow Code Warriors!\n# Now, Dasher! Now, Dancer! Now, Prancer, and Vixen! On, Comet! On, Cupid! \n# On, Donder and Blitzen! That's the order Santa wanted his reindeer...right? \n# What do you mean he wants them in order by their last names!? Looks like we need your help Code Warrior!\n\n# Sort Santa's Reindeer\n# Write a function that accepts a sequence of Reindeer names, and returns a sequence with \n# the Reindeer names sorted by their last names.\n\n# Notes:\n# It's guaranteed that each string is composed of two words\n# In case of two identical last names, keep the original order\n\ndef sort_reindeer(reindeer_names):\n import pandas as pd\n df = pd.DataFrame(columns = ['first name','last name'])\n for ppl in reindeer_names:\n name = ppl.split(' ')\n fstn = name[0]\n lstn = name[1]\n dff = pd.DataFrame(columns = ['first name','last name'],\n data = [{'first name':fstn, 'last name':lstn}])\n df = df.append(dff, ignore_index=True)\n df = df.reset_index().sort_values(by= ['last name', 'index'])\n df = df.reset_index(drop=True).drop(columns=['index'],axis=1)\n rows = df.shape[0]\n ans = []\n for row in range(rows):\n name = df.iloc[row][0] + ' ' + df.iloc[row][1]\n ans.append(name)\n return ans\n\n# others' solution\ndef sort_reindeer(reindeer_names):\n return sorted(reindeer_names, key=lambda s:s.split()[1])","sub_path":"SOLVED/sort_santas_reindeer.py","file_name":"sort_santas_reindeer.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"442528850","text":"\"\"\"Remote control support for Apple TV.\"\"\"\nfrom homeassistant.components import remote\n\nfrom homeassistant.const import CONF_NAME\n\nfrom .const import DOMAIN, KEY_API, KEY_POWER, CONF_IDENTIFIER\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up the Apple TV remote platform.\"\"\"\n if not discovery_info:\n return\n\n identifier = discovery_info[CONF_IDENTIFIER]\n name = discovery_info[CONF_NAME]\n api = hass.data[KEY_API][identifier]\n power = hass.data[KEY_POWER][identifier]\n\n async_add_entities([AppleTVRemote(api, power, name)])\n\n\nclass AppleTVRemote(remote.RemoteDevice):\n \"\"\"Device that sends commands to an Apple TV.\"\"\"\n\n def __init__(self, atv, power, name):\n \"\"\"Initialize device.\"\"\"\n self._atv = atv\n self._name = name\n self._power = power\n self._power.listeners.append(self)\n\n @property\n def device_info(self):\n \"\"\"Return the device info.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Apple\",\n \"model\": \"Remote\",\n \"name\": self.name,\n \"sw_version\": \"0.0\",\n \"via_device\": (DOMAIN, self._atv.metadata.device_id),\n }\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._name\n\n 
@property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return \"remote_\" + self._atv.metadata.device_id\n\n @property\n def is_on(self):\n \"\"\"Return true if device is on.\"\"\"\n return self._power.turned_on\n\n @property\n def should_poll(self):\n \"\"\"No polling needed for Apple TV.\"\"\"\n return False\n\n async def async_turn_on(self, **kwargs):\n \"\"\"Turn the device on.\n\n This method is a coroutine.\n \"\"\"\n await self._power.set_power_on(True)\n\n async def async_turn_off(self, **kwargs):\n \"\"\"Turn the device off.\n\n This method is a coroutine.\n \"\"\"\n await self._power.set_power_on(False)\n\n def async_send_command(self, command, **kwargs):\n \"\"\"Send a command to one device.\n\n This method must be run in the event loop and returns a coroutine.\n \"\"\"\n # Send commands in specified order but schedule only one coroutine\n async def _send_commands():\n for single_command in command:\n if not hasattr(self._atv.remote_control, single_command):\n continue\n\n await getattr(self._atv.remote_control, single_command)()\n\n return _send_commands()\n","sub_path":"custom_components/apple_tv/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"232499499","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport datetime\nimport logging\nimport os\nimport re\nimport socket\nimport sys\nimport subprocess\n\n\ndef ip_scanner(to_check):\n failures = []\n for key in to_check.keys():\n print('\\nChecking {} IPs'.format(key.upper()))\n print('-' * 25)\n\n server = to_check[key]\n for ip in server['ip_list']:\n if ip == '255.255.255.0':\n continue\n\n print(' {}'.format(ip))\n logging.info('Checking {} - {}'.format(key, ip))\n\n # make sure command will run on both windows and unix systems\n if os.name == 'nt':\n ping_cmd = 'ping -n 1 -w 2000 {} > nul'.format(ip)\n else:\n ping_cmd = 'ping -c 1 {} > /dev/null'.format(ip)\n status_print(' Pinging ', 1)\n\n if os.system(ping_cmd) != 0:\n status_print(' fail', 2)\n logging.error('{} is not reachable'.format(ip))\n for port in server['ports']:\n status_print(' Port {} '.format(port), 1)\n status_print('n/a', 2)\n failures.append(ip)\n else:\n status_print(' pass', 2)\n ports_failed = port_scanner(ip, server['ports'], server['mode'])\n if ports_failed:\n failures.append(ip)\n print\n\n return failures\n\n\ndef port_scanner(ip, port_list, mode):\n ports_failed = False\n\n for port in port_list:\n status_print(' Port {} '.format(port), 1)\n logging.info('Checking port {}'.format(port))\n address = (ip, port)\n\n try:\n if mode == 'tcp':\n t = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n t.settimeout(2)\n t.connect(address)\n t.close()\n else:\n u = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n u.settimeout(2)\n buf = 1024\n payload = '\\x1b' + 47 * '\\0'\n\n u.sendto(payload, address)\n msg, address = u.recvfrom(buf)\n u.close()\n\n status_print(' pass', 2)\n logging.info('Port {}: open'.format(port))\n\n except socket.error as msg: # Error routine\n ports_failed = True\n status_print(' fail', 2)\n logging.error('IP {} Port {}: closed'.format(ip, port))\n logging.debug('Port {} error message: {}'.format(port, msg))\n\n return ports_failed\n\n\ndef status_print(message, mode):\n if mode == 1:\n sys.stdout.write(message.ljust(15, '.'))\n sys.stdout.flush()\n if mode == 2:\n print(message.rjust(10, '.'))\n\n\ndef extract_ip(string):\n return 
re.findall(r\"(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\", string)\n\n\ndef build_ip_list(base_list):\n p = subprocess.check_output(['getrackinfo', '-v'])\n rack_ips = iter(p.splitlines())\n for line in rack_ips:\n if 'NTP' in line:\n base_list['ntp']['ip_list'] = extract_ip(next(rack_ips))\n elif 'DNS' in line:\n next(rack_ips)\n next(rack_ips)\n base_list['dns']['ip_list'] = extract_ip(next(rack_ips))\n elif 'public' in line:\n base_list['nodes']['ip_list'].extend(extract_ip(line))\n elif 'Remote Ipmi' in line:\n base_list['rmm']['ip_list'].extend(extract_ip(line))\n\n return base_list\n\n\ndef check_hostnames(host_name, ip):\n test = socket.gethostbyname(host_name)\n if test == ip:\n test = 'PASS'\n\n\ndef check_mtu(ip, port):\n routeinfo = subprocess.check_output(['ip', 'route', 'get', ip])\n dev = re.search('.*dev (\\w+) .*', routeinfo).groups()[0]\n mtuinfo = subprocess.check_output(['ip', 'link', 'show', dev])\n mtu = re.search('.*mtu ([0-9]+) .*', mtuinfo).groups()[0]\n print(mtu)\n return int(mtu)\n\n\ndef main():\n any_failed = []\n # init logging for the script\n logging.basicConfig(filename='{}.log'.format(datetime.date.today()),\n format=\"[%(levelname)8s] %(message)s\",\n level=logging.DEBUG\n )\n\n logging.info('=' * 80)\n logging.info('New Run'.center(80))\n logging.info('=' * 80)\n\n # Build IPs list from node\n base_list = {\n \"nodes\": {\n \"mode\": \"tcp\",\n \"ip_list\": [],\n \"ports\": [9094, 9095, 9096, 9097, 9098, 22, 80, 443, 4443]\n },\n \"rmm\": {\n \"mode\": \"tcp\",\n \"ip_list\": [],\n \"ports\": [80, 443, 5123, 7578, 7578,5120,5123]\n },\n \"service_clients\": {\n \"mode\": \"tcp\",\n \"ip_list\": [],\n \"ports\": [3218, 9020, 9021, 9022, 9023, 9024, 9025, 9040]\n },\n \"dns\": {\n \"mode\": \"tcp\",\n \"ip_list\": [],\n \"ports\": [53]\n },\n \"ntp\": {\n \"mode\": \"udp\",\n \"ip_list\": [],\n \"ports\": [123]\n },\n \"smtp\": {\n \"mode\": \"tcp\",\n \"ip_list\": [\"202.238.84.20\"],\n \"ports\": [25]\n },\n \"ad\": {\n \"mode\": \"tcp\",\n \"ip_list\": [\"10.247.134.54\"],\n \"ports\": [389, 636]\n }\n }\n\n to_check = build_ip_list(base_list)\n\n # Iterate through the loaded data and check the IPs and ports\n failed_ips = ip_scanner(to_check)\n if failed_ips:\n any_failed.extend(failed_ips)\n\n if any_failed:\n print('There were issues with the following servers, please check the log for more details:')\n for i in any_failed:\n print(' {}'.format(i))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"portscan.py","file_name":"portscan.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"598171339","text":"from django.conf.urls import url\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns=[\n url(r'^$',views.home,name = 'home'),\n url(r'^search/', views.search_results, name='search_results'),\n url(r'^image/(\\d+)',views.image,name ='image'),\n url(r'^new/image$', views.new_image, name='new-image'),\n url(r'^follow/(\\d+)',views.follow,name=\"follow\"),\n url(r'^edit/profile$', views.update_profile, name='update_profile'),\n url(r'^comment/(\\d+)/$', views.comment, name='comment'),\n url(r'^profile/(\\d+)', views.profile, name='profile'),\n url(r'^likes/(?P\\d+)',views.likes,name ='like'),\n url(r'^api/profile/$', views.ProfileList.as_view(), name='proapi'),\n url(r'^api/project/$', views.ProjectList.as_view(), name='projectapi')\n]\nif settings.DEBUG:\n urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","sub_path":"insta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"215546541","text":"from zoundry.appframework.resources.resourceutils import ZMappedImageList\r\nfrom zoundry.appframework.ui.events.listevents import ZEVT_CHECKBOX_LIST_CHANGE\r\nfrom zoundry.appframework.ui.util.uiutil import getRootWindowOrDialog\r\nfrom zoundry.appframework.ui.widgets.controls.listex import IZCheckBoxListViewContentProvider\r\nfrom zoundry.appframework.ui.widgets.controls.listex import ZCheckBoxListViewWithButtons\r\nfrom zoundry.appframework.ui.widgets.controls.listex import ZListViewEx\r\nfrom zoundry.appframework.ui.widgets.dialog import ZBaseDialog\r\nfrom zoundry.appframework.ui.widgets.dialogs.standarddialogs import ZStandardDialog\r\nfrom zoundry.base.util.text.textutil import getSafeString\r\nfrom zoundry.blogapp.messages import _extstr\r\nimport wx\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Content provider for a list of blogs.\r\n# ------------------------------------------------------------------------------\r\nclass ZBlogListContentProvider(IZCheckBoxListViewContentProvider):\r\n\r\n def __init__(self, blogs, blog):\r\n self.blogs = blogs\r\n self.blog = blog\r\n self.checked = []\r\n\r\n for listBlog in self.blogs:\r\n isChecked = listBlog == blog\r\n self.checked.append(isChecked)\r\n\r\n self.imageMap = ZMappedImageList()\r\n # end __init__()\r\n\r\n def getImageList(self):\r\n return self.imageMap\r\n # end getImageList()\r\n\r\n def getNumColumns(self):\r\n return 1\r\n # end getNumColumns()\r\n\r\n def getNumRows(self):\r\n return len(self.blogs)\r\n # end getNumRows()\r\n\r\n def getColumnInfo(self, columnIndex): #@UnusedVariable\r\n return (u\"Blog Name\", None, None, ZListViewEx.COLUMN_LOCKED | ZListViewEx.COLUMN_RELATIVE, 100) #$NON-NLS-1$\r\n # end getColumnInfo()\r\n\r\n def getRowText(self, rowIndex, columnIndex): #@UnusedVariable\r\n return self.blogs[rowIndex].getName()\r\n # end getRowText()\r\n\r\n def getRowImage(self, rowIndex, columnIndex): #@UnusedVariable\r\n return -1\r\n # end getRowImage()\r\n\r\n def isChecked(self, rowIndex):\r\n return self.checked[rowIndex]\r\n # end isChecked()\r\n\r\n def setChecked(self, rowIndex, checked):\r\n self.checked[rowIndex] = checked\r\n # end setChecked()\r\n\r\n def getCheckedBlogs(self):\r\n checkedBlogs = []\r\n for index in range(0, len(self.blogs)):\r\n if self.checked[index]:\r\n checkedBlogs.append(self.blogs[index])\r\n return checkedBlogs\r\n # end getCheckedBlogs()\r\n\r\n def 
allBlogsChecked(self):\r\n u\"\"\"Returns true if all blogs in the list are checked.\"\"\" #$NON-NLS-1$\r\n for checked in self.checked:\r\n if not checked:\r\n return False\r\n return True\r\n # end allBlogsChecked()\r\n\r\n# end ZBlogListContentProvider\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Dialog to prompt the user to confirm deletion of a blog post.\r\n# ------------------------------------------------------------------------------\r\nclass ZConfirmDeletePostFromMultipleBlogsDialog(ZStandardDialog):\r\n\r\n def __init__(self, parent, document, blog, blogs):\r\n self.document = document\r\n self.blog = blog\r\n self.blogs = blogs\r\n\r\n title = _extstr(u\"deletepostsdialog.DeletePublishedPost\") #$NON-NLS-1$\r\n caption = u\"%s: '%s'\\n%s:\" % (_extstr(u\"deletepostsdialog.DeleteBlogPost\"), getSafeString(self.document.getTitle()), _extstr(u\"deletepostsdialog.DeleteFromTheseBlogs\")) #$NON-NLS-1$ #$NON-NLS-3$ #$NON-NLS-2$\r\n buttonMask = ZBaseDialog.YES_BUTTON | ZBaseDialog.NO_BUTTON\r\n ZStandardDialog.__init__(self, parent, title, caption, buttonMask, u\"question\") #$NON-NLS-1$\r\n\r\n (w, h) = self.GetBestSizeTuple()\r\n w = min(max(w, 500), 600)\r\n self.SetSize(wx.Size(w, h))\r\n self.Layout()\r\n # end __init__()\r\n\r\n def _createStandardDialogWidgets(self):\r\n ZStandardDialog._createStandardDialogWidgets(self)\r\n\r\n self.provider = ZBlogListContentProvider(self.blogs, self.blog)\r\n style = wx.LC_REPORT | wx.LC_NO_HEADER | wx.LC_SINGLE_SEL\r\n self.blogsCheckList = ZCheckBoxListViewWithButtons(self.provider, self.topPanel, style = style)\r\n\r\n singleBlog = len(self.blogs) == 1\r\n self.localCopyCB = wx.CheckBox(self.topPanel, wx.ID_ANY, _extstr(u\"deletepostsdialog.DeleteLocalCopy\")) #$NON-NLS-1$\r\n self.localCopyCB.SetValue(singleBlog)\r\n# self.localCopyCB.Enable(singleBlog)\r\n # end _createStandardDialogWidgets()\r\n\r\n def _layoutTopPanel(self):\r\n # horizontal sizer holds the image and the caption\r\n hsizer = wx.BoxSizer(wx.HORIZONTAL)\r\n if self.image:\r\n hsizer.Add(self.image, 0, wx.LEFT | wx.TOP | wx.BOTTOM, 10)\r\n\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n sizer.Add(self.captionText, 0, wx.EXPAND | wx.ALL, 3)\r\n sizer.Add(self.blogsCheckList, 1, wx.EXPAND | wx.ALL, 3)\r\n hsizer.AddSizer(sizer, 1, wx.EXPAND | wx.ALL, 10)\r\n\r\n # vertical sizer has the horizontal sizer and the checkbox\r\n vsizer = wx.BoxSizer(wx.VERTICAL)\r\n vsizer.AddSizer(hsizer, 1, wx.EXPAND | wx.ALL, 2)\r\n vsizer.Add(self.localCopyCB, 0, wx.ALIGN_RIGHT | wx.ALL, 5)\r\n\r\n return vsizer\r\n # end _layoutTopPanel()\r\n\r\n def _bindWidgetEvents(self):\r\n ZStandardDialog._bindWidgetEvents(self)\r\n\r\n self.Bind(ZEVT_CHECKBOX_LIST_CHANGE, self.onCheckListChange, self.blogsCheckList)\r\n # end _bindWidgetEvents()\r\n\r\n def onCheckListChange(self, event):\r\n allBlogs = self.provider.allBlogsChecked()\r\n self.localCopyCB.SetValue(allBlogs)\r\n event.Skip()\r\n # end onCheckListChange()\r\n\r\n def _setInitialFocus(self):\r\n self._getNoButton().SetFocus()\r\n # end _setInitialFocus()\r\n\r\n def showDialog(self):\r\n result = self.ShowModal()\r\n return (result == wx.ID_YES, self.localCopyCB.IsChecked(), self.provider.getCheckedBlogs())\r\n # end showDialog()\r\n\r\n# end ZConfirmDeletePostFromMultipleBlogsDialog\r\n\r\n\r\ndef ZShowConfirmDeletePostFromMultipleBlogsDialog(parent, document, blog, blogs):\r\n u\"\"\"Convenience function for opening the 'confirm delete post\r\n from multiple blogs dialog. 
Returns a tuple:\r\n 0 : wx.ID_*\r\n 1 : alsoDeleteLocalCopy\r\n 2 : list of blogs to delete from\"\"\" #$NON-NLS-1$\r\n\r\n parent = getRootWindowOrDialog(parent)\r\n dlg = ZConfirmDeletePostFromMultipleBlogsDialog(parent, document, blog, blogs)\r\n dlg.CenterOnParent()\r\n\r\n result = dlg.showDialog()\r\n dlg.Destroy()\r\n if parent is not None:\r\n parent.RemoveChild(dlg)\r\n return result\r\n# end ZShowConfirmDeletePostFromMultipleBlogsDialog()\r\n","sub_path":"src/python/zoundry/blogapp/ui/dialogs/deletepostsdialog.py","file_name":"deletepostsdialog.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"415447298","text":"try:\n from numba import jit\nexcept:\n def jit(**kwargs):\n def jitd(func):\n def helper(*args, **kwargs):\n return func(*args, **kwargs)\n return helper\n return jitd\n \nfrom Magnetar.utils import atmosphere\nimport numpy as np\n\n@jit(nopython=True,parallel=True)\ndef bbfunk( ee, tt): # per mode\n return 208452.792 * ee**3 / np.expm1(ee / tt) / 2\n\n\ndef bb_atmo_f(temp):\n return atmosphere() * (lambda ee: bbfunk(ee,temp))\n\n\n#\n# blackbody atmosphere (convenience class with parallel structure to condensed_surface)\n#\nclass bb_atmo(atmosphere):\n def __init__(self,teff,mag_strength,mag_inclination,*args,ofraction=0.5,**kwargs):\n self.teff = teff\n self.ofraction = np.clip(ofraction,0,1)\n self.xtemp = (1-self.ofraction)**0.25\n self.otemp = self.ofraction**0.25\n rat=teff*(2.0/(self.xtemp**4+self.otemp**4))**0.25\n self.xtemp*=rat\n self.otemp*=rat\n self.mag_inclination = mag_inclination\n self.mag_strength=mag_strength\n def __str__(self):\n outstring='''#\n# class bb_atmo\n#\n# effective_temperature %12g keV\n# O fraction %12g\n# X temperature %12g keV\n# O temperature %12g keV\n# mag_strength %12g Gauss [not used]\n# mag_inclination %12g radians [not used]\n#\\n''' % (self.teff, self.ofraction, self.xtemp, self.otemp, self.mag_strength, self.mag_inclination)\n return outstring+atmosphere.__str__(self)\n\n def xintensity(self, dataarray):\n if self.xtemp>0:\n return bbfunk(dataarray[-1], self.xtemp)\n else:\n return 0*dataarray[-1]\n\n def ointensity(self, dataarray):\n if self.otemp>0:\n return bbfunk(dataarray[-1], self.otemp)\n else:\n return 0*dataarray[-1]\n\n#\n# pure X blackbody atmosphere (convenience function with parallel structure to condensed_surface)\n#\ndef bb_atmo_purex(teff,mag_strength,mag_inclination,*args,**kwargs):\n return bb_atmo(teff,mag_strength,mag_inclination,ofraction=0)\n\n#\n# Thompson_Kostenko_Magnetosphere (convenience class function with parallel structure to condensed_surface)\n#\nclass Thompson_Kostenko_Magnetosphere(atmosphere):\n def __init__(self,teff,mag_strength,mag_inclination,normalization=1,alpha=1):\n self.mag_inclination=mag_inclination\n self.normalization=normalization\n self.alpha=alpha\n def __str__(self):\n outstring='''#\n# class modified_bb_atmo\n#\n# normalization %12g at 10 keV\n# alpha %12g\n''' % (normalization,alpha)\n return outstring+atmosphere.__str__(self)\n @jit(nopython=True,parallel=True)\n def _ointensity(dataarray,mag_inclination,normalization,alpha):\n ee=dataarray[-1]/10.0\n coskb2=(np.cos(np.radians(mag_inclination)\n ) * np.cos(np.radians(dataarray[-3])) +\n np.sin(np.radians(mag_inclination)\n ) * np.sin(np.radians(dataarray[-3])\n ) * np.cos(np.radians(dataarray[-2])))**2\n return normalization*ee**alpha*(1.0-coskb2)\n def ointensity(self, dataarray):\n return 
Thompson_Kostenko_Magnetosphere._ointensity(np.array(dataarray),self.mag_inclination,self.normalization,self.alpha)\n def xintensity(self, dataarray):\n return 0.0 # np.zeros((len(dataarray[-1])))\n \n \n\n#\n# modified blackbody atmosphere (convenience class with parallel structure to condensed_surface)\n#\n# freq_power quantifies how the opacity depends on photon energy: kappa goes as (1/energy)**freq_power\n# freq_power is 2 (Et) for free-free opacity and zero for scattering\n#\n# sigma_power quantifies how the temperature depends on column density: T goes as sigma**sigma_power\n# sigma_power is (alpha+1)/(4+alpha-beta) if opacity is proportional to density**alpha temperature**beta\n#\n# unmagnetized free-free : alpha=1, beta=-3.5 -> 0.23529411764705882 (4/17)\n# magnetized free-free : alpha=1, beta=-1.5 -> 0.3076923077 (4/13) \n# electron-scattering : alpha=0, beta=0 -> 0.25 (4/16)\n#\n# Based on the power-law atmospheres for neutron stars in\n#\n# https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..599H for the magnetized case and\n#\n# https://ui.adsabs.harvard.edu/abs/1984ApJ...287..244H for unmagnetized case\n#\n#\n\nclass modified_bb_atmo(atmosphere):\n def __init__(self,teff,mag_strength,mag_inclination,*args,freq_power=2,sigma_power=4./13.,kb_suppress=True,limb_darkening=True,**kwargs):\n self.effective_temperature = teff\n self.freq_power = freq_power\n self.sigma_power = sigma_power\n self.surface_temperature = teff\n self.mag_inclination = mag_inclination\n self.mag_strength=mag_strength\n self.ecyc = mag_strength/4.4e13*511.\n self.kb_suppress=kb_suppress\n self.limb_darkening=limb_darkening\n self.adjust_surface_temperature()\n \n def __str__(self):\n outstring='''#\n# class modified_bb_atmo\n#\n# effective_temperature %12g keV\n# surface_temperature %12g keV [tau=1 for upgoing O-mode with E=surface_temperature, neglecting k dot b suppression]\n# mag_strength %12g Gauss\n# mag_inclination %12g degrees\n# cyclotron energy %12g keV\n# k dot b supression %12s \n# limb darkening %12s \n# freq_power %12g [cross-section goes as 1/freq**freqpower for O-mode]\n# sigma_power %12g [temperature goes as column-density**sigma_power]\n#\n# ./atm -o XXX_%g_%g_%g -B %g -T %g -b %g -m 11 -p 5 -a 1.5 -M 2 -D 5 \n#\n''' % (self.effective_temperature, self.surface_temperature, self.mag_strength, self.mag_inclination, self.ecyc, \n self.kb_suppress, self.limb_darkening, self.freq_power, self.sigma_power,\n np.log10(self.mag_strength),np.log10(self.effective_temperature)+7.06462726,self.mag_inclination,\n np.log10(self.mag_strength),np.log10(self.effective_temperature)+7.06462726,self.mag_inclination)\n return outstring+atmosphere.__str__(self)\n \n # Auxiliary functions defined for numba\n @jit(nopython=True,parallel=True)\n def _xintensity(dataarray,surface_temperature,freq_power,sigma_power,ecyc,limb_darkening):\n ee=dataarray[-1]\n sigmax=np.abs(surface_temperature/ee)**freq_power\n sigmax*=np.where(ee= b_path['prof']:\n b_path['prof'] = round(paths[239][0]['prof'],1)\n b_path['path'] = paths[239][0]['path']\n b_path['b_t'] = b_time\n b_path[\"ac\"] = ac_type\n \n # if the final best path is profitable after taking the lease out then add the best path\n # the the final paths list, and reduce the fleet of the used ac type\n if b_path[\"prof\"] > 0:\n h_demand,fs = update_demand(h_demand, b_path[\"path\"], comp, ac, ac_type)\n ac[b_path[\"ac\"]][\"fleet\"] -= 1\n b_path[\"fs\"] = fs\n final_paths.append(b_path)\n else:\n profitable = False\n # if at the start of the current loop there was only 
one ac then it was used up and no others are available\n if sum(planes) == 1:\n profitable = False\n \n# total profit is the sum of all profits of final paths\nt_p = 0\n# print the profit, block time, ac type and flight schedule of each of the routes to be flown\nfor i in final_paths:\n print(\"route profit -\",i[\"prof\"],\" \\tblock time -\",i[\"b_t\"],\"\\tac type -\",i[\"ac\"])#,\"flight schedule\",i[\"fs\"])\n print()\n t_p += i[\"prof\"]\n\nprint()\nprint(round(time.time()-s,2),\"seconds\")\nprint()\nprint(\"total profit\",round(t_p,1))\n \n\nimport matplotlib.pyplot as plt \n\nfor o in range(1,4):\n plt.figure()\n for i in final_paths:\n t = [0]\n loc = [0,0]\n if i[\"ac\"] == o:\n t,loc = plot_help(i[\"path\"], comp, i[\"ac\"])\n plt.plot(t,loc,\"-o\",alpha=0.6)\n plt.title(\"ac type {0}\".format(o))\n plt.xlabel(\"time\")\n plt.ylabel(\"location\")\n plt.grid()\n plt.show()\n\nask, rpk, load, cask, rask, rrpk = kpis(final_paths, ac, dist, ac_prof)\nprint(\"ask\",ask,\" rpk\",rpk,\" load f\",round(load,3),\" cask\",cask,\" rask\",rask,\" rrpk\",rrpk)\n\n\n#old_ac = 3\n#count = 0\n##Return arrays as latex tables\n#from tabulate import tabulate\n#import numpy as np\n#for i in final_paths:\n# if old_ac != i[\"ac\"]:\n# count = 1\n# else:\n# count += 1\n# old_ac = i[\"ac\"]\n#\n# lst = []\n# for o in i[\"fs\"]:\n# lst.append([o[0],o[1],o[2],o[3]])\n# flst = np.array(lst)\n# print(\"\\\\begin{table}[H]\")\n# print(\"\\centering\")\n# print(\"\\caption{{Flight schedule of ac type {0} plane {1}}}\".format(i[\"ac\"],count))\n# print(tabulate(flst, headers=[\"Departure Time\",\"Departure\",\"Destination\",\"Passengers\"], tablefmt=\"latex\", stralign=\"center\", numalign=\"center\"))#, floatfmt=\".0f\"))\n# print(\"\\end{table}\")\n# print()\n# print()\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n","sub_path":"Dynamic-Programing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"339818259","text":"import numpy as np\nfrom numpy import exp, sin, cos, sqrt\nfrom scipy.linalg import expm, logm\nimport warnings\nimport matplotlib.pyplot as plt\n\n\n\nPauli = [\n np.matrix(np.eye(2), dtype=np.complex), # identity\n np.matrix([[0, 1],[1, 0]], dtype=np.complex), # X\n np.matrix([[0,-1j],[1j,0]], dtype=np.complex), # Y\n np.matrix([[1, 0],[0,-1]], dtype=np.complex) # Z\n]\n\n\n\n\n# Useful function to get a quick overview over the entries of a large matrix\n\ndef color_matrix(M, bound_opt=True, cmap=\"Blues\", low_=-2.0, high_=2.0):\n if isinstance(M,np.matrix):\n M = np.array(M)\n\n if len(M.shape)!=2:\n raise IndexError()\n\n if bound_opt:\n max_ = np.amax(M)\n if type(max_)==np.complex128 or type(max_)==np.complex64:\n max_ = max(max_.real, max_.imag)\n min_ = np.amin(M)\n if type(min_)==np.complex128 or type(min_)==np.complex64:\n min_ = min(min_.real, min_.imag)\n range_ = max_-min_\n delta = 0.1*range_\n\n low_ = min_ - delta\n high_ = max_ + delta\n \n if M.dtype == np.complex:\n fig, axs = plt.subplots(1,2,figsize=(10,3), constrained_layout=True)\n psm0 = axs[0].pcolormesh(M.real, cmap=cmap, rasterized=True, vmin=low_, vmax=high_)\n axs[0].set_title(\"real\")\n axs[0].set_ylim(M.shape[1],0)\n psm1 = axs[1].pcolormesh(M.imag, rasterized=True, cmap=cmap, vmin=low_, vmax=high_)\n axs[1].set_title(\"imag\")\n axs[1].set_ylim(M.shape[1],0)\n fig.colorbar(psm1, ax=axs[1])\n plt.show()\n else:\n fig, ax = plt.subplots(1,1,figsize=(5,3), constrained_layout=True)\n psm = 
ax.pcolormesh(M, cmap=cmap, rasterized=True, vmin=low_, vmax=high_)\n ax.set_ylim(M.shape[1],0)\n fig.colorbar(psm, ax=ax)\n plt.show()\n\n\n\n\ndef is_hermitian(M):\n \n if (len(M.shape)!=2):\n raise LinAlgError(\"M not a matrix\")\n \n if (M.shape[0] != M.shape[1]):\n raise LinAlgError(\"M has to be a square matrix\")\n \n M = np.matrix(M)\n dist = np.linalg.norm(M-M.H)\n return np.isclose(dist, 0.0)\n\n\ndef is_unitary(M):\n\n if (len(M.shape)!=2):\n raise LinAlgError(\"M is not a matrix\")\n\n M = np.matrix(M)\n\n dist = np.linalg.norm(M*M.H - np.matrix(np.eye(M.shape[0])))\n return np.isclose(dist, 0.0)\n\n\n\n# The keyword pseudo indicates if one wants to check the positivity of the matrix\n\n\ndef is_density_matrix(rho, pseudo=True):\n # Check if Hermitean\n if not is_hermitian(rho):\n return False\n # Check if trace is 1\n if not np.isclose(np.trace(rho), 1):\n print(rho)\n print(\"trace: \", np.trace(rho))\n return False\n if not pseudo:\n #Check positivity\n W, v = np.linalg.eig(rho)\n for w in W:\n if not np.isclose(w, 0.0):\n if w.real < 0:\n return False\n return True\n\n\n\n#\n# Following are two diagonalization algorithm\n# \n# The first is for 2x2 matrices, which orders the eigenvalues from highest to lowest\n# The second is for nxn matrices, which doesn't order the eigenvalues\n# \n\n\ndef diag(rho):\n v, w = np.linalg.eigh(rho)\n \n if v[0] > v[1]:\n raise ValueError()\n \n S = np.matrix(np.diag(v))\n U = np.matrix(w, dtype=np.complex)\n \n if not np.isclose(np.linalg.norm(U.H@U - Pauli[0]), 0.0):\n raise ValueError()\n \n \n dist = np.linalg.norm(rho-U@S@U.H)\n if not np.isclose(dist, 0.0):\n raise ValueError()\n \n return S, U\n\n\n#\n# The following function are for generate SU(2) and U(2) matrices\n#\n#\n\ndef su2(param, generator=False):\n\n if not isinstance(param, list):\n if len(param.shape)!=1:\n raise ValueError(\"array not one dimensional!\")\n if (len(param)!=3):\n raise ValueError(\"wrong number of SU(2) angles\")\n\n\n if generator:\n #\n # param[0] = a_x\n # param[1] = a_y\n # param[2] = a_z\n #\n #\n # U = exp(-i \\sum_{i\\in\\{x,y,z\\}} a_i \\sigma_i)\n #\n\n H = sum([param[i]*Pauli[i+1] for i in range(3)])\n return expm(-1j*H)\n\n else:\n\n #\n # param[0] = \\theta\n # param[1] = \\phi_1\n # param[2] = \\phi_2\n #\n # The SU(2) matrix is\n # \n # / e^(i \\phi_1) \\cos(\\theta) e^(i \\phi_2) \\sin(\\theta) \\\n # U = | |\n # \\ -e^(-i\\phi_2) \\sin(\\theta) e^(-i \\phi_1) \\cos(\\theta) /\n \n theta, phi1, phi2 = param\n \n return np.matrix([[exp(1j*phi1)*cos(theta), exp(1j*phi2)*sin(theta)]\n ,[-exp(-1j*phi2)*sin(theta), exp(-1j*phi1)*cos(theta)]])\n\ndef get_su2_param(U, generator=False):\n detU = np.linalg.det(U)\n if not np.isclose(detU.real, 1, rtol=1e-3):\n raise ValueError()\n if generator:\n ###################################\n # U = exp(-i \\sum_k a_k \\sigma_k) #\n ###################################\n \n LogU = 1j * logm(U)\n a0 = LogU[0,1].real\n a1 = -LogU[0,1].imag\n a2 = LogU[0,0].real\n param = np.array([a0, a1, a2])\n else:\n #######################################################\n # / e^{i t1} cos(t0) e^{i t2} sin(t0) \\ #\n # U = | | #\n # \\ -e^{-i t2} sin(t0) e^{-i t1} cos(t0) / #\n #######################################################\n \n a = np.abs(U[0,0])\n t1 = np.angle(U[0,0])\n t2 = np.angle(U[0,1])\n t0 = np.arccos(a)\n \n param = np.array([t0, t1, t2])\n\n return param\n\n\ndef u2(param):\n #\n # param[0] = global angle of U(2) \\phi\n # param[1:] = SU(2) angle\n #\n if not isinstance(param, list):\n if 
len(param.shape)!=1:\n raise ValueError(\"array not one dimensional!\")\n\n if (len(param)!=4):\n raise ValueError(\"wrong number of U(2) angles\")\n \n phi = param[0]\n su2_matrix = su2(param[1:])\n return exp(1j*phi/2)*su2_matrix\n\ndef get_u2_param(U):\n \n if not isinstance(U, np.matrix):\n U = np.matrix(U)\n\n # check unitarity\n dist = np.linalg.norm(U.H@U - np.matrix(np.eye(U.shape[0])))\n if not np.isclose(dist, 0.0):\n raise ValueError(\"Matrix not unitary, ||U^adj U - 1||_F = {}\".format(dist))\n\n # det U = e^{i \\phi/2}\n\n phi = - 1j * np.log(np.linalg.det(U))\n\n su2matrix = np.exp(-1j*phi/2) * U\n\n su2param = get_su2_param(su2matrix)\n\n param = [phi] + list(su2param)\n\n return np.array(param)\n\n\ndef pseudo_random_density_matrix(n=2):\n #\n # This function generates a random hermitian matrix with trace=1,\n # but it ignors the semi-positivity of the matrix\n #\n\n rho = np.zeros((n, n), dtype=np.complex)\n residue = 1.0\n for i in range(n-1):\n val = np.random.rand()*residue\n rho[i,i] = val\n residue -= val\n \n rho[-1, -1] = residue\n \n \n for i in range(n):\n for j in range(i+1,n):\n val = np.random.rand() + 1j*np.random.rand()\n rho[i,j] = val\n rho[j,i] = np.conjugate(val)\n \n return np.matrix(rho)\n\n\n\ndef random_density_matrix(n=2, warn=True):\n if n != 2:\n if warn:\n warnings.warn(\"In general, the output is not a positive semi-define matrix for n!=2\") \n return pseudo_random_density_matrix(n=n)\n\n a = np.random.rand()*0.5 + 0.5\n D = np.matrix([[a,0],[0,1-a]])\n\n U = su2(np.random.rand(3))\n\n return U@D@U.H\n\n\ndef test_checks(M):\n print(is_density_matrix(M, pseudo=False), is_density_matrix(M, pseudo=True), is_hermitian(M), is_unitary(M))\n\nif __name__ == \"__main__\":\n rho0 = random_density_matrix()\n color_matrix(rho0)\n\n test_checks(rho0)\n\n S, U = diagonalize(rho0)\n color_matrix(S)\n color_matrix(U)\n\n\n rho1 = random_density_matrix(n=4)\n color_matrix(rho1)\n\n test_checks(rho1)\n\n S, U = diagonalize(rho1)\n color_matrix(S)\n color_matrix(U)\n\n U0 = u2([1.2, 4.1, 5.2, 1.9])\n color_matrix(U0)\n\n test_checks(U0)\n\n U1 = su2([1.2, 3.1, 2.1])\n color_matrix(U1)\n get_su2_param(U1)\n\n U2 = su2([1.2, 1.2, 1.5], generator=True)\n color_matrix(U2)\n\n test_checks(U1)\n\n","sub_path":"src/tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"327320929","text":"\n\n#calss header\nclass _KNIFE():\n\tdef __init__(self,): \n\t\tself.name = \"KNIFE\"\n\t\tself.definitions = [u'a tool, usually with a metal blade and a handle, used for cutting and spreading food or other substances, or as a weapon: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_knife.py","file_name":"_knife.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"287999247","text":"# coding: utf-8\n\n# 左上座標が引数, m*mの範囲が一致しているかをチェック\ndef check_pattern(x, y):\n for i in range(m):\n for j in range(m):\n nx = x + i\n ny = y + j\n if image[nx][ny] != check[i][j]:\n return False\n return True\n\n\nn = int(input())\nimage = [[int(i) for i in input().split()] for _ in range(n)]\nm = int(input())\ncheck = [[int(i) for i in input().split()] for _ in range(m)]\n\nfor i in 
range(n - m + 1):\n for j in range(n - m + 1):\n if check_pattern(i, j):\n print(i, j)\n","sub_path":"other/glassess.py","file_name":"glassess.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"456367699","text":"from typing import TypeVar, TypedDict, Tuple, Optional, Union\n\n\nclass _BSTSave(TypedDict):\n root: int\n children: Tuple[Optional['_BSTSave'], Optional['_BSTSave']]\n\n\nclass RedBlackTree:\n \"\"\"naar idee van https://en.wikipedia.org/wiki/Red%E2%80%93black_tree (insert en delete)\"\"\"\n def __init__(self, root: Optional[object] = None):\n self.root: Optional[object] = root\n self.color: bool = True # False is black, red is True\n self.left: Optional[RedBlackTree] = None\n self.right: Optional[RedBlackTree] = None\n self.parent: Optional[RedBlackTree] = None\n self.size: int = 0\n\n def insertItem(self, newItem: object) -> bool:\n \"\"\"\n voegt een item toe aan de RBT\n :param newItem: object met getId() als functie, de searchKey is een getal.\n :return: Succes, geeft aan of de operatie geslaagd is.\n :pre newItem, of een item met hetzelfde SearchKey, zit nog niet in de RBT\n :post newItem zit in de RBT, de grootte van de RBT is met 1 verhoogt.\n \"\"\"\n self.size += 1\n newNode = self.insertRecurse(newItem)\n newNode.__insertRepairTree()\n return True\n\n def insertRecurse(self, newItem: object) -> 'RedBlackTree':\n \"\"\"\n zoekt het juiste blad recursief en steekt het nieuwe item in dat blad\n :param newItem: item dat geïnsert moet worden\n :return: None\n :pre geen\n :post het nieuwe item zit in de boom\n \"\"\"\n if self.root is None:\n self.root = newItem\n self.color = True\n return self\n\n if newItem.getId() < self.root.getId():\n if self.left is None:\n child: RedBlackTree = RedBlackTree(newItem)\n child.parent = self\n self.left = child\n return child\n\n return self.left.insertRecurse(newItem)\n\n if newItem.getId() > self.root.getId():\n if self.right is None:\n child: RedBlackTree = RedBlackTree(newItem)\n child.parent = self\n self.right = child\n return child\n\n return self.right.insertRecurse(newItem)\n\n def __insertRepairTree(self) -> None:\n \"\"\"\n balanceert de boom nadat er een insert is gedaan\n :return: None\n :pre: de boom is niet gebalanceert\n :post: de boom is gebalanceert\n \"\"\"\n if self.parent is None:\n self.__insertCase1()\n return\n if self.parent.color is False:\n return\n if self.uncle is not None and self.uncle.color is True:\n self.__insertCase2()\n return\n self.__insertCase3()\n\n def __insertCase1(self) -> None:\n \"\"\"zet de kleur naar zwart\"\"\"\n self.color = False\n\n def __insertCase2(self) -> None:\n \"\"\"kleuren veranderen en recursieve aanroep naar repair tree\"\"\"\n self.parent.color = False\n self.uncle.color = False\n self.grandparent.color = True\n self.grandparent.__insertRepairTree()\n\n def __insertCase3(self) -> None:\n \"\"\"rotaties en herkleuringen\"\"\"\n if self == self.parent.right and self.parent == self.grandparent.left:\n self.parent.__rotateLeft()\n self.left.__insertCase3Step2()\n elif self == self.parent.left and self.parent == self.grandparent.right:\n self.parent.__rotateRight()\n self.right.__insertCase3Step2()\n\n else:\n self.color = True\n self.__insertCase3Step2()\n\n def __insertCase3Step2(self) -> None:\n \"\"\"rotaties en herkleuringen\"\"\"\n self.parent.color = False\n self.grandparent.color = True\n\n if self == self.parent.left:\n self.grandparent.__rotateRight()\n else:\n 
self.grandparent.__rotateLeft()\n\n def __rotateLeft(self) -> None:\n \"\"\"\n draait node naar links\n :return: None\n \"\"\"\n child = self.right\n self.root, child.root = child.root, self.root\n self.right = child.right\n child.right = self\n child.left = self.left\n self.left = child\n\n def __rotateRight(self) -> None:\n \"\"\"\n draait de nodes naar rechts\n :return: None\n \"\"\"\n child = self.right\n self.root, child.root = child.root, self.root\n self.right = child.right\n child.right = self\n child.left = self.left\n self.left = child\n\n def retrieveItem(self, searchKey: int) -> Tuple[Optional[object], bool]:\n \"\"\"\n haalt het object met gegeven searchKey op uit de RBT\n :param searchKey: een getal\n :return: geeft het (object, Succes) als het gevonden is in de RBT, als het niet gevonden is dan geeft het (None, Succes) terug\n :pre geen\n :post De RBT is niet veranderd\n \"\"\"\n if self.root is None:\n return None, False\n\n if searchKey == self.root.getId():\n return self.root, True\n\n elif searchKey < self.root.getId():\n if self.left is None:\n return None, False\n return self.left.retrieveItem(searchKey)\n\n if self.right is None:\n return None, False\n return self.right.retrieveItem(searchKey)\n\n def __retrieveItem(self, searchKey: int) -> Optional['RedBlackTree']:\n \"\"\"\n haalt het object met gegeven searchKey op uit de RBT\n :param searchKey: een getal\n :return: geeft het (object, Succes) als het gevonden is in de RBT, als het niet gevonden is dan geeft het (None, Succes) terug\n :pre geen\n :post De RBT is niet veranderd\n \"\"\"\n if searchKey == self.root.getId():\n return self\n\n elif searchKey < self.root.getId():\n if self.left is None:\n return None\n return self.left.__retrieveItem(searchKey)\n\n if self.right is None:\n return None\n return self.right.__retrieveItem(searchKey)\n\n def deleteItem(self, searchKey: int) -> bool:\n \"\"\"\n verwijdert de node met gegeven searchKey uit de RBT\n :param searchKey: een getal\n :return: Succes\n :pre geen\n :post het item met searchKey zit niet meer in de RBT, de grootte van de RBT is met 1 vermindert.\n \"\"\"\n node = self.__retrieveItem(searchKey)\n if node is None:\n return False\n if not (node.left or node.right): # no children\n node.parent.__deleteCase1()\n if node.parent.left == node:\n node.parent.left = None\n node.parent.__rotateLeft()\n else:\n node.parent.right = None\n node.parent.__rotateRight()\n\n return True\n\n if not (node.left and node.right): # 1 child\n node.__deleteOneChild(searchKey)\n\n else:\n node.__delete2children(searchKey)\n\n return True\n\n def __delete2children(self, searchKey: int) -> None:\n \"\"\"delete als de node 2 kinderen heeft, __switch met innorder succesor\"\"\"\n node = self.__switch(self.__inorderSuccesor())\n node.deleteItem(searchKey)\n\n def __deleteOneChild(self, searchKey:int) -> None:\n \"\"\"delete case, 1 kind\"\"\"\n if self.left:\n child = self.__switch(self.left)\n else:\n child = self.__switch(self.right)\n\n if self.color is False:\n if child.color is True:\n child.color = False\n if self.left == child:\n self.left = None\n\n else:\n self.right = None\n\n else:\n child.__deleteCase1()\n\n def __switch(self, node) -> 'RedBlackTree':\n \"\"\"switched 2 nodes om, geeft de nieuwe node terug\"\"\"\n self.root, node.root = node.root, self.root\n return node\n\n def __deleteCase1(self) -> None:\n if self.parent is not None:\n self.__deleteCase2()\n\n def __deleteCase2(self) -> None:\n if self.sibling.color is True:\n self.parent.color = True\n 
self.sibling.color = False\n if self == self.parent.left:\n self.parent.__rotateLeft()\n else:\n self.parent.__rotateRight()\n self.__deleteCase3()\n\n def __deleteCase3(self) -> None:\n if self.parent.color is False and self.sibling.color is False and self.left.color is False and self.right.color is False:\n self.color = True\n self.parent.__deleteCase1()\n else:\n self.__deleteCase4()\n\n def __deleteCase4(self) -> None:\n if self.parent.color is True and self.sibling.color is False and self.sibling.left.color is False and self.sibling.right.color is False:\n self.sibling.color = True\n self.parent.color = False\n else:\n self.__deleteCase5()\n\n def __deleteCase5(self) -> None:\n if self.sibling.color is False:\n if self == self.parent.left and self.sibling.right.color is False and self.sibling.left.color is True:\n self.sibling.color = True\n self.sibling.left.color = False\n self.sibling.__rotateRight()\n elif self == self.parent.right and self.sibling.left.color is False and self.sibling.right is False:\n self.sibling.color = True\n self.sibling.right.color = False\n self.sibling.__rotateLeft()\n self.__deleteCase6()\n\n def __deleteCase6(self) -> None:\n self.sibling.color = self.parent.color\n self.parent.color = False\n\n if self == self.parent.left:\n self.sibling.right.color = False\n self.parent.__rotateLeft()\n else:\n self.sibling.color = False\n self.parent.__rotateRight()\n\n def __inorderSuccesor(self) -> Optional['RedBlackTree']:\n \"\"\"\n geeft de inorder succesor van de node terug.\n :return: inorder succesor\n \"\"\"\n if self.right is None:\n return None\n\n def left(node):\n if node.left is not None:\n left(node.left)\n\n return node\n\n return left(self.right)\n\n def inorderTraverse(self, visit) -> None:\n \"\"\"\n overloopt de RBT in inorder volgorde (van klein naar groot), past de visit functie toe op de nodes\n :return: een tuple met de elementen in, van eerst naar laatste element\n :pre geen\n :post de RBT is onverandert\n \"\"\"\n if not (self.left or self.right):\n visit(self.root)\n return\n\n if self.left:\n self.left.inorderTraverse(visit)\n visit(self.root)\n if self.right:\n self.right.inorderTraverse(visit)\n\n def isEmpty(self) -> bool:\n \"\"\"\n :return: geeft terug of de RBT leeg is (bool)\n \"\"\"\n return self.root is None\n\n def getSize(self) -> int:\n \"\"\"geeft het aantal nodes in de boom terug\"\"\"\n return self.size\n\n def save(self) -> _BSTSave:\n if self.root is None:\n return {}\n save = {'root': self.root.getId(), 'color': 'red' if self.color else 'black'}\n if bool(self.left or self.right):\n save['children'] = [self.left.save() if self.left else None, self.right.save() if self.right else None]\n\n return save\n\n def load(self, tree: '_BSTSave') -> None:\n \"\"\"\n slaagt de boom op in de vorm {root:root, color:color, children:[child1, child2]}\n :param tree: {root:root, color:color, children:[child1, child2]}\n :return: None\n \"\"\"\n self.clear()\n self.__loadItems(tree)\n\n def __loadItems(self, tree: Union['_BSTSave', list]) -> None:\n \"\"\"\n laadt de items in de boom\n :param tree: dict/list van de boom\n :return: None\n \"\"\"\n\n self.root = TreeItem(tree['root'], tree['root']) # TreeItem is for testing purposes\n self.color = False if tree[\"color\"] == \"black\" else True\n\n if 'children' in tree:\n if tree['children'][0]:\n self.left = RedBlackTree()\n self.left.loadItems(tree['children'][0])\n\n if tree['children'][1]:\n self.right = RedBlackTree()\n self.right.loadItems(tree['children'][1])\n\n def clear(self) -> 
None:\n \"\"\"maakt de boom leeg\"\"\"\n self.root = None\n self.left = None\n self.right = None\n self.parent = None\n\n @property\n def grandparent(self) -> Optional['RedBlackTree']:\n \"\"\"\n geeft de grootvader terug\n :return: grandparent, als diet bestaat anders None\n :precondities: geen\n :postconditites: geen\n \"\"\"\n if self.parent is None:\n return None\n return self.parent.parent\n\n @grandparent.setter\n def grandparent(self, val: Optional['RedBlackTree']) -> None:\n \"\"\"\n zet de waarde van de grootvader\n :param val: waarde\n :return: None\n :precondities: geen\n :postconditites: geen\n \"\"\"\n self.parent.parent = val\n\n @property\n def uncle(self) -> Optional['RedBlackTree']:\n \"\"\"\n geeft de waarde van de nonkel terug\n :return: uncle, als diet bestaat anders None\n :precondities: geen\n :postconditites: geen\n \"\"\"\n if self.grandparent is None:\n return None\n return self.grandparent.left if self.grandparent.left != self.parent else self.grandparent.right\n\n @property\n def sibling(self) -> Optional['RedBlackTree']:\n if self.parent is None:\n return None\n if self == self.parent.left:\n if self.parent.right is not None:\n return self.parent.right\n return None\n elif self.parent.left is not None:\n return self.parent.left\n\n\nclass TreeItem:\n def __init__(self, key, val):\n self.sk = key\n self.v = val\n\n def getSearchKey(self):\n return self.sk\n\n\ndef createTreeItem(key, val):\n return TreeItem(key, val)\n\n\nif __name__ == \"__main__\":\n t = RedBlackTree()\n print(t.isEmpty())\n print(t.insertItem(createTreeItem(8,8)))\n print(t.insertItem(createTreeItem(5,5)))\n print(t.insertItem(createTreeItem(10,10)))\n print(t.insertItem(createTreeItem(15,15)))\n print(t.isEmpty())\n print(t.retrieveItem(5)[0])\n print(t.retrieveItem(5)[1])\n t.inorderTraverse(print)\n print(t.save())\n t.load({'root': 8,'color': 'black','children':[{'root':5,'color': 'black'},{'root':10,'color': 'black'}]})\n t.insertItem(createTreeItem(15,15))\n print(t.deleteItem(0))\n print(t.save())\n print(t.deleteItem(10))\n print(t.save())\n\n \"\"\"\nTrue\nTrue\nTrue\nTrue\nTrue\nFalse\n5\nTrue\n5\n8\n10\n15\n{'root': 8,'color': 'black','children':[{'root':5,'color': 'black'},{'root':10,'color': 'black','children':[None,{'root':15,'color': 'red'}]}]}\nFalse\n{'root': 8,'color': 'black','children':[{'root':5,'color': 'black'},{'root':10,'color': 'black','children':[None,{'root':15,'color': 'red'}]}]}\nTrue\n{'root': 8,'color': 'black','children':[{'root':5,'color': 'black'},{'root':15,'color': 'black'}]}\n\"\"\"\n","sub_path":"Robin/DataStructures/RedBlackTree.py","file_name":"RedBlackTree.py","file_ext":"py","file_size_in_byte":15227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"507165228","text":"import os\n\nfrom flask import Flask, g, request, send_from_directory, abort\nfrom flask_cors import CORS\nfrom flask_restful import Resource\nfrom flask_restful_swagger_2 import Api, swagger, Schema\n\nfrom neo4j.v1 import GraphDatabase, basic_auth\n\n\napp = Flask(__name__)\napi = Api(app, title='Neo4j Movie Demo API', api_version='0.0.10')\nCORS(app)\n\n\ndriver = GraphDatabase.driver('bolt://localhost', auth=basic_auth(os.environ['MOVIE_DATABASE_USERNAME'], os.environ['MOVIE_DATABASE_PASSWORD']))\n\n\ndef get_db():\n if not hasattr(g, 'neo4j_db'):\n g.neo4j_db = driver.session()\n return g.neo4j_db\n\n\n@app.teardown_appcontext\ndef close_db(error):\n if hasattr(g, 'neo4j_db'):\n g.neo4j_db.close()\n\n\nclass 
GenreModel(Schema):\n type = 'object'\n properties = {\n 'id': {\n 'type': 'integer',\n },\n 'name': {\n 'type': 'string',\n }\n }\n\n\nclass MovieModel(Schema):\n type = 'object'\n properties = {\n 'id': {\n 'type': 'integer',\n },\n 'title': {\n 'type': 'string',\n },\n 'summary': {\n 'type': 'string',\n },\n 'released': {\n 'type': 'integer',\n },\n 'duration': {\n 'type': 'integer',\n },\n 'rated': {\n 'type': 'string',\n },\n 'tagline': {\n 'type': 'string',\n },\n 'poster_image': {\n 'type': 'string',\n }\n }\n\n\nclass PersonModel(Schema):\n type = 'object'\n properties = {\n 'id': {\n 'type': 'integer',\n },\n 'name': {\n 'type': 'string',\n },\n 'born': {\n 'type': 'integer',\n },\n 'poster_image': {\n 'type': 'string',\n }\n }\n\n\ndef serialize_genre(genre):\n return {\n 'id': genre['id'],\n 'name': genre['name'],\n }\n\n\ndef serialize_movie(movie):\n return {\n 'id': movie['id'],\n 'title': movie['title'],\n 'summary': movie['summary'],\n 'released': movie['released'],\n 'duration': movie['duration'],\n 'rated': movie['rated'],\n 'tagline': movie['tagline'],\n 'poster_image': movie['poster_image'],\n }\n\n\ndef serialize_person(person):\n return {\n 'id': person['id'],\n 'name': person['name'],\n 'born': person['born'],\n 'poster_image': person['poster_image'],\n }\n\n\nclass ApiDocs(Resource):\n def get(self, path=None):\n if not path:\n path = 'index.html'\n return send_from_directory('swaggerui', path)\n\nclass GenreList(Resource):\n @swagger.doc({\n 'tags': ['genres'],\n 'summary': 'Find all genres',\n 'description': 'Returns all genres',\n 'responses': {\n '200': {\n 'description': 'A list of genres',\n 'schema': GenreModel,\n }\n }\n })\n def get(self):\n db = get_db()\n result = db.run('MATCH (genre:Genre) RETURN genre')\n return [serialize_genre(record['genre']) for record in result]\n\n\nclass Movie(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movie by ID',\n 'description': 'Returns a movie',\n 'parameters': [\n {\n 'name': 'id',\n 'description': 'movie id',\n 'in': 'path',\n 'type': 'integer',\n }\n ],\n 'responses': {\n '200': {\n 'description': 'A movie',\n 'schema': MovieModel,\n },\n '404': {\n 'description': 'movie not found'\n },\n }\n })\n def get(self, id):\n db = get_db()\n result = db.run(\n '''\n MATCH (movie:Movie {id: {id}})\n MATCH (movie)<-[r:ACTED_IN]-(a:Person) // movies must have actors\n MATCH (related:Movie)<--(a:Person) // movies must have related movies\n WHERE related <> movie\n OPTIONAL MATCH (movie)-[:HAS_KEYWORD]->(keyword:Keyword)\n OPTIONAL MATCH (movie)-[:HAS_GENRE]->(genre:Genre)\n OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Person)\n OPTIONAL MATCH (movie)<-[:PRODUCED]-(p:Person)\n OPTIONAL MATCH (movie)<-[:WRITER_OF]-(w:Person)\n WITH DISTINCT movie, genre, keyword, d, p, w, a, r, related, count(related) AS countRelated\n ORDER BY countRelated DESC\n RETURN DISTINCT movie,\n collect(DISTINCT keyword) AS keywords,\n collect(DISTINCT d) AS directors,\n collect(DISTINCT p) AS producers,\n collect(DISTINCT w) AS writers,\n collect(DISTINCT{ name:a.name, id:a.id, poster_image:a.poster_image, role:r.role}) AS actors,\n collect(DISTINCT related) AS related,\n collect(DISTINCT genre) AS genres\n ''', {'id': id}\n )\n for record in result:\n return {\n 'id': record['movie']['id'],\n 'title': record['movie']['title'],\n 'summary': record['movie']['summary'],\n 'released': record['movie']['released'],\n 'duration': record['movie']['duration'],\n 'rated': record['movie']['rated'],\n 'tagline': record['movie']['tagline'],\n 
'poster_image': record['movie']['poster_image'],\n 'genres': [serialize_genre(genre) for genre in record['genres']],\n 'directors': [serialize_person(director)for director in record['directors']],\n 'producers': [serialize_person(producer) for producer in record['producers']],\n 'writers': [serialize_person(writer) for writer in record['writers']],\n 'actors': [\n {\n 'id': actor['id'],\n 'name': actor['name'],\n 'role': actor['role'],\n 'poster_image': actor['poster_image'],\n } for actor in record['actors']\n ],\n 'related': [serialize_movie(related) for related in record['related']],\n }\n return {'message': 'movie not found'}, 404\n\n\nclass MovieList(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find all movies',\n 'description': 'Returns a list of movies',\n 'responses': {\n '200': {\n 'description': 'A list of movies',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self):\n db = get_db()\n result = db.run(\n '''\n MATCH (movie:Movie) RETURN movie\n '''\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass MovieListByGenre(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movie by genre id',\n 'description': 'Returns a list of movies by genre',\n 'parameters': [\n {\n 'name': 'genre_id',\n 'description': 'genre id',\n 'in': 'path',\n 'type': 'integer',\n 'required': 'true'\n }\n ],\n 'responses': {\n '200': {\n 'description': 'A list of movies with the specified genre',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self, genre_id):\n db = get_db()\n result = db.run(\n '''\n MATCH (movie:Movie)-[:HAS_GENRE]->(genre)\n WHERE genre.id = {genre_id}\n RETURN movie\n ''', {'genre_id': genre_id}\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass MovieListByDateRange(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movie by year range',\n 'description': 'Returns a list of movies released between a range of years',\n 'parameters': [\n {\n 'name': 'start',\n 'description': 'start year',\n 'in': 'path',\n 'type': 'string',\n 'required': 'true'\n },\n {\n 'name': 'end',\n 'description': 'end year',\n 'in': 'path',\n 'type': 'string',\n 'required': 'true'\n }\n ],\n 'responses': {\n '200': {\n 'description': 'A list of movies released between the specified years',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self, start, end):\n db = get_db()\n result = db.run(\n '''\n MATCH (movie:Movie)\n WHERE movie.released > {start} AND movie.released < {end}\n RETURN movie\n ''', {'start': start, 'end': end}\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass MovieListByPersonActedIn(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movies by actor',\n 'description': 'Returns a list of movies that a person has acted in.',\n 'parameters': [\n {\n 'name': 'person_id',\n 'description': 'person id',\n 'in': 'path',\n 'type': 'integer',\n 'required': 'true'\n },\n ],\n 'responses': {\n '200': {\n 'description': 'A list of movies the specified person has acted in',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self, person_id):\n db = get_db()\n result = db.run(\n '''\n MATCH (actor:Person {id: {person_id}})-[:ACTED_IN]->(movie:Movie)\n RETURN DISTINCT movie\n ''', {'person_id': person_id}\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass MovieListByWrittenBy(Resource):\n 
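# Illustrative sketch (kept entirely in comments, assumptions noted): the resources above all run parameterized Cypher through get_db()/driver, and the same pattern can be exercised directly against the driver session; the movie id 1 is an assumed example value, not data known to exist.
#
#     def example_direct_movie_lookup():
#         with driver.session() as session:
#             result = session.run('MATCH (movie:Movie {id: {id}}) RETURN movie', {'id': 1})
#             return [serialize_movie(record['movie']) for record in result]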
@swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movies by writer',\n 'description': 'Returns a list of movies writen by a person',\n 'parameters': [\n {\n 'name': 'person_id',\n 'description': 'person id',\n 'in': 'path',\n 'type': 'integer',\n 'required': 'true'\n },\n ],\n 'responses': {\n '200': {\n 'description': 'A list of movies the specified person has written',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self, person_id):\n db = get_db()\n result = db.run(\n '''\n MATCH (actor:Person {id: {person_id}})-[:WRITER_OF]->(movie:Movie)\n RETURN DISTINCT movie\n ''', {'person_id': person_id}\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass MovieListByDirectedBy(Resource):\n @swagger.doc({\n 'tags': ['movies'],\n 'summary': 'Find movies by director',\n 'description': 'Returns a list of movies directed by a person',\n 'parameters': [\n {\n 'name': 'person_id',\n 'description': 'person id',\n 'in': 'path',\n 'type': 'integer',\n 'required': 'true'\n },\n ],\n 'responses': {\n '200': {\n 'description': 'A list of movies the specified person has directed',\n 'schema': {\n 'type': 'array',\n 'items': MovieModel,\n }\n }\n }\n })\n def get(self, person_id):\n db = get_db()\n result = db.run(\n '''\n MATCH (actor:Person {id: {person_id}})-[:DIRECTED]->(movie:Movie)\n RETURN DISTINCT movie\n ''', {'person_id': person_id}\n )\n return [serialize_movie(record['movie']) for record in result]\n\n\nclass Person(Resource):\n @swagger.doc({\n 'tags': ['people'],\n 'summary': 'Find person by id',\n 'description': 'Returns a person',\n 'parameters': [\n {\n 'name': 'id',\n 'description': 'person id',\n 'in': 'path',\n 'type': 'integer',\n 'required': True\n }\n ],\n 'responses': {\n '200': {\n 'description': 'A person',\n 'schema': PersonModel,\n },\n '404': {\n 'description': 'person not found'\n },\n }\n })\n def get(self, id):\n db = get_db()\n results = db.run(\n '''\n MATCH (person:Person {id: {id}})\n OPTIONAL MATCH (person)-[:DIRECTED]->(d:Movie)\n OPTIONAL MATCH (person)<-[:PRODUCED]->(p:Movie)\n OPTIONAL MATCH (person)<-[:WRITER_OF]->(w:Movie)\n OPTIONAL MATCH (person)<-[r:ACTED_IN]->(a:Movie)\n OPTIONAL MATCH (person)-->(movies)<-[relatedRole:ACTED_IN]-(relatedPerson)\n RETURN DISTINCT person,\n collect(DISTINCT { name:d.title, id:d.id, poster_image:d.poster_image}) AS directed,\n collect(DISTINCT { name:p.title, id:p.id, poster_image:p.poster_image}) AS produced,\n collect(DISTINCT { name:w.title, id:w.id, poster_image:w.poster_image}) AS wrote,\n collect(DISTINCT{ name:a.title, id:a.id, poster_image:a.poster_image, role:r.role}) AS actedIn,\n collect(DISTINCT{ name:relatedPerson.name, id:relatedPerson.id, poster_image:relatedPerson.poster_image, role:relatedRole.role}) AS related\n ''', {'id': id}\n )\n for record in results:\n return {\n 'id': record['person']['id'],\n 'name': record['person']['name'],\n 'born': record['person']['born'],\n 'poster_image': record['person']['poster_image'],\n 'directed': [\n {\n 'id': movie['id'],\n 'name': movie['name'],\n 'poster_image': movie['poster_image'],\n } for movie in record['directed']\n ],\n 'produced': [\n {\n 'id': movie['id'],\n 'name': movie['name'],\n 'poster_image': movie['poster_image'],\n } for movie in record['produced']\n ],\n 'wrote': [\n {\n 'id': movie['id'],\n 'name': movie['name'],\n 'poster_image': movie['poster_image'],\n } for movie in record['wrote']\n ],\n 'actedIn': [\n {\n 'id': movie['id'],\n 'name': movie['name'],\n 'poster_image': 
movie['poster_image'],\n 'role': movie['role'],\n } for movie in record['actedIn']\n ],\n 'related': [\n {\n 'id': person['id'],\n 'name': person['name'],\n 'poster_image': person['poster_image'],\n 'role': person['role'],\n } for person in record['related']\n ],\n }\n return {'message': 'person not found'}, 404\n\n\nclass PersonList(Resource):\n @swagger.doc({\n 'tags': ['people'],\n 'summary': 'Find all people',\n 'description': 'Returns a list of people',\n 'responses': {\n '200': {\n 'description': 'A list of people',\n 'schema': {\n 'type': 'array',\n 'items': PersonModel,\n }\n }\n }\n })\n def get(self):\n db = get_db()\n results = db.run(\n '''\n MATCH (person:Person) RETURN person\n '''\n )\n return [serialize_person(record['person']) for record in results]\n\n\nclass PersonBacon(Resource):\n @swagger.doc({\n 'tags': ['people'],\n 'summary': 'Find all Bacon paths',\n 'description': 'Returns all bacon paths from person 1 to person 2',\n 'parameters': [\n {\n 'name': 'name1',\n 'description': 'Name of the origin user',\n 'in': 'query',\n 'type': 'string',\n 'required': True,\n },\n {\n 'name': 'name2',\n 'description': 'Name of the target user',\n 'in': 'query',\n 'type': 'string',\n 'required': True,\n }\n ],\n 'responses': {\n '200': {\n 'description': 'A list of people',\n 'schema': {\n 'type': 'array',\n 'items': PersonModel,\n }\n }\n }\n })\n def get(self):\n name1 = request.args['name1']\n name2 = request.args['name2']\n db = get_db()\n results = db.run(\n '''\n MATCH p = shortestPath( (p1:Person {name:{name1} })-[:ACTED_IN*]-(target:Person {name:{name2} }) )\n WITH extract(n in nodes(p)|n) AS coll\n WITH filter(thing in coll where length(thing.name)> 0) AS bacon\n UNWIND(bacon) AS person\n RETURN DISTINCT person\n ''', {'name1': name1, 'name2': name2}\n )\n return [serialize_person(record['person']) for record in results]\n\n\napi.add_resource(ApiDocs, '/docs', '/docs/')\napi.add_resource(GenreList, '/api/v0/genres')\napi.add_resource(Movie, '/api/v0/movies/')\napi.add_resource(MovieList, '/api/v0/movies')\napi.add_resource(MovieListByGenre, '/api/v0/movies/genre//')\napi.add_resource(MovieListByDateRange, '/api/v0/movies/daterange//')\napi.add_resource(MovieListByPersonActedIn, '/api/v0/movies/acted_in_by/')\napi.add_resource(MovieListByWrittenBy, '/api/v0/movies/written_by/')\napi.add_resource(MovieListByDirectedBy, '/api/v0/movies/directed_by/')\napi.add_resource(Person, '/api/v0/people/')\napi.add_resource(PersonList, '/api/v0/people')\napi.add_resource(PersonBacon, '/api/v0/people/bacon')\n","sub_path":"flask-api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"144472603","text":"import random\nfrom random import randint, sample, uniform\nfrom acme import Product\n\nADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\nNOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\n\ndef generate_products(num_products=30):\n \"\"\"Generate random products\"\"\"\n products = []\n for _ in range(num_products):\n adject = sample(ADJECTIVES, 1)\n noun = sample(NOUNS, 1)\n name = adject[0] + ' ' + noun[0]\n price = round(random.uniform(5,100),2)\n weight = round(random.uniform(5,100),2)\n flammability = round(random.uniform(0,2.5),2)\n products.append(Product(name, price, weight, flammability))\n return products\n\ndef inventory_report(products):\n \"\"\"Generate inventory report of products\"\"\"\n names = []\n prices = []\n 
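# Worked example for the report computed below (assumed values): with prices [10.0, 20.0, 30.0] the "Average product price" line prints 20.0, i.e. sum(prices) / len(prices); the weight and flammability averages are computed the same way, e.g.
#
#     inventory_report(generate_products(3))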
weights = []\n flames = []\n for p in products:\n names.append(p.name)\n prices.append(p.price)\n weights.append(p.weight)\n flames.append(p.flammability)\n\n print(\"Number of unique product names in the list is \", len(set(names)))\n print(\"Average product price is \", sum(prices) / len(prices))\n print(\"Average product weight is \", sum(weights) / len(weights))\n print(\"Average product flammability is \", sum(flames) / len(flames))\n\nif __name__ == '__main__':\n inventory_report(generate_products())\n","sub_path":"eng_sprint1/acme_report.py","file_name":"acme_report.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386341362","text":"# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport click\nimport requests\nimport sys\nfrom prettytable import prettytable\n\nfrom src.api.cluster import Cluster\nfrom src.api.pipeline import Pipeline\nfrom src.api.pipeline_run import PipelineRun\nfrom src.api.user import User\nfrom src.config import Config, ConfigNotFoundError\nfrom src.model.pipeline_run_filter_model import DEFAULT_PAGE_SIZE, DEFAULT_PAGE_INDEX\nfrom src.model.pipeline_run_model import PriceType\nfrom src.utilities import date_utilities, time_zone_param_type, state_utilities\nfrom src.utilities.acl_operations import ACLOperations\nfrom src.utilities.datastorage_operations import DataStorageOperations\nfrom src.utilities.metadata_operations import MetadataOperations\nfrom src.utilities.permissions_operations import PermissionsOperations\nfrom src.utilities.pipeline_run_operations import PipelineRunOperations\nfrom src.version import __version__\n\nMAX_INSTANCE_COUNT = 1000\nMAX_CORES_COUNT = 10000\n\n\n@click.group()\n@click.version_option(prog_name='Cloud Pipeline CLI', version=__version__)\ndef cli():\n \"\"\"pipe is command line interface to Bfx Pipeline engine\n It allows run pipelines as well as viewing runs and cluster state\n \"\"\"\n pass\n\n\n@cli.command()\n@click.option('-a', '--auth-token',\n prompt='Authentication token',\n help='Token for API authentication',\n default=None)\n@click.option('-s', '--api',\n prompt='Pipeline engine endpoint',\n help='URL of a Pipeline API endpoint')\n@click.option('-tz', '--timezone',\n prompt='Dates presentation timezone (utc/local)',\n help='Dates presentation timezone (utc/local)',\n type=time_zone_param_type.TIMEZONE,\n default=time_zone_param_type.LOCAL_ZONE)\n@click.option('-p', '--proxy',\n prompt='Proxy address',\n help='URL of a proxy for all calls',\n default='')\ndef configure(auth_token, api, timezone, proxy):\n \"\"\"Configures CLI parameters\n \"\"\"\n Config.store(auth_token, api, timezone, proxy)\n\n\ndef echo_title(title, line=True):\n click.echo(title)\n if line:\n for i in title:\n click.echo('-', nl=False)\n click.echo('')\n\n\n@cli.command(name='view-pipes')\n@click.argument('pipeline', required=False)\n@click.option('-v', '--versions', help='List 
versions of a pipeline', is_flag=True)\n@click.option('-p', '--parameters', help='List parameters of a pipeline', is_flag=True)\n@click.option('-s', '--storage-rules', help='List storage rules of a pipeline', is_flag=True)\n@click.option('-r', '--permissions', help='List user permissions of a pipeline', is_flag=True)\ndef view_pipes(pipeline, versions, parameters, storage_rules, permissions):\n \"\"\"Lists pipelines definitions\n \"\"\"\n\n # If pipeline name or id is specified - list details of a pipeline\n if pipeline:\n view_pipe(pipeline, versions, parameters, storage_rules, permissions)\n # If no argument is specified - list brief details of all pipelines\n else:\n view_all_pipes()\n\n\ndef view_all_pipes():\n pipes_table = prettytable.PrettyTable()\n pipes_table.field_names = [\"ID\", \"Name\", \"Latest version\", \"Created\", \"Source repo\"]\n pipes_table.align = \"r\"\n try:\n pipelines = list(Pipeline.list())\n if len(pipelines) > 0:\n for pipeline_model in pipelines:\n pipes_table.add_row([pipeline_model.identifier,\n pipeline_model.name,\n pipeline_model.current_version_name,\n pipeline_model.created_date,\n pipeline_model.repository])\n click.echo(pipes_table)\n else:\n click.echo('No pipelines are available')\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\ndef view_pipe(pipeline, versions, parameters, storage_rules, permissions):\n try:\n pipeline_model = Pipeline.get(pipeline, storage_rules, versions, parameters)\n pipe_table = prettytable.PrettyTable()\n pipe_table.field_names = [\"key\", \"value\"]\n pipe_table.align = \"l\"\n pipe_table.set_style(12)\n pipe_table.header = False\n pipe_table.add_row(['ID:', pipeline_model.identifier])\n pipe_table.add_row(['Name:', pipeline_model.name])\n pipe_table.add_row(['Latest version:', pipeline_model.current_version_name])\n pipe_table.add_row(['Created:', pipeline_model.created_date])\n pipe_table.add_row(['Source repo:', pipeline_model.repository])\n pipe_table.add_row(['Description:', pipeline_model.description])\n click.echo(pipe_table)\n click.echo()\n\n if parameters and pipeline_model.current_version is not None and pipeline_model.current_version.run_parameters is not None:\n echo_title('Parameters:', line=False)\n if len(pipeline_model.current_version.run_parameters.parameters) > 0:\n parameters_table = prettytable.PrettyTable()\n parameters_table.field_names = [\"Name\", \"Type\", \"Mandatory\", \"Default value\"]\n parameters_table.align = \"l\"\n for parameter in pipeline_model.current_version.run_parameters.parameters:\n parameters_table.add_row(\n [parameter.name, parameter.parameter_type, parameter.required, parameter.value])\n click.echo(parameters_table)\n click.echo()\n else:\n click.echo('No parameters are available for current version')\n\n if versions:\n echo_title('Versions:', line=False)\n if len(pipeline_model.versions) > 0:\n versions_table = prettytable.PrettyTable()\n versions_table.field_names = [\"Name\", \"Created\", \"Draft\"]\n versions_table.align = \"r\"\n for version_model in pipeline_model.versions:\n versions_table.add_row([version_model.name, version_model.created_date, version_model.draft])\n 
click.echo(versions_table)\n click.echo()\n else:\n click.echo('No versions are configured for pipeline')\n\n if storage_rules:\n echo_title('Storage rules', line=False)\n if len(pipeline_model.storage_rules) > 0:\n storage_rules_table = prettytable.PrettyTable()\n storage_rules_table.field_names = [\"File mask\", \"Created\", \"Move to STS\"]\n storage_rules_table.align = \"r\"\n for rule in pipeline_model.storage_rules:\n storage_rules_table.add_row([rule.file_mask, rule.created_date, rule.move_to_sts])\n click.echo(storage_rules_table)\n click.echo()\n else:\n click.echo('No storage rules are configured for pipeline')\n\n if permissions:\n permissions_list = User.get_permissions(pipeline_model.identifier, 'pipeline')\n echo_title('Permissions', line=False)\n if len(permissions_list) > 0:\n permissions_table = prettytable.PrettyTable()\n permissions_table.field_names = [\"SID\", \"Principal\", \"Allow\", \"Deny\"]\n permissions_table.align = \"r\"\n for permission in permissions_list:\n permissions_table.add_row([permission.name,\n permission.principal,\n permission.get_allowed_permissions_description(),\n permission.get_denied_permissions_description()])\n click.echo(permissions_table)\n click.echo()\n else:\n click.echo('No user permissions are configured for pipeline')\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except RuntimeError as error:\n click.echo(str(error), err=True)\n except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\n@cli.command(name='view-runs')\n@click.argument('run-id', required=False, type=int)\n@click.option('-s', '--status', help='List pipelines with a specific status [ANY/FAILURE/SUCCESS/STOPPED/RUNNING]')\n@click.option('-df', '--date-from', help='List pipeline runs started after date')\n@click.option('-dt', '--date-to', help='List pipeline runs started before date')\n@click.option('-p', '--pipeline', help='List runs for a specific pipeline type@version')\n@click.option('-pid', '--parent-id', help='List runs for a specific parent pipeline run', type=int)\n@click.option('-f', '--find', help='Search runs with a specific substring in a run parameters')\n@click.option('-t', '--top', help='Display top N records', type=int)\n@click.option('-nd', '--node-details', help='Display node details', is_flag=True)\n@click.option('-pd', '--parameters-details', help='Display parameters', is_flag=True)\n@click.option('-td', '--tasks-details', help='Display tasks', is_flag=True)\ndef view_runs(run_id,\n status,\n date_from,\n date_to,\n pipeline,\n parent_id,\n find,\n top,\n node_details,\n parameters_details,\n tasks_details):\n \"\"\"Lists pipelines runs\n \"\"\"\n # If a run id is specified - list details of a run\n if run_id:\n view_run(run_id, node_details, parameters_details, tasks_details)\n # If no argument is specified - list runs according to options\n else:\n view_all_runs(status, date_from, date_to, pipeline, parent_id, find, top)\n\n\ndef view_all_runs(status, date_from, date_to, pipeline, parent_id, find, top):\n runs_table = prettytable.PrettyTable()\n runs_table.field_names = [\"RunID\", \"Parent RunID\", \"Pipeline\", \"Version\", \"Status\", \"Started\"]\n runs_table.align = \"r\"\n try:\n statuses = []\n if status is not None:\n if status.upper() != 'ANY':\n for status_value in status.split(','):\n 
statuses.append(status_value.upper())\n else:\n statuses.append('RUNNING')\n pipeline_id = None\n pipeline_version_name = None\n if pipeline is not None:\n pipeline_name_parts = pipeline.split('@')\n pipeline_model = Pipeline.get(pipeline_name_parts[0])\n pipeline_id = pipeline_model.identifier\n pipeline_version_name = pipeline_model.current_version_name\n if len(pipeline_name_parts) > 1:\n pipeline_version_name = pipeline_name_parts[1]\n page = DEFAULT_PAGE_INDEX\n page_size = DEFAULT_PAGE_SIZE\n if top is not None:\n page = 1\n page_size = top\n run_filter = PipelineRun.list(page=page,\n page_size=page_size,\n statuses=statuses,\n date_from=date_utilities.parse_date_parameter(date_from),\n date_to=date_utilities.parse_date_parameter(date_to),\n pipeline_id=pipeline_id,\n version=pipeline_version_name,\n parent_id=parent_id,\n custom_filter=find)\n if run_filter.total_count == 0:\n click.echo('No data is available for the request')\n else:\n if run_filter.total_count > run_filter.page_size:\n click.echo('Showing {} results from {}:'.format(run_filter.page_size, run_filter.total_count))\n for run_model in run_filter.elements:\n runs_table.add_row([run_model.identifier,\n run_model.parent_id,\n run_model.pipeline,\n run_model.version,\n state_utilities.color_state(run_model.status),\n run_model.scheduled_date])\n click.echo(runs_table)\n click.echo()\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\ndef view_run(run_id, node_details, parameters_details, tasks_details):\n try:\n run_model = PipelineRun.get(run_id)\n if not run_model.pipeline and run_model.pipeline_id is not None:\n pipeline_model = Pipeline.get(run_model.pipeline_id)\n if pipeline_model is not None:\n run_model.pipeline = pipeline_model.name\n run_model_price = PipelineRun.get_estimated_price(run_id)\n run_main_info_table = prettytable.PrettyTable()\n run_main_info_table.field_names = [\"key\", \"value\"]\n run_main_info_table.align = \"l\"\n run_main_info_table.set_style(12)\n run_main_info_table.header = False\n run_main_info_table.add_row(['ID:', run_model.identifier])\n run_main_info_table.add_row(['Pipeline:', run_model.pipeline])\n run_main_info_table.add_row(['Version:', run_model.version])\n if run_model.owner is not None:\n run_main_info_table.add_row(['Owner:', run_model.owner])\n if run_model.endpoints is not None and len(run_model.endpoints) > 0:\n endpoint_index = 0\n for endpoint in run_model.endpoints:\n if endpoint_index == 0:\n run_main_info_table.add_row(['Endpoints:', endpoint])\n else:\n run_main_info_table.add_row(['', endpoint])\n endpoint_index = endpoint_index + 1\n if not run_model.scheduled_date:\n run_main_info_table.add_row(['Scheduled', 'N/A'])\n else:\n run_main_info_table.add_row(['Scheduled:', run_model.scheduled_date])\n if not run_model.start_date:\n run_main_info_table.add_row(['Started', 'N/A'])\n else:\n run_main_info_table.add_row(['Started:', run_model.start_date])\n if not run_model.end_date:\n run_main_info_table.add_row(['Completed', 'N/A'])\n else:\n run_main_info_table.add_row(['Completed:', run_model.end_date])\n run_main_info_table.add_row(['Status:', 
state_utilities.color_state(run_model.status)])\n run_main_info_table.add_row(['ParentID:', run_model.parent_id])\n if run_model_price.total_price > 0:\n run_main_info_table.add_row(['Estimated price:', '{} $'.format(round(run_model_price.total_price, 2))])\n else:\n run_main_info_table.add_row(['Estimated price:', 'N/A'])\n click.echo(run_main_info_table)\n click.echo()\n\n if node_details:\n\n node_details_table = prettytable.PrettyTable()\n node_details_table.field_names = [\"key\", \"value\"]\n node_details_table.align = \"l\"\n node_details_table.set_style(12)\n node_details_table.header = False\n\n for key, value in run_model.instance:\n if key == PriceType.SPOT:\n node_details_table.add_row(['price-type', PriceType.SPOT if value else PriceType.ON_DEMAND])\n else:\n node_details_table.add_row([key, value])\n echo_title('Node details:')\n click.echo(node_details_table)\n click.echo()\n\n if parameters_details:\n echo_title('Parameters:')\n if len(run_model.parameters) > 0:\n for parameter in run_model.parameters:\n click.echo('{}={}'.format(parameter.name, parameter.value))\n else:\n click.echo('No parameters are configured')\n click.echo()\n\n if tasks_details:\n echo_title('Tasks:', line=False)\n if len(run_model.tasks) > 0:\n tasks_table = prettytable.PrettyTable()\n tasks_table.field_names = ['Task', 'State', 'Scheduled', 'Started', 'Finished']\n tasks_table.align = \"r\"\n for task in run_model.tasks:\n scheduled = 'N/A'\n started = 'N/A'\n finished = 'N/A'\n if task.created is not None:\n scheduled = task.created\n if task.started is not None:\n started = task.started\n if task.finished is not None:\n finished = task.finished\n tasks_table.add_row(\n [task.name, state_utilities.color_state(task.status), scheduled, started, finished])\n click.echo(tasks_table)\n else:\n click.echo('No tasks are available for run')\n click.echo()\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\n@cli.command(name='view-cluster')\n@click.argument('node-name', required=False)\ndef view_cluster(node_name):\n \"\"\"Lists cluster nodes\n \"\"\"\n # If a node id is specified - list details of a node\n if node_name:\n view_cluster_for_node(node_name)\n # If no argument is specified - list all nodes\n else:\n view_all_cluster()\n\n\ndef view_all_cluster():\n nodes_table = prettytable.PrettyTable()\n nodes_table.field_names = [\"Name\", \"Pipeline\", \"Run\", \"Addresses\", \"Created\"]\n nodes_table.align = \"l\"\n try:\n nodes = list(Cluster.list())\n if len(nodes) > 0:\n for node_model in nodes:\n info_lines = []\n is_first_line = True\n pipeline_name = None\n run_id = None\n if node_model.run is not None:\n pipeline_name = node_model.run.pipeline\n run_id = node_model.run.identifier\n for address in node_model.addresses:\n if is_first_line:\n info_lines.append([node_model.name, pipeline_name, run_id, address, node_model.created])\n else:\n info_lines.append(['', '', '', address, ''])\n is_first_line = False\n if len(info_lines) == 0:\n info_lines.append([node_model.name, pipeline_name, run_id, None, node_model.created])\n for line in info_lines:\n nodes_table.add_row(line)\n nodes_table.add_row(['', '', '', '', ''])\n 
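# Hypothetical invocations of the viewing commands defined above (the run id and node name are placeholder values; the flags are the click options declared on view-runs):
#
#     pipe view-runs 1234 -nd -pd -td
#     pipe view-cluster my-node-name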
click.echo(nodes_table)\n else:\n click.echo('No data is available for the request')\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\ndef view_cluster_for_node(node_name):\n try:\n node_model = Cluster.get(node_name)\n node_main_info_table = prettytable.PrettyTable()\n node_main_info_table.field_names = [\"key\", \"value\"]\n node_main_info_table.align = \"l\"\n node_main_info_table.set_style(12)\n node_main_info_table.header = False\n node_main_info_table.add_row(['Name:', node_model.name])\n\n pipeline_name = None\n if node_model.run is not None:\n pipeline_name = node_model.run.pipeline\n\n node_main_info_table.add_row(['Pipeline:', pipeline_name])\n\n addresses_string = ''\n for address in node_model.addresses:\n addresses_string += address + '; '\n\n node_main_info_table.add_row(['Addresses:', addresses_string])\n node_main_info_table.add_row(['Created:', node_model.created])\n click.echo(node_main_info_table)\n click.echo()\n\n if node_model.system_info is not None:\n table = prettytable.PrettyTable()\n table.field_names = [\"key\", \"value\"]\n table.align = \"l\"\n table.set_style(12)\n table.header = False\n for key, value in node_model.system_info:\n table.add_row([key, value])\n echo_title('System info:')\n click.echo(table)\n click.echo()\n\n if node_model.labels is not None:\n table = prettytable.PrettyTable()\n table.field_names = [\"key\", \"value\"]\n table.align = \"l\"\n table.set_style(12)\n table.header = False\n for key, value in node_model.labels:\n if key.lower() == 'node-role.kubernetes.io/master':\n table.add_row([key, click.style(value, fg='blue')])\n elif key.lower() == 'kubeadm.alpha.kubernetes.io/role' and value.lower() == 'master':\n table.add_row([key, click.style(value, fg='blue')])\n elif key.lower() == 'cloud-pipeline/role' and value.lower() == 'edge':\n table.add_row([key, click.style(value, fg='blue')])\n elif key.lower() == 'runid':\n table.add_row([key, click.style(value, fg='green')])\n else:\n table.add_row([key, value])\n echo_title('Labels:')\n click.echo(table)\n click.echo()\n\n if node_model.allocatable is not None or node_model.capacity is not None:\n ac_table = prettytable.PrettyTable()\n ac_table.field_names = [\"\", \"Allocatable\", \"Capacity\"]\n ac_table.align = \"l\"\n keys = []\n for key in node_model.allocatable.keys():\n if key not in keys:\n keys.append(key)\n for key in node_model.capacity.keys():\n if key not in keys:\n keys.append(key)\n for key in keys:\n ac_table.add_row([key, node_model.allocatable.get(key, ''), node_model.capacity.get(key, '')])\n click.echo(ac_table)\n click.echo()\n\n if len(node_model.pods) > 0:\n echo_title(\"Jobs:\", line=False)\n if len(node_model.pods) > 0:\n pods_table = prettytable.PrettyTable()\n pods_table.field_names = [\"Name\", \"Namespace\", \"Status\"]\n pods_table.align = \"l\"\n for pod in node_model.pods:\n pods_table.add_row([pod.name, pod.namespace, state_utilities.color_state(pod.phase)])\n click.echo(pods_table)\n else:\n click.echo('No jobs are available')\n click.echo()\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n except 
requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\n@cli.command(name='run', context_settings=dict(ignore_unknown_options=True))\n@click.option('-n', '--pipeline', required=False)\n@click.option('-c', '--config', required=False, type=str, help='Pipeline configuration name')\n@click.argument('run-params', nargs=-1, type=click.UNPROCESSED)\n@click.option('-p', '--parameters', help='List parameters of a pipeline', is_flag=True)\n@click.option('-y', '--yes', is_flag=True, help='Do not ask confirmation')\n@click.option('-id', '--instance-disk', help='Instance disk size', type=int)\n@click.option('-it', '--instance-type', help='Instance disk type', type=str)\n@click.option('-di', '--docker-image', help='Docker image', type=str)\n@click.option('-cmd', '--cmd-template', help='Command template', type=str)\n@click.option('-t', '--timeout', help='Timeout, when elapsed - run will be stopped', type=int)\n@click.option('-q', '--quiet', help='Quiet mode', is_flag=True)\n@click.option('-ic', '--instance-count', help='Number of instances to launch',\n type=click.IntRange(1, MAX_INSTANCE_COUNT, clamp=True), required=False)\n@click.option('-nc', '--cores', help='Number cores that a cluster shall contain',\n type=click.IntRange(2, MAX_CORES_COUNT, clamp=True), required=False)\n@click.option('-s', '--sync', is_flag=True, help='Allow to be run in a sync mode.')\n@click.option('-pt', '--price-type', help='Instance price type',\n type=click.Choice([PriceType.SPOT, PriceType.ON_DEMAND]), required=False)\n@click.option('-r', '--region-id', help='Instance cloud region', type=int, required=False)\n@click.option('-pn', '--parent-node', help='Parent instance id', type=int, required=False)\ndef run(pipeline,\n config,\n parameters,\n yes,\n run_params,\n instance_disk,\n instance_type,\n docker_image,\n cmd_template,\n timeout,\n quiet,\n instance_count,\n cores,\n sync,\n price_type,\n region_id,\n parent_node):\n \"\"\"Schedules a pipeline/version execution\n \"\"\"\n PipelineRunOperations.run(pipeline, config, parameters, yes, run_params, instance_disk, instance_type,\n docker_image, cmd_template, timeout, quiet, instance_count, cores, sync, price_type,\n region_id, parent_node)\n\n\n@cli.command(name='stop')\n@click.argument('run-id', required=True, type=int)\n@click.option('-y', '--yes', is_flag=True, help='Do not ask confirmation')\ndef stop(run_id, yes):\n \"\"\"Stops a running pipeline\n \"\"\"\n PipelineRunOperations.stop(run_id, yes)\n\n\n@cli.command(name='terminate-node')\n@click.argument('node-name', required=True, type=str)\n@click.option('-y', '--yes', is_flag=True, help='Do not ask confirmation')\ndef terminate_node(node_name, yes):\n \"\"\"Terminates calculation node\n \"\"\"\n terminate_node_calculation(node_name, yes)\n\n\ndef terminate_node_calculation(node_name, yes):\n if not yes:\n click.confirm('Are you sure you want to terminate node {}?'.format(node_name), abort=True)\n try:\n node_model = Cluster.get(node_name)\n if node_model.is_master:\n click.echo('Error: cannot terminate master node {}'.format(node_name), err=True)\n else:\n Cluster.terminate_node(node_name)\n click.echo('Node {} was terminated'.format(node_name))\n except ConfigNotFoundError as config_not_found_error:\n click.echo(str(config_not_found_error), err=True)\n 
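# Hypothetical invocations of the run/stop/terminate-node commands defined above (the pipeline name, run id, node name and instance settings are placeholder values; the flags are the ones declared in the click options):
#
#     pipe run -n my-pipeline -c default -it m5.large -id 50 -y
#     pipe stop 1234 -y
#     pipe terminate-node my-node-name -y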
except requests.exceptions.RequestException as http_error:\n click.echo('Http error: {}'.format(str(http_error)), err=True)\n except RuntimeError as runtime_error:\n click.echo('Error: {}'.format(str(runtime_error)), err=True)\n except ValueError as value_error:\n click.echo('Error: {}'.format(str(value_error)), err=True)\n\n\n@cli.group()\ndef storage():\n \"\"\"Storage operations\n \"\"\"\n pass\n\n\n@storage.command(name='create')\n@click.option('-n', '--name', required=True,\n help='Name of the new storage', prompt='Name of the new storage',)\n@click.option('-d', '--description', default='', show_default=False,\n prompt='Write down some description of this datastorage',\n help='Description of the datastorage')\n@click.option('-sts', '--short_term_storage', default='', show_default=False,\n prompt='How many days data in this bucket will be stored in the short term storage?',\n help='Number of days for storing data in the short term storage')\n@click.option('-lts', '--long_term_storage', default='', show_default=False,\n prompt='How many days data in this bucket will be stored in the long term storage?',\n help='Number of days for storing data in the long term storage')\n@click.option('-v', '--versioning', default=False, show_default=False, is_flag=True,\n help='Enable versioning for this datastorage')\n@click.option('-b', '--backup_duration', default='', show_default=False,\n prompt='How many days backups of the bucket will be stored?',\n help='Number of days for storing backups of the bucket')\n@click.option('-t', '--type', default='S3',\n prompt='Type of the cloud for datastorage',\n help='type of the cloud for datastorage')\n@click.option('-f', '--parent_folder', default='', show_default=False,\n prompt='Name of the folder which will contain this datastorage, nothing for root of the hierarchy',\n help='Name of the folder which will contain this datastorage')\n@click.option('-c', '--on_cloud',\n prompt='Do you want to create this storage on a cloud?',\n help='Create bucket on a cloud', default=False, is_flag=True)\n@click.option('-p', '--path', default='', help='The name of the new bucket.',\n prompt='The name of the new bucket.')\n@click.option('-r', '--region_id', default='default', help='Cloud region id where storage shall be created. ',\n prompt='Cloud region id where storage shall be created.')\ndef create(name, description, short_term_storage, long_term_storage, versioning, backup_duration, type,\n parent_folder, on_cloud, path, region_id):\n \"\"\"Creates a new datastorage\n \"\"\"\n DataStorageOperations.save_data_storage(name, description, short_term_storage, long_term_storage, versioning,\n backup_duration, type, parent_folder, on_cloud, path, region_id)\n\n\n@storage.command(name='delete')\n@click.option('-n', '--name', required=True, help='Name of the storage to delete')\n@click.option('-c', '--on_cloud', help='Delete bucket on a cloud', is_flag=True)\n@click.option('-y', '--yes', is_flag=True, help='Do not ask confirmation')\ndef delete(name, on_cloud, yes):\n \"\"\"Deletes a datastorage\n \"\"\"\n DataStorageOperations.delete(name, on_cloud, yes)\n\n\n@storage.command(name='policy')\n@click.option('-n', '--name', required=True, help='Name of the storage to update the policy of')\n@click.option('-sts', '--short_term_storage', default='', show_default=False,\n prompt='How many days data in this bucket will be stored in the short term storage? 
(Empty means deletion of the current rule)',\n help='Number of days for storing data in the short term storage')\n@click.option('-lts', '--long_term_storage', default='', show_default=False,\n prompt='How many days data in this bucket will be stored in the long term storage? (Empty means for deletion of the current rule)',\n help='Number of days for storing data in the long term storage')\n@click.option('-v', '--versioning', default=False, show_default=False, is_flag=True,\n prompt='Do you want to enable versioning for this datastorage?',\n help='Enable versioning for this datastorage')\n@click.option('-b', '--backup_duration', default='', help='Number of days for storing backups of the bucket')\ndef update_policy(name, short_term_storage, long_term_storage, versioning, backup_duration):\n \"\"\"Update the policy of the given datastorage\n \"\"\"\n if not backup_duration and versioning:\n backup_duration = click.prompt(\n \"How many days backups of the bucket will be stored? (Empty means deletion of the current rule)\",\n default=\"\")\n DataStorageOperations.policy(name, short_term_storage, long_term_storage, backup_duration, versioning)\n\n\n@storage.command(name='mvtodir')\n@click.argument('name', required=True)\n@click.argument('directory', required=True)\ndef mvtodir(name, directory):\n \"\"\"Moves a datastorage to a new parent folder\n \"\"\"\n DataStorageOperations.mvtodir(name, directory)\n\n\n@storage.command(name='ls')\n@click.argument('path', required=False)\n@click.option('-l', '--show_details', is_flag=True, help='Show details')\n@click.option('-v', '--show_versions', is_flag=True, help='Show object versions')\n@click.option('-r', '--recursive', is_flag=True, help='Recursive listing')\n@click.option('-p', '--page', type=int, help='Maximum number of records to show')\n@click.option('-a', '--all', is_flag=True, help='Show all results at once ignoring page settings')\ndef storage_list(path, show_details, show_versions, recursive, page, all):\n \"\"\"Lists storage contents\n \"\"\"\n DataStorageOperations.storage_list(path, show_details, show_versions, recursive, page, all)\n\n\n@storage.command(name='mkdir')\n@click.argument('folders', required=True, nargs=-1)\ndef storage_mk_dir(folders):\n \"\"\" Creates a directory in a datastorage\n \"\"\"\n DataStorageOperations.storage_mk_dir(folders)\n\n\n@storage.command('rm')\n@click.argument('path', required=True)\n@click.option('-y', '--yes', is_flag=True, help='Do not ask confirmation')\n@click.option('-v', '--version', required=False, help='Delete a specified version of object')\n@click.option('-d', '--hard-delete', is_flag=True, help='Completely delete a path form bucket')\n@click.option('-r', '--recursive', is_flag=True, help='Recursive deletion (required for deleting folders)')\n@click.option('-e', '--exclude', required=False, multiple=True,\n help='Exclude all files matching this pattern from processing')\n@click.option('-i', '--include', required=False, multiple=True,\n help='Include only files matching this pattern into processing')\ndef storage_remove_item(path, yes, version, hard_delete, recursive, exclude, include):\n \"\"\" Removes file or folder from a datastorage\n \"\"\"\n DataStorageOperations.storage_remove_item(path, yes, version, hard_delete, recursive, exclude, include)\n\n\n@storage.command('mv')\n@click.argument('source', required=True)\n@click.argument('destination', required=True)\n@click.option('-r', '--recursive', is_flag=True, help='Recursive source scan')\n@click.option('-f', '--force', is_flag=True, 
help='Rewrite files in destination')\n@click.option('-e', '--exclude', required=False, multiple=True,\n help='Exclude all files matching this pattern from processing')\n@click.option('-i', '--include', required=False, multiple=True,\n help='Include only files matching this pattern into processing')\n@click.option('-q', '--quiet', is_flag=True, help='Quiet mode')\n@click.option('-s', '--skip-existing', is_flag=True, help='Skip files existing in destination, if they have '\n 'size matching source')\n@click.option('-t', '--tags', required=False, multiple=True, help=\"Set object tags during copy. Tags can be specified \"\n \"as single KEY=VALUE pair or a list of them. \"\n \"If --tags option specified all existent tags will \"\n \"be overwritten.\")\n@click.option('-l', '--file-list', required=False, help=\"Path to file with file paths that should be copied. This file \"\n \"should be tub delimited and consist of two columns: \"\n \"relative path to file and size.\")\ndef storage_move_item(source, destination, recursive, force, exclude, include, quiet, skip_existing, tags, file_list):\n \"\"\" Moves file or folder from one datastorage to another one\n or between local filesystem and a datastorage (in both directions)\n \"\"\"\n DataStorageOperations.cp(source, destination, recursive, force, exclude, include, quiet, tags, file_list,\n clean=True, skip_existing=skip_existing)\n\n\n@storage.command('cp')\n@click.argument('source', required=True)\n@click.argument('destination', required=True)\n@click.option('-r', '--recursive', is_flag=True, help='Recursive source scan')\n@click.option('-f', '--force', is_flag=True, help='Rewrite files in destination')\n@click.option('-e', '--exclude', required=False, multiple=True,\n help='Exclude all files matching this pattern from processing')\n@click.option('-i', '--include', required=False, multiple=True,\n help='Include only files matching this pattern into processing')\n@click.option('-q', '--quiet', is_flag=True, help='Quiet mode')\n@click.option('-s', '--skip-existing', is_flag=True, help='Skip files existing in destination, if they have '\n 'size matching source')\n@click.option('-t', '--tags', required=False, multiple=True, help=\"Set object tags during copy. Tags can be specified \"\n \"as single KEY=VALUE pair or a list of them. \"\n \"If --tags option specified all existent tags will \"\n \"be overwritten.\")\n@click.option('-l', '--file-list', required=False, help=\"Path to file with file paths that should be copied. 
This file \"\n \"should be tub delimited and consist of two columns: \"\n \"relative path to file and size.\")\ndef storage_copy_item(source, destination, recursive, force, exclude, include, quiet, skip_existing, tags, file_list):\n \"\"\" Copies files from one datastorage to another one\n or between local filesystem and a datastorage (in both directions)\n \"\"\"\n DataStorageOperations.cp(source, destination, recursive, force,\n exclude, include, quiet, tags, file_list, skip_existing=skip_existing)\n\n\n@storage.command('restore')\n@click.argument('path', required=True)\n@click.option('-v', '--version', required=False, help='Restore specified version')\ndef storage_restore_item(path, version):\n \"\"\" Restores file version in a datastorage.\n If version is not specified it will try to restore the latest non deleted version.\n Otherwise a specified version will be restored.\n \"\"\"\n DataStorageOperations.restore(path, version)\n\n\n@storage.command('set-object-tags')\n@click.argument('path', required=True)\n@click.argument('tags', required=True, nargs=-1)\n@click.option('-v', '--version', required=False, help='Set tags to specified version')\ndef storage_set_object_tags(path, tags, version):\n \"\"\" Sets tags for a specified object\n - path - full path to an object in data storage starting with 'cp://' scheme\n - tags - specified as single KEY=VALUE pair or a list of them\n - If a specific tag key already exists for an object - it will be overwritten\n \"\"\"\n DataStorageOperations.set_object_tags(path, tags, version)\n\n\n@storage.command('get-object-tags')\n@click.argument('path', required=True)\n@click.option('-v', '--version', required=False, help='Set tags to specified version')\ndef storage_get_object_tags(path, version):\n \"\"\" Gets tags for a specified object\n - path - full path to an object in data storage starting with 'cp://' scheme\n \"\"\"\n DataStorageOperations.get_object_tags(path, version)\n\n\n@storage.command('delete-object-tags')\n@click.argument('path', required=True)\n@click.argument('tags', required=True, nargs=-1)\n@click.option('-v', '--version', required=False, help='Set tags to specified version')\ndef storage_delete_object_tags(path, tags, version):\n \"\"\" Sets tags for a specified object\n - path - full path to an object in data storage starting with 'cp://' scheme\n - tags - list of tags to delete\n \"\"\"\n DataStorageOperations.delete_object_tags(path, tags, version)\n\n\n@cli.command(name='view-acl')\n@click.argument('identifier', required=False)\n@click.option(\n '-t', '--object-type',\n help='Object type',\n required=True,\n type=click.Choice(['pipeline', 'folder', 'data_storage'])\n)\ndef view_acl(identifier, object_type):\n \"\"\" View object permissions\n \"\"\"\n ACLOperations.view_acl(identifier, object_type)\n\n\n@cli.command(name='set-acl')\n@click.argument('identifier', required=False)\n@click.option(\n '-t', '--object-type',\n help='Object type',\n required=True,\n type=click.Choice(['pipeline', 'folder', 'data_storage'])\n)\n@click.option('-s', '--sid', help='User or group name', required=True)\n@click.option('-g', '--group', help='Group', is_flag=True)\n@click.option('-a', '--allow', help='Allow permissions')\n@click.option('-d', '--deny', help='Deny permissions')\n@click.option('-i', '--inherit', help='Inherit permissions')\ndef set_acl(identifier, object_type, sid, group, allow, deny, inherit):\n \"\"\" Set object permissions\n \"\"\"\n ACLOperations.set_acl(identifier, object_type, sid, group, allow, deny, 
inherit)\n\n\n@cli.group()\ndef tag():\n \"\"\" Operations with tags\n \"\"\"\n pass\n\n\n@tag.command(name='set')\n@click.argument('entity_class', required=True)\n@click.argument('entity_id', required=True)\n@click.argument('data', required=True, nargs=-1)\ndef set_tag(entity_class, entity_id, data):\n \"\"\" Sets tags for a specified object\n - class - define: Folder, Pipeline, Storage, Registry, Tool, etc.\n - identifier - define name or id of an object of a specified class\n - Tags can be specified as single KEY=VALUE pair or a list of them\n - If a specific tag key already exists for an object - it will be overwritten\n \"\"\"\n MetadataOperations.set_metadata(entity_class, entity_id, data)\n\n\n@tag.command(name='get')\n@click.argument('entity_class', required=True)\n@click.argument('entity_id', required=True)\ndef get_tag(entity_class, entity_id):\n \"\"\" Lists all tags for a specific object or list of objects. Two parameters shall be specified:\n - class - define: Folder, Pipeline, Storage, Registry, Tool, etc.\n - identifier - define name or id of an object of a specified class\n \"\"\"\n MetadataOperations.get_metadata(entity_class, entity_id)\n\n\n@tag.command(name='delete')\n@click.argument('entity_class', required=True)\n@click.argument('entity_id', required=True)\n@click.argument('keys', required=False, nargs=-1)\ndef delete_tag(entity_class, entity_id, keys):\n \"\"\" Deletes specified tags for a specified object\n - Tags can be specified as single KEY=VALUE pair or a list of them\n - If a specific tag key already exists for an object - it will be overwritten\n \"\"\"\n MetadataOperations.delete_metadata(entity_class, entity_id, keys)\n\n\n@cli.command(name='chown')\n@click.argument('user_name', required=True)\n@click.argument('entity_class', required=True)\n@click.argument('entity_name', required=True)\ndef chown(user_name, entity_class, entity_name):\n \"\"\"\n Changes current owner to specified.\n - user_name: desired object owner\n - entity_class: name of the object class. 
Possible values: PIPELINE, FOLDER, DATA_STORAGE, DOCKER_REGISTRY, TOOL,\n TOOL_GROUP, CONFIGURATION, METADATA_ENTITY\n - entity_name: name or id of the object\n \"\"\"\n PermissionsOperations.chown(user_name, entity_class, entity_name)\n\n# Used to run a PyInstaller \"freezed\" version\nif getattr(sys, 'frozen', False):\n cli(sys.argv[1:])","sub_path":"pipe-cli/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":44960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363868406","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='OfflineOrder',\n fields=[\n ('number', models.AutoField(serialize=False, verbose_name=b'Order Number', primary_key=True)),\n ('customer_name', models.CharField(default=b'', max_length=200, verbose_name=b'Customer name')),\n ('shipping_address', models.CharField(default=b'', max_length=1000, verbose_name=b'Shipping address', blank=True)),\n ('shipping_method', models.CharField(default=b'', max_length=1000, verbose_name=b'Shipping method', blank=True)),\n ('shipping_date', models.DateTimeField(default=datetime.datetime.now, blank=True)),\n ('delivery_status', models.CharField(default=b'notyet', max_length=30, verbose_name=b'Delivery status', blank=True, choices=[(b'delivered', b'Delivered'), (b'ontheway', b'On the way'), (b'notyet', b'Not yet delivered')])),\n ('customer_email', models.EmailField(default=b'', max_length=250, verbose_name=b'Customer Email', blank=True)),\n ('date_placed', models.DateTimeField(default=datetime.datetime.now, blank=True)),\n ('price_inc_tax', models.FloatField(default=0, verbose_name=b'Price (Inc. Tax)', blank=True)),\n ('shipping_charge_inc_tax', models.FloatField(default=0, verbose_name=b'Shipping charge (Inc. Tax)', blank=True)),\n ('item_description', models.CharField(default=b'', max_length=1000, verbose_name=b'Items')),\n ],\n options={\n 'verbose_name': 'Offline Order',\n 'verbose_name_plural': 'Offline Orders',\n },\n ),\n ]\n","sub_path":"offlineorder/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"122385375","text":"import os\nPWD = os.getcwd()\nFRAMES = os.path.join(PWD, 'frames')\nLABELS = os.path.join(PWD, 'labels')\nLABELS2 = os.path.join(PWD, 'labels2')\nrem_ids = []\nwith open(LABELS, 'r') as lfile:\n with open(LABELS2, 'w') as lfile2:\n for count, l in enumerate(lfile):\n frame_id = l.split(',')[0]\n if os.path.exists(os.path.join(FRAMES, frame_id + '.jpg')):\n lfile2.write(l)\n if count % 5 == 0: print('{} ... 
ok'.format(frame_id))\n else:\n rem_ids.append(frame_id)\n\nos.remove(LABELS)\nos.rename(LABELS2, LABELS)\nprint('Dataset:\\n'+ '-'*8)\nprint('frames: {}'.format(count))\nprint('inconsistent frames: {}'.format(len(rem_ids)))\nif rem_ids:\n print('Inconsistent labels were purged.')\n","sub_path":"label_check.py","file_name":"label_check.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568915251","text":"__author__ = 'Dazdingo'\r\n\r\nimport dao.time_tables_dao as time_tables_dao\r\nimport dao.players_dao as players_dao\r\nimport dao.single_game_songs_dao as single_game_songs_dao\r\nfrom util import common_util\r\n\r\n\r\ndef get_time_table(player_id):\r\n time_table = time_tables_dao.find_by_id(player_id)\r\n if time_table is None:\r\n time_table = time_tables_dao.new_item(player_id)\r\n return time_table._data\r\n\r\n\r\ndef mark_time_table(player_id, time_used):\r\n time_table = time_tables_dao.find_by_id(player_id)\r\n player = players_dao.find_by_id(player_id)\r\n if time_table is None:\r\n time_table = time_tables_dao.new_item(player_id)\r\n time_table[player['round']] = time_used\r\n time_tables_dao.update_item(time_table)\r\n\r\n\r\ndef get_single_game_songs():\r\n songs_collection = single_game_songs_dao.find_all()\r\n songs = list()\r\n for item in songs_collection:\r\n item['_id'] = str(item['_id'])\r\n item['player'] = common_util.item2json(players_dao.find_by_id(item['player_id']))\r\n songs.append(item._data)\r\n return songs\r\n\r\n\r\ndef single_update_round(player_id):\r\n player = players_dao.find_by_id(player_id)\r\n player['round'] += 1\r\n players_dao.update_item(player)\r\n pass\r\n","sub_path":"server/service/single_game_service.py","file_name":"single_game_service.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"219072452","text":"\"\"\"\nGiven two integers dividend and divisor, divide two integers without using multiplication, division, and mod operator.\n\nThe integer division should truncate toward zero, which means losing its fractional part. For example, 8.345 would be truncated to 8, and -2.7335 would be truncated to -2.\n\nReturn the quotient after dividing dividend by divisor.\n\nNote: Assume we are dealing with an environment that could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. For this problem, if the quotient is strictly greater than 231 - 1, then return 231 - 1, and if the quotient is strictly less than -231, then return -231.\n\nExample 1:\nInput: dividend = 10, divisor = 3\nOutput: 3\nExplanation: 10/3 = 3.33333.. which is truncated to 3.\n\nExample 2:\nInput: dividend = 7, divisor = -3\nOutput: -2\nExplanation: 7/-3 = -2.33333.. 
which is truncated to -2.\n\n\nConstraints:\n-231 <= dividend, divisor <= 231 - 1\ndivisor != 0\n\"\"\"\n\nclass DivideTwoIntegers:\n def divide(self, dividend, divisor):\n positive = (dividend < 0) is (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n res = 0\n while dividend >= divisor:\n temp, i = divisor, 1\n while dividend >= temp:\n dividend -= temp\n res += i\n i <<= 1\n temp <<= 1\n if not positive:\n res = -res\n return min(max(-2147483648, res), 2147483647)","sub_path":"LeetCodePython/DivideTwoIntegers.py","file_name":"DivideTwoIntegers.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"486599806","text":"\"\"\"Example DAG demonstrating the usage of the BashOperator.\"\"\"\n\nfrom datetime import timedelta\n\nimport airflow\nfrom airflow.models import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\nargs = {\n \"owner\": \"Airflow\",\n \"start_date\": airflow.utils.dates.days_ago(2),\n}\n\ndag = DAG(\n dag_id=\"my_third_dag\",\n default_args=args,\n schedule_interval=timedelta(minutes=150),\n dagrun_timeout=timedelta(minutes=60),\n)\n\nrun_this_last = DummyOperator(task_id=\"run_this_last\", dag=dag)\n\n# [START howto_operator_bash]\nrun_this1 = BashOperator(task_id=\"echo_1\", bash_command=\"echo 1\", dag=dag,)\n\n# [START howto_operator_bash]\nrun_this2 = BashOperator(task_id=\"echo_2\", bash_command=\"echo 2\", dag=dag,)\n\n# [START howto_operator_bash]\nrun_this3 = BashOperator(task_id=\"echo_3\", bash_command=\"echo 3\", dag=dag,)\n\n# [START howto_operator_bash]\nrun_this4 = BashOperator(task_id=\"echo4\", bash_command=\"echo 4\", dag=dag,)\n# [END howto_operator_bash]\n\n\n# for i in range(3):\n# task = BashOperator(\n# task_id='runme_' + str(i),\n# bash_command='echo \"{{ task_instance_key_str }}\" && sleep 1',\n# dag=dag,\n# )\n# task >> run_this\n\n# [START howto_operator_bash_template]\nalso_run_this = BashOperator(\n task_id=\"also_run_this\",\n bash_command='echo \"run_id={{ run_id }} | dag_run={{ dag_run }}\"',\n dag=dag,\n)\n# [END howto_operator_bash_template]\n\nrun_this1 >> run_this2 >> [run_this3, run_this4] >> also_run_this\n","sub_path":"dags/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"234240100","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom .models import Post, Comment\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import DetailView, CreateView,UpdateView,DeleteView,ListView\nfrom .forms import PostForm,CommentForm\nfrom django.core.urlresolvers import reverse_lazy,reverse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\n\n\n'''\n@login_required # must need to do 'login'\n\n#Showing 'money' list function\ndef list(request):\n\tlist = Post.objects.all().order_by('-created_date')\n\treturn render(request, 'money/list.html', {'list': list})\n'''\n#post_list = login_required(ListView.as_view(model=Post,queryset=Post.objects.all().order_by('-created_date'),paginate_by=5))\n\n\n#detail = 
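# Worked trace for the DivideTwoIntegers.divide solution above (assumed inputs dividend=22, divisor=3): the inner loop subtracts doubling multiples of the divisor, 3 (res=1), then 6 (res=3), then 12 (res=7), leaving a remainder of 1, so the method returns 7, matching 22 // 3.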
login_required(DetailView.as_view(model=Post))\n@login_required\ndef post_list(request):\n queryset_list =Post.objects.all().order_by('-created_date')\n query=request.GET.get(\"q\")\n if query:\n queryset_list = queryset_list.filter(\n Q(title__icontains=query)|\n Q(message__icontains=query)|\n Q(user__username__icontains=query)\n ).distinct()\n paginator = Paginator(queryset_list, 8)\n page = request.GET.get('page') \n\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages) \n \n context ={\n \"object_list\":queryset,\n \"title\":\"List\"\n }\n \n return render(request,\"money/post_list.html\",context)\n\n\n@login_required\ndef post_detail(request,pk):\n post = get_object_or_404(Post, pk=pk)\n post.hit += 1\n post.save()\n return render(request, 'money/post_detail.html',{'post':post})\n\n\nclass PostCreateView(CreateView):\n model = Post\n form_class = PostForm\n template_name = 'money/add_post.html'\n\n def form_valid(self, form):\n posts = form.save(commit=False)\n posts.user = self.request.user\n posts.save()\n return super(PostCreateView, self).form_valid(form)\n\n\n\npost_new = login_required(PostCreateView.as_view(model=Post, form_class=PostForm,template_name='money/add_post.html'))\npost_edit = login_required(UpdateView.as_view(model=Post, form_class=PostForm,template_name = 'money/edit_post.html'))\npost_delete = login_required(DeleteView.as_view(model=Post,success_url=reverse_lazy('money:post_list')))\n\n\n\n\nclass CommentCreateView(CreateView):\n model = Comment\n form_class = CommentForm\n template_name = 'money/add_comment.html'\n\n def form_valid(self, form):\n comment = form.save(commit=False)\n comment.post = get_object_or_404(Post, pk=self.kwargs['post_pk'])\n comment.comment_user = self.request.user\n comment.save()\n return super(CommentCreateView, self).form_valid(form)\n\n\ncomment_new = login_required(CommentCreateView.as_view(model=Comment,form_class=CommentForm,template_name = 'money/add_comment.html'))\n\ncomment_edit = login_required(UpdateView.as_view(model=Comment, form_class=CommentForm,template_name = 'money/edit_comment.html'))\n\ncomment_delete = login_required(DeleteView.as_view(model=Comment,success_url=reverse_lazy('money:post_list')))","sub_path":"money/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"401307484","text":"import os\nfrom pathlib import Path\nimport cv2\nimport random\nimport numpy as np\nimport dnn\nimport dsconf\nimport imgutil\nfrom . 
import imgcnv\n\nclass Dataset():\n\t\"\"\"データセット.\n\t\"\"\"\n\tdef __init__(self, hash, xs, ts, xrev, yrev):\n\t\tself.hash = hash\n\t\tself.xs = xs\n\t\tself.ts = ts\n\t\tself.xrev = xrev\n\t\tself.yrev = yrev\n\n\tdef get(self):\n\t\t\"\"\"データセットを取得する.\n\t\t# Returns:\n\t\t\t(入力データ, 教師データ)\n\t\t\"\"\"\n\t\tdssize = dsconf.MaxImageSize\n\t\tmaxSize = dssize[0]\n\t\thalfSize = maxSize // 2\n\t\tsize1 = random.randint(halfSize, maxSize)\n\t\tsize2 = random.randint(halfSize, maxSize)\n\n\t\t# リサイズ後の画像を作成\n\t\trx = imgutil.ResizeIfLarger(self.xs[random.randrange(0, len(self.xs))], size1)\n\t\trt = imgutil.ResizeIfLarger(self.ts[random.randrange(0, len(self.ts))], size2)\n\t\trx = imgcnv.BgrToDnn(rx)\n\t\trt = imgcnv.BgrToDnn(rt)\n\n\t\t# データペア領域作成\n\t\tx = np.full((dsconf.InChs,) + dssize, 1, dtype=dnn.dtype)\n\t\tt = np.full((dsconf.OutChs,) + dssize, 1, dtype=dnn.dtype)\n\n\t\t# リサイズ後の画像をランダムな位置に配置\n\t\tw = rx.shape[2]\n\t\th = rx.shape[1]\n\t\tox = random.randint(0, dssize[1] - w)\n\t\toy = random.randint(0, dssize[0] - h)\n\t\tx[:, oy:oy + h, ox:ox + w] = rx[:, ::self.yrev, ::self.xrev]\n\n\t\tw = rt.shape[2]\n\t\th = rt.shape[1]\n\t\tox = random.randint(0, dssize[1] - w)\n\t\toy = random.randint(0, dssize[0] - h)\n\t\tt[:, oy:oy + h, ox:ox + w] = rt[:, ::self.yrev, ::self.xrev]\n\n\t\treturn x, t\n\ndef Load(datasetsDir):\n\t\"\"\"データセットディレクトリから全データセットを読み込む.\n\t# Args:\n\t\tdatasetsDir: データセットディレクトリ名.\n\t# Returns:\n\t\tデータセットのリスト.\n\t\"\"\"\n\t# CycleGAN用データセットディレクトリ\n\tcgd = os.path.join(datasetsDir, \"cyclegan\")\n\t# 白バックとそれ以外用ディレクトリ名\n\tnbd = os.path.join(cgd, \"nb\")\n\twbd = os.path.join(cgd, \"wb\")\n\n\t# 白バック以外のイメージ読み込み\n\tp = Path(nbd)\n\tpls = []\n\tpls.extend(p.glob(\"*.jpg\"))\n\tpls.extend(p.glob(\"*.png\"))\n\tpls.extend(p.glob(\"*.jpeg\"))\n\timageCount = len(pls)\n\tloadedCount = 0\n\txs = []\n\tfor pl in pls:\n\t\tfile = os.path.normpath(os.path.join(nbd, pl.name))\n\t\timg = cv2.imread(file, cv2.IMREAD_COLOR)\n\t\tif img is None:\n\t\t\timageCount -= 1\n\t\t\tcontinue\n\t\txs.append(img)\n\t\tloadedCount += 1\n\t\tprint(loadedCount, \"/\", imageCount)\n\n\t# 白バックのイメージ読み込み\n\tp = Path(wbd)\n\tpls = []\n\tpls.extend(p.glob(\"*.jpg\"))\n\tpls.extend(p.glob(\"*.png\"))\n\tpls.extend(p.glob(\"*.jpeg\"))\n\timageCount = len(pls)\n\tloadedCount = 0\n\tts = []\n\tfor pl in pls:\n\t\tfile = os.path.normpath(os.path.join(wbd, pl.name))\n\t\timg = cv2.imread(file, cv2.IMREAD_COLOR)\n\t\tif img is None:\n\t\t\timageCount -= 1\n\t\t\tcontinue\n\t\tts.append(img)\n\t\tloadedCount += 1\n\t\tprint(loadedCount, \"/\", imageCount)\n\n\t# データセットリストに追加\n\tds = []\n\tfor i in range(len(xs) + len(ts)):\n\t\tds.append(Dataset(i, xs, ts, 1, 1))\n\t\tds.append(Dataset(i, xs, ts, 1, -1))\n\t\tds.append(Dataset(i, xs, ts, -1, 1))\n\t\tds.append(Dataset(i, xs, ts, -1, -1))\n\trandom.shuffle(ds)\n\treturn ds\n","sub_path":"CycleGan/dsloader.py","file_name":"dsloader.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"434923908","text":"# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Part of \"Nuitka\", an optimizing Python compiler that is compatible and\n# integrates with CPython, but also works on its own.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed 
to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\" Functions to handle git staged content.\n\nInspired from https://raw.githubusercontent.com/hallettj/git-format-staged/master/git-format-staged\n\nOriginal author: Jesse Hallett \n\n\"\"\"\n\n\nimport os\nimport re\nimport subprocess\n\nfrom nuitka.utils.Execution import check_call, check_output\n\n\n# Parse output from `git diff-index`\ndef _parseIndexDiffLine(line):\n pattern = re.compile(\n r\"^:(\\d+) (\\d+) ([a-f0-9]+) ([a-f0-9]+) ([A-Z])(\\d+)?\\t([^\\t]+)(?:\\t([^\\t]+))?$\"\n )\n\n zeroed_pat = re.compile(\"^0+$\")\n\n # Returns the argument unless the argument is a string of zeroes, in which case\n # returns `None`\n def unless_zeroed(s):\n return s if not zeroed_pat.match(s) else None\n\n match = pattern.match(line)\n if not match:\n raise ValueError(\"Failed to parse diff-index line: \" + line)\n\n return {\n \"src_mode\": unless_zeroed(match.group(1)),\n \"dst_mode\": unless_zeroed(match.group(2)),\n \"src_hash\": unless_zeroed(match.group(3)),\n \"dst_hash\": unless_zeroed(match.group(4)),\n \"status\": match.group(5),\n \"score\": int(match.group(6)) if match.group(6) else None,\n \"src_path\": match.group(7),\n \"dst_path\": match.group(8),\n }\n\n\ndef getStagedFileChangeDesc():\n # Only file additions and modifications\n output = check_output(\n [\"git\", \"diff-index\", \"--cached\", \"--diff-filter=AM\", \"--no-renames\", \"HEAD\"]\n )\n\n for line in output.splitlines():\n if str is not bytes:\n line = line.decode(\"utf8\")\n\n yield _parseIndexDiffLine(line)\n\n\ndef getFileHashContent(object_hash):\n return check_output([\"git\", \"cat-file\", \"-p\", object_hash])\n\n\ndef putFileHashContent(filename):\n new_hash = check_output(\n [\"git\", \"hash-object\", \"-w\", \"--stdin\"], stdin=open(filename)\n )\n\n if str is not bytes:\n new_hash = new_hash.decode(\"utf8\")\n\n assert new_hash\n return new_hash.rstrip()\n\n\ndef updateFileIndex(diff_entry, new_object_hash):\n check_call(\n [\n \"git\",\n \"update-index\",\n \"--cacheinfo\",\n \"%s,%s,%s\"\n % (diff_entry[\"dst_mode\"], new_object_hash, diff_entry[\"src_path\"]),\n ]\n )\n\n\ndef updateWorkingFile(path, orig_object_hash, new_object_hash):\n patch = check_output([\"git\", \"diff\", orig_object_hash, new_object_hash])\n\n path = path.replace(os.path.sep, \"/\")\n\n # Substitute object hashes in patch header with path to working tree file\n patch_b = patch.replace(orig_object_hash.encode(), path.encode()).replace(\n new_object_hash.encode(), path.encode()\n )\n\n apply_patch = subprocess.Popen(\n [\"git\", \"apply\", \"-\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n _output, _err = apply_patch.communicate(input=patch_b)\n\n # TODO: In case of failure, do we need to abort?\n\n return apply_patch.returncode != 0\n","sub_path":"nuitka/tools/quality/Git.py","file_name":"Git.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"93574426","text":"class Solution:\n\t# @param candidates, a list of integers\n\t# @param target, integer\n\t# @return a list of lists of integers\n\tdef combinationSum2(self, candidates, 
target):\n\t\tre=[]\n\t\tcandidates.sort()\n\t\thelper(candidates,0,target,[],re)\n\t\treturn re\n\ndef helper(a, k, target, r, re):\n\tif k==len(a):\n\t\tif target==0:\n\t\t\tre.append(copy.deepcopy(r))\n\telse:\n\t\tval=a[k]\n\t\tn=k\n\t\twhile n TreeNode:\n if not root:\n return None\n\n root.left = self.pruneTree(root.left)\n root.right = self.pruneTree(root.right)\n\n if root.left or root.right or root.val == 1:\n return root\n else:\n return None","sub_path":"OA/CapitalOne/BinaryTreepruning.py","file_name":"BinaryTreepruning.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605720692","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 29 08:35:01 2020\r\n\r\n@author: Carlos Tejeda\r\n\"\"\"\r\nres=\"Si\"\r\nwhile (res != \"No\"):\r\n print (\"Ingrese el nombre del alumno\")\r\n nomb=input()\r\n print (\"Cuantas calificaciones va a promediar\")\r\n nucal= input()\r\n nucal=int(nucal)\r\n \r\n suma=0\r\n for i in range(1,nucal+1):\r\n print (\"Ingresa la \",i,\"a. Calificacion\")\r\n calif=input()\r\n calif=float(calif)\r\n suma=suma+calif\r\n promedio =suma/nucal\r\n\r\n if(promedio>=70):\r\n print (\"El alumno\", nomb,\"tiene un primedio de \",promedio, \" y es aprobatorio\")\r\n \r\n else: \r\n print (\"El alumno\", nomb,\"tiene un primedio de \",promedio, \" y es reprobatorio \\n\")\r\n print (\"Desea agregar otro alumno: (Si/No)?\")\r\n res=input()","sub_path":"Ciclo_For.py","file_name":"Ciclo_For.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585180731","text":"\n# coding: utf-8\n\n# In[113]:\n\n######################################DATA POSTPROCESSING:\n\n####Here we extract dF/F, make joystick position centered around 0,\n\n###include error reporting in the scripts.\n\n\n\n####NOTE THIS CODE IS SPECIFIC TO MY IMPLEMENTATION. 
IE IT IS NO LONGER GENERIC.\n\n\n# In[114]:\n\nimport tifffile as tff\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom scipy import misc \nfrom scipy import ndimage\nimport jpImageFunctions as jpi\n#from jpImageFunctions import headerExtract\nimport os as os\nimport image_registration as imr\nimport re #this is regex.\nfrom skimage import transform as tf\nimport numpy as np\nimport Image\nget_ipython().magic(u'pylab inline')\nimport pickle\nimport sys\nfrom pandas import *\nfrom pandas import DataFrame as dataFrame\nfrom pandas import Series as series\nfrom scipy import signal\n\n\n# LOAD THE DATA\n\n# In[115]:\n\n######################################Chan postprocessing:\n####HERE WE LOAD THE IMAGING DATA AND THE BLACKROCK DATA\nloadSessionPkl = open('OUTPUTsessionData.pkl','rb')\nsessionData = pickle.load(loadSessionPkl)\nloadSessionPkl.close()\n\n\n# In[115]:\n\n\n\n\n# In[116]:\n\nplot(sessionData['chanJoystickX'])\n\n\n# In[117]:\n\na = sessionData['chanJoystickY']\n\n\n# In[118]:\n\nb = np.array(a)\nplot(b)\n\n\n# In[118]:\n\n\n\n\n# In[118]:\n\n\n\n\n# In[118]:\n\n\n\n\n# In[119]:\n\n##############################################################MODE + SMOOTH THE CHANNEL DATA.\n\nfrom scipy import stats ###This gives us mode() calculating capability.\nfrom scipy.signal import savgol_filter as savgol_filter\n\ndef chanModeSimplifier(arrayToModeise): ###function mode normalises and applies a conservative smoothing function.\n #\n modeValue = stats.mode(arrayToModeise)[0]\n arrayToModeise = int64(arrayToModeise) ################WARNING USE TO INT64() but gave error\n arrayToModeise = arrayToModeise-modeValue\n arrayToModeise = savgol_filter(arrayToModeise, 15,1)\n arrayToModeise = int64(arrayToModeise.round())\n \n #modeValue = stats.mode(arrayToModeise)[0]\n #arrayToModeiseArray = np.array(arrayToModeise)\n #arrayToModeise = arrayToModeise-modeValue\n #arrayToModeise = savgol_filter(arrayToModeise, 15,1)\n #arrayToModeise = arrayToModeise.round()\n return arrayToModeise\n#note the following could be two lines of code but keeping it like this is useful for debugging.\nchanJoystickYModal = chanModeSimplifier(sessionData['chanJoystickY'])\nchanJoystickXModal = chanModeSimplifier(sessionData['chanJoystickX'])\nchanJoystickYModal = series(chanJoystickYModal)\nchanJoystickXModal = series(chanJoystickXModal)\nsessionData['chanJoystickY'] = chanJoystickYModal\nsessionData['chanJoystickX'] = chanJoystickXModal\n\n\n# In[120]:\n\nplot(sessionData['chanJoystickX'])\n\n\n# In[160]:\n\n\ndef roiDFFExtractor(roiTrace, method,isFirst): ###method number simply defines the type of dF/F extraction to do.\n #\n if (method ==1):\n \n #\n #Bottom 10% mean method:\n minVal = roiTrace.min()\n roiTrace[:] = roiTrace[:] - minVal #roiTrace + minVal\n \n roiOrdered = roiTrace.order()\n basethresh = len(roiOrdered)/10 ####Length of the bottom 10% of rois\n valsToAvg = roiTrace[0:basethresh]\n baselineF = mean(valsToAvg) ###How is my intensity going below 0?\n \n \n if (isFirst ==1): #debugging:\n #print('base threshold is: ' + str(basethresh))\n #print('baselineF: ' + str(baselineF))\n #print('The minimum val is ' + str(minVal))\n sss=1\n ####ERROR: ADD BASELINE TO DRAG ABOVE THIS.....\n roiTrace = signal.detrend(roiTrace)\n roiTrace = (roiTrace/baselineF) #*100\n ##########Look up how this works\n newMinVal = roiTrace.min()\n #print('the newminval is ' + str(newMinVal))\n return roiTrace\n \n###Note: the detrending moves it to around zero.\n\n\n# In[161]:\n\nkeysToFilter = 
sessionData.keys()\nmatchPattern = 'roi\\d{0,4}'\nmatches = [elem for elem in keysToFilter if re.search(matchPattern, elem) != None]\n\n\n####ROI NORMALISATION\n\n\n\n\n\n# In[162]:\n\nlen(sessionData)\n\n\n# In[162]:\n\n\n\n\n# In[163]:\n\n#######################dF/calculation\n\n\nsessionData = sessionDataEdit #now we return to the old version.\n\n\n# In[164]:\n\n#timepoints = {}\n#for pointy in range(len(sessionData['roi2'])):\n# currVal = sessionData['roi2'][pointy]\n\n\n# In[165]:\n\nplot(sessionData['roi10'])\n\n\n# In[166]:\n\ntype(matches[1])\nblob =1\nif (blob == 1):\n for i in range (len(matches)):\n trace = sessionData[matches[i]]\n if (i == 30):\n aaa = trace\n #print(matches[i])\n if (i ==3):\n isFirst = 1\n else:\n isFirst = 0 \n isFirst=1\n\n traceProcessed = roiDFFExtractor(trace,1,isFirst)\n sessionData[matches[i]] = traceProcessed\n \n\n\n# In[170]:\n\nplot(sessionData['roi10'])\n\n\n# In[171]:\n\ndY = np.array(sessionData['chanJoystickY'])\ndY = np.diff(dY)\ndX = np.array(sessionData['chanJoystickX'])\ndX = np.diff(dX)\n###ISSUE DY AND DX ARE ONE INDEX SHORTER DUE TO THE WAY THE DIFFERENTIAL WORKS. THEREFORE INSERTING 'FAKEVAL' OF ZERO AT THE VERY END.\ndY= np.append(dY,0)\ndX=np.append(dX,0)\nsessionData['chanJoystickYVelocity'] = dY\nsessionData['chanJoystickXVelocity'] = dX\n\neuclidV = (np.square(dY) + np.square(dX))\neuclidV = numpy.sqrt(euclidV)\nsessionData['chanEuclidVelocity'] = euclidV\n\n\n# In[172]:\n\n\n\n\n# In[13]:\n\ndef intelligentDFCompress(newDF,tempoTest):\n for key in range(len(newDF.keys())):\n keyName = newDF.keys()[key]\n a=1\n print(keyName)\n \n \n if (type(newDF[keyName][0]) != type(float64(122.233))):\n if (min(newDF[keyName]>=0)): #####This essentially checks for the signed nature of the signal:\n if ((max(newDF[keyName])<2**8) and min(newDF[keyName]>=0)):\n #set to binary\n newDF[keyName] = newDF[keyName].astype('uint8')\n elif ((max(newDF[keyName])<2**16) and min(newDF[keyName]>=0)):\n newDF[keyName] = newDF[keyName].astype('uint16')\n elif ((max(newDF[keyName])<2**32) and min(newDF[keyName]>=0)):\n newDF[keyName] = newDF[keyName].astype('uint32')\n else:\n newDF[keyName] = newDF[keyName].astype('int64')\n else:\n if ((max(newDF[keyName])<((2**8)/2) and min(newDF[keyName]>(-((2**8)/2))))):\n #set to binary\n newDF[keyName] = newDF[keyName].astype('int8')\n elif ((max(newDF[keyName])<((2**16)/2) and min(newDF[keyName]>(-((2**16)/2))))):\n newDF[keyName] = newDF[keyName].astype('int16')\n elif ((max(newDF[keyName])<((2**32)/2) and min(newDF[keyName]>(-((2**32)/2))))):\n newDF[keyName] = newDF[keyName].astype('int32')\n else:\n newDF[keyName] = newDF[keyName].astype('int64')\n else: ###ie, if it does contain roi\n #newDF[keyName] = newDF[keyName].astype('uint8')\n newDF[keyName] = newDF[keyName].astype('float16')\n return newDF\n\n\n# In[14]:\n\n\ntype(sessionData['roi1'][0])\n\n\n# In[43]:\n\nplot(sessionData['roi1'])\n\n\n# In[44]:\n\nmean(sessionData['roi1'][1000:300000])\n\n\n# In[15]:\n\na = float16(122.233)\ntype(a)\n\n\n# In[45]:\n\nsessionData.keys()\n\n\n# In[62]:\n\n##############################################NORMALISATION OF THE XY TRANSLATE.\n\nfor tPoint in range(len(sessionData)):\n if (tPoint % 50000 == 0):\n disp(tPoint)\n \n tPointLocal = tPoint\n tPointNext = tPointLocal + 29999\n if (tPointNext>(len(sessionData)-1)):\n tPointLocal = len(sessionData)-1\n tPointNext = tPointLocal+29999\n normalisationWindow = mean(sessionData['chanTranslateEuclidean'][range(tPoint,tPointNext,250)]) #take value every\n \n 
sessionData['chanTranslateEuclidNorm'][tPoint] = sessionData['chanTranslateEuclidean'][tPoint]/normalisationWindow\n\n\n# In[57]:\n\nrange(0,1000,200)\n\n\n# In[70]:\n\nplot(sessionData['chanTranslateEuclidNorm'])\n#plot(sessionData['chanTranslateDimen1'])\n\n\n# In[64]:\n\nplot(sessionData['chanTranslateEuclidean'])\n\n\n# In[17]:\n\ntempoTest = sessionData['roi0'] #zScoreLimit):\n thresholdCrossings[timeP] = 1\n else:\n thresholdCrossings[timeP] = 0\n \n####Minspacing is currently set at 250ms.\n\n###begin to loop through:\n####Need to go through a switch sequence to pull out the reaches here.\ncurrStatus = 0 #####currStatus can be 0 (search for next reach)\n\n###Other vars:\n#timeSinceLastReachEnd\n\nreaches = {} ##A dict that will contain the rudimentary temporal information for\n\nreachNum=0\nfilteredThresholdCrossings = np.zeros(len(zScoreOfEuclidVelocity)) ####This is the threshold crossings that have been filtered\n#so that they only represent discrete reaches\nfor clockCount in range(len(thresholdCrossings)):\n #####Here we begin iterating through the data.\n if (currStatus == 0):\n if (zScoreOfEuclidVelocity[clockCount]>zScoreLimit):\n #Here we initiate the new reach\n currReachInfo = {}\n \n ####Now we loop back to find the start time.\n foundStart = 0\n regressorClock = clockCount\n while (foundStart==0):\n if (zScoreOfEuclidVelocity[regressorClock]<=zScoreStartLimit):\n startTimeCount=regressorClock\n foundStart = 1\n \n \n regressorClock= regressorClock-1\n \n \n currReachInfo['startTime'] = startTimeCount\n currStatus=1 \n elif(currStatus ==1):\n #\n #This is a waiting game until velocity drops back under the threshold.\n if (zScoreOfEuclidVelocity[clockCount]<=zScoreEndLimit): ######Check if we are below threshold\n \n currReachInfo['endTime'] = clockCount\n endLimTime = clockCount+minSpace ###This is how long we must\n reaches[reachNum] = currReachInfo ####Here we save the reach.\n \n currStatus = 2\n elif(currStatus==2):\n #\n #Here we check if the reach is in fact still in progress.\n if (zScoreOfEuclidVelocity[clockCount]>zScoreLimit):\n currStatus=1 \n elif(clockCount>endLimTime):\n currStatus = 0\n reachNum = reachNum+1 ####This is where we declare the reach to be over.\n ####Set the value of thresholdCrossings:\n filteredThresholdCrossings[currReachInfo['startTime']:currReachInfo['endTime']] = 1\n \n del currReachInfo\n del endLimTime\n \n #Here we check if we have fulfilled the waiting time to concede that the reach is over.\n\n \n\n\n\n# In[24]:\n\nthresholdCrossings ###This yields 96 reaches\n\n\n# In[25]:\n\n#plot(sessionData['chanJoystickY'][30000:35000])\n#plot(zScoreOfEuclidVelocity[:35000])\nfig = plt.gcf()\nfig.set_size_inches(20,4)\n\n\n# In[26]:\n\nplot(filteredThresholdCrossings[30000:35000])\nfig = plt.gcf()\nfig.set_size_inches(20,4)\n\n\n# In[26]:\n\n\n\n\n# In[27]:\n\n######################Now we use the reaches system to extract reach stuff\n\n###Variables to work with:\n#reaches\n#sessionData\n\nnumReaches = len(reaches)\n#ITERATE THROUGH THE REACHES\nreachData = {}\nfor reachNum in range(numReaches):\n ####\n reachTemp = {}\n reachTemp['startTime'] = reaches[reachNum]['startTime']\n reachTemp['endTime'] = reaches[reachNum]['endTime']\n reachTemp['reachDuration'] = reachTemp['endTime'] - reachTemp['startTime']\n reachTemp['maxVelocity'] = max(sessionData['chanEuclidVelocity'][reachTemp['startTime']:reachTemp['endTime']])\n reachTemp['middleTime'] = reachTemp['startTime'] + round(reachTemp['endTime']/2)\n reachTemp['trajectory'] = 1####Place holder 
for a real trajectory extractor. Need to save all the real data locally.\n ###reachTemp['mouseId'] = ses\n ###Need to add in mouse id.\n reachData[reachNum] = reachTemp\n \n \n\n\n# In[28]:\n\na = np.zeros(len(reachData))\nfor i in range(len(reachData)):\n a[i] = reachData[i]['maxVelocity']\n\n\n\n# In[28]:\n\n\n\n\n# In[29]:\n\nsessionData.keys()\n\n\n# In[29]:\n\n\n\n\n# In[30]:\n\nplot((sessionData['chanLickPortPiezo'][400000:500000])/100)\n\n\n# In[31]:\n\nplotRange=range(0,100000)\na = sessionData['chanTrialStartBinary']*100\n#plot(a[plotRange])\nb = sessionData['chanRewardBinary'] *100\nplot(b[plotRange])\n#plot((sessionData['chanLickPortPiezo'][plotRange])/700)\n#plot(a[200000:300000])\n#plot(sessionData['chanJoystickY'][plotRange]/10)\n#plot(sessionData['chanEuclidVelocity'][plotRange]*3)\nplot(sessionData['roi3'][plotRange]*10)\n#plot(a[plotRange])\n\nfig = plt.gcf()\nfig.set_size_inches(20,5)\n\n\n# In[32]:\n\n#plot(sessionData['chanLickPortPiezo'][0:10000])\n#plot(sessionData['chanJoystickX'][0:10000])\n\n\n# In[33]:\n\nplot(sessionData['chanJoystickX'][320000:340000])\nfig = plt.gcf()\nfig.set_size_inches(20,4)\n\n\n# In[34]:\n\nsessionData.keys()\n\n\n# In[34]:\n\n\n\n\n# In[34]:\n\n\n\n\n# In[35]:\n\nplot(sessionData['roi10'])\nplot(sessionData['chanEuclidVelocity']/80)\n\n\n# In[38]:\n\nplot(sessionData['chanJoystickY'])\n\n\n# In[36]:\n\ntimeRangeToNorm = 30*120 #the time window over which the calculation must be performed.\n\n\n\n###this is annoyingly complex as I only do it once for a unique value. Need to really be sure this is working correctly.\nblobbb=0\nif (blobbb==1):\n for roiIden in range(len(matches)):\n print(matches[roiIden])\n\n newRoi = np.zeros(len(sessionData))\n\n\n for timeToMod in range(len(sessionData)):\n\n if (timeToMod !=0):\n currVal = sessionData[matches[roiIden]][timeToMod]\n prevVal = sessionData[matches[roiIden]][timeToMod-1]\n else:\n currVal = sessionData[matches[roiIden]][timeToMod]\n prevVal = -9999\n\n\n if (currVal!= prevVal):\n startSetVal = timeToMod\n ###now we search for when it next changes, and set that as our jump point\n set = 0\n for i in range(65):\n if (set == 0):\n testPoint = sessionData[matches[roiIden]][timeToMod+i]\n if (testPoint!= currVal): #is, value has changed\n endSetValTemp = testPoint-1\n set = 1\n endSetVal= timeToMod+endSetValTemp\n\n\n currentRoiVal = sessionData[matches[roiIden]][startSetVal]\n if (timeToMod>(len(sessionData)-timeRangeToNorm)): ###ie, if we are nearing the end.\n startTime = len(sessionData)-(timeRangeToNorm+1)\n endTime = len(sessionData)-1\n else:\n startTime = timeToMod\n endTime = timeToMod+timeRangeToNorm\n\n normalisationWindow = mean(sessionData[matches[roiIden]][range(startTime,endTime,30)]) #taking every 30th time point\n newRoi[startSetVal:endSetVal] = currentRoiVal/normalisationWindow\n\n\n\n\n if (timeToMod % 100000 == 0): #basically a modulus clock\n disp(timeToMod)\n\n\n\n\n sessionDataEdit[matches[roiIden]] = newRoi ##here we update with the normalised version. 
\n\n\n","sub_path":"_DISTILL/_BehaviourIllustrator/sessionPreprocessing.py","file_name":"sessionPreprocessing.py","file_ext":"py","file_size_in_byte":16574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"546180182","text":"from flask import Flask, render_template, url_for, request, session, flash, redirect\r\nfrom datetime import timedelta, datetime\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom werkzeug.utils import secure_filename\r\nimport os\r\n\r\n\r\napp=Flask(__name__)\r\napp.secret_key=\"FarmPlus\"\r\n#app.permanent_session_lifetime=timedelta(hours=5)\r\n\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///data.sqlite3'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\r\n\r\napp.config['UPLOAD_FOLDER']=\"B:/python/Flask/proj3/static/uploads\"\r\n#Path to your app folder/static/uploads\r\n\r\ndb=SQLAlchemy(app)\r\n\r\nclass User(db.Model):\r\n id=db.Column('id',db.Integer,primary_key=True)\r\n name=db.Column('name',db.String(100),nullable=False)\r\n email=db.Column('email',db.String(100),nullable=False,unique=True)\r\n residence=db.Column('residence',db.String(100),nullable=False)\r\n password=db.Column('password',db.String(100),nullable=False)\r\n\r\n def __init__(self, name, email,residence, password):\r\n self.name=name\r\n self.email=email\r\n self.residence=residence\r\n self.password=password\r\n\r\nclass Story(db.Model):\r\n id=db.Column('storyid',db.Integer,primary_key=True)\r\n img=db.Column('storyimg',db.String(100),nullable=True)\r\n title=db.Column('title',db.String(150),nullable=False)\r\n content=db.Column('content',db.String(1000),nullable=False)\r\n time=db.Column('storytime',db.String(50),nullable=False)\r\n user=db.Column('storyuser',db.String(100),nullable=False)\r\n\r\n def __init__(self,img,title,content,time,user):\r\n self.img=img\r\n self.title=title\r\n self.content=content\r\n self.time=time\r\n self.user=user\r\n\r\nclass News(db.Model):\r\n id=db.Column('newsid',db.Integer,primary_key=True)\r\n img=db.Column('newsimg',db.String(100),nullable=True)\r\n title=db.Column('newstitle',db.String(150),nullable=False)\r\n content=db.Column('newscontent',db.String(1000),nullable=False)\r\n time=db.Column('newstime',db.String(50),nullable=False)\r\n\r\n def __init__(self,img,title,content,time):\r\n self.img=img\r\n self.title=title\r\n self.content=content\r\n self.time=time\r\n\r\nclass Facility(db.Model):\r\n id=db.Column('facid',db.Integer,primary_key=True)\r\n img=db.Column('facimg',db.String(100),nullable=True)\r\n title=db.Column('factitle',db.String(150),nullable=False)\r\n content=db.Column('facontent',db.String(1000),nullable=False)\r\n time=db.Column('factime',db.String(50),nullable=False)\r\n\r\n def __init__(self,img,title,content,time):\r\n self.img=img\r\n self.title=title\r\n self.content=content\r\n self.time=time \r\n\r\nclass Harvest(db.Model):\r\n id=db.Column('harid',db.Integer,primary_key=True)\r\n img=db.Column('harimg',db.String(100),nullable=True)\r\n title=db.Column('hartitle',db.String(150),nullable=False)\r\n content=db.Column('harcontent',db.String(1000),nullable=False)\r\n time=db.Column('hartime',db.String(50),nullable=False)\r\n\r\n def __init__(self,img,title,content,time):\r\n self.img=img\r\n self.title=title\r\n self.content=content\r\n self.time=time \r\n\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef home():\r\n if request.method=='POST':\r\n name=request.form['name']\r\n email=request.form['email']\r\n if email=='admin@farmplus.com':\r\n 
session['email']=email\r\n return redirect(url_for('adash'))\r\n residence=request.form['farm']\r\n password=request.form['password']\r\n #session.permanent=True\r\n find_user=User.query.filter_by(email=email).first()\r\n if find_user:\r\n if password==find_user.password:\r\n session['email']=find_user.email\r\n return redirect(url_for('dash'))\r\n else:\r\n flash('Invalid login')\r\n return render_template('index.html',LorD='Login')\r\n user=User(name=name, email=email, residence=residence, password=password)\r\n db.session.add(user)\r\n db.session.commit()\r\n session['email']=user.email\r\n return redirect(url_for('dash'))\r\n\r\n else:\r\n if 'email' in session:\r\n return render_template('index.html',LorD='Dashboard')\r\n return render_template('index.html',LorD='Login')\r\n\r\n@app.route('/dash')\r\ndef dash():\r\n if 'email' in session:\r\n email=session['email']\r\n if email=='admin@farmplus.com':\r\n return redirect(url_for('home'))\r\n user=User.query.filter_by(email=email).first()\r\n return render_template('dash.html',name=user.name,email=user.email,storysentival=0.0,onstorysentival=0.0,laststorysentival=0.0,onlaststorysentival=0.0)\r\n else :\r\n return redirect(url_for('home'))\r\n\r\n@app.route('/history')\r\ndef hist():\r\n return render_template('history.html')\r\n\r\n@app.route('/story',methods=['GET','POST'])\r\ndef story():\r\n img=None\r\n if request.method=='POST':\r\n if 'file' in request.files:\r\n file=request.files['file']\r\n if file : \r\n if validateImg(file):\r\n img=dumpImg(file)\r\n title=request.form['title']\r\n content=request.form['content']\r\n time=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n email=session['email']\r\n user=User.query.filter_by(email=email).first()\r\n story=Story(img=img,title=title,content=content,time=time,user=user.name)\r\n db.session.add(story)\r\n db.session.commit()\r\n stories=Story.query.filter_by(user=user.name)\r\n return render_template('stories.html',stories=stories)\r\n else:\r\n email=session['email']\r\n user=User.query.filter_by(email=email).first()\r\n stories=Story.query.filter_by(user=user.name)\r\n return render_template('stories.html',stories=stories)\r\n\r\n@app.route('/account')\r\ndef account():\r\n email=session['email']\r\n user=User.query.filter_by(email=email).first()\r\n return render_template('account.html',user=user)\r\n\r\n@app.route('/adash')\r\ndef adash():\r\n return render_template('adash.html')\r\n\r\n@app.route('/aharvest',methods=['GET','POST'])\r\ndef aharvest():\r\n img=None\r\n if request.method=='POST':\r\n if 'file' in request.files:\r\n file=request.files['file']\r\n if file : \r\n if validateImg(file):\r\n img=dumpImg(file)\r\n title=request.form['title']\r\n content=request.form['content']\r\n time=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n harvest=Harvest(img=img,title=title,content=content,time=time)\r\n db.session.add(harvest)\r\n db.session.commit()\r\n harvests=Harvest.query.all()\r\n return render_template('aharvest.html', harvests=harvests)\r\n\r\n@app.route('/afacilities',methods=['GET','POST'])\r\ndef afacilities():\r\n img=None\r\n if request.method=='POST':\r\n if 'file' in request.files:\r\n file=request.files['file']\r\n if file : \r\n if validateImg(file):\r\n img=dumpImg(file)\r\n title=request.form['title']\r\n content=request.form['content']\r\n time=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n fac=Facility(img=img,title=title,content=content,time=time)\r\n db.session.add(fac)\r\n db.session.commit()\r\n facs=Facility.query.all()\r\n return 
render_template('afacilities.html',facilities=facs)\r\n\r\n@app.route('/anews',methods=['GET','POST'])\r\ndef anews():\r\n img=None\r\n if request.method=='POST':\r\n if 'file' in request.files:\r\n file=request.files['file']\r\n if file : \r\n if validateImg(file):\r\n img=dumpImg(file)\r\n title=request.form['title']\r\n content=request.form['content']\r\n time=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n news=News(img=img,title=title,content=content,time=time)\r\n db.session.add(news)\r\n db.session.commit()\r\n news_all=News.query.all()\r\n return render_template('anews.html',news=news_all)\r\n\r\n@app.route('/feed',methods=['POST','GET'])\r\ndef feed():\r\n img=None\r\n if request.method=='POST':\r\n if 'file' in request.files:\r\n file=request.files['file']\r\n if file : \r\n if validateImg(file):\r\n img=dumpImg(file)\r\n title=request.form['title']\r\n content=request.form['content']\r\n time=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n email=session['email']\r\n user=User.query.filter_by(email=email).first()\r\n story=Story(img=img,title=title,content=content,time=time,user=user.name)\r\n db.session.add(story)\r\n db.session.commit()\r\n stories=Story.query.all()\r\n return render_template('infeed.html',stories=stories)\r\n\r\n@app.route('/harvest')\r\ndef harvest():\r\n harvests=Harvest.query.all()\r\n return render_template('inharvest.html',harvests=harvests)\r\n\r\n@app.route('/newsletter')\r\ndef newsletter():\r\n news_all=News.query.all()\r\n return render_template('innewsletter.html',news=news_all)\r\n\r\n@app.route('/facilities')\r\ndef facilities():\r\n facs=Facility.query.all()\r\n return render_template('infacilities.html',facilities=facs)\r\n\r\n@app.route('/fullstory')\r\ndef fullstory():\r\n return render_template('fullstory.html')\r\n\r\n@app.route('/upload',methods=['POST','GET'])\r\ndef uploadtest():\r\n if request.method=='POST':\r\n if 'file' not in request.files:\r\n flash('no file')\r\n return render_template('home.html',filename='',ext='')\r\n else:\r\n file=request.files['file']\r\n if not file:\r\n return render_template('home.html',filename='',ext='')\r\n filename = secure_filename(file.filename)\r\n fname=filename[:filename.index('.')]\r\n ext=filename[filename.index('.')+1:]\r\n fname+=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], fname+'.'+ext))\r\n if 'textarea' in request.form:\r\n flash(request.form['textarea'])\r\n return render_template('home.html',filename=fname,ext=ext)\r\n else: \r\n return render_template('home.html',filename='',ext='')\r\n\r\ndef validateImg(file):\r\n filename=secure_filename(file.filename)\r\n if '.' 
not in filename:\r\n return False\r\n #fname=filename[:filename.index('.')]\r\n ext=filename[filename.index('.')+1:]\r\n return ext.lower() in ['png','jpg','jpeg']\r\n\r\ndef dumpImg(file):\r\n filename=secure_filename(file.filename)\r\n fname=filename[:filename.index('.')]\r\n ext=filename[filename.index('.')+1:]\r\n fname+=datetime.now().strftime(\"%d-%m-%y-%H-%M-%S\")\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], fname+'.'+ext))\r\n return fname+'.'+ext\r\n\r\ndb.create_all()\r\napp.run(use_reloader=False, port='800')\r\n","sub_path":"proj3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363168978","text":"import ddt \r\nimport unittest,requests\r\nimport BeautifulReport\r\n@ddt.ddt\r\nclass MyCase(unittest.TestCase):\r\n\t@ddt.file_data(abc.yml)\r\n\t@ddt.unpack\r\n\tdef test_run(self,**kwargs):\r\n\t\tmethod=kwargs.get('method')\r\n\t\turl=kwargs.get('method')\r\n\t\tdata=kwargs.get('data',{})\r\n\t\theader=kwargs.get('header',{})\r\n\t\tis_json=kwargs.get('is_json',0)\r\n\t\tcookie=kwargs.get('cookie',{})\r\n\t\tcheck=kwargs.get('check')\r\n\t\tif method.upper()=='POST':\r\n\t\t\tif is_json:\r\n\t\t\t\tr=requests.post(url,json=data,headers=header,cookies=cookie)\r\n\t\t\telse:\r\n\t\t\t\tr=requests.post(url,data=data,headers=header,cookies=cookie)\r\n\t\telse:\r\n\t\t\tr=requests.post(url,params=data,headers=header,cookies=cookie)\r\n\t\tfor c in check:\r\n\t\t\tself.assertIn(c,r.txt)\r\n\r\n","sub_path":"common/base_case.py","file_name":"base_case.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386669328","text":"########\n# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nfrom manager_upgrade_base import BaseManagerUpgradeTest\n\n\nclass ManagerUpgradeTest(BaseManagerUpgradeTest):\n def test_manager_upgrade(self):\n \"\"\"Bootstrap a manager, upgrade it, rollback it, examine the results.\n\n To test the manager in-place upgrade procedure:\n - bootstrap a manager (this is part of the system under test,\n does destructive changes to the manager, and need a known manager\n version: so, can't use the testenv manager)\n - deploy the hello world app\n - upgrade the manager, changing some inputs (eg. 
the port that\n Elasticsearch uses)\n - check that everything still works (the previous deployment still\n reports metrics; we can install another deployment)\n - rollback the manager\n - post-rollback checks: the changed inputs are now the original\n values again, the installed app still reports metrics\n \"\"\"\n self.prepare_manager()\n\n preupgrade_deployment_id = self.deploy_hello_world('pre-')\n\n self.upgrade_manager()\n self.post_upgrade_checks(preupgrade_deployment_id)\n\n self.rollback_manager()\n self.post_rollback_checks(preupgrade_deployment_id)\n\n self.teardown_manager()\n","sub_path":"cosmo_tester/test_suites/test_manager_upgrade/manager_upgrade_test.py","file_name":"manager_upgrade_test.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"486597874","text":"# Kangmin Xie, kangminx, 09-23-2017\n\n# labels_dict: a dict of labels with healthy level defined\n# calculate_and_compare_score function return a dict of scores\n# labels_dict = {\"aws\":list[], \"azure\":list[], \"gcp\":list[]}\ndef calculate_and_compare_score(labels_dict, reference_dict):\n # here the lables_dict has the key-value type: String-List \n scores = dict()\n scores['aws_score'] = calculate_score(labels_dict['aws'], reference_dict)\n scores['azure_score'] = calculate_score(labels_dict['azure'], reference_dict)\n scores['gcp_score'] = calculate_score(labels_dict['gcp'], reference_dict)\n averageScore = 0.01 * scores['aws_score'] + 0.00 * scores['azure_score'] + 0.99 * scores['gcp_score']\n scores['average'] = averageScore\n return scores\n\n# Help funtion to get the score of current meal in a list\ndef calculate_score(labels_list, reference_dict):\n # the labels_dict here has key-value like \"Apple\":Excellent\n # the reference_dic here has the key-value like \"Apple\"\n # the score will be the sum_score / number of effective items\n if len(labels_list) == 0:\n return -10 # if the given list is empty\n sum_score = 0\n # the number of non-food neutral items \n count_zero_score_item = 0\n for item in labels_list:\n sum_score += look_for_score(item, reference_dict)\n if look_for_score(item, reference_dict) == 0:\n count_zero_score_item += 1\n if (len(labels_list) == count_zero_score_item):\n return -10\n raw_score = (sum_score)/(len(labels_list)-count_zero_score_item)\n current_meal_score = scoreToStand(raw_score)\n return current_meal_score\n\n# Help funtion to get a raw score by looking up the reference\ndef look_for_score(anItem, reference_dict):\n if anItem in reference_dict:\n healthOpt = reference_dict[anItem]\n return healthOptToScore(healthOpt)\n return 0;\n\n# Help funtion to get a raw score from the matching string\ndef healthOptToScore(result):\n if result == \"excellent\": score = 30\n elif result == \"good\": score = 20 \n elif result == \"neutral\": score = 1\n elif result == \"non-food\": score = 0\n elif result == \"bad\": score = -20\n elif result == \"terrible\": score = -30\n else: score = 0 # undefine\n return score\n\n# Help funtion to get a standard score\ndef scoreToStand(x):\n y = 0.0;\n if (x >= -30 and x < -10):\n y = 1.5 * x + 45.0\n if (x >= -10 and x < 0):\n y = 2.0 * x + 50.0\n if (x >= 0 and x < 17):\n y = 30.0/17.0 * (x - 17) + 80.0\n if (x >= 17 and x <= 30):\n y = 20.0/13.0 * (x - 30) + 100.0\n return y;\n\n# =============================================================================\n# print ('*******************************************')\n# print ('Test 
calculate_and_compare_score')\n# # Test below\n# list1 = ['milk', 'egg', 'noodles', 'plate']\n# list2 = [\"fries\", \"sandwich\", \"coke\"]\n# list3 = [\"beef\", \"soymilk\", \"rice\", \"cabbage\"]\n# \n# reference = {'milk':'excellent', 'egg':'good', 'noodles':'neutral', 'plate':'non-food',\n# \"fries\":'terrible', \"sandwich\":'bad', \"coke\":'terrible',\n# \"beef\":'good', \"soymilk\":'good', \"rice\":'neutral', \"cabbage\":'good'}\n# \n# dictTotal = {\"aws\":list1, \"azure\":list2, \"gcp\":list3}\n# rtdict = calculate_and_compare_score(dictTotal, reference)\n# print (rtdict)\n# print ('Test calculate_and_compare_score end')\n# \n# print ('*******************************************')\n# print ('Test calculate_score function')\n# score1 = calculate_score(list1, reference)\n# score2 = calculate_score(list2, reference)\n# score3 = calculate_score(list3, reference)\n# print (\"excellent list score: \", score1)\n# print (\"terrible list score: \", score2)\n# print (\"good list score: \", score3)\n# print ('Tes calculate_score function end')\n# print ('*******************************************')\n# \n# print ('Test look_for_score function')\n# milkScore = look_for_score(\"milk\", reference)\n# print ('milk: ', milkScore)\n# eggScore = look_for_score(\"egg\", reference)\n# print ('egg: ', eggScore)\n# noodlesScore = look_for_score(\"noodles\", reference)\n# print ('noodles: ', noodlesScore)\n# plateScore = look_for_score(\"plate\", reference)\n# print ('plate: ', plateScore)\n# \n# friesScore = look_for_score(\"fries\", reference)\n# print ('fries: ', friesScore)\n# sanwichScore = look_for_score(\"sandwich\", reference)\n# print ('sandwich: ', sanwichScore)\n# cokeScore = look_for_score(\"coke\", reference)\n# print ('coke: ', cokeScore)\n# \n# beefScore = look_for_score(\"beef\", reference)\n# print ('beef: ', beefScore)\n# soymilkScore = look_for_score(\"soymilk\", reference)\n# print ('soymilk: ', soymilkScore)\n# cabbageScore = look_for_score(\"cabbage\", reference)\n# print ('cabbage: ', cabbageScore)\n# riceScore = look_for_score(\"rice\", reference)\n# print ('rice: ', riceScore)\n# print ('Test look_for_score function end')\n# print ('*******************************************')\n# =============================================================================\n# =============================================================================\n# def getHealthyLevel(labels_dict):\n# score = calculate_score(labels_dict)\n# if (score == -1): return \"None-Food\"\n# if (score < 20): return \"Terrible\"\n# elif (score < 36): return \"Bad\"\n# elif (score < 54): return \"SoSo\"\n# elif (score < 78): return \"Good\"\n# elif (score <= 100): return \"Excellent\"\n# return \"Something is wrong\"\n# =============================================================================\n\n# =============================================================================\n# #Test Below\n# dict1 = {'milk':30, 'egg':20, 'noodles':1, 'plate':0}\n# print (calculate_score(dict1))\n# print (getHealthyLevel(dict1))\n# \n# dict2 = {\"fries\":-30, \"sanwich\":-20, \"coke\":-30}\n# print (calculate_score(dict2))\n# print (getHealthyLevel(dict2))\n# \n# dict3 = {\"beef\":20, \"soymilk\":20, \"rice\":1, \"cabbage\":20}\n# print (calculate_score(dict3))\n# print (getHealthyLevel(dict3))\n# \n# dict4 = {\"friedChicken\":-20, \"soup\":20, \"rice\":1}\n# print (calculate_score(dict4))\n# print (getHealthyLevel(dict4))\n# \n# dictAll = {\"aws\":dict1, \"azure\":dict2, \"gcp\":dict3}\n# # sprint (dictAll)\n# \n# for i in 
dictAll:\n# print (i, dictAll[i])\n# \n# print (\"test\")\n# listAll = calculate_and_compare_score(dictAll)\n# print (listAll)\n# =============================================================================\n","sub_path":"recognition_service/EvaluateImage.py","file_name":"EvaluateImage.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"384068740","text":"class Interrogator:\n def __init__(self, questions):\n self.questions = questions\n\n def __iter__(self):\n return self.questions.__iter__()\n\n\nquestions = [\n \"What is your name?\",\n \"What is your quest?\",\n \"What is the average airspeed velocity of an unladen swallow?\"\n]\n\nawkward_person = Interrogator(questions)\n\nfor question in awkward_person:\n print(question)\n","sub_path":"exercise-104.py","file_name":"exercise-104.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"55608406","text":"ROLE = {\n 'admin': 1,\n 'booth': 3,\n 'speaker': 4,\n 'ambassador': 6,\n 'user': 7,\n 'partner': 8\n}\n\nTICKET_TYPES = {\n 'user': 'user',\n 'exhibitor': 'exhibitor'\n}\n\nSLOT = {\n\t'community': 400,\n\t'commercial': 3600\n}\n\nEVENT_TYPES = {\n 'discuss panel',\n 'speaker',\n 'hackaton',\n 'other'\n}\n\nEVENT_DATES = {\n '1': '2017-11-21',\n '2': '2017-11-22',\n '3': '2017-11-23'\n}\n\nSPONSOR_STAGES = {\n '1': 'lead',\n '2': 'prospect',\n '3': 'official'\n}\n\nSPONSOR_TYPES = {\n '1': 'diamond',\n '2': 'platinum',\n '3': 'gold',\n '4': 'silver'\n}\n\nVA_NUMBER = {\n 'bca': '877800',\n 'permata': '431800',\n 'mandiri_bill': '242801',\n 'bni': '119800'\n}\n\nPAYPAL = {\n 'mode': \"sandbox\",\n 'client_id': \"Ac-Ikn76GlVB5tFLwMoFYEl9FGumrB7NYdkicE5bd7Q_QfWmnKDyK_ZlZ7mFB-MlENIQR1fTvcj1Ivdv\",\n 'client_secret': \"EF23WB7s-mKixitheWRubig7O4JJmMTjmtRxX2xxMfVuarGceOWe8Uda47ORTdrCEshaKhLlmU4rZ1OT\",\n 'payee': 'shi77.andy-facilitator@gmail.com',\n 'return_url': 'http://localhost:5000/payment/execute',\n 'cancel_url': 'http://localhost:5000/'\n}\n\nMIDTRANS_API_BASE_URL = 'https://api.sandbox.midtrans.com/v2/'\n# Change these consts to devsummit later\nMERCHANT_ID = 'M1066775'\nCLIENT_KEY = 'VT-client-g8cB-IVLwe64YIdv'\nSERVER_KEY = 'VT-server-njhqghnFUZbtZgOg9ldNtY0l:'\nIMAGE_QUALITY = 70\n\n# FCM key\nFCM_SERVER_KEY = 'key=AAAA8iNOby4:APA91bGxdjtV_YTm3FnvjUiGJPPartTvM5COQFsubP-kBGP0AbmGBml1WtbYgAKc2-CDNcFGYLl4j0JzJq4AzeZwc47aURd3MTKLW_bLS6FtYokJdgjJcE7rM-9KiPlPJ029S9ua7OUF'\nFCM_GENERAL_TOPIC = '/topics/devsummit_indonesia_2017'","sub_path":"app/configs/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"231705410","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.odr as odr\nimport scipy.optimize as optimize\nfrom sympy import solve, solveset, var\nimport sympy as sp\nfrom scipy.io import loadmat\nfrom copy import deepcopy\nimport time\nimport os\nfrom scipy.stats import truncnorm\n\n\ndef get_truncated_normal(mean=0, sd=1, low=0, upp=10):\n return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\n\n\ndef write_loadcurve(time, magnitude, file_name, id_numb, path=''):\n if not path == '':\n os.chdir(path)\n f = open(file_name, \"w+\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\t\\n\")\n f.write(\"\\t\\t\\n\")\n for t, m in zip(time, 
magnitude):\n f.write(\"\\t\\t\\t\" + str(t) + \", \" + str(m) + \"\\n\")\n f.write(\"\\t\\t\\n\")\n f.write(\"\\t\\n\")\n f.write(\"\")\n f.close()\n\n\ndef read_data_thief(file_name, path=''):\n if not path == '':\n os.chdir(path)\n data = []\n with open(file_name, 'r') as fh:\n next(fh)\n for line in fh:\n data.append([float(x) for x in line.split(',')])\n data = np.asarray(data)\n return data\n\n\ndef write_parameters(parameters, parm_name, path=''):\n if not path == '':\n os.chdir(path)\n i = 0\n f = open(\"parameters.feb\", \"w+\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\t\\n\")\n for param in parameters:\n f.write(\"\\t\\t\" + str(param) + \"\\n\")\n i += 1\n f.write(\"\\t\\n\")\n f.write(\"\")\n f.close()\n\n\ndef pre_stretch(ite_max, tol_error, path=''):\n if not path == '':\n os.chdir(path)\n error = np.inf # [mm]\n i = 0\n # os.system('cp geometry_init.feb geometry_opt.feb')\n X_aim = np.asarray(load_feb_file_nodes('geometry_init.feb', '', path=path))\n X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '', path=path))\n X_opt = deepcopy(X_subopt)\n #X_opt[:, 1:] = 0.875 * X_subopt[:, 1:]\n write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)\n while (i < ite_max) and (error > tol_error):\n os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i pre_stretch.feb')\n X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '', path=path))\n t, x = load_output_dat_file('disp_pre_stretch.dat', path=path)\n x = np.asarray(x)\n X_def = x[np.where(x[:, 0] == 1)[0][-1]:np.where(x[:, 0] == X_aim.shape[0])[0][-1] + 1, :]\n X_error = X_aim[:, 1:] - X_def[:, 1:]\n error = np.max(np.abs(X_error))\n X_opt = deepcopy(X_def)\n X_opt[:, 1:] = X_error + X_subopt[:, 1:]\n write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)\n print(i, error)\n i += 1\n\n\ndef write_febio_geometry_file(file_name, x, path=''):\n if not path == '':\n os.chdir(path)\n i = 0\n fh = open(file_name, 'r')\n with open('temp.feb', 'w+') as temp:\n for line in fh:\n if not line.find('') == -1:\n temp.write('\\t\\t\\t ' + str(x[i, 1]) + ', ' + str(x[i, 2]) + ', ' + str(x[i, 3]) + '\\n')\n i += 1\n i = int(np.min([i, x.shape[0]-1]))\n else:\n temp.write(line)\n os.system('mv temp.feb ' + file_name)\n\n\n\ndef load_feb_file_nodes(filename, section, path=''):\n if not path == '':\n os.chdir(path)\n nodes = []\n with open(filename) as fh:\n line = next(fh)\n while line.find(section) == -1:\n line = next(fh)\n for line in fh:\n if not line.find('') == -1:\n break\n id_1 = line.find(\" \")\n id_3 = line.find(\"\")\n nodes.append([int(line[id_1 + 10:id_2 - 1])] + [float(x) for x in line[id_2+3:id_3].split(',')])\n return nodes\n\n\ndef load_feb_file_nodes_id(filename, section, path=''):\n if not path == '':\n os.chdir(path)\n nodes_index = []\n with open(filename) as fh:\n line = next(fh)\n while line.find(section) == -1:\n line = next(fh)\n for line in fh:\n if not line.find('') == -1:\n break\n id_1 = line.find(\"\")\n nodes_index.append(int(line[id_1 + 10:id_2 - 1]))\n return nodes_index\n\n\ndef load_output_dat_file(filename, path=''):\n if not path == '':\n os.chdir(path)\n nodes_disp = []\n t = []\n with open(filename) as fh:\n for line in fh:\n if line.find('*Step') == 0:\n line = next(fh)\n id_1 = line.find('=')\n t.append(float(line[id_1 + 1:-1]))\n line = next(fh)\n line = next(fh)\n nodes_disp.append([float(x) for x in line.split(',')])\n return t, nodes_disp\n\ndef biconic_fitting(data):\n x = np.reshape(data[:, 0], [len(data[:, 0]), 1])\n y = 
np.reshape(data[:, 1], [len(data[:, 0]), 1])\n z = np.reshape(data[:, 2], [len(data[:, 0]), 1])\n X = np.zeros([len(x), len(x)+3])\n # create Matrix for least square minimization\n for i in range(len(x)):\n X[i, 0:3] = [x[i, 0]**2, y[i, 0]**2, x[i, 0]*y[i, 0]]\n X[i, i+3] = z[i, 0]**2\n p_prime = np.linalg.lstsq(X, 2*z, rcond=-1)\n p_prime = p_prime[0]\n # X_inv = np.linalg.pinv(X)\n # p_prime = 2*np.dot(X_inv, z)\n term = np.zeros([len(x), 1])\n # create Matrix for least square minimization\n for i in range(len(x)):\n term[i, 0] = p_prime[i+3, 0]*(2*z[i, 0] - p_prime[i+3, 0]*z[i, 0]**2)\n p = -np.ones([3, 1])\n a_1 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) + np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))\n a_2 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) - np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))\n a_1 = np.round(a_1, decimals=5)\n a_2 = np.round(a_2, decimals=5)\n if a_1 > 0 and (p_prime[0, 0] - a_1)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_1) >= 0:\n p[0] = np.real(a_1)\n elif a_2 > 0 and (p_prime[0, 0] - a_2)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_2) >= 0:\n p[0] = np.real(a_2)\n else:\n p[0] = np.inf\n p[1] = -p[0] + (p_prime[0, 0] + p_prime[1, 0])\n if p[0] == p[1]:\n p[2] = 0\n else:\n p[2] = 0.5*(np.arcsin(p_prime[2, 0]/(p[1] - p[0])))\n p_prime_2 = np.linalg.lstsq(X[:, 0:3], term, rcond=-1)\n p_prime_2 = p_prime_2[0]\n # p_prime_2 = np.dot(np.linalg.pinv(X[:, 0:3]), term)\n R_x = 1/p[0]\n R_y = 1/p[1]\n Q_x = R_x**2*(p_prime_2[0] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1\n Q_y = R_y**2*(p_prime_2[1] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1\n phi = p[2]\n return R_x, R_y, phi, Q_x, Q_y\n\ndef f_biconic_model(init, *data):\n \"\"\"biconical model; inital guess: init=[a',b',d',u',v',w'], data to fit to: data= [x_i,y_i,z_i]\"\"\"\n data = data[0]\n c = (init[3]*data[0, :]**2 + init[4]*data[1, :]**2 + init[5]*data[0, :]*data[1, :])/(init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :])\n return np.sum(( init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :] + c*(data[2, :])**2 - 2*(data[2, :]) )**2)\n\n\ndef f2_biconic_model(init, *data):\n data = data[0]\n x = data[:, 0]\n y = data[:, 1]\n z = data[:, 2]\n return np.sum((-z + init[4] + (x**2/init[0] + y**2/init[1])/(1 + np.sqrt(1 - (1+init[2])*x**2/init[0]**2 - (1+init[3])*y**2/init[1]**2)))**2)\n\ndef nm_biconic_fit(data):\n x = np.reshape(data[:, 0], [len(data[:, 0]), 1])\n y = np.reshape(data[:, 1], [len(data[:, 0]), 1])\n z = np.reshape(data[:, 2], [len(data[:, 0]), 1])\n init = np.array([1/7.6, 1/7.6, 0, 0, 0, 0])\n res = optimize.minimize(f_biconic_model, init, np.array([x, y, z]), method='Nelder-Mead', options={'xtol': 1e-10})\n p_prime = res.x\n a_1 = 0.5 * (-(-p_prime[0] - p_prime[1]) + np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))\n a_2 = 0.5 * (-(-p_prime[0] - p_prime[1]) - np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))\n a_1 = np.round(a_1, decimals=5)\n a_2 = np.round(a_2, decimals=5)\n p = np.zeros([5,1])\n if a_1 > 0 and (p_prime[0] - a_1) / (p_prime[0] + p_prime[1] - 2 * a_1) >= 0:\n p[0] = np.real(a_1)\n elif a_2 > 0 and (p_prime[0] - a_2) / (p_prime[0] + p_prime[1] - 2 * a_2) >= 0:\n p[0] = np.real(a_2)\n else:\n p[0] = np.inf\n p[1] = -p[0] + (p_prime[0] + p_prime[1])\n if p[0] == p[1]:\n p[2] = 0\n else:\n p[2] = 0.5 * (np.arcsin(p_prime[2] / (p[1] - p[0])))\n R_x = 1 / p[0]\n R_y = 1 / 
p[1]\n Q_x = R_x**2*(p_prime[3] - 0.5*p_prime[5] * np.tan(p[2])) - 1\n Q_y = R_y**2*(p_prime[4] - 0.5*p_prime[5] * np.tan(p[2])) - 1\n phi = p[2]\n return R_x, R_y, phi, Q_x, Q_y\n\n\ndef f_sphere(init, *data):\n data = np.array(data[0:3])[:, :, 0]\n x = data[0, :]\n y = data[1, :]\n z = data[2, :]\n return (-init[0]**2 + x**2 + y**2 + (z-init[1])**2)**2\n\n\ndef sphere_fit(data):\n x = np.reshape(data[:, 0], [len(data[:, 0]), 1])\n y = np.reshape(data[:, 1], [len(data[:, 0]), 1])\n z = np.reshape(data[:, 2], [len(data[:, 0]), 1])\n init = np.array([7.6, 0])\n res = optimize.least_squares(f_sphere, init, args=np.array([x, y, z]))\n return res.x\n\n\ndef f_circ(init, *data):\n data = np.array(data[0:2])[:, :, 0]\n x = data[0, :]\n y = data[1, :]\n return (-init[0]**2 + x**2 + (y-init[1])**2)**2\n\n\ndef circ_fit(data):\n x = np.reshape(data[:, 0], [len(data[:, 0]), 1])\n y = np.reshape(data[:, 1], [len(data[:, 0]), 1])\n init = np.array([7.6, 0])\n res = optimize.least_squares(f_circ, init, args=np.array([x, y]))\n return res.x\n\n\ndef keratometry(self, mode='biconic'):\n # Coordinates of surface\n x = self[:, 0]\n y = self[:, 1]\n z = self[:, 2]\n\n # Least squares\n\n # Create X matrix based on measurements\n x2 = x ** 2\n y2 = y ** 2\n xy = x * y\n z2 = z ** 2\n z2_diag = np.diag(z2)\n X = np.c_[x2, y2, xy, z2_diag]\n\n # Create target vector\n t = 2\n z_target = t * z\n\n # Solve least-squares\n Xinv = np.linalg.pinv(X)\n p = np.matmul(Xinv, z_target)\n\n # Obtain a', b', d'\n a_p = p[0]\n b_p = p[1]\n d_p = p[2]\n\n # Solve a and b to obtain Rx, Ry and Phi\n\n # Calculate a\n a = np.roots([1, -a_p - b_p, a_p * b_p - (d_p ** 2) / 4])\n print(a)\n aux = [np.real_if_close(a[0], tol=1e-5), np.real_if_close(a[1], tol=1e-5)]\n a = np.array(aux)\n\n # Avoid negative radii\n a = a[a > 0]\n print(a)\n\n # Avoid violating constrain on sin(phi)^2\n if np.abs(a_p - a[0]) < 1e-6:\n check = np.array([0, 0])\n else:\n check = (a_p - a) / ((a_p + b_p) - 2 * a)\n\n a = a[check >= 0]\n\n # Calculate b\n b = (a_p + b_p) - a\n\n if mode == 'biconic':\n\n # Calculate Radii and angle\n Rx = 1 / a\n Ry = 1 / b\n if (np.abs(d_p) < 1e-6) and (np.sum(np.abs(b - a)) < 1e-6):\n phi = np.array([0, 0])\n else:\n phi = 0.5 * np.arcsin(d_p / (b - a)) # Angle of flatter meridian\n\n # Double check the correct option if more than two options available\n if len(phi) == 2:\n if (phi[0] < 0) or (phi[0] >= np.pi/2):\n Rx = Rx[1]\n Ry = Ry[1]\n phi = phi[1]\n else:\n Rx = Rx[0]\n Ry = Ry[0]\n phi = phi[0]\n\n if Rx < Ry:\n phi = phi + np.pi / 2\n aux = Rx\n Rx = Ry\n Ry = aux\n\n phi_deg = phi * 180 / np.pi\n\n # Power\n Kmax = (1.3375 - 1) * 1000 / np.min(\n np.array([Rx, Ry])) # Maximum curvature related to minimum radius (steeper meridian)\n Kmin = (1.3375 - 1) * 1000 / np.max(\n np.array([Rx, Ry])) # Minimum curvature related to minimum radius (flatter meridian)\n Kmean = (Kmax + Kmin) / 2\n\n elif mode == 'sphere':\n Rx = 1 / np.real_if_close(a[0], tol=1e-6)\n Ry = Rx\n phi = 0\n phi_deg = 0\n\n # Power\n Kmax = (1.3375 - 1) * 1000 / Rx # Maximum curvature related to minimum radius (steeper meridian)\n Kmin = (1.3375 - 1) * 1000 / Ry # Minimum curvature related to minimum radius (flatter meridian)\n Kmean = (Kmax + Kmin) / 2\n\n else:\n raise ValueError('Unknown option (sphere or biconic)')\n\n # Solve u', v' and w' to determine conic constants Qx, Qy\n\n # c_target\n c_target = p[3:] * (t * z - p[3:] * z2)\n\n # X\n X = np.c_[x2, y2, xy]\n\n # Least squares\n p_u = np.matmul(np.linalg.pinv(X), c_target)\n\n u_p = 
p_u[0]\n v_p = p_u[1]\n w_p = p_u[2]\n\n # Conic values\n Qx = (Rx ** 2) * (u_p - w_p * np.tan(phi) / 2) - 1\n Qy = (Ry ** 2) * (v_p + w_p * np.tan(phi) / 2) - 1\n\n biconic = {'Rx': Rx, 'Ry': Ry, 'Qx': Qx, 'Qy': Qy, 'Phi': phi}\n\n # Fitting error\n a = 1 / Rx\n b = 1 / Ry\n u = (1 + Qx) / Rx ** 2\n v = (1 + Qy) / Ry ** 2\n t = 2\n\n # Reconstruct surface\n c_eq = (u * x ** 2 + v * y ** 2) / (a * x ** 2 + b * y ** 2)\n B = -t * np.ones(x.shape[0])\n C = a * x ** 2 + b * y ** 2\n\n # Predict sagitta\n z_pred = []\n for ix in range(B.shape[0]):\n z_pred.append(np.roots([c_eq[ix], B[ix], C[ix]]))\n\n z_pred = np.array(z_pred)\n\n # Select correct solution\n centroid_target = np.mean(z)\n centroids_pred = np.mean(z_pred, axis=0)\n diff = np.abs(centroids_pred - centroid_target)\n indx = int(np.where(diff == np.min(diff))[0])\n z_pred = z_pred[:, indx]\n\n # Calculate error\n MSE = np.sum(np.sqrt((z_pred - z) ** 2))\n\n # if self.verbose:\n # print('MSE: %1.3f' % MSE)\n #\n # print('Kmax: %1.2f D;' % Kmax, 'Kmin: %1.2f D;' % Kmin, 'Kmean: %1.2f D;' % Kmean,\n # 'Astigm: %1.2f D' % (Kmax - Kmin),\n # r'Angle: %1.2f deg.' % phi_deg)\n\n return Kmax, Kmin, Kmean, biconic\n\n\ndef execute_simulation(cc):\n ite_max = 12 # [-]\n tol_error = 1e-3 # [mm]\n\n m_1 = 65.75\n c_1 = 0.0065\n k = 100\n k_epi = cc[1]\n gamma_stroma = 0 # 5.5\n tau_stroma = 0 # 38.1666\n\n E_epi = cc[0] # Young's modulus [MPa]\n nu_epi = 0.075 # Poison ratio [-]\n k_stroma = cc[2]\n gamma_epi = 0\n tau_epi = 0\n eye_lid_pressure = cc[3]\n duration_initiating_contact = 10\n duration_load = 28790\n duration_unload = 3600 * 16\n time_prestretch = tau_stroma * 5 + 64\n time_initiating_contact = time_prestretch + duration_initiating_contact\n time_load_end = time_initiating_contact + duration_load\n time_unload_end = time_load_end + duration_unload\n parameter_name = ['m_1', 'c_1', 'k', 'k_stroma', 'gamma_stroma', 'tau_stroma', 'E_epi', 'nu_epi', 'k_epi',\n 'gamma_epi',\n 'tau_epi', 'time_prestretch', 'time_initiating_contact', 'time_load_end', 'time_unload_end',\n 'eye_lid_pressure']\n\n unload_disp = 0.0075 + k_epi / 1.995e-5 * 0.003 - 0.007 * \\\n ((0.00025 - eye_lid_pressure) / 0.0005 - 0.5 * (0.0015 - E_epi) / 0.0015)\n time_1 = [0, 64, time_prestretch]\n magnitude_1 = [0, 1, 1]\n time_2 = [0, 64 * 0.3, 64]\n magnitude_2 = [0.25, time_prestretch * 0.5, time_prestretch * 1.5]\n time_3 = [time_prestretch, time_initiating_contact, 3600 * 24 + time_prestretch,\n 3600 * 24 + time_initiating_contact, 2 * 3600 * 24 + time_prestretch,\n 2 * 3600 * 24 + time_initiating_contact, 3 * 3600 * 24 + time_prestretch,\n 3 * 3600 * 24 + time_initiating_contact]\n magnitude_3 = [-2.5, 2, -2.5, 2.5, -2.5, 3, -2.5, 3.5]\n time_4 = [time_initiating_contact, time_initiating_contact + 50, 3600 * 24, 3600 * 24 + time_initiating_contact,\n 3600 * 24 + time_initiating_contact + 50, 3600 * 24 * 2, 2 * 3600 * 24 + time_initiating_contact,\n 2 * 3600 * 24 + time_initiating_contact + 50, 3 * 3600 * 24, 3 * 3600 * 24 + time_initiating_contact,\n 3 * 3600 * 24 + time_initiating_contact + 50]\n magnitude_4 = [0.25, 1, 1, 0.25, 1, 1, 0.25, 1, 1, 0.25, 1]\n time_5 = [time_load_end, time_load_end + 50, time_load_end + 50.5, 24 * 3600, 24 * 3600 + time_load_end,\n 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 50.5, 2 * 24 * 3600,\n 2 * 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 50.5,\n 3 * 24 * 3600, 3 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_load_end + 50,\n 3 * 24 * 3600 + time_load_end 
+ 50.5]\n magnitude_5 = [-unload_disp, 0.01, 1, 1, -(unload_disp + 0.001), 0.01, 1, 1, -(unload_disp + 0.0015), 0.01, 1,\n 1, -(unload_disp + 0.002), 0.01, 1]\n time_6 = [time_prestretch, time_prestretch + 60, time_prestretch + 500, time_prestretch + 2500, time_load_end,\n 24 * 3600 + time_prestretch, 24 * 3600 + time_prestretch + 60, 24 * 3600 + time_prestretch + 500,\n 24 * 3600 + time_prestretch + 2500, 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_prestretch,\n 2 * 24 * 3600 + time_prestretch + 60, 2 * 24 * 3600 + time_prestretch + 500,\n 2 * 24 * 3600 + time_prestretch + 2500, 2 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_prestretch,\n 3 * 24 * 3600 + time_prestretch + 60, 3 * 24 * 3600 + time_prestretch + 500,\n 3 * 24 * 3600 + time_prestretch + 2500, 3 * 24 * 3600 + time_load_end]\n magnitude_6 = [1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500]\n time_7 = [time_load_end, time_load_end + 50, time_load_end + 500, time_load_end + 2500, time_unload_end,\n 24 * 3600 + time_load_end, 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 500,\n 24 * 3600 + time_load_end + 2500, 24 * 3600 + time_unload_end, 2 * 24 * 3600 + time_load_end,\n 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 500,\n 2 * 24 * 3600 + time_load_end + 2500, 2 * 24 * 3600 + time_unload_end, 3 * 24 * 3600 + time_load_end,\n 3 * 24 * 3600 + time_load_end + 50, 3 * 24 * 3600 + time_load_end + 500,\n 3 * 24 * 3600 + time_load_end + 2500, 3 * 24 * 3600 + time_unload_end]\n magnitude_7 = [0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200,\n 3500]\n\n main_path = os.popen('pwd').read()[:-1]\n parameter = [m_1, c_1, k, k_stroma, gamma_stroma, tau_stroma, E_epi, nu_epi, k_epi, gamma_epi, tau_epi,\n 10 * time_prestretch, 10 * duration_initiating_contact, 10 * duration_load, 10 * duration_unload,\n eye_lid_pressure]\n write_parameters(parameter, parameter_name, path=main_path)\n write_loadcurve(time_1, magnitude_1, 'pre_stretch_load_curve.feb', 1, path=main_path)\n write_loadcurve(time_2, magnitude_2, 'pre_stretch_must_point_curve.feb', 2, path=main_path)\n write_loadcurve(time_3, magnitude_3, 'initiating_contact_load_curve.feb', 3, path=main_path)\n write_loadcurve(time_4, magnitude_4, 'load_curve.feb', 4, path=main_path)\n write_loadcurve(time_5, magnitude_5, 'unload_curve.feb', 5, path=main_path)\n write_loadcurve(time_6, magnitude_6, 'must_point_curve_1.feb', 6, path=main_path)\n write_loadcurve(time_7, magnitude_7, 'must_point_curve_2.feb', 7, path=main_path)\n \n pre_stretch(ite_max, tol_error, path=main_path)\n os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i 4_day_with_prestretch.feb -o 4_day.log -p 4_day.xplt &>> 4_day-jg.log')\n\ndef check_success(path_log_name):\n # Sanity check\n if '.' not in path_log_name:\n raise ValueError('File must have the extension (%s)'%path_log_name)\n\n # Open log file from FEBio\n log = open(path_log_name, 'r')\n\n # Dumped all lines in list AND reverse the list\n log = log.readlines()[::-1]\n\n # Trim the list to keep only the part with interesting information (avoids long executions when failure)\n log = log[:20]\n\n # For all the lines in the list, check whether the Normal Termination is reached (returns 0). Otherwise, fails and returns 1\n for line in log:\n # Remove return carriage at the end of line and blank spaces at the beginning\n line = line.strip()\n\n # If the length of the line is 0, it is empty. 
Otherwise, check if it is normal termination\n if len(line) == 0: #Skips empty line\n continue\n else:\n if line == 'N O R M A L T E R M I N A T I O N':\n return 0\n elif line =='E R R O R T E R M I N A T I O N':\n return 1\n\n # The simulation is running\n return 2\n\n\ndef cart2pol(x, y):\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\n\ndef pol2cart(rho, phi):\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return(x, y)\n\n","sub_path":"Patient_specific/axisymmetric/my_functions.py","file_name":"my_functions.py","file_ext":"py","file_size_in_byte":21361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"159744004","text":"def get_predictions(model, data_loader):\n model = model.eval()\n review_texts = []\n predictions = []\n prediction_probs = []\n real_values = []\n with torch.no_grad():\n for d in data_loader:\n texts = d[\"review_text\"]\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n _, preds = torch.max(outputs, dim=1)\n probs = F.softmax(outputs, dim=1)\n review_texts.extend(texts)\n predictions.extend(preds)\n prediction_probs.extend(probs)\n predictions = torch.stack(predictions).cpu()\n prediction_probs = torch.stack(prediction_probs).cpu()\n return review_texts, predictions, prediction_probs\ny_review_texts, y_pred, y_pred_probs = get_predictions(\n model,\n test_df_data_loader\n)\ny_pred = y_pred.numpy().ravel()\nsubmission = pd.DataFrame({'label': y_pred})\nsubmission.to_csv('submission.csv', index=False)","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"191530074","text":"from __future__ import unicode_literals\nfrom floe.api import WorkFloe, OEMolOStreamCube\nfrom ComplexPrepCubes.cubes import SolvationCube, ForceFieldPrep\nfrom LigPrepCubes.cubes import LigChargeCube\nfrom LigPrepCubes.ports import LigandReader\nfrom YankCubes.cubes import YankSolvationFECube\nfrom OpenMMCubes.cubes import OpenMMminimizeCube, OpenMMnvtCube, OpenMMnptCube\n\njob = WorkFloe(\"SolvationFreeEnergy\")\n\njob.description = \"\"\"\nSolvation Free Energy Calculation of small molecules\n\nEx. 
python floes/solvation_free_energy --ligands ligands.oeb\n--ofs-data_out fe.oeb\n\nParameters:\n-----------\nligands (file): OEB file of the prepared ligands\n\nOutputs:\n--------\nofs: Output file\n\"\"\"\n\njob.classification = [['Simulation']]\njob.tags = [tag for lists in job.classification for tag in lists]\n\n# Ligand setting\niligs = LigandReader(\"Ligands\", title=\"Ligand Reader\")\niligs.promote_parameter(\"data_in\", promoted_name=\"ligands\", title=\"Ligand Input File\", description=\"Ligand file name\")\n\nchargelig = LigChargeCube(\"LigCharge\")\nchargelig.promote_parameter('max_conformers', promoted_name='max_conformers',\n description=\"Set the max number of conformers per ligand\", default=800)\n\nsolvate = SolvationCube(\"Solvation\")\nsolvate.promote_parameter(\"density\", promoted_name=\"density\", title=\"Solution density in g/ml\", default=1.0,\n description=\"Solution Density in g/ml\")\nsolvate.promote_parameter(\"solvents\", promoted_name=\"solvents\", title=\"Solvent components\",\n default='[H]O[H], ClC(Cl)Cl, CS(=O)C, c1ccccc1',\n description=\"Comma separated smiles strings of solvent components\")\nsolvate.promote_parameter(\"molar_fractions\", promoted_name=\"molar_fractions\",\n title=\"Molar fractions\",\n default='1.0, 0.0, 0.0, 0.0',\n description=\"Comma separated strings of solvent molar fractions\")\nsolvate.promote_parameter('distance_between_atoms', promoted_name='distance_between_atoms', default=2.5)\nsolvate.promote_parameter(\"padding_distance\", promoted_name=\"padding_distance\", default=11.0,\n description=\"The largest dimension (in A) of the solute (along the x, y, or z axis) \"\n \"is determined, and a cubic box of size \"\n \"(largest dimension)+2*padding is used\")\n\n\n\nff = ForceFieldPrep(\"ForceField\")\n# ff.promote_parameter('ligand_forcefield', promoted_name='ligand_forcefield', default='SMIRNOFF')\n\n# Minimization\nminimize = OpenMMminimizeCube(\"Minimize\")\nminimize.promote_parameter('restraints', promoted_name='m_restraints', default=\"noh ligand\",\n description='Select mask to apply restarints')\nminimize.promote_parameter('restraintWt', promoted_name='m_restraintWt', default=5.0,\n description='Restraint weight in kcal/(mol A^2')\n\n# NVT Warm-up\nwarmup = OpenMMnvtCube('warmup', title='warmup')\nwarmup.promote_parameter('time', promoted_name='warm_psec', default=20.0,\n description='Length of MD run in picoseconds')\nwarmup.promote_parameter('restraints', promoted_name='w_restraints', default=\"noh ligand\",\n description='Select mask to apply restarints')\nwarmup.promote_parameter('restraintWt', promoted_name='w_restraintWt', default=2.0,\n description='Restraint weight in kcal/(mol A^2')\nwarmup.promote_parameter('trajectory_interval', promoted_name='w_trajectory_interval', default=0,\n description='Trajectory saving interval')\nwarmup.promote_parameter('reporter_interval', promoted_name='w_reporter_interval', default=0,\n description='Reporter saving interval')\nwarmup.promote_parameter('outfname', promoted_name='w_outfname', default='warmup',\n description='Equilibration suffix name')\nwarmup.promote_parameter('center', promoted_name='center', default=True)\n\n# NPT Equilibration stage\nequil = OpenMMnptCube('equil', title='equil')\nequil.promote_parameter('time', promoted_name='eq_psec', default=20.0,\n description='Length of MD run in picoseconds')\nequil.promote_parameter('restraints', promoted_name='eq_restraints', default=\"noh ligand\",\n description='Select mask to apply 
restraints')\nequil.promote_parameter('restraintWt', promoted_name='eq_restraintWt', default=0.1,\n description='Restraint weight in kcal/(mol A^2')\nequil.promote_parameter('trajectory_interval', promoted_name='eq_trajectory_interval', default=0,\n description='Trajectory saving interval')\nequil.promote_parameter('reporter_interval', promoted_name='eq_reporter_interval', default=0,\n description='Reporter saving interval')\nequil.promote_parameter('outfname', promoted_name='eq_outfname', default='equil',\n description='Equilibration suffix name')\n\nsolvationfe = YankSolvationFECube(\"SovationFE\")\nsolvationfe.promote_parameter('iterations', promoted_name='iterations', default=1000)\nsolvationfe.promote_parameter('nonbondedCutoff', promoted_name='nonbondedCutoff', default=10.0)\n\nofs = OEMolOStreamCube('ofs', title='OFS-Success')\nofs.set_parameters(backend='s3')\n\nfail = OEMolOStreamCube('fail', title='OFS-Failure')\nfail.set_parameters(backend='s3')\nfail.set_parameters(data_out='fail.oeb.gz')\n\njob.add_cubes(iligs, chargelig, solvate, ff, minimize, warmup, equil, solvationfe, ofs, fail)\n\niligs.success.connect(chargelig.intake)\nchargelig.success.connect(solvate.intake)\nsolvate.success.connect(ff.intake)\nff.success.connect(minimize.intake)\nminimize.success.connect(warmup.intake)\nwarmup.success.connect(equil.intake)\nequil.success.connect(solvationfe.intake)\nsolvationfe.success.connect(ofs.intake)\nsolvationfe.failure.connect(fail.intake)\n\nif __name__ == \"__main__\":\n job.run()","sub_path":"floes/solvation_free_energy.py","file_name":"solvation_free_energy.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"1645922","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom flask import Flask, request, jsonify,render_template\nfrom flask_cors import CORS\nfrom FQA_Bot import *\nfrom datetime import timedelta\nimport time\n\napp = Flask(__name__)\n#app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds = 1)\nCORS(app,supports_credentials=True)\n\nbot = Bot()\n\n@app.route('/')\ndef index():\n return render_template(\"chatBot.html\")\n\n\n@app.errorhandler(404)\ndef url_error(e):\n return \"\"\"\n Wrong URL!\n
<pre>{}</pre>
\"\"\".format(e), 404\n\n\n@app.errorhandler(500)\ndef server_error(e):\n return \"\"\"\n An internal error occurred:
<pre>{}</pre>
\n See logs for full stacktrace.\n \"\"\".format(e), 500\n\n\n@app.route('/api', methods=['POST'])\ndef api():\n s = time.time()\n \n query = request.get_json()[\"question\"]\n print(\"post: \"+query)\n \n output_data = {}\n output_data[\"answer\"],output_data[\"sim1\"],output_data[\"sim2\"],output_data[\"sim3\"]= bot.get_answer(query)\n output_data[\"time_cost\"] = time.time() - s\n \n response = jsonify(output_data)\n return response\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=80, debug=False)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"40843943","text":"def BinToDecimale (nb):\r\n \"\"\" parametre nb str\r\n retour int\r\n \"\"\"\r\n n=-1\r\n x=0\r\n for n in range (len(nb)-1, -1, -1):\r\n if nb[n]==\"1\":\r\n x=2**(len(nb)-n-1)+x\r\n return x\r\n\r\n#while True :\r\n# x=str(input(\"nb base binaire= \"))\r\n# print(BinToDecimale(x))\r\n \r\n","sub_path":"Projet_Python/Fonction_BinToDecimale.py","file_name":"Fonction_BinToDecimale.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391598800","text":"import pymysql\n\nimport BuyerFunctionality\nimport LoginRegister\nimport UpdateBuyerInformation\n\nfrom PyQt5.QtWidgets import *\n\n\nclass BuyerAccountInformation(QWidget):\n\n def __init__(self, username):\n super(BuyerAccountInformation, self).__init__()\n\n self.username = username\n\n self.connection = pymysql.connect(host=\"localhost\",\n user=\"root\",\n password=None,\n db=\"grocerytech\",\n charset=\"utf8mb4\")\n\n self.initUI()\n\n def initUI(self):\n\n root = QGridLayout()\n\n self.f_name = QLabel(\"First Name\")\n self.usernameLabel = QLabel(\"Username\")\n self.pref_store = QLabel(\"Preferred Store\")\n self.store_address = QLabel(\"Store Address\")\n self.email = QLabel(\"Email\")\n self.pref_num = QLabel(\"Preferred Card Number\")\n self.rout_num = QLabel(\"Routing Number\")\n self.l_name = QLabel(\"Last Name\")\n self.phone = QLabel(\"Phone\")\n self.address = QLabel(\"Address\")\n self.city = QLabel(\"City\")\n self.state = QLabel(\"State\")\n self.zip = QLabel(\"Zip Code\")\n\n self.f_nameEdit = QLineEdit()\n self.usernameEdit = QLineEdit()\n self.pref_storeEdit = QLineEdit()\n self.store_addressEdit = QLineEdit()\n self.emailEdit = QLineEdit()\n self.pref_numEdit = QLineEdit()\n self.rout_numEdit = QLineEdit()\n self.l_nameEdit = QLineEdit()\n self.phoneEdit = QLineEdit()\n self.addressEdit = QLineEdit()\n self.cityEdit = QLineEdit()\n self.stateEdit = QLineEdit()\n self.zipEdit = QLineEdit()\n\n self.f_nameEdit.setReadOnly(True)\n self.usernameEdit.setReadOnly(True)\n self.pref_storeEdit.setReadOnly(True)\n self.store_addressEdit.setReadOnly(True)\n self.emailEdit.setReadOnly(True)\n self.pref_numEdit.setReadOnly(True)\n self.rout_numEdit.setReadOnly(True)\n self.l_nameEdit.setReadOnly(True)\n self.phoneEdit.setReadOnly(True)\n self.addressEdit.setReadOnly(True)\n self.cityEdit.setReadOnly(True)\n self.stateEdit.setReadOnly(True)\n self.zipEdit.setReadOnly(True)\n\n self.f_nameEdit.setText(self.getFirstName())\n self.usernameEdit.setText(self.username)\n self.pref_storeEdit.setText(self.getPrefStore())\n self.store_addressEdit.setText(self.getStoreAddress())\n self.emailEdit.setText(self.getEmail())\n self.pref_numEdit.setText(str(self.getPrefCard()))\n self.rout_numEdit.setText(str(self.getRoutNum()))\n 
self.l_nameEdit.setText(self.getLastName())\n self.phoneEdit.setText(str(self.getPhone()))\n self.addressEdit.setText(self.getAddress())\n self.cityEdit.setText(self.getCity())\n self.stateEdit.setText(self.getState())\n self.zipEdit.setText(str(self.getZip()))\n\n self.back = QPushButton(\"Back\")\n self.del_acct = QPushButton(\"Delete Account\")\n self.update = QPushButton(\"Update\")\n\n self.back.clicked.connect(self.backClicked)\n self.del_acct.clicked.connect(self.deleteClicked)\n self.update.clicked.connect(self.updateClicked)\n\n root.addWidget(self.f_name, 1, 1)\n root.addWidget(self.f_nameEdit, 1, 2)\n root.addWidget(self.l_name, 1, 3)\n root.addWidget(self.l_nameEdit, 1, 4)\n root.addWidget(self.usernameLabel, 2, 1)\n root.addWidget(self.usernameEdit, 2, 2)\n root.addWidget(self.phone, 2, 3)\n root.addWidget(self.phoneEdit, 2, 4)\n root.addWidget(self.pref_store, 3, 1)\n root.addWidget(self.pref_storeEdit, 3, 2)\n root.addWidget(self.address, 3, 3)\n root.addWidget(self.addressEdit, 3, 4)\n root.addWidget(self.store_address, 4, 1)\n root.addWidget(self.store_addressEdit, 4, 2)\n root.addWidget(self.city, 4, 3)\n root.addWidget(self.cityEdit, 4, 4)\n root.addWidget(self.email, 5, 1)\n root.addWidget(self.emailEdit, 5, 2)\n root.addWidget(self.state, 5, 3)\n root.addWidget(self.stateEdit, 5, 4)\n root.addWidget(self.pref_num, 6, 1)\n root.addWidget(self.pref_numEdit, 6, 2)\n root.addWidget(self.zip, 6, 3)\n root.addWidget(self.zipEdit, 6, 4)\n root.addWidget(self.rout_num, 7, 1)\n root.addWidget(self.rout_numEdit, 7, 2)\n root.addWidget(self.back, 8, 2)\n root.addWidget(self.del_acct, 8, 3)\n root.addWidget(self.update, 8, 4)\n\n self.setLayout(root)\n self.setWindowTitle(\"Buyer Account Information\")\n self.setGeometry(400, 120, 600, 270)\n self.show()\n\n\n def getFirstName(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT first_name FROM USERR WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n f_name = cursor.fetchone()\n return f_name[0]\n\n def getLastName(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT last_name FROM USERR WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n l_name = cursor.fetchone()\n return l_name[0]\n\n def getPhone(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT phone FROM BUYER WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n phone = cursor.fetchone()\n return phone[0]\n\n def getPrefStore(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT store_name FROM GROCERYSTORE, BUYER WHERE GROCERYSTORE.store_id=BUYER.default_store_id and username=\"{}\";'.format(self.username))\n exists = cursor.execute(select)\n\n if exists == 0:\n return \"None\"\n else:\n store_name = cursor.fetchone()\n return store_name[0]\n\n def getAddress(self):\n cursor1 = self.connection.cursor()\n cursor2 = self.connection.cursor()\n\n select1 = ('SELECT house_number FROM ADDRESS, BUYER WHERE ADDRESS.id=BUYER.address_id and username=\"{}\";'.format(self.username))\n cursor1.execute(select1)\n house_number = cursor1.fetchone()\n house_number = str(house_number[0])\n\n select2 = ('SELECT street FROM ADDRESS, BUYER WHERE ADDRESS.id=BUYER.address_id and username=\"{}\";'.format(self.username))\n cursor2.execute(select2)\n street = cursor2.fetchone()\n street = street[0]\n\n return (house_number + \" \" + street)\n\n def getStoreAddress(self):\n cursor1 = self.connection.cursor()\n cursor2 = self.connection.cursor()\n address_id = self.getStoreID()\n\n 
select1 = ('SELECT house_number FROM ADDRESS, GROCERYSTORE WHERE ADDRESS.id=GROCERYSTORE.address_id and ADDRESS.id = {};'.format(address_id))\n cursor1.execute(select1)\n house_number = cursor1.fetchone()\n house_number = str(house_number[0])\n\n select2 = ('SELECT street FROM ADDRESS, GROCERYSTORE WHERE ADDRESS.id=GROCERYSTORE.address_id and ADDRESS.id = {};'.format(address_id))\n cursor2.execute(select2)\n street = cursor2.fetchone()\n street = street[0]\n\n return (house_number + \" \" + street)\n\n def getCity(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT city FROM ADDRESS, BUYER WHERE ADDRESS.id=BUYER.address_id and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n city = cursor.fetchone()\n return city[0]\n\n def getEmail(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT email FROM USERR WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n email = cursor.fetchone()\n return email[0]\n\n def getState(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT state FROM ADDRESS, BUYER WHERE ADDRESS.id=BUYER.address_id and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n state = cursor.fetchone()\n return state[0]\n\n def getPrefCard(self):\n if self.hasNoPayMeth():\n return \"NONE\"\n\n if self.defaultIsCheck():\n return \"Check\"\n\n cursor = self.connection.cursor()\n\n select = ('SELECT account_number FROM BUYER, PAYMENTS WHERE BUYER.default_payment=PAYMENTS.payment_name and BUYER.username=PAYMENTS.username and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n prefCard = cursor.fetchone()\n\n if type(prefCard) == tuple:\n return prefCard[0]\n\n else:\n return \"No card on file\"\n\n def getZip(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT zip_code FROM ADDRESS, BUYER WHERE ADDRESS.id=BUYER.address_id and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n zipcode = cursor.fetchone()\n return zipcode[0]\n\n def getRoutNum(self):\n if self.hasNoPayMeth():\n return \"NONE\"\n\n if self.defaultIsCheck():\n return \"Check\"\n\n cursor = self.connection.cursor()\n\n select = ('SELECT routing_number FROM BUYER, PAYMENTS WHERE BUYER.default_payment=PAYMENTS.payment_name and BUYER.username=PAYMENTS.username and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n routNum = cursor.fetchone()\n\n if type(routNum) == tuple:\n return routNum[0]\n\n else:\n return \"No card on file\"\n\n def getStoreID(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT GROCERYSTORE.address_id FROM GROCERYSTORE, BUYER WHERE GROCERYSTORE.store_id=BUYER.default_store_id and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n address_id = cursor.fetchone()\n return address_id[0]\n\n def defaultIsCheck(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT COUNT(account_number) FROM BUYER, PAYMENTS WHERE BUYER.default_payment=PAYMENTS.payment_name and BUYER.username=PAYMENTS.username and BUYER.username=\"{}\";'.format(self.username))\n cursor.execute(select)\n isFound = cursor.fetchone()\n isFound = isFound[0]\n if isFound == 0:\n return True\n else:\n return False\n\n def hasNoPayMeth(self):\n cursor = self.connection.cursor()\n select = ('SELECT default_payment FROM BUYER WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n\n if cursor.fetchone()[0] == \"NONE\":\n return True\n\n return False\n\n def backClicked(self):\n self.buyerFunct = 
BuyerFunctionality.BuyerFunctWindow(self.username)\n self.buyerFunct.show()\n self.close()\n\n self.connection.close()\n\n def deleteClicked(self):\n self.confDelete = ConfirmDeleteAccount(self.username)\n self.confDelete.show()\n self.close()\n\n self.connection.close()\n\n def updateClicked(self):\n self.updateActInfo = UpdateBuyerInformation.UpdateBuyerInformation(self.username)\n self.updateActInfo.show()\n self.close()\n\n self.connection.close()\n\n\n\nclass ConfirmDeleteAccount(QWidget):\n\n def __init__(self, username):\n super(ConfirmDeleteAccount, self).__init__()\n\n self.username = username\n\n self.initUI()\n\n self.connection = pymysql.connect(host=\"localhost\",\n user=\"root\",\n password=None,\n db=\"grocerytech\",\n charset=\"utf8mb4\")\n\n def initUI(self):\n\n root = QVBoxLayout()\n msg = QHBoxLayout()\n btns = QGridLayout()\n\n self.message = QLabel(\"ARE YOU SURE YOU WANT TO DELETE YOUR ACCOUNT?\")\n msg.addWidget(self.message)\n\n self.cancelBtn = QPushButton(\"Cancel\")\n self.confBtn = QPushButton(\"Confirm\")\n self.cancelBtn.clicked.connect(self.cancelClicked)\n self.confBtn.clicked.connect(self.confClicked)\n\n btns.addWidget(self.cancelBtn, 1, 3)\n btns.addWidget(self.confBtn, 1, 4)\n\n root.addLayout(msg)\n root.addLayout(btns)\n self.setLayout(root)\n self.setGeometry(500, 250, 150, 150)\n self.setWindowTitle(\"Confirm Account Deletion\")\n self.show()\n\n def cancelClicked(self):\n self.acctInfo = BuyerAccountInformation(self.username)\n self.acctInfo.show()\n self.close()\n\n self.connection.close()\n\n def confClicked(self):\n cursor = self.connection.cursor()\n\n payments = ('DELETE FROM PAYMENTS WHERE username=\"{}\";'.format(self.username))\n user = ('DELETE FROM USERR WHERE username=\"{}\";'.format(self.username))\n address = ('DELETE FROM ADDRESS WHERE id={};'.format(self.getAddressID()))\n\n cursor.execute(payments)\n cursor.execute(user)\n cursor.execute(address)\n self.connection.commit()\n\n self.loginRegister = LoginRegister.LoginWindow()\n self.loginRegister.show()\n self.close()\n\n self.connection.close()\n\n def getAddressID(self):\n cursor = self.connection.cursor()\n\n select = ('SELECT address_id FROM BUYER WHERE username=\"{}\";'.format(self.username))\n cursor.execute(select)\n return cursor.fetchone()[0]\n\n\n\n","sub_path":"BuyerAccountInformation.py","file_name":"BuyerAccountInformation.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"110977814","text":"# Import modules \nimport math\nimport numpy as np\nimport matplotlib \nimport matplotlib.pyplot as plt\n\n################################\n#Force LaTeX use -- taken from Emile \nfrom matplotlib import rc\nfnt = 14 # font size\nrc('text', usetex=True)\nrc('text.latex',preamble=r'\\usepackage{bm}')\nfont = {'family': 'serif',\n 'weight': 'normal',\n 'size': fnt+2,\n }\nfontt = {'family': 'serif',\n 'color': 'white',\n 'weight': 'normal',\n 'size': fnt+2,\n }\n\nmatplotlib.rc('font', **font)\n################################\n\nx = np.linspace(-1.0,1.0,500)\nA0 = 1.0 \nAT = 0.4 \nL = 1.0\nA_x = 0.5*A0*((1.0+AT/A0) + (AT/A0-1.0)*np.cos(np.pi*x/L))\n\nx = np.concatenate(([-1.2],x,np.linspace(1.0,5.0,20)))\nA_x = np.concatenate(([1000.0],A_x,np.linspace(A_x[A_x.size-1],1.0,20)))\n\ny_axis = 0.05\n\naxis= np.linspace(y_axis,y_axis,x.size)\nx_axis = np.linspace(-1.0,5.0,axis.size)\n#Create figure\nfig1 = plt.figure()\n\nax0 = 
fig1.add_subplot(111)\nax0.plot(x,A_x/A0,ls='-',color='black')\nax0.fill(x,A_x/A0,color='grey')\nax0.plot(x_axis,axis,ls='-',color='black')\nax0.set(xlim=[-1.6,3.0],ylim=[-0.1,1.1])\nax0.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False) # labels along the bottom edge are off\nax0.tick_params(\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left=False, # ticks along the bottom edge are off\n right=False, # ticks along the top edge are off\n labelleft=False) # labels along the bottom edge are off\n\n#inlet arrows\ndy = (1.0-y_axis)/10\n\nfor i in range(0,10):\n\tax0.arrow(-1.2,(i+1)*dy,0.2,0.0,head_width=20.0*0.001,color='black')\n\nax0.text(-1.5,0.6,r'$\\rho_0$')\nax0.text(-1.5,0.5,r'$T_0$')\nax0.text(-1.5,0.4,r'$M_0$')\n\n#Uniform and stretched grid eregions \nax0.arrow(-1.0,-0.05,2.0,0.0,head_width=20.0*0.001,color='black')\nax0.arrow(1.0,-0.05,-2.0,0.0,head_width=20.0*0.001,color='black')\nax0.text(-0.35,-0.035,r'Uniform')\nax0.arrow(1.08,-0.05,2.0,0.0,head_width=20.0*0.001,color='black')\nax0.arrow(3.08,-0.05,-2.0,0.0,head_width=20.0*0.001,color='black')\nax0.text(1.65,-0.035,r'Stretched')\nfig1.tight_layout()\n\ndpi= 1000\nfig1.savefig('figuer_sim.eps',format='eps',dpi=dpi)\n\n\nplt.show()\n\n","sub_path":"nzzle_design/figure_sim.py","file_name":"figure_sim.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"277132648","text":"#!/usr/bin/env python\n\nimport numpy as np\n\n\n# Load cyst phantom scatterer data\n\nimport scipy.io as spio\n\nfile = r'C:\\Users\\spenc\\Desktop\\Experiments\\ftp_files\\cyst_phantom\\pht_data.mat'\nmat = spio.loadmat (file)\n\npos, amp = mat['phantom_positions'], mat['phantom_amplitudes']\nscatterers = np.concatenate ([e.astype ('float32') for e in [pos, amp]], axis=-1)\n\n\n# Generate the transducer apertures for send and receive\n\nf0 = 3.5e6 # Transducer center frequency [Hz]\nfs = 100e6 # Sampling frequency [Hz]\nc = 1540 # Speed of sound [m/s]\nlambd = c / f0 # Wavelength [m]\nwidth = lambd # Width of element\nheight = 5 / 1000 # Height of element [m]\nkerf = 0.05 / 1000 # Kerf (gap between elements) [m]\npitch = kerf + width # Pitch (center-to-center distance between elements) [m]\nN_elements = 192 # Number of physical elements\nno_sub_x = 5 # Number of sub-divisions in x-direction of elements\nno_sub_y = 5 # Number of sub-divisions in y-direction of elements\n\n\n# Create and configure GPU simulator\n\nfrom pyrfsim import RfSimulator\n\nsim = RfSimulator ('gpu')\nsim.set_print_debug (True)\n\nsim.set_parameter ('sound_speed', str (c))\nsim.set_parameter ('radial_decimation', '1') # depth-direction downsampling factor\nsim.set_parameter ('phase_delay', 'on') # improves subsample accuracy\nsim.set_parameter ('use_elev_hack', 'off')\n\n\ndef subdivide (length, num):\n delta = length * 1/num\n divisions = np.arange ((-num//2 + 1) * delta, (num//2 + 1) * delta, delta)\n\n return divisions\n\n# Arguments very similar to xdc_linear_array in Field II\ndef linear_transducer (N_elements, width, height, kerf, no_sub_x=1, no_sub_y=1, as_array=False):\n '''Calculates the origin positions of the (sub-)elements in a linear array transducer.'''\n\n elem_x = subdivide (width, no_sub_x)\n elem_y = subdivide (height, no_sub_y)\n\n 
template_x = np.tile (elem_x, [1, N_elements])\n template_x = template_x.repeat (no_sub_y, axis=0)\n template_y = np.tile (elem_y.reshape(-1, 1), [1, N_elements])\n template_y = template_y.repeat (no_sub_x, axis=1)\n\n pitch = width + kerf # element center-to-center distance\n origins_x = np.arange ((-N_elements//2 + 1) * pitch, (N_elements//2 + 1) * pitch, pitch).reshape (1, -1)\n origins_x = origins_x.repeat (no_sub_x, axis=1)\n origins_x = origins_x.repeat (no_sub_y, axis=0)\n\n transducer_x = template_x + origins_x\n transducer_y = template_y\n transducer_z = np.zeros (transducer_x.shape)\n\n if as_array:\n return np.stack ([transducer_x, transducer_y, transducer_z], axis=2)\n else:\n return {'x': transducer_x, 'y': transducer_y, 'z': transducer_z}\n\n\n# Define a scan sequence\n\nreceive_aperture = linear_transducer (N_elements, width, height, kerf, no_sub_x, no_sub_y, as_array=True)\n\norigins = receive_aperture.reshape (-1, 3).astype ('float32')\nN_subelements = origins.shape[0]\ndirections = np.tile (np.array ([0, 0, 1], dtype='float32'), [N_subelements, 1])\nlateral_dirs = np.tile (np.array ([1, 0, 0], dtype='float32'), [N_subelements, 1])\ntimestamps = np.zeros (N_subelements, dtype='float32')\nline_length = .09\n\nsim.set_scan_sequence (origins, directions, line_length, lateral_dirs, timestamps)\n\n\n# Define excitation signal\n\nfrom scipy.signal import gausspulse\n\nt = np.arange (-16/f0, 16/f0, 1/fs)\nexcitation, envelope = gausspulse(t, fc=f0, bw=.5, retenv=True)\nexcitation = excitation.astype ('float32')\ncenter_index = len (t) // 2\nsim.set_excitation (excitation, center_index, fs, f0)\n\n\n# Set scatterers\n\nsim.clear_fixed_scatterers () # Make this cell idempotent\n\n# transmit_aperture = linear_transducer (N_elements, width, height, kerf, no_sub_x, no_sub_y, as_array=True)\ndata = scatterers\n\nsim.add_fixed_scatterers (data)\n\n\n# Set the beam profile\n\nsigma_lateral = 1e-3\nsigma_elevational = 1e-3\nsim.set_analytical_beam_profile (sigma_lateral, sigma_elevational)\n\n\n# Simulate all scanlines\n\nsim.set_parameter ('radial_decimation', '17') # test radial decimation by a number that does not divide m_rf_line_num_samples evenly\nsim.set_parameter ('use_elev_hack', 'off')\niq_lines = sim.simulate_lines ()\nprint (iq_lines)\nimport sys; sys.stdout.flush()\n\n\n# Test resizing the number of lines\n\nno_sub_x = 3 # Number of sub-divisions in x-direction of elements\nno_sub_y = 5 # Number of sub-divisions in y-direction of elements\n\n\n# Define a scan sequence\n\nreceive_aperture = linear_transducer (N_elements, width, height, kerf, no_sub_x, no_sub_y, as_array=True)\n\norigins = receive_aperture.reshape (-1, 3).astype ('float32')\nN_subelements = origins.shape[0]\ndirections = np.tile (np.array ([0, 0, 1], dtype='float32'), [N_subelements, 1])\nlateral_dirs = np.tile (np.array ([1, 0, 0], dtype='float32'), [N_subelements, 1])\ntimestamps = np.zeros (N_subelements, dtype='float32')\nline_length = .09\n\nsim.set_scan_sequence (origins, directions, line_length, lateral_dirs, timestamps)\n\n\n# Simulate all scanlines\n\nsim.set_parameter ('radial_decimation', '7') # test radial decimation by a number that does not divide m_rf_line_num_samples evenly\nsim.set_parameter ('use_elev_hack', 'off')\niq_lines = sim.simulate_lines ()\nprint (iq_lines)\nimport sys; sys.stdout.flush()\n","sub_path":"jupyter/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"502463401","text":"from django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\nfrom .views import index, EquipoList, EquipoCreate, EquipoUpdate, EquipoDelete, EquipoDetail,\\\n OtroList, OtroCreate, OtroUpdate, OtroDetail, OtroDelete, \\\n PerifericoList, PerifericoCreate, PerifericoUpdate, PerifericoDetail, PerifericoDelete\n\nurlpatterns = [\n # URLS EQUIPOS\n url(r'^$', index, name=\"index\"),\n url(r'^nuevo$', EquipoCreate.as_view(), name=\"equipo_crear\"),\n url(r'^listar', EquipoList.as_view(), name=\"equipo_listar\"),\n url(r'^editar/(?P\\d+)/$', EquipoUpdate.as_view(), name='equipo_editar'),\n url(r'^eliminar/(?P\\d+)/$', EquipoDelete.as_view(), name='equipo_eliminar'),\n url(r'^detalle/(?P.+)/$', EquipoDetail.as_view(), name='equipo_detalle'),\n\n # URLS OTROS\n url(r'^nuevaotro$', OtroCreate.as_view(), name=\"otro_crear\"),\n url(r'^listaotro', OtroList.as_view(), name=\"otro_listar\"),\n url(r'^editarotro/(?P\\d+)/$', OtroUpdate.as_view(), name='otro_editar'),\n url(r'^eliminarotro/(?P\\d+)/$', OtroDelete.as_view(), name='otro_eliminar'),\n url(r'^detalleotro/(?P.+)/$', OtroDetail.as_view(), name='otro_detalle'),\n\n # URLS PERIFERICOS\n url(r'^nuevoperiferico/(?P\\d+)/$', PerifericoCreate.as_view(), name='periferico_crear'),\n url(r'^listaperiferico/(?P\\d+)/$', PerifericoList.as_view(), name='periferico_listar'),\n url(r'^editarperiferico/(?P\\d+)/$', PerifericoUpdate.as_view(), name='periferico_editar'),\n url(r'^eliminarperiferico/(?P\\d+)/$', PerifericoDelete.as_view(), name='periferico_eliminar'),\n url(r'^detalleperiferico/(?P.+)/$', PerifericoDetail.as_view(), name='periferico_detalle'),\n]\n","sub_path":"myapps/sisinventario/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"1779074","text":"'''\r\nCreated on Mar 9, 2018\r\n\r\n@author: Michael\r\n'''\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom adjustAndClean.TAQAdjust import TAQAdjust\r\nfrom adjustAndClean.TAQCleaner import TAQCleaner\r\nfrom adjustAndClean.StackData import StackData\r\nfrom partB.xMinuteReturn import getXSecMidQuoteReturns\r\nfrom math import sqrt\r\nfrom cvxopt import matrix\r\nfrom cvxopt.blas import dot\r\nfrom cvxopt.solvers import qp, options\r\n\r\nprint('Initializing.')\r\n\r\n # FIRST: Take S&P500 tickers\r\ns_p500 = '/Users/Michael/eclipse-workspace/Homework_1/s_p500.xlsx'\r\ns_p500xls = pd.read_excel(open(s_p500,'rb'), sheet_name='WRDS')\r\ns_ptickers = np.unique((np.array(s_p500xls['Ticker Symbol'])).astype(str))\r\ns_ptickers = s_ptickers[:-1]\r\n\r\nbaseDir = '/Users/Michael/Documents/TAQ/R'\r\nstartDate = '20070620'\r\nendDate = '20070721'\r\ntickers = []\r\ntickers.append('AAPL')\r\ntickers.append('MSFT')\r\ntickers.append('AMZN')\r\ntickers.append('JPM')\r\ntickers.append('JNJ')\r\ntickers.append('GOOG')\r\ntickers.append('XOM')\r\ntickers.append('BAC')\r\ntickers.append('WFC')\r\ntickers.append('INTC')\r\n\r\nprint(tickers[0])\r\nprint(type(tickers[0]))\r\n\r\nreturns = np.zeros((11,409))\r\n\r\nfor k in range(10):\r\n # Stack everything\r\n ticker = tickers[k]\r\n stack = StackData(baseDir, startDate, endDate, ticker)\r\n stack.addTrades()\r\n stack.addQuotes()\r\n print('Finished stacking')\r\n # Get results\r\n quotes = stack.getStackedQuotes()\r\n trades = stack.getStackedTrades()\r\n print('Got results') \r\n # Adjust\r\n adjuster = TAQAdjust( quotes, trades, s_p500 )\r\n adjuster.adjustQuote()\r\n 
print('Adjusted')\r\n # Clean\r\n cleaner = TAQCleaner(quotes, trades)\r\n quotes = np.delete(quotes, cleaner.cleanQuotesIndices(), axis = 0)\r\n print('Cleaned')\r\n q_returns = getXSecMidQuoteReturns(quotes,900)[0]\r\n for x in range(len(q_returns)):\r\n returns[k,x] = q_returns[x]\r\n print('length of', tickers[k],' is',len(q_returns))\r\n \r\nMeanVector = np.zeros(11)\r\n\r\n\r\nfor x in range(409):\r\n returns[10,x] = 1\r\n # dummy variable used for covariance\r\n#Convert to annualized\r\nfor x in range(10):\r\n print(np.mean(returns[x,:]))\r\n MeanVector[x] = ((np.mean(returns[x,:])+1)**(26*252))-1\r\n\r\nMeanVector[10] = 0.0378\r\nCovariance = np.cov(returns)*26*252\r\nprint(MeanVector)\r\nprint(Covariance)\r\n\r\n#Feed into cvxopt\r\nn = 11\r\nS = matrix( Covariance )\r\npbar = matrix(MeanVector)\r\n\r\nG = matrix(0.0, (n,n))\r\nG[::n+1] = -1.0\r\nh = matrix(0.0, (n,1))\r\nA = matrix(1.0, (1,n))\r\nb = matrix(1.0)\r\n\r\nN = 1000\r\nmus = [ 10**(5.0*t/N-1.0) for t in range(N) ]\r\noptions['show_progress'] = False\r\nxs = [ qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ]\r\nreturns = [ dot(pbar,x) for x in xs ]\r\nrisks = [ sqrt(dot(x, S*x)) for x in xs ]\r\n\r\ntry: import pylab\r\nexcept ImportError: pass\r\nelse:\r\n pylab.figure(1, facecolor='w')\r\n pylab.plot(risks, returns)\r\n pylab.xlabel('standard deviation')\r\n pylab.ylabel('expected return')\r\n pylab.axis([0, 0.2, 0, 0.15])\r\n pylab.title('Risk-return trade-off curve (fig 4.12)')\r\n pylab.yticks([0.00, 0.05, 0.10, 0.15])\r\n pylab.show()\r\n\r\nrisk_free = returns[0]\r\nbest_sharpe = 0\r\nbest_sharpe_index = 0\r\nfor x in range(len(returns)):\r\n if ((returns[x]-risk_free)/risks[x])>best_sharpe:\r\n best_sharpe = (returns[x]-risk_free)/risks[x]\r\n best_sharpe_index = x\r\n\r\nprint('Optimal portfolio returns:', returns[best_sharpe_index])\r\nprint('Optimal portfolio std dev:',risks[best_sharpe_index])\r\nprint('Optimal portfolio weights:')\r\nprint(xs[best_sharpe_index])\r\n\r\n\r\n","sub_path":"Running_C_D/FindMktPortfolio.py","file_name":"FindMktPortfolio.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"268810816","text":"import logging\nimport os\nimport json\nfrom PyQt5.QtCore import pyqtSlot\nfrom stage import Stage, Runnable\nfrom ctypes import cdll\n# import time\n\nclass LabelStage(Stage):\n \"\"\"Downloads datasets from online sources.\"\"\"\n def __init__(self, configHandler, dbHandler):\n Stage.__init__(self)\n if configHandler.getDatasetType() == \"subset\":\n self.labeler = self.ManualLabeler(configHandler, dbHandler)\n elif configHandler.getDatasetType() == \"full_set\":\n self.labeler = self.LabelImporter(configHandler, dbHandler)\n else:\n raise ValueError('Value must be one of the keys in SOURCE_URL')\n\n @pyqtSlot()\n def label(self):\n self.threadpool.start(self.labeler)\n\n class LabelImporter(Runnable):\n \"\"\"Class for importing image labels from CSV.\"\"\"\n def __init__(self, configHandler, dbHandler):\n Runnable.__init__(self, configHandler, dbHandler)\n\n self.lib = cdll.LoadLibrary(os.path.join(configHandler.getParentFolder(), \"build\", \"liblabelimporter.so\"))\n \n self.obj = self.lib.LabelImporter_new()\n \n @pyqtSlot()\n def run(self):\n logging.info(\"Importing label data from CSV\")\n self.signals.attemptUpdateProBarBounds.emit(0,1)\n self.signals.attemptUpdateProBarValue.emit(0)\n self.signals.attemptUpdateText.emit(\"Importing label data\")\n\n # start = time.time()\n 
self.lib.LabelImporter_run(self.obj)\n # end = time.time()\n # print(end - start)\n \n self.signals.attemptUpdateProBarValue.emit(1)\n self.signals.attemptUpdateText.emit(\"Done importing\")\n self.signals.finished.emit()\n logging.info(\"Done importing label data\")\n\n class ManualLabeler(Runnable):\n \"\"\"Class used to assist in labeling the data.\"\"\"\n def __init__(self, configHandler, dbHandler):\n Runnable.__init__(self, configHandler, dbHandler)\n\n self.count = 0\n self.records = None\n self.record = None\n\n @pyqtSlot()\n def run(self):\n \"\"\"Displays the content into the window.\"\"\"\n logging.info('Filling window')\n self.queryImageList()\n \n self.signals.attemptUpdateText.emit(\"Please manually label images\")\n self.signals.attemptUpdateProBarBounds.emit(0, self.expectedNumFiles)\n\n self.dbHandler.addTableToDb(self.configHandler.getTableName('label'), self.configHandler.getColumnsInfoFullPath(), \"nonElementColumns\", 'labels')\n \n self.displayNextImage()\n\n while self.count < self.expectedNumFiles:\n pass\n\n logging.info('End of query')\n self.signals.attemptUpdateText.emit(\"Image labeling complete\")\n self.signals.finished.emit()\n\n def frontal(self):\n logging.debug('Front')\n self.storeLabel('F')\n self.count += 1\n self.displayNextImage()\n \n def lateral(self):\n logging.debug('Lateral')\n self.storeLabel('L')\n self.count += 1\n self.displayNextImage()\n\n def displayNextImage(self):\n logging.debug('Image count: ' + str(self.count))\n self.signals.attemptUpdateText.emit('Image Count: ' + str(self.count))\n self.signals.attemptUpdateProBarValue.emit(self.dbHandler.countRecords(self.configHandler.getTableName('label')))\n\n if self.count < self.expectedNumFiles:\n self.record = self.records[self.count]\n self.signals.attemptUpdateImage.emit(self.record)\n \n def queryImageList(self):\n logging.debug('Getting the image list')\n sqlQuery = 'SELECT file_path, bits_stored FROM ' + self.configHandler.getTableName(\"metadata\") + ' ORDER BY file_path;'\n self.records = self.dbHandler.executeQuery(self.dbHandler.connection, sqlQuery).fetchall()\n\n def storeLabel(self, decision):\n logging.debug('Storing label')\n sqlQuery = 'INSERT INTO ' + self.configHandler.getTableName(\"label\") + ' (file_name, file_path, image_view) VALUES (\\'' + self.record['file_path'].split(os.sep)[-1] + '\\', \\'' + self.record['file_path'] + '\\', \\'' + decision + '\\');'\n self.dbHandler.executeQuery(self.dbHandler.connection, sqlQuery)\n","sub_path":"Combined/DesktopApp/labelStage.py","file_name":"labelStage.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"554842006","text":"# -*- coding:utf-8 -*-\n# 链接:https://www.nowcoder.com/questionTerminal/9b4c81a02cd34f76be2659fa0d54342a\n# 来源:牛客网\n#\n# 可以模拟魔方逆时针旋转的方法,一直做取出第一行的操作\n# 例如\n# 1 2 3\n# 4 5 6\n# 7 8 9\n# 输出并删除第一行后,\n# 4 5 6\n# 7 8 9\n# 再进行一次逆时针旋转,就变成:\n# 6 9\n# 5 8\n# 4 7\n# 继续重复上述操作即可。\n# Python代码如下\n# 螺旋打印矩阵\n# change [[1,2,3,4], into [[1,2,3,4],\n# [5,6,7,8], [8,12,16,15],\n# [9,10,11,12], [14,13,9,5],\n# [13,14,15,16]] [6,7,11,10]\n\n\nclass Solution:\n # matrix类型为二维列表,需要返回列表\n def printMatrix(self, matrix):\n # write code here\n result = []\n while (matrix):\n result += matrix.pop(0)\n if not matrix or not matrix[0]:\n break\n matrix = self.turn(matrix) # turn的作用是将同一行变为同一列,最后反转\n return result\n\n def turn(self, matrix):\n \"\"\"\n turn函数用于逆时针旋转\n :param matrix:\n :return:\n \"\"\"\n num_r = len(matrix)\n num_c = 
len(matrix[0])\n newmat = []\n\n # 把同一列变为同一行, 最后reverse\n for i in range(num_c):\n newmat2 = []\n # row is column now\n for j in range(num_r):\n newmat2.append(matrix[j][i])\n newmat.append(newmat2)\n # the reverse of [[1,2],[4,5]] is [[4,5],[1,2]]\n newmat.reverse()\n return newmat\n\n\n# You are given an n x n 2D matrix representing an image.\n#\n# Rotate the image by 90 degrees (clockwise).\n#\n# Note:\n#\n# You have to rotate the image in-place, which means you have to modify the\n# input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.\n#\n# Example 1:\n#\n# Given input matrix =\n# [\n# [1,2,3],\n# [4,5,6],\n# [7,8,9]\n# ],\n#\n# rotate the input matrix in-place such that it becomes:\n# [\n# [7,4,1],\n# [8,5,2],\n# [9,6,3]\n# ]\n# Example 2:\n#\n# Given input matrix =\n# [\n# [ 5, 1, 9,11],\n# [ 2, 4, 8,10],\n# [13, 3, 6, 7],\n# [15,14,12,16]\n# ],\n#\n# rotate the input matrix in-place such that it becomes:\n# [\n# [15,13, 2, 5],\n# [14, 3, 4, 1],\n# [12, 6, 8, 9],\n# [16, 7,10,11]\n# ]\n\n# idea The most pythonic solution is a simple one-liner using [::-1] to flip\n# the matrix upside down and then zip to transpose it.\n# It assigns the result back into A, so it's \"in-place\" in a sense and the OJ accepts it as such, though some people might not.\n\n# upside down and then transpose to carry out 90 degrees (clockwise)\ndef rotate(A):\n A[:] = zip(*A[::-1])\n\n\n# Basically the same as the first solution, but using reverse instead of [::-1] and transposing\n# the matrix with loops instead of zip.\n# It's 100% in-place, just instead of only moving elements around, it also moves the rows around.\n# 翻转再转置\ndef rotate2(A):\n # print(A)\n A.reverse() # inplace的, 相当于把[[1,2],[2,3],[9,0]]变成[[9,0],[2,3],[1,2]]\n # print(A)\n # this part can be used for transpose\n for i in range(len(A)):\n for j in range(i):\n A[i][j], A[j][i] = A[j][i], A[i][j]\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.printMatrix([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]))\n rotate2([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ])\n","sub_path":"Basic_Algorithm/highlyFrequentQuestion/printmatrix.py","file_name":"printmatrix.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"52953571","text":"\n\nfrom xai.brain.wordbase.verbs._garrison import _GARRISON\n\n#calss header\nclass _GARRISONS(_GARRISON, ):\n\tdef __init__(self,): \n\t\t_GARRISON.__init__(self)\n\t\tself.name = \"GARRISONS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"garrison\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_garrisons.py","file_name":"_garrisons.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"65109699","text":"\"\"\"\n@file\n@brief Converters for models from :epkg:`mlinsights`.\n\"\"\"\nfrom sklearn.base import ClassifierMixin\nfrom skl2onnx import get_model_alias\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.common._registration import get_shape_calculator\nfrom skl2onnx._parse import parse_sklearn\nfrom skl2onnx.common._apply_operation import apply_identity\n\n\ndef _model_outputs(existing_scope, model, inputs, custom_parsers=None):\n \"\"\"\n Retrieves the outputs of one particular models.\n \"\"\"\n scope = existing_scope.temp()\n if custom_parsers is not None and model in custom_parsers:\n return custom_parsers[model](\n scope, model, inputs, 
custom_parsers=custom_parsers)\n return parse_sklearn(scope, model, inputs, custom_parsers=custom_parsers)\n\n\ndef parser_transfer_transformer(scope, model, inputs, custom_parsers=None):\n \"\"\"\n Parser for :epkg:`TransferTransformer`.\n \"\"\"\n if custom_parsers is not None and model in custom_parsers:\n return custom_parsers[model](\n scope, model, inputs, custom_parsers=custom_parsers)\n\n if model.method == 'predict_proba':\n name = 'probabilities'\n elif model.method == 'transform':\n name = 'variable'\n else:\n raise NotImplementedError( # pragma: no cover\n \"Unable to defined the output for method='{}' and model='{}'.\".format(\n model.method, model.__class__.__name__))\n\n prob = scope.declare_local_variable(name, FloatTensorType())\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n this_operator.inputs = inputs\n this_operator.outputs.append(prob)\n return this_operator.outputs\n\n\ndef shape_calculator_transfer_transformer(operator):\n \"\"\"\n Shape calculator :epkg:`TransferTransformer`.\n \"\"\"\n op = operator.raw_operator\n alias = get_model_alias(type(op.estimator_))\n calc = get_shape_calculator(alias)\n\n scope = operator.scope_inst.temp()\n this_operator = scope.declare_local_operator(alias)\n this_operator.raw_operator = op.estimator_\n this_operator.inputs = operator.inputs\n res = _model_outputs(scope, op.estimator_, operator.inputs)\n this_operator.outputs.extend([\n scope.declare_local_variable(\n \"%sTTS\" % r.onnx_name, r.type) for r in res])\n this_operator.outputs = res\n calc(this_operator)\n\n if op.method == 'predict_proba':\n operator.outputs[0].type = this_operator.outputs[1].type\n elif op.method == 'transform':\n operator.outputs[0].type = this_operator.outputs[0].type\n else:\n raise NotImplementedError( # pragma: no cover\n \"Unable to defined the output for method='{}' and model='{}'.\".format(\n op.method, op.__class__.__name__))\n\n\ndef convert_transfer_transformer(scope, operator, container):\n \"\"\"\n Converters for :epkg:`TransferTransformer`.\n \"\"\"\n op = operator.raw_operator\n op_type = get_model_alias(type(op.estimator_))\n\n this_operator = scope.declare_local_operator(op_type)\n this_operator.raw_operator = op.estimator_\n this_operator.inputs = operator.inputs\n\n if isinstance(op.estimator_, ClassifierMixin):\n container.add_options(id(op.estimator_), {'zipmap': False})\n\n res = _model_outputs(scope.temp(), op.estimator_, operator.inputs)\n this_operator.outputs.extend([\n scope.declare_local_variable(\n \"%sTTC\" % r.onnx_name, r.type) for r in res])\n\n if op.method == 'predict_proba':\n index = 1\n elif op.method == 'transform':\n index = 0\n else:\n raise NotImplementedError( # pragma: no cover\n \"Unable to defined the output for method='{}' and model='{}'.\".format(\n op.method, op.__class__.__name__))\n\n apply_identity(scope, this_operator.outputs[index].onnx_name,\n operator.outputs[0].full_name, container,\n operator_name=scope.get_unique_operator_name(\"IdentityTT\"))\n","sub_path":"mlprodict/onnx_conv/operator_converters/conv_transfer_transformer.py","file_name":"conv_transfer_transformer.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"58473691","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 1. 
학습에 사용할 데이터 셋\n# 데이터 셋 읽어오기\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# 2. 텐서를 위한 X, Y 플레이스홀더\n# MNIST data image의 형태는 28 * 28 = 784\nnb_classes = 10 # 10개의 결과물로 구분\nX = tf.placeholder(tf.float32, shape=[None, 784]) # 784개의 픽셀 정보 입력\nY = tf.placeholder(tf.float32, shape=[None, nb_classes]) # 구분되는 결과\n\n# dropout에 사용할 비율 0.7 (학습: training), 테스팅에는 1 사용\nkeep_prob = tf.placeholder(tf.float32)\n\n# 3. NN 사용으로 변경 --> xavier(샤비에르)\nW1 = tf.get_variable(\"W1\", shape=[784, 512],\n initializer=tf.contrib.layers.xavier_initializer())\nb1 = tf.Variable(tf.random_normal([512]), name='bias_1')\nL1 = tf.nn.relu(tf.matmul(X, W1) + b1)\nL1 = tf.nn.dropout(L1, keep_prob=keep_prob)\n\nW2 = tf.get_variable(\"W2\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\nb2 = tf.Variable(tf.random_normal([512]), name='bias_2')\nL2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\nL2 = tf.nn.dropout(L2, keep_prob=keep_prob)\n\nW3 = tf.get_variable(\"W3\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\nb3 = tf.Variable(tf.random_normal([512]), name='bias_3')\nL3 = tf.nn.relu(tf.matmul(L2, W3) + b3)\nL3 = tf.nn.dropout(L3, keep_prob=keep_prob)\n\nW4 = tf.get_variable(\"W4\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\nb4 = tf.Variable(tf.random_normal([512]), name='bias_4')\nL4 = tf.nn.relu(tf.matmul(L3, W4) + b4)\nL4 = tf.nn.dropout(L4, keep_prob=keep_prob)\n\nW5 = tf.get_variable(\"W5\", shape=[512, nb_classes],\n initializer=tf.contrib.layers.xavier_initializer())\nb5 = tf.Variable(tf.random_normal([nb_classes]), name='bias_5')\nhypothesis = tf.matmul(L4, W5) + b5\n\n# 5. 비용함수(다양한 형태 - cross entropy) - 전달 파라미터 주의 !!\n# 주의할 점은 Y_one_hot은 label로 1개의1, 여러개의 0으로 이루어진 one_hot 벡터 이어야 한다.\n# 그러면 각각의 logit에 대한 오차값이 결과로 나오고 그것을 tf.reduce_mean해주면 전체에 대한 평균오차값이 나온다.\n# cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))\ncost_i = tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y)\ncost = tf.reduce_mean(cost_i) # 전체에 대한 평균오차값\n\n# 최적화\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\ntrain = optimizer.minimize(cost)\n\n# 정확도 계산 (Test model)\n# argmax() : [0.1, 0.3, 0.5]의 argmax는 1로 가장 큰 값의 index 출력\nprediction = tf.argmax(hypothesis, 1) # 가장 높은 값을 가지는 index반환\nis_correct = tf.equal(prediction, tf.argmax(Y, 1)) # 맞는지 확인\naccuracy = tf.reduce_mean(tf.cast(is_correct, dtype=tf.float32))\n\n# 파라미터\ntraining_epochs = 15 # 전체 데이터 셋을 한 번 학습을 1 epochs --> 15번 학습\nbatch_size = 100 # 한 번에 읽어올 크기\ntotal_batch = int(mnist.train.num_examples / batch_size) # 전체 배치 갯수\n\n# 세션\nwith tf.Session() as sess:\n # 변수 초기화\n sess.run(tf.global_variables_initializer())\n\n # 학습 사이클\n for epoch in range(training_epochs):\n avg_cost = 0\n\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size) # 배치 크기만큼 처리\n feed_dict = {X: batch_xs, Y: batch_ys, keep_prob:0.7}\n cost_val, _ = sess.run([cost, train], feed_dict=feed_dict)\n avg_cost += cost_val / total_batch\n\n print(\"Epoch: {:04d}, Cost: {:.9f}\".format(epoch + 1, avg_cost))\n print(\"학습 finished\")\n\n # 테스트 셋을 이용해서 정확도 측정\n print(\n \"Accuracy: \",\n accuracy.eval(\n session=sess, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob:1}\n ),\n )\n\n # Get one and predict\n r = random.randint(0, mnist.test.num_examples - 1)\n print(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r: r + 1], 1)))\n print(\n \"Prediction: \",\n sess.run(tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r: r + 1], keep_prob:1}),\n 
)\n\n plt.imshow(\n mnist.test.images[r: r + 1].reshape(28, 28),\n cmap=\"Greys\",\n interpolation=\"nearest\",\n )\n plt.show()","sub_path":"DeepLearningZeroToAll/ch10/mnist_dropout.py","file_name":"mnist_dropout.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"361691982","text":"# -*- coding: utf-8 -*-\n# Django settings for lpdh_I project.\n\nfrom unipath import Path\nimport os.path\n\nPROJECT_DIR = Path(__file__).ancestor(2)\n\nMEDIA_ROOT = PROJECT_DIR.child('media')\nSTATIC_ROOT = PROJECT_DIR.child('static')\nSTATICFILES_DIRS = (\n PROJECT_DIR.child('assets'),\n)\nTEMPLATE_DIRS = (\n PROJECT_DIR.child('templates'),\n)\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n#BASE_PATH = os.path.dirname(__file__)\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__).decode('utf-8'))\n#print(BASE_PATH)\n#print(CURRENT_PATH)\n#PROJECT_DIR = Path(__file__).ancestor(2)\n\n#MEDIA_ROOT = PROJECT_DIR.child('media')\n#STATIC_ROOT = PROJECT_DIR.child('static')\n#STATIC_ROOT = '/home/jose/devel/django-projects/Blogs/lpdh_I/static/'\n#STATICFILES_DIRS = (\n# PROJECT_DIR.child('assets'),\n#)\n#TEMPLATE_DIRS = (\n# PROJECT_DIR.child('templates'),\n#)\n\n#print(PROJECT_DIR)\n#print(MEDIA_ROOT)\n#print(STATIC_ROOT)\n#print(STATICFILES_DIRS)\n#print(TEMPLATE_DIRS)\n\nADMINS = (\n (u'Jose Manuel Garcia Montes', 'jmgmontes@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'nacho_laprensadehoy', # Or path to database file if using sqlite3.\n # The following settings are not used with sqlite3:\n 'USER': 'nacho',\n 'PASSWORD': 'g22XsZuQ',\n 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n 'PORT': '', # Set to empty string for default.\n }\n}\n\n# Settings para comprobar envio de boletines a traves de email\nARTICLES_FROM_EMAIL = {\n 'protocol': 'IMAP4',\n 'host': 'mail.wservices.ch',\n 'port': 993,\n 'keyfile': None,\n 'certfile': None,\n 'user': 'boletin@laprensadehoy.es',\n 'password': '1961Michie',\n 'ssl': True,\n #'autopost': True,\n 'markup': 'h',\n 'acknowledge': False,\n }\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\nTIME_ZONE = 'Europe/Madrid'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'es-ES'\n\nSITE_ID = 1\n\nSESSION_COOKIE_DAYS = 90\nSESSION_COOKIE_AGE = 60 * 60 * 24 * SESSION_COOKIE_DAYS\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n#MEDIA_ROOT = '/home/jose/devel/django-projects/Blogs/lpdh_I/media/'\nMEDIA_URL = '/media/'\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\n#STATIC_URL = '/static/'\n#STATIC_ROOT = '/home/jose/devel/django-projects/Blogs/lpdh_I/static/'\nSTATIC_URL = '/static/'\n#STATICFILES_DIRS = (\n# BASE_PATH+'/static',\n#)\n\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '!ryc0x^8l3-gcbr78gi4(5&vgs%@rn3zb)ny$^!(0osfe#g_z6'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'aldia.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'aldia.wsgi.application'\n\n#TEMPLATE_DIRS = (\n# BASE_PATH+'/templates/',\n#)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n #'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.flatpages',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n 'core',\n 'unipath',\n #'south',\n 'katche',\n 'jarrett',\n 'coltrane',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\nSITE_DOMAIN = \"http://laprensadehoy.es/\"\nIPAD_IS_MOBILE = False\nLOGIN_URL = \"/usuario/login/\"\nLOGOUT_URL = \"/usuario/logout/\"\nLOGIN_REDIRECT_URL = '/usuario/profile/'\n#LOGOUT_REDIRECT_URL = '/'\nFORCE_SCRIPT_NAME = ''\n","sub_path":"temp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"155173198","text":"import argparse\nimport ast\nimport os\nimport re\nimport time\nimport threading\nimport numpy as np\nfrom pandas import read_table\nfrom deep4cast.forecasters import Forecaster\nfrom deep4cast.metrics import adjust_for_horizon, mape\n\nmutex = threading.Lock()\n\ndef load_topology(args):\n \"\"\"\n Loads the network(s) topology from a file if file is passed by user\n otherwise just returns the default topology\n \"\"\"\n if args.topology_file:\n topologies = []\n with open(args.topology_file) as f:\n content = f.readlines()\n for line in content:\n topology = []\n layers = re.findall('\\(.+?\\)', line)\n for layer in layers:\n layer_info = re.findall('{.*?}', layer)\n dict1 = ast.literal_eval(layer_info[0])\n dict2 = ast.literal_eval(layer_info[1])\n topology.append((dict1, dict2))\n topologies.append(topology)\n return topologies\n else:\n return None\n\n\ndef build_datasets(ts, lookback_period, test_fraction):\n \"\"\"\n Build the train and test sets\n :param args:\n :param ts:\n :return:\n \"\"\"\n # Format data to shape (n_steps, n_vars, n_series)\n while len(ts.shape) < 3:\n ts = np.expand_dims(ts, axis=-1)\n\n test_length = int(len(ts) * test_fraction)\n ts_train = ts[:-test_length]\n ts_test = ts[-test_length - lookback_period:]\n return ts_train, ts_test\n\n\ndef run_model(args, data_file, test_fraction, lag, topologies, epochs, batch_size, separator, horizon, lr, optimizers,\n results):\n \"\"\"\n Runs the forecaster on a data set (with given parameters) and computes the metric on train and test sets also\n reports the training time\n :return:\n \"\"\"\n global mutex\n print(\"Running for data \" + data_file + \" ...\")\n df = read_table(data_file, sep=separator)\n train_set, test_set = build_datasets(df.values, lag, test_fraction)\n\n for i in range(0, len(topologies)):\n if args.verbose==1:\n print(\"Train\\t\\t model:\" + topologies[i] + \"\\tdataset:\" + data_file)\n start = time.time()\n try:\n forecaster = Forecaster(\n topologies[i],\n optimizer=optimizers[i],\n lag=lag,\n horizon=horizon,\n batch_size=batch_size,\n epochs=epochs,\n uncertainty=args.uncertainty,\n dropout_rate=args.dropout_rate,\n lr=lr,\n verbose=args.verbose\n )\n\n forecaster.fit(train_set, verbose=args.verbose)\n train_time = time.time() - start\n metric = adjust_for_horizon(mape)\n if args.verbose == 1:\n print(\"Predict\\t\\t model:\" + topologies[i] + 
\"\\tdataset:\" + data_file)\n train_err = metric(forecaster.predict(train_set, n_samples=args.n_samples)['mean'],\n train_set[lag:len(train_set)])\n test_err = metric(forecaster.predict(test_set, n_samples=args.n_samples)['mean'],\n test_set[lag:len(test_set)])\n except Exception as e:\n print(e)\n train_time = time.time() - start\n train_err = 'NA'\n test_err = 'NA'\n\n file_name = os.path.basename(data_file)\n mutex.acquire()\n results.append((file_name, topologies[i], train_err, test_err, train_time))\n mutex.release()\n print(\"Done running for data \" + data_file + \" ...\")\n\n return train_err, test_err, train_time\n\n\ndef run_single_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons, lrs,\n optimizers):\n results = []\n for i in range(0, len(args.data_files)):\n run_model(args, args.data_files[i], test_fractions[i], lags[i],\n topologies, epochs[i], batch_sizes[i], separators[i],\n horizons[i], lrs[i], optimizers, results)\n return results\n\n\ndef run_multi_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons, lrs,\n optimizers):\n threads = []\n results = []\n num_threads = args.threads\n for i in range(0, len(args.data_files)):\n if len(threads) < num_threads:\n threads.append(threading.Thread(target=run_model, args=(\n args, args.data_files[i], test_fractions[i], lags[i], topologies, epochs[i], batch_sizes[i],\n separators[i],horizons[i], lrs[i], optimizers, results)))\n else:\n for i in range(0, len(threads)):\n threads[i].start()\n for i in range(0, len(threads)):\n threads[i].join()\n threads.clear()\n\n if len(threads) > 0:\n for i in range(0, len(threads)):\n threads[i].start()\n for i in range(0, len(threads)):\n threads[i].join()\n\n return results\n\n\ndef fill_list(list, target_size):\n \"\"\"\n Creates a new list out of a given one and extends\n it with last element of the list\n :return: the extended list\n \"\"\"\n new_list = []\n given_list_len = len(list)\n i = 1\n while i <= target_size:\n if i < given_list_len:\n new_list.append(list[i - 1])\n else:\n new_list.append(list[given_list_len - 1])\n i += 1\n return new_list\n\n\ndef main(args):\n\n print(\"\\n\\nRunning the benchmarks ...\")\n topologies = load_topology(args)\n if topologies is None:\n topologies = args.network_type\n\n separators = fill_list(args.separator, len(args.data_files))\n lags = fill_list(args.lag, len(args.data_files))\n horizons = fill_list(args.horizon, len(args.data_files))\n test_fractions = fill_list(args.test_fraction, len(args.data_files))\n epochs = fill_list(args.epochs, len(topologies))\n batch_sizes = fill_list(args.batch_size, len(topologies))\n optimizers = fill_list(args.optimizer, len(topologies))\n lrs = fill_list(args.learning_rate, len(topologies))\n\n if args.multi_threaded == 1:\n results = run_multi_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons,\n lrs, optimizers)\n else:\n results = run_single_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons,\n lrs, optimizers)\n\n\n if args.print_results:\n print(\"#\" * 100)\n print(\"Model\\t\\t\\t\\tTrain Metric\\t\\t\\t\\tTest Metric\\t\\t\\t\\tTrain Time\\t\\t\\t\\tDataset\")\n print(\"#\" * 100)\n for i in range(0, len(results)):\n print(results[i][1] + '\\t\\t\\t' + str(results[i][2]) + '\\t\\t\\t' + str(results[i][3]) + '\\t\\t\\t' + str(\n results[i][4]) + '\\t\\t\\t' + str(results[i][0]))\n print(\"#\" * 100)\n return results\n\n\ndef _get_parser():\n \"\"\"\n 
Collect all relevant command line arguments\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n named_args = parser.add_argument_group('named arguments')\n\n named_args.add_argument('-d', '--data-files',\n help=\"List of data files\",\n required=True,\n nargs=\"+\")\n\n named_args.add_argument('-nt', '--network_type',\n help=\"Network type\",\n required=False,\n default=['rnn'],\n type=str,\n nargs=\"+\")\n\n named_args.add_argument('-topology_file', '--topology-file',\n help=\"File containing the networks topology (it overrides the --network_type parameter.\",\n required=False,\n type=str)\n\n named_args.add_argument('-lg', '--lag',\n help=\"Lookback period\",\n required=True,\n nargs=\"+\",\n type=int)\n\n named_args.add_argument('-hr', '--horizon',\n help=\"Forecasting horizon\",\n required=False,\n default=[1],\n nargs=\"+\",\n type=int)\n\n named_args.add_argument('-o', '--optimizer',\n help=\"Optimizer type\",\n required=False,\n default=['sgd'],\n nargs=\"+\",\n type=str)\n\n named_args.add_argument('-sep', '--separator',\n help=\"Location of data sets\",\n required=False,\n default=[','],\n nargs=\"+\")\n\n named_args.add_argument('-tf', '--test-fraction',\n help=\"Test fraction at end of dataset\",\n required=False,\n default=[0.2],\n nargs=\"+\",\n type=float)\n\n named_args.add_argument('-e', '--epochs',\n help=\"Number of epochs to run\",\n required=False,\n default=[100],\n nargs=\"+\",\n type=int)\n\n named_args.add_argument('-b', '--batch-size',\n help=\"Location of validation data\",\n required=False,\n default=[8],\n nargs=\"+\",\n type=int)\n\n named_args.add_argument('-lr', '--learning-rate',\n help=\"Learning rate\",\n required=False,\n default=[0.1],\n nargs=\"+\",\n type=float)\n\n named_args.add_argument('-u', '--uncertainty',\n help=\"Toggle uncertainty\",\n required=False,\n default=False,\n type=bool)\n\n named_args.add_argument('-dr', '--dropout_rate',\n help=\"Dropout rate\",\n required=False,\n default=0.1,\n type=float)\n\n named_args.add_argument('-s', '--n_samples',\n help=\"Number of dropout samples\",\n required=False,\n default=10,\n type=int)\n\n named_args.add_argument('-m', '--multi-threaded',\n help=\"Multi-Threaded execution\",\n required=False,\n default=0,\n type=int)\n\n named_args.add_argument('-threads', '--threads',\n help=\"Number of threads to parallelize the computation\",\n required=False,\n default=3,\n type=int)\n\n named_args.add_argument('-p', '--print_results',\n help=\"Print results in tabular form\",\n required=False,\n default=0,\n type=int)\n\n named_args.add_argument('-v', '--verbose',\n help=\"Verbose\",\n required=False,\n default=0,\n type=int)\n return parser\n\n\nif __name__ == '__main__':\n args = _get_parser().parse_args()\n main(args)\n","sub_path":"benchmarking/benchmarking.py","file_name":"benchmarking.py","file_ext":"py","file_size_in_byte":11609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440794008","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#####################################################################\n# IoT Engineering Education @ KMUTNB / Thailand\n# Date: 2019-06-27\n# Python code snippet:\n# -> Execute the ping command from the OS (Linux).\n#####################################################################\n\nimport subprocess, re\n\nserver = '8.8.8.8'\ncmd = ['ping', '-c 5', '-W 1', server ]\n\ntry:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for line in iter(proc.stdout.readline, 
b''):\n line = line.decode('ascii').strip()\n result = re.search(\"\\d+(?=% packet loss)\", line)\n if result:\n loss = result.group(0)\n print ('packet loss: {} %'.format(loss))\n result = re.search(\"rtt min/avg/max/mdev = ([0-9\\.\\/]+) ms\", line)\n if result:\n print (line)\n print (result.group(1).split('/'))\n\nexcept Exception as ex:\n print ('Exception:', ex)\n\n#####################################################################\n","sub_path":"get_ping_stats-2.py","file_name":"get_ping_stats-2.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"255028261","text":"# Fetch all the image file names using glob\n# Read all the images using cv2.imread()\n# Store all the images into a list\n# Create a VideoWriter object using cv2.VideoWriter()\n# Save the images to video file using cv2.VideoWriter().write()\n# Release the VideoWriter and destroy all windows.\n\nimport cv2\nimport numpy as np\nimport glob\nimport os\n\npath = '/home/arnold/raindrop-detection-cnn/mesonet/'\nimg_array = []\nfor count in range(len(os.listdir(path))):\n#for filename in glob.glob('C:/New folder/Images/*.jpg'):\n# filename = './path/mask_frame_' + str(count) + '.jpg'\n filename = path + str(count) + '.jpg'\n print(filename)\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n img_array.append(img)\n\nout = cv2.VideoWriter('project.avi',cv2.VideoWriter_fourcc(*'DIVX'), 24, size)\n\nfor i in range(len(img_array)):\n out.write(img_array[i])\nout.release()\n","sub_path":".ipynb_checkpoints/img_2_video-checkpoint.py","file_name":"img_2_video-checkpoint.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291715669","text":"from __future__ import print_function\n\nimport tornado.gen\nfrom katportalclient import KATPortalClient\nfrom katportalclient.client import SensorNotFoundError\nimport redis\nfrom functools import partial\nimport json\nimport yaml\nimport os\nimport sys\nimport ast\nimport json\nimport numpy as np\nfrom datetime import datetime\nimport uuid\nimport time\n\nfrom .redis_tools import (\n REDIS_CHANNELS,\n write_pair_redis,\n write_list_redis,\n publish_to_redis\n )\n\n#Slack channel to publish to: \nSLACK_CHANNEL = 'meerkat-obs-log'\n# Redis channel to send messages to the Slack proxy\nPROXY_CHANNEL = 'slack-messages'\n \nfrom .logger import log \n\nclass BLKATPortalClient(object):\n \"\"\"Client server to KATPortal. Once initialized, the client creates \n a Tornado ioloop and a connection to the local Redis server.\n\n Examples:\n >>> client = BLKATPortalClient()\n >>> client.start()\n\n When start() is called, a loop starts that subscribes to the 'alerts'\n channel of the Redis server. Depending on the message received, various\n tasks are performed. These include:\n 1. Creating a new KATPortalClient object specific to the\n product ID just received in a ?configure request.\n 2. Querying for schedule block information when ?capture-init is\n received and publishing this to Redis.\n 3. Subscribing to various sensors for asynchronous (and immediate)\n updates upon receiving ?capture-start and publishing the \n resultant information to Redis pub/sub channels.\n 4. Deleting the corresponding KATPortalClient object when\n a ?deconfigure request is sent.\n\n TODO:\n 1. 
Support thread-safe stopping of ioloop\n \"\"\"\n\n VERSION = '2020-06-19'\n\n def __init__(self, config_file):\n \"\"\"Our client server to the Katportal\"\"\"\n self.redis_server = redis.StrictRedis(decode_responses = True)\n self.p = self.redis_server.pubsub(ignore_subscribe_messages=True)\n self.io_loop = io_loop = tornado.ioloop.IOLoop.current()\n self.subarray_katportals = dict() # indexed by product IDs\n self.namespaces = dict() # indexed by product IDs\n self.config_file = config_file\n self.ant_sensors = [] # sensors required from each antenna\n self.stream_sensors = [] # stream sensors (for continuous update)\n self.cbf_conf_sensors = [] # cbf sensors to be queried once-off\n self.cbf_sensors = [] # cbf sensors (for continuous update)\n self.stream_conf_sensors = [] # stream sensors for acquisition\n self.conf_sensors = [] # other sensors to be queried once-off \n self.subarray_sensors = [] # subarray-level sensors\n self.cont_update_sensors = [] # for all sensors for continuous update\n self.cbf_on_track = [] # cbf sensors for acquisition on each target\n self.cbf_name = 'cbf_1' # Default CBF short name\n\n def MSG_TO_FUNCTION(self, msg_type):\n MSG_TO_FUNCTION_DICT = {\n 'configure' : self._configure,\n 'capture-init' : self._capture_init,\n 'capture-start': self._capture_start,\n 'capture-stop' : self._capture_stop,\n 'capture-done' : self._capture_done,\n 'deconfigure' : self._deconfigure,\n 'conf_complete' : self._conf_complete\n }\n return MSG_TO_FUNCTION_DICT.get(msg_type, self._other)\n \n def start(self):\n self.p.subscribe(REDIS_CHANNELS.alerts)\n if(sys.stdout.isatty()):\n self._print_start_image()\n for msg in self.p.listen():\n msg_data = msg['data']\n msg_parts = msg_data.split(':')\n if len(msg_parts) != 2:\n log.info(\"Not processing this message --> {}\".format(message))\n continue\n msg_type = msg_parts[0]\n product_id = msg_parts[1]\n self.MSG_TO_FUNCTION(msg_type)(product_id)\n\n def on_update_callback_fn(self, product_id, msg):\n \"\"\"Handler for messages published over sensor websockets.\n The received sensor values are stored in the redis database.\n\n Args:\n product_id (str): the product id given in the ?configure request\n msg (dict): a dictionary containing the updated sensor information\n\n Returns:\n None\n \"\"\"\n for key, value in msg.items():\n if key == 'msg_data':\n sensor_name = msg['msg_data']['name']\n sensor_value = msg['msg_data']['value']\n # Write sensors (continuous update)\n key = \"{}:{}\".format(product_id, sensor_name)\n write_pair_redis(self.redis_server, key, repr(sensor_value))\n # Data-suspect mask for publication\n if('data_suspect' in sensor_name):\n # Make sure sensor value is trustworthy\n # as these particular sensors take extra time \n # to initialise. 
\n # Note: assumes only one data-suspect sensor is \n # to be used.\n sensor_status = msg['msg_data']['status']\n if(sensor_status == 'nominal'):\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}:{}'.format('data-suspect', product_id, \n sensor_value))\n #RA/Dec/Az/El\n elif('pos_request_base' in sensor_name):\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}:{}'.format(product_id, \n sensor_name, sensor_value))\n # Aternate method:\n # self.antenna_consensus(product_id, \n # 'pos_request_base_dec')\n # Check for noise diode operation:\n elif('diode' in sensor_name):\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts,\n '{}:{}:{}'.format(product_id, \n sensor_name, sensor_value))\n # Target information for publication\n elif('target' in sensor_name):\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts,\n '{}:{}:{}'.format(product_id, \n sensor_name, sensor_value))\n write_pair_redis(self.redis_server, '{}:target'.format(product_id), \n sensor_value)\n write_pair_redis(self.redis_server, '{}:last-target'.format(product_id), \n str(time.time()))\n self.save_history(self.redis_server, product_id, 'target',\n sensor_value)\n # Observation state for publication\n elif('activity' in sensor_name):\n if(sensor_value == 'track'):\n # Uncomment below to retrieve once-off CBF sensor values\n # if(len(self.cbf_on_track) > 0):\n # # Complete the CBF sensor names with the CBF \n # # component name.\n # cbf_on_track_names = ['{}_'.format(self.cbf_name) +\n # sensor for sensor in self.cbf_on_track]\n # # Get CBF sensors and write to redis.\n # self.fetch_once(cbf_on_track_names, product_id,\n # 3, 30, 0.5)\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}'.format('tracking', product_id))\n else:\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}'.format('not-tracking', product_id))\n # If script not running, attempt to unsubscribe from sensors\n elif('script_status' in sensor_name):\n if(sensor_value != 'busy'):\n self.unsubscribe_list(product_id)\n\n def _configure(self, product_id):\n \"\"\"Executes when configure request is processed\n\n Args:\n product_id (str): the product id given in the ?configure request\n\n Returns:\n None\n \"\"\"\n # Update configuration:\n try:\n (ant_sensors, \n cbf_conf_sensors, \n stream_sensors, \n cbf_sensors, \n conf_sensors, \n subarray_sensors, \n stream_conf_sensors,\n cbf_on_track) = self.configure_katportal(\n os.path.join(os.getcwd(), self.config_file))\n if(ant_sensors is not None):\n self.ant_sensors = []\n self.ant_sensors.extend(ant_sensors)\n if(cbf_conf_sensors is not None):\n self.cbf_conf_sensors = []\n self.cbf_conf_sensors.extend(cbf_conf_sensors)\n if(stream_conf_sensors is not None):\n self.stream_conf_sensors = []\n self.stream_conf_sensors.extend(stream_conf_sensors)\n if(stream_sensors is not None):\n self.stream_sensors = []\n self.stream_sensors.extend(stream_sensors)\n if(conf_sensors is not None):\n self.conf_sensors = []\n self.conf_sensors.extend(conf_sensors)\n if(subarray_sensors is not None):\n self.subarray_sensors = []\n self.subarray_sensors.extend(subarray_sensors)\n if(cbf_sensors is not None):\n self.cbf_sensors = []\n self.cbf_sensors.extend(cbf_sensors)\n if(cbf_on_track is not None):\n self.cbf_on_track = []\n self.cbf_on_track.extend(cbf_on_track)\n log.info('Configuration updated')\n except:\n log.warning('Configuration not updated; old configuration might be present.')\n 
cam_url = self.redis_server.get(\"{}:{}\".format(product_id, 'cam:url'))\n client = KATPortalClient(cam_url, on_update_callback=partial(\n self.on_update_callback_fn, product_id), logger=log)\n #client = KATPortalClient(cam_url, \n # on_update_callback=lambda x: self.on_update_callback_fn(product_id), \n # logger=log)\n self.subarray_katportals[product_id] = client\n log.info(\"Created katportalclient object for : {}\".format(product_id))\n subarray_nr = product_id[-1]\n ant_key = '{}:antennas'.format(product_id) \n ant_list = self.redis_server.lrange(ant_key, 0, \n self.redis_server.llen(ant_key))\n # Enter antenna list into the history hash\n ant_history = json.dumps(ant_list)\n self.save_history(self.redis_server, product_id, 'antennas', \n ant_history)\n # Get sensors on configure\n if(len(self.conf_sensors) > 0):\n conf_sensor_names = ['subarray_{}_'.format(subarray_nr) \n + sensor for sensor in self.conf_sensors]\n self.fetch_once(conf_sensor_names, product_id, 3, 30, 0.5)\n # Get CBF component name (in case it has changed to \n # CBF_DEV_[product_id] instead of CBF_[product_id])\n key = '{}:subarray_{}_{}'.format(product_id, subarray_nr, \n 'pool_resources')\n pool_resources = self.redis_server.get(key).split(',')\n self.cbf_name = self.component_name('cbf', pool_resources, log)\n key = '{}:{}'.format(product_id, 'cbf_name')\n write_pair_redis(self.redis_server, key, self.cbf_name)\n # Get CBF sensor values required on configure.\n cbf_prefix = self.redis_server.get('{}:cbf_prefix'.format(product_id))\n if(len(self.cbf_conf_sensors) > 0):\n # Complete the CBF sensor names with the CBF component name.\n cbf_sensor_prefix = '{}_{}_'.format(self.cbf_name, cbf_prefix)\n cbf_conf_sensor_names = [cbf_sensor_prefix + \n sensor for sensor in self.cbf_conf_sensors]\n # Get CBF sensors and write to redis.\n self.fetch_once(cbf_conf_sensor_names, product_id, 3, 30, 0.5)\n # Calculate antenna-to-Fengine mapping\n antennas, feng_ids = self.antenna_mapping(product_id, \n cbf_sensor_prefix)\n write_pair_redis(self.redis_server, \n '{}:antenna_names'.format(product_id), antennas)\n write_pair_redis(self.redis_server, \n '{}:feng_ids'.format(product_id), feng_ids)\n # Get stream sensors on configure:\n if(len(self.stream_conf_sensors) > 0):\n stream_conf_sensors = ['subarray_{}_streams_{}_{}'.format(\n subarray_nr, cbf_prefix, sensor) for sensor in \n self.stream_conf_sensors]\n self.fetch_once(stream_conf_sensors, product_id, 3, 30, 0.5) \n # Initialise last-target to 0\n write_pair_redis(self.redis_server, '{}:last-target'.format(product_id), 0) \n # Indicate to anyone listening that the configure process is complete.\n publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, \n 'conf_complete:{}'.format(product_id))\n\n def _conf_complete(self, product_id):\n \"\"\"Called when sensor values for acquisition on configure have been \n acquired.\n \n Args: \n product_id (str): the name of the current subarray provided in\n the ?configure request.\n\n Returns:\n None\n \"\"\"\n log.info(\"Sensor values on configure acquired for {}.\".format(product_id))\n # Alert via slack:\n slack_message = \"{}::meerkat:: Successful subarray configuration\".format(SLACK_CHANNEL)\n publish_to_redis(self.redis_server, PROXY_CHANNEL, slack_message)\n\n def _capture_init(self, product_id):\n \"\"\"Responds to capture-init request by acquiring schedule block\n information including the list of pointings and the current \n schedule block IDs.\n\n Args:\n product_id (str): the name of the current subarray provided in\n 
the ?configure request.\n\n Returns:\n None\n \"\"\"\n # Schedule block IDs (sched_observation_schedule_1) \n # This is the list of schedule block IDs. The currently running block\n # will be in position 1.\n self.fetch_once('sched_observation_schedule_1', product_id, 3, 30, 0.5)\n # Schedule blocks - pointing list\n retries = 3\n # Increase the timeout by this factor on subsequent retries\n timeout_factor = 0.5 \n for i in range(0, retries):\n try:\n schedule_blocks = self.io_loop.run_sync(\n lambda: self._get_future_targets(product_id),\n timeout = 30 + timeout_factor*i*30)\n key = \"{}:schedule_blocks\".format(product_id)\n write_pair_redis(self.redis_server, key, \n json.dumps(schedule_blocks))\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, key)\n # If success, break.\n break\n except:\n log.warning(\"Could not retrieve schedule blocks: attempt {} of {}\".format(i + 1, retries))\n # If retried times, then log an error.\n if(i == (retries - 1)):\n log.error(\"Could not retrieve schedule blocks: {} attempts, giving up.\".format(retries))\n\n def _capture_start(self, product_id):\n \"\"\"Responds to capture-start request. Subscriptions to required \n sensors are made. Note that this must be done here (on capture-start),\n because some sensors (notably observation-activity) only become available \n for subscription on capture-start,\n\n Args:\n product_id (str): the name of the current subarray provided in the\n ?configure request.\n\n Returns:\n None\n \"\"\"\n # Save capture-start time:\n write_pair_redis(self.redis_server, '{}:last-capture-start'.format(product_id), \n str(time.time())) \n # Once-off sensors to query on ?capture_done\n # Uncomment below to add sensors for query.\n # sensors_to_query = [] \n # self.fetch_once(sensors_to_query, product_id, 3, 5, 0.5)\n sensors_for_update = self.build_sub_sensors(product_id)\n # Test the speed of retrieval for target information from an \n # individual antenna:\n # Retrieve list of antennas:\n ant_key = '{}:antennas'.format(product_id) \n antennas = self.redis_server.lrange(ant_key, 0, \n self.redis_server.llen(ant_key))\n # Build antenna sensor name\n # Pick first antenna in list for now \n # (implement antenna consensus again if this approach proves faster)\n ant_target = \"{}_target\".format(antennas[0]) \n sensors_for_update.append(ant_target)\n # Start io_loop to listen to sensors whose values should be registered\n # immediately when they change.\n if(len(sensors_for_update) > 0):\n loop = tornado.ioloop.IOLoop.current()\n loop.add_callback(lambda: self.subscribe_list(product_id, \n sensors_for_update))\n loop.start()\n\n def _capture_stop(self, product_id):\n \"\"\"Responds to capture-stop request.\n\n Args:\n product_id (str): the name of the current subarray provided in\n the ?configure request.\n\n Returns:\n None\n \"\"\"\n # Once-off sensors to query on ?capture-stop\n # Uncomment these lines to add sensors for query\n #sensors_to_query = [] \n #self.fetch_once(sensors_to_query, product_id, 3, 5, 0.5)\n pass\n\n def _capture_done(self, product_id):\n \"\"\"Responds to capture-done request. 
Resets the schedule block list to\n 'Unknown_SB'.\n\n Args:\n product_id (str): the name of the current subarray provided in\n the ?configure request.\n\n Returns:\n None\n \"\"\"\n # Reset schedule block to empty list\n key = '{}:sched_observation_schedule_1'.format(product_id)\n write_pair_redis(self.redis_server, key, 'Unknown_SB')\n\n def _deconfigure(self, product_id):\n \"\"\"Responds to deconfigure request\n\n Args:\n product_id (str): the name of the current subarray provided in \n the ?configure request.\n\n Returns:\n None\n \"\"\"\n # Once-off sensors to query on ?deconfigure\n # Uncomment the following lines to add sensors for query\n #sensors_to_query = [] \n #self.fetch_once(sensors_to_query, product_id, 3, 5, 0.5) \n if product_id not in self.subarray_katportals:\n log.warning(\"Failed to deconfigure a non-existent product_id: {}\".format(product_id))\n else:\n self.subarray_katportals.pop(product_id)\n log.info(\"Deleted KATPortalClient instance for product_id: {}\".format(product_id))\n \n def _other(self, product_id):\n \"\"\"This is called when an unrecognized request is sent.\n\n Args:\n product_id (str): the name of the current subarray provided in\n the ?configure request.\n\n Returns:\n None\n \"\"\"\n log.warning(\"Unrecognized alert : {}\".format(message['data']))\n\n def configure_katportal(self, cfg_file):\n \"\"\"Configure the katportal_server from the .yml config file.\n \n Args:\n cfg_file (str): File path to .yml config file.\n \n Returns:\n None\n \"\"\"\n try:\n with open(cfg_file, 'r') as f:\n try:\n cfg = yaml.safe_load(f)\n return(cfg['per_antenna_sub'], \n cfg['cbf_on_configure'], \n cfg['stream_sub'], \n cfg['cbf_sub'], \n cfg['array_on_configure'],\n cfg['array_sub'], \n cfg['stream_on_configure'],\n cfg['cbf_on_track'])\n except yaml.YAMLError as E:\n log.error(E)\n except IOError:\n log.error('Config file not found')\n\n @tornado.gen.coroutine\n def unsubscribe_list(self, product_id):\n \"\"\"Unsubscribe from all sensor websocket subscriptions for \n the subarray designated by product_id.\n\n Args: \n product_id (str): Name of the current subarray.\n\n Returns:\n None\n \"\"\"\n yield self.subarray_katportals[product_id].unsubscribe(\n namespace = self.namespaces[product_id])\n yield self.subarray_katportals[product_id].disconnect()\n # Stop io_loop\n self.io_loop.stop()\n log.info('Unsubscribed from sensors.') \n\n @tornado.gen.coroutine\n def subscribe_list(self, product_id, sensor_list):\n \"\"\"Subscribes to each sensor listed for asynchronous updates.\n \n Args:\n sensor_list (list): Full names of sensors to subscribe to.\n product_id (str): The product ID given in the ?configure request.\n\n Returns:\n None\n \"\"\"\n self.namespaces[product_id] = '{}_{}'.format(product_id, \n str(uuid.uuid4()))\n yield self.subarray_katportals[product_id].connect()\n result = yield self.subarray_katportals[product_id].subscribe(\n namespace = self.namespaces[product_id]) \n for sensor in sensor_list:\n # Using product_id to retrieve unique namespace\n result = yield self.subarray_katportals[product_id].set_sampling_strategies(\n self.namespaces[product_id], sensor, 'event')\n log.info('Subscribed to {} sensors'.format(len(sensor_list)))\n\n def subarray_data_suspect(self, product_id):\n \"\"\"Publish a global subarray data-suspect value by checking each\n individual antenna. 
If any antennas are marked faulty by an operator, \n they are logged, but the subarray data-suspect remains False \n so that the observation may continue with the remaining antennas.\n\n Args:\n product_id (str): Name of the current subarray.\n\n Returns:\n None\n \"\"\"\n ant_key = '{}:antennas'.format(product_id)\n ant_list = self.redis_server.lrange(ant_key, 0, \n self.redis_server.llen(ant_key)) \n ant_status = []\n try:\n for i in range(len(ant_list)):\n data_suspect = ast.literal_eval(self.redis_server.get(\n '{}:{}_data_suspect'.format(product_id, ant_list[i])))\n marked_faulty_key = '{}:{}_marked_faulty'.format(product_id, \n ant_list[i])\n marked_faulty = ast.literal_eval(self.redis_server.get(\n marked_faulty_key))\n if(data_suspect & marked_faulty):\n # If an antenna is marked faulty while subarray built, \n # allow it and log. \n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, marked_faulty_key)\n ant_status.append(False) \n # Note that if marked_faulty is True, \n # ant_suspect is by definition True. \n else:\n ant_status.append(data_suspect)\n if(sum(ant_status) == 0): # all antennas show good data\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:data_suspect:{}'.format(product_id, False))\n else: \n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:data_suspect:{}'.format(product_id, True))\n except:\n # If any of the sensors are not available, set subarray data \n # suspect flag to True\n publish_to_redis(self.redis_server, REDIS_CHANNELS.sensor_alerts,\n '{}:data_suspect:{}'.format(product_id, True))\n\n def subarray_consensus(self, product_id, sensor, value, components, n_stragglers):\n \"\"\"Determine if a particular sensor value is the same as a specified \n value for all of the specified components in a particular subarray, with \n the exception of a number of stragglers.\n\n Args:\n product_id (str): Name of the current subarray.\n sensor (str): Sensor to be checked for each component. Note that\n only per-component sensors can be specified.\n value (str): The desired value of all the components.\n components (list): List of components (str).\n n_stragglers (int): The number of components for which a consensus\n is still accepted. \n\n Returns:\n consensus (bool): If a subarray consensus has been reached\n or not, subject to the n_stragglers condition.\n mask (str): Mask of which components exhibit the desired value.\n 0 is true, 1 is false. \n \"\"\"\n mask = np.ones(len(components))\n consensus = False\n sensor_list = ['{}:{}_{}'.format(product_id, component, sensor) \n for component in components]\n components_status = self.redis_server.mget(sensor_list)\n for i, status in enumerate(components_status):\n if(status == value):\n mask[i] = 0\n if(np.sum(mask) <= n_stragglers):\n consensus = True\n else:\n log.info('Consensus for {} not reached'.format(sensor))\n return consensus, mask\n\n def antenna_consensus(self, product_id, sensor_name):\n \"\"\"Determine if a particular sensor value is the same for all antennas\n in a particular subarray.\n\n Args:\n product_id (str): Name of the current subarray.\n sensor_name (str): Sensor to be checked for each antenna. 
Note that\n only per-antenna sensors can be specified.\n\n Returns:\n None\n \"\"\"\n ant_key = '{}:antennas'.format(product_id)\n ant_list = self.redis_server.lrange(ant_key, 0, \n self.redis_server.llen(ant_key))\n ant_status = ''\n ant_compare = ''\n try:\n for i in range(len(ant_list)):\n ant_status = ant_status + self.redis_server.get(\n '{}:{}_{}'.format(product_id, ant_list[i], sensor_name)) \n ant_compare = ant_compare + self.redis_server.get(\n '{}:{}_{}'.format(product_id, ant_list[0], \n sensor_name))*len(ant_list)\n if(ant_status == ant_compare): # all antennas show the same value\n # Get value from last antenna\n value = ast.literal_eval(self.redis_server.get(\n '{}:{}_{}'.format(product_id, ant_list[i], sensor_name)))\n if(value is not None):\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}:{}'.format(product_id, sensor_name, value))\n key = '{}:{}'.format(product_id, sensor_name)\n write_pair_redis(self.redis_server, key, value)\n else:\n publish_to_redis(self.redis_server, \n REDIS_CHANNELS.sensor_alerts, \n '{}:{}:unavailable'.format(product_id, sensor_name))\n else:\n log.warning(\"Antennas do not show consensus for sensor: {}\".format(sensor_name))\n except:\n # If any of the sensors are not available:\n publish_to_redis(self.redis_server, REDIS_CHANNELS.sensor_alerts, \n '{}:{}:unavailable'.format(product_id, sensor_name))\n\n def gen_ant_sensor_list(self, product_id, ant_sensors):\n \"\"\"Automatically builds a list of sensor names for each antenna.\n\n Args:\n product_id (str): the product id given in the ?configure request\n ant_sensors (list): the sensors to be queried for each antenna\n\n Returns:\n ant_sensor_list (list): the full sensor names associated with \n each antenna\n \"\"\"\n ant_sensor_list = []\n # Add sensors specific to antenna components for each antenna:\n ant_key = '{}:antennas'.format(product_id)\n ant_list = self.redis_server.lrange(ant_key, 0, \n self.redis_server.llen(ant_key)) # list of antennas\n for ant in ant_list:\n for sensor in ant_sensors:\n ant_sensor_list.append(ant + '_' + sensor)\n return ant_sensor_list\n \n def gen_stream_sensor_list(self, product_id, stream_sensors, cbf_prefix):\n \"\"\"Automatically builds a list of stream sensor names.\n\n Args:\n product_id (str): The product id given in the ?configure request.\n stream_sensors (list): The stream sensors to be subscribed to.\n cbf_prefix (str): full CBF prefix. \n\n Returns:\n stream_sensor_list (list): the full sensor names.\n \"\"\"\n stream_sensor_list = []\n for sensor in stream_sensors:\n sensor_name = 'subarray_{}_streams_{}_{}'.format(product_id[-1], \n cbf_prefix, sensor)\n stream_sensor_list.append(sensor_name)\n return stream_sensor_list\n\n def gen_cbf_sensor_list(self, cbf_sensors, cbf_name):\n \"\"\"Builds sensor list for cbf sensor names.\n\n Args:\n cbf_sensors (list): The cbf sensors to be subscribed to.\n cbf_name (str): short CBF name. 
\n\n Returns:\n cbf_sensor_list (list): the full sensor names.\n \"\"\"\n cbf_sensor_list = []\n for sensor in cbf_sensors:\n sensor_name = '{}_{}'.format(cbf_name, sensor)\n cbf_sensor_list.append(sensor_name)\n return cbf_sensor_list\n\n def build_sub_sensors(self, product_id):\n \"\"\"Builds the list of sensors for subscription.\n\n Args:\n product_id (str): the name of the current subarray provided in\n the ?configure request.\n\n Returns:\n sensors_for_update (list): list of full sensor names for \n subscription.\n \"\"\"\n # Get cont update sensors\n sensors_for_update = []\n # Antenna sensors:\n sensors_for_update.extend(self.gen_ant_sensor_list(product_id, \n self.ant_sensors))\n # Stream sensors:\n cbf_prefix = self.redis_server.get('{}:cbf_prefix'.format(product_id))\n stream_sensors = self.gen_stream_sensor_list(product_id, \n self.stream_sensors, cbf_prefix)\n sensors_for_update.extend(stream_sensors)\n # Subarray sensors:\n for sensor in self.subarray_sensors:\n sensor = 'subarray_{}_{}'.format(product_id[-1], sensor)\n sensors_for_update.append(sensor)\n # CBF sensors:\n cbf_sensors = self.gen_cbf_sensor_list(self.cbf_sensors, self.cbf_name)\n sensors_for_update.extend(cbf_sensors)\n return sensors_for_update\n\n def component_name(self, short_name, pool_resources, log):\n \"\"\"Determine the full name of a subarray component. \n This is most needed in the case of \"dev\" components - \n for example, cbf_dev_2 instead of cbf_2.\n Returns the first match.\n\n Args:\n short_name (str): Short name of component (eg 'cbf').\n pool_resources (str): List of components (str) in current \n subarray.\n log: Logger.\n\n Returns:\n full_name (str): Full name of component.\n \"\"\"\n full_name = None\n for component in pool_resources:\n if short_name in component:\n full_name = component\n if full_name is None:\n log.warning('Could not find component: {}'.format(short_name))\n return full_name \n\n def save_history(self, redis_server, product_id, key, value):\n \"\"\"Save a particular key-value pair to a redis sensor history hash.\n \n Args:\n redis_server: current Redis server.\n product_id (str): the product ID given in the ?configure request.\n key (str): the name of the history item.\n value (str): the contents of the history item.\n\n Returns:\n None\n \"\"\"\n hash_name = 'history:{}:{}'.format(product_id, key)\n # Avoid isoformat from datetime as behaviour not consisent\n # Avoid specifying strftime decimal places due to inconsistent \n # behaviour\n # Recommmendation seems to be to simply truncate microseconds\n # if need be.\n time = datetime.utcnow()\n # Set ms field to 000 as specified\n time = time.strftime(\"%Y%m%dT%H%M%S.000Z\")\n redis_server.hset(hash_name, time, value)\n\n def fetch_once(self, sensor_names, product_id, retries, sync_timeout, timeout_factor):\n \"\"\"Handles once-off sensor requests, permitting retries in case there are problems \n on the CAM side. Once the sensor values are retrieved, the name-value pair are \n written to the Redis database.\n\n Args:\n timeout_factor (float): Fraction by which to increase the timeout with on \n each re-attempt.\n sensor_names (list): List of full sensor names whose values are required.\n product_id (str): The product ID for the current subarray.\n retries (int): The number of times to attempt fetching the sensor values.\n sync_timeout (int): The maximum time to wait for sensor values from CAM. 
\n\n None.\n \"\"\" \n for i in range(retries):\n try:\n self.io_loop.run_sync(lambda: self._get_sensor_values(\n product_id, sensor_names), \n timeout = sync_timeout + int(sync_timeout*timeout_factor*i))\n # If sensors succesfully queried and written to Redis, break.\n break \n except:\n log.warning(\"Could not retrieve once-off sensors: attempt {} of {}\".format(\n i + 1, retries))\n # If retried times, then log an error.\n if(i == (retries - 1)):\n log.error(\"Could not retrieve once-off sensors: {} attempts, giving up.\".format(\n retries)) \n log.error(\"{} could not be retrieved.\".format(sensor_names))\n\n def antenna_mapping(self, product_id, cbf_sensor_prefix):\n \"\"\"Get the mapping from antenna to F-engine ID as given in \n packet headers.\n\n Args:\n product_id (str): Identifier of current subarray.\n cbf_sensor_prefix (str): Prefix for the sensor name\n according to the current subarray configuration. \n Eg: \"cbf_1_wide_\"\n\n Returns:\n antennas (list): List of antenna names.\n feng_ids (list): List of corresponding F-engine IDs.\n \"\"\"\n labelling_sensor = '{}:{}input_labelling'.format(product_id, \n cbf_sensor_prefix)\n labelling = self.redis_server.get(labelling_sensor)\n labelling = ast.literal_eval(labelling)\n antennas = str([item[0] for item in labelling])\n feng_ids = str([int(np.floor(int(item[1])/2.0)) for item in \n labelling])\n return antennas, feng_ids\n\n @tornado.gen.coroutine\n def _get_future_targets(self, product_id):\n \"\"\"Gets the schedule blocks of the product_id's subarray.\n\n Args:\n product_id (str): the name of the current subarray provided in \n the ?configure request.\n\n Returns:\n List of dictionaries containing schedule block information.\n\n Examples:\n >>> self.io_loop.run_sync(lambda: \n self._get_future_targets(product_id))\n \"\"\"\n client = self.subarray_katportals[product_id]\n sb_ids = yield client.schedule_blocks_assigned()\n blocks = []\n for sb_id in sb_ids:\n # Should this be 'client' rather than 'portal_client'?\n # block = yield portal_client.future_targets(sb_id)\n block = yield client.future_targets(sb_id)\n blocks.append(block)\n raise tornado.gen.Return(blocks)\n\n @tornado.gen.coroutine\n def _get_sensor_values(self, product_id, targets):\n \"\"\"Gets sensor values associated with the current subarray and\n writes them to the Redis database.\n\n Args:\n product_id (str): the name of the current subarray provided in \n the ?configure request.\n targets (list): expressions to look for in sensor names.\n\n Returns:\n None\n\n Examples:\n >>> self.io_loop.run_sync(lambda: \n self._get_sensor_values(product_id, [\"target\", \"ra\", \"dec\"]))\n \"\"\"\n if not targets:\n log.warning(\"Sensor list empty. Not querying katportal...\")\n raise tornado.gen.Return(sensors_and_values)\n client = self.subarray_katportals[product_id]\n sensor_names = yield client.sensor_names(targets)\n if not sensor_names:\n log.warning(\"No matching sensors found!\")\n else:\n # Query approach:\n # Instead of sequentially querying each sensor, build a regex query\n # and fetch them all at once at the suggestion of ebarr. \n # This is said to cause fewer timeout problems. 
\n query = \"|\".join(sensor_names)\n try:\n sensor_details = yield client.sensor_values(query, \n include_value_ts=True)\n for sensor, details in sensor_details.items():\n sensor_dict = self._convert_SensorSampleValueTime_to_dict(details)\n redis_key = \"{}:{}\".format(product_id, sensor)\n # Only writing the sensor value (no other metadata for now)\n write_pair_redis(self.redis_server, redis_key, \n sensor_dict['value'])\n except Exception as e:\n log.error(e)\n\n def _convert_SensorSampleValueTime_to_dict(self, sensor_value):\n \"\"\"Converts the named-tuple object returned by sensor_value\n query into a dictionary. This dictionary contains the \n following values:\n - timestamp: float\n The timestamp (UNIX epoch) the sample was received by CAM.\n Timestamp value is reported with millisecond precision.\n - value_timestamp: float\n The timestamp (UNIX epoch) the sample was read at the \n lowest level sensor. value_timestamp value is reported \n with millisecond precision.\n - value: str\n The value of the sensor when sampled. The units depend \n on the sensor, see :meth:`.sensor_detail`.\n - status: str\n The status of the sensor when the sample was taken. As \n defined by the KATCP protocol. Examples: 'nominal', 'warn', \n 'failure', 'error', 'critical', 'unreachable', 'unknown', \n etc.\n\n Args:\n sensor_value (SensorSampleValueTime)\n\n Returns:\n sensor_value_dict (dict)\n \"\"\"\n sensor_value_dict = dict()\n sensor_value_dict['timestamp'] = sensor_value.sample_time\n sensor_value_dict['value_timestamp'] = sensor_value.value_time\n sensor_value_dict['value'] = sensor_value.value\n sensor_value_dict['status'] = sensor_value.status\n return sensor_value_dict\n\n def _print_start_image(self):\n print(R\"\"\"\n ________________________________\n / \"-_\n / . | . 
\\\n / : \\ | / : \\\n / '-___-' \\\n /_________________________________________ \\\n _______| |________________________--\"\"-L\n / F J \\\n / F J L\n / :' ': F\n / '-___-' /\n /_________________________________________--\"\n+---------------------------------------------------+\n| |\n| Breakthrough Listen's |\n| |\n| KATPortal Client |\n| |\n| Version: {} |\n| |\n| github.com/danielczech/meerkat-backend-interface |\n| github.com/ejmichaud/meerkat-backend-interface |\n| |\n+---------------------------------------------------+\n\"\"\".format(self.VERSION))\n","sub_path":"meerkat_backend_interface/katportal_server.py","file_name":"katportal_server.py","file_ext":"py","file_size_in_byte":41302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391809208","text":"#!/usr/bin/env python\n\nimport argparse\nimport copy\nimport re\nimport sys\nimport xml.sax\n\nclass Match(object):\n\tdef __init__(self, section, submatches):\n\t\tself._section = section\n\t\tself._submatches = submatches\n\n\tdef get_sub_matches(self):\n\t\treturn self._submatches\n\n\tdef get_section(self):\n\t\treturn self._section\n\n\tdef __hash__(self):\n\t\treturn hash(self._section)\n\n\tdef __eq__(self, other):\n\t\tif self._section != other._section:\n\t\t\treturn False\n\n\t\t#their must be a match for everything in other._submatches in self or\n\t\t# not equal\n\t\tselfsubs = self._submatches\n\t\tothersubs = other._submatches\n\t\tfor os in othersubs:\n\t\t\tif os not in selfsubs:\n\t\t\t\treturn False\n\n\t\treturn True\n\n\tdef write(self, filep=sys.stdout, lineno=False, tidy=False):\n\n\t\ts = self.get_section()\n\t\tif not tidy:\n\t\t\ts.write(filep=filep, lineno=lineno)\n\t\telse:\n\t\t\tfilep.write(s.format(lineno=lineno, option_map=self._submatches))\n\n\tdef match(self, other):\n\t\treturn self._section.match(other, False)\n\n\tdef filter(self, match):\n\n\t\tleft = {}\n\t\tsubs = self._submatches\n\t\tfltr = match.get_sub_matches()\n\t\t# The filter might contain arguments that were not matched, ie the search might\n\t\t# have been on socket (so sub_matches is only socket_ but the filter might include\n\t\t# args. 
We only filter on what the intersection between the two are.\n\t\tfltr_keys = set(fltr.keys())\n\t\tsub_keys = set(subs.keys())\n\t\tkeys = fltr_keys & sub_keys\n\n\t\t# Search each key value in the filter (white list exception)\n\t\tfor k in keys:\n\t\t\tv = fltr[k]\n\n\t\t\t# Keep a copy of the list associated with the k, we remove the\n\t\t\t# items from the list when a filter matches\n\t\t\ts = list(subs[k])\n\n\t\t\t# For each line in the exceptions filter, we compile it\n\t\t\t# as a possible regex and search the sub_matches with it.\n\t\t\tfor x in v:\n\t\t\t\t# If we find a match we remove it from the copy of sub_matches\n\t\t\t\tif x in subs[k]:\n\t\t\t\t\ts.remove(x)\n\n\t\t\t# If anything is left in the list of things, we add it as 'left', ie the delta\n\t\t\tif len(s) > 0:\n\t\t\t\tleft[k] = s\n\n\t\tself._submatches = left\n\n\t\t# The filter returns true if it was filtered out, ie nothing left, else if their is a delta\n\t\t# False is returned.\n\t\treturn not bool(left)\n\nclass Section(object):\n\n\t_KW_ARGS = 'args'\n\n\t_keywords = { _KW_ARGS : None }\n\n\tdef __init__(self, name, args, path, lineno, keywords=None):\n\t\tself._name = name\n\t\tself._lineno = lineno\n\t\tself._path = path\n\n\t\tself._option_strip = []\n\t\tself._no_print = Section._keywords.keys()\n\n\t\tself._option_map = copy.deepcopy(Section._keywords)\n\t\tif keywords != None:\n\t\t\tself._option_map.update(copy.deepcopy(keywords))\n\n\t\tself._option_map[Section._KW_ARGS] = (args, -1)\n\n\tdef get_header(self):\n\t\tn = self._name\n\t\tp = self._path\n\t\tl = str(self._lineno)\n\t\ta = self._option_map[Section._KW_ARGS][0]\n\t\treturn n + '(' + p + ' : ' + l + '): ' + a\n\n\tdef write(self, filep=sys.stdout, lineno=False):\n\t\tfilep.write(self.format(lineno=lineno))\n\n\tdef _keysort(self, tup):\n\n\t\tlst = tup[1]\n\t\tif len(lst) == 0:\n\t\t\treturn -2\n\t\tx = lst[0][1]\n\t\treturn x\n\n\tdef format(self, lineno=False, option_map=None):\n\n\t\tfmtout = self.get_header() + '\\n'\n\n\t\topt_map = self._option_map if not option_map else option_map\n\n\t\t# convert dict of keyword : [ list of tuples ] to a line order sorted list\n\t\t# of tuples (keyword, [(item, lineno), (item, lineno)])\n\t\titems = []\n\t\tfor k, v in opt_map.iteritems():\n\t\t\tif k in self._no_print:\n\t\t\t\tcontinue\n\n\t\t\t# listify tuples (normalizes the data)\n\t\t\tif isinstance(v, tuple):\n\t\t\t\tv = [ v ]\n\n\t\t\t# sort internal keyword list\n\t\t\tv.sort(key=lambda tup: tup[1])\n\t\t\titems.append((k, v))\n\n\t\t# Now we sort based on the highest item in the keyword list\n\t\titems.sort(key=self._keysort)\n\n\t\tfor (k, v) in items:\n\n\t\t\tif k in self._no_print:\n\t\t\t\tcontinue\n\n\t\t\t# Everything is a list of tupes, skip\n\t\t\t# unset things\n\t\t\tif len(v) == 0 or (len(v) == 1 and v[0][0] == None):\n\t\t\t\tcontinue\n\n\t\t\tstrip = True if k in self._option_strip else False\n\t\t\tk = None if strip else k\n\n\t\t\t# for tuple in list of tuples (tuples may have embedded lists as well)\n\t\t\tfor x in v:\n\n\t\t\t\tl = x[1]\n\t\t\t\ti = x[0]\n\n\t\t\t\tif not isinstance(i, list):\n\t\t\t\t\ti = [ i ]\n\n\t\t\t\tfor d in i:\n\n\t\t\t\t\tif isinstance(d, bool):\n\t\t\t\t\t\tif not d:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\td = str(d).lower()\n\n\t\t\t\t\tif lineno and l >= 0:\n\t\t\t\t\t\tfmtout += str(l)\n\n\t\t\t\t\tfmtout += '\\t'\n\n\t\t\t\t\tif k:\n\t\t\t\t\t\tfmtout += k + ' : '\n\n\t\t\t\t\tfmtout += d + '\\n'\n\n\t\treturn fmtout\n\n\tdef match(self, other, lazy_regex):\n\n\t\t(x, s) = 
self._section_cmp(other, lazy_regex)\n\t\tif x >= 0:\n\t\t\treturn Match(self, s)\n\t\telse:\n\t\t\treturn None\n\n\tdef get_name(self):\n\t\treturn self._name\n\n\tdef get_args(self):\n\t\treturn self._option_map[Section._KW_ARGS]\n\n\tdef get_lineno(self):\n\t\treturn self._lineno\n\n\tdef push(self, line):\n\t\traise Exception('Implement me!')\n\n\t@staticmethod\n\tdef get_keywords():\n\t\treturn Section.get_keymap().keys()\n\n\t@staticmethod\n\tdef get_keymap():\n\t\treturn Section._keywords\n\n\tdef _section_cmp(self, dict2, lazy_regex):\n\t\tdict1 = self._option_map\n\t\tkeys1 = set(dict1.keys())\n\t\tkeys2 = set(dict2.keys())\n\n\t\tsubmatches = {}\n\n\t\t# dict1 doesn't contain the _items in dict2,\n\t\t# dict1 is less than dict2\n\t\tif keys1 < keys2:\n\t\t\treturn -1\n\n\t\t# Their are either equal or their is more\n\t\t# keys in dict1 than dict2, now we check elements\n\t\tfor k in dict2:\n\t\t\tr = dict2[k]\n\t\t\tm = dict1[k]\n\n\t\t\t# empty fields are a no-match situation\n\t\t\tif m == None:\n\t\t\t\treturn (-1, submatches)\n\n\t\t\t# Normalize bools to lower case strings\n\t\t\tm = str(m).lower() if isinstance(m, bool) else m\n\n\t\t\t# Normalize search set arguments to a list\n\t\t\trl = r if isinstance(r, list) else [ r ]\n\n\t\t\tfound = 0\n\t\t\tfor r in rl:\n\n\t\t\t\tif isinstance(r, bool):\n\t\t\t\t\tr = str(r).lower()\n\t\t\t\telif not lazy_regex:\n\t\t\t\t\t# Greedify the search if not lazy\n\t\t\t\t\tr = '.*' + r + '.*'\n\n\t\t\t\t# Anchor the regex\n\t\t\t\tr = '^' + r + '$'\n\t\t\t\tpattern = re.compile(r)\n\n\t\t\t\t# Normalize everything to list\n\t\t\t\t# lists are presumed to be list of strings\n\t\t\t\tm = m if isinstance(m, list) else [ m ]\n\n\t\t\t\tfor x in m:\n\t\t\t\t\tq = x\n\n\t\t\t\t\t# args is weird since we dont append tuple\n\t\t\t\t\tif not isinstance(x, tuple):\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tq = str(q[0]).lower() if isinstance(q[0], bool) else q[0]\n\n\t\t\t\t\t# Do not attempt to search on a key when the service has not set it\n\t\t\t\t\t# and the default is '(None, -1)'\n\t\t\t\t\tif not q:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tresult = pattern.match(q)\n\n\t\t\t\t\tif result:\n\t\t\t\t\t\tfound = found + 1\n\t\t\t\t\t\tif k not in submatches:\n\t\t\t\t\t\t\tsubmatches[k] = []\n\t\t\t\t\t\tsubmatches[k].append(x)\n\n\t\t\tif found < len(rl):\n\t\t\t\t# no match\n\t\t\t\treturn (-1, submatches)\n\n\t\t# If dict1 had more keys, then its a super set\n\t\t# of dict2, else equal\n\t\tx = 0 if len(keys1) == len(keys2) else 1\n\t\tx = (x, submatches)\n\t\treturn x\n\n\t@staticmethod\n\tdef _join(parent, self):\n\t\tx = dict(parent)\n\t\tx.update(self)\n\t\treturn x\n\nclass OnSection(Section):\n\n\t_KW_COMMAND = 'command'\n\n\t_keywords = Section._join(\n\t\t\tSection._keywords,\n\t\t\t{ _KW_COMMAND : [] })\n\n\tdef __init__(self, *args, **kwargs):\n\t\tkwargs = dict(kwargs)\n\t\tkwargs['keywords'] = OnSection._keywords\n\t\tsuper(OnSection, self).__init__(*args, **kwargs)\n\t\tself._option_strip.append(OnSection._KW_COMMAND)\n\n\tdef push(self, line, lineno):\n\t\tself._option_map[OnSection._KW_COMMAND].append((line, lineno))\n\n\t@staticmethod\n\tdef get_keywords():\n\t\treturn OnSection.get_keymap().keys()\n\n\t@staticmethod\n\tdef get_keymap():\n\t\treturn OnSection._keywords\n\nclass ServiceSection(Section):\n\n\t_KW_CONSOLE = 'console'\n\t_KW_CRITICAL = 'critical'\n\t_KW_DISABLED = 'disabled'\n\t_KW_SET_ENV = 'set_env'\n\t_KW_SOCKET = 'socket'\n\t_KW_USER = 'user'\n\t_KW_GROUP = 'group'\n\t_KW_SECLABEL = 'seclabel'\n\t_KW_ONESHOT = 
'oneshot'\n\t_KW_CLASS = 'class'\n\t_KW_IOPRIO = 'ioprio'\n\t_KW_ONRESTART = 'onrestart'\n\t_KW_WRITEPID = 'writepid'\n\t_KW_KEYCODES = 'keycodes'\n\n\t_keywords = (Section._join\n\t(\n\t\tSection._keywords,\n\t\t{\n\t\t\t_KW_CONSOLE : (False, -1),\n\t\t\t_KW_CRITICAL : (False, -1),\n\t\t\t_KW_DISABLED : (False, -1),\n\t\t\t_KW_SET_ENV : [],\n\t\t\t_KW_SOCKET : [],\n\t\t\t_KW_USER : ('root', -1),\n\t\t\t_KW_GROUP\t : [ ('root', -1) ],\n\t\t\t_KW_SECLABEL : (None, -1),\n\t\t\t_KW_ONESHOT : (False, -1),\n\t\t\t_KW_CLASS\t : ('default', -1),\n\t\t\t_KW_IOPRIO : (None, -1),\n\t\t\t_KW_ONRESTART: [],\n\t\t\t_KW_WRITEPID : [],\n\t\t\t_KW_KEYCODES : [],\n\t\t}\n\t))\n\n\tdef __init__(self, *args, **kwargs):\n\t\tkwargs = dict(kwargs)\n\t\tkwargs['keywords'] = ServiceSection._keywords\n\t\tsuper(ServiceSection, self).__init__(*args, **kwargs)\n\t\tself._group_cleared = False\n\n\tdef push(self, line, lineno):\n\n\t\tchunks = line.split()\n\t\tkeyword = chunks[0]\n\n\t\targs = ' '.join(chunks[1:])\n\n\t\tif keyword not in self._option_map:\n\t\t\traise Exception('Invalid service option: ' + keyword)\n\n\t\tkw = self._option_map[keyword]\n\t\tif isinstance(kw, tuple):\n\t\t\tkw = kw[0]\n\n\t\tif kw == None or isinstance(kw, str):\n\t\t\tself._option_map[keyword] = (args, lineno)\n\n\t\telif isinstance(kw, bool):\n\t\t\tself._option_map[keyword] = (True, lineno)\n\n\t\telif isinstance(kw, list):\n\t\t\t# clear out root on group if something else comes in.\n\t\t\ta = keyword == 'group'\n\t\t\tb = len(kw) == 1\n\t\t\tif not self._group_cleared and a and b and kw[0][0] == 'root':\n\t\t\t\tkw = self._option_map[keyword] = []\n\t\t\t\tself._group_cleared = True\n\n\t\t\tkw.append((args, lineno))\n\n\t\telse:\n\t\t\traise Exception('Unknown instance type: ' + kw)\n\n\t@staticmethod\n\tdef get_keywords():\n\t\treturn ServiceSection.get_keymap().keys()\n\n\t@staticmethod\n\tdef get_keymap():\n\t\treturn ServiceSection._keywords\n\nclass InitParser(object):\n\n\tON = 'on'\n\tSERVICE = 'service'\n\tIMPORT = 'import'\n\n\t_section_map = {\n\t\tON : OnSection,\n\t\tSERVICE : ServiceSection,\n\t\tIMPORT : Section\n\t}\n\n\tdef __init__(self, files):\n\t\tself._files = files\n\t\tself._items = {}\n\n\t\tfor k in InitParser._section_map:\n\t\t\tself._items[k] = []\n\n\tdef parse(self):\n\t\tfor p in self._files:\n\t\t\tself._handle_file(p)\n\n\tdef _handle_file(self, path):\n\t\twith open(path) as f:\n\t\t\tcurrent_section = None\n\t\t\tlineno = 0\n\t\t\tline = ''\n\t\t\tfor l in f:\n\t\t\t\tl = l.strip()\n\t\t\t\tlineno = lineno + 1\n\n\t\t\t\t# Skip empty (whitespace only) lines\n\t\t\t\tif len(l) == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Skip comments\n\t\t\t\tif l.startswith('#'):\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Ignore pystache conditional lines\n\t\t\t\tif l.startswith('{{'):\n\t\t\t\t\tcontinue\n\n\t\t\t\t# handle line folding\n\t\t\t\tif l.endswith('\\\\'):\n\t\t\t\t\tline += l + ' '\n\t\t\t\t\tcontinue\n\n\t\t\t\t# process the complete line\n\t\t\t\tline += l\n\n\t\t\t\tchunks = line.split()\n\t\t\t\tsection_name = chunks[0]\n\n\t\t\t\t# is the keyword a _name?\n\t\t\t\tif section_name in InitParser._section_map:\n\t\t\t\t\tif current_section != None:\n\t\t\t\t\t\tself._items[current_section.get_name()].append(current_section)\n\n\t\t\t\t\targs = ' '.join(chunks[1:])\n\t\t\t\t\tcurrent_section = self._section_factory(chunks[0], args, path, lineno)\n\n\t\t\t\t# its not a valid section name but were parsing section options\n\t\t\t\telif current_section != None:\n\t\t\t\t\tcurrent_section.push(line, 
lineno)\n\n\t\t\t\t# clear line, repeat\n\t\t\t\tline = ''\n\n\t\t\t# when the file ends, we need to push the last section\n\t\t\t# being parsed if set (ie dont push on blank file)\n\t\t\tif current_section != None:\n\t\t\t\tself._items[current_section.get_name()].append(current_section)\n\n\tdef _section_factory(self, section_name, section_args, path, lineno):\n\n\t\tif section_name not in self._section_map:\n\t\t\traise Exception('Error in name ' + section_name + ' line: ' + str(lineno))\n\n\t\treturn self._section_map[section_name](section_name, section_args, path, lineno)\n\n\tdef search(self, section_name, search, lazy_regex=False):\n\n\t\tfound = []\n\t\tsection = self._items[section_name]\n\n\t\tfor x in section:\n\t\t\tm = x.match(search, lazy_regex)\n\t\t\tif m:\n\t\t\t\tfound.append(m)\n\t\treturn found\n\n\tdef write(self, filep=sys.stdout, lineno=False):\n\n\t\tthings = []\n\t\tfor section_name in InitParser._section_map.keys():\n\t\t\tsection = self._items[section_name]\n\t\t\tfor x in section:\n\t\t\t\tthings.append(x)\n\n\t\tthings.sort(key=lambda sec: sec.get_lineno())\n\n\t\tfor x in things:\n\t\t\tx.write(filep=filep, lineno=lineno)\n\t\t\tfilep.write('\\n')\n\n\t@staticmethod\n\tdef get_section(name):\n\t\treturn InitParser._section_map[name]\n\n\t@staticmethod\n\tdef get_sectons():\n\t\treturn InitParser._section_map\n\n\t@staticmethod\n\tdef _merge_dicts(l):\n\t\tr = {}\n\t\tfor d in l:\n\t\t\tr.update(d)\n\t\treturn r\n\nclass Test(object):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself._name = 'unnamed' if 'name' not in kwargs else kwargs['name']\n\t\tself._section = kwargs['section']\n\t\tself._searches = []\n\t\tself._exceptions = []\n\t\tself._violators = None\n\n\t\tself._current = None\n\n\tdef start_search(self, *args, **kwargs):\n\t\tself._current = dict({ 'section' : self._section })\n\n\tdef start_exception(self, *args, **kwargs):\n\t\tself._current = dict()\n\n\tdef end_search(self):\n\t\tself._searches.append(self._current)\n\t\tself._current = None\n\n\tdef end_exception(self):\n\t\t#self._current['section'] = self._section\n\t\tself._exceptions.append(self._current)\n\t\tself._current = None\n\n\tdef append_keyword(self, search):\n\t\tfor k, v in search.iteritems():\n\t\t\tif k not in self._current:\n\t\t\t\tself._current[k] = []\n\t\t\tself._current[k].append(v)\n\n\tdef get_exceptions(self):\n\t\treturn self._exceptions\n\n\tdef get_searches(self):\n\t\treturn self._searches\n\n\tdef write(self, filep=sys.stdout, lineno=False):\n\n\t\tfilep.write('test: ' + self._name + '\\n')\n\t\tfor x in self._searches:\n\t\t\tfilep.write('\\tsearch: ' + str(x) + '\\n')\n\n\t\tfor x in self._exceptions:\n\t\t\tfilep.write('\\texcept: ' + str(x) + '\\n')\n\n\tdef set_violators(self, violators):\n\t\tself._violators = violators\n\n\tdef get_violators(self):\n\t\treturn self._violators\n\n\tdef getName(self):\n\t\treturn self._name\n\nclass AssertParser(xml.sax.ContentHandler):\n\n\t\tdef __init__(self, *args, **kwargs):\n\t\t\txml.sax.ContentHandler.__init__(self, *args, **kwargs)\n\n\t\t\tself._tests = []\n\t\t\tself._current = None\n\n\t\tdef startElement(self, name, attrs):\n\n\t\t\t\t# Make a straight python dict from attrs\n\t\t\t\tattrs = dict(attrs)\n\n\t\t\t\tif name == 'test':\n\t\t\t\t\tself._current = Test(**attrs)\n\n\t\t\t\telif name == 'search':\n\t\t\t\t\tif 'lazy' not in attrs:\n\t\t\t\t\t\tattrs['lazy'] = True\n\n\t\t\t\t\tself._current.start_search(**attrs)\n\n\t\t\t\telif name == 
'keyword':\n\t\t\t\t\tself._current.append_keyword(attrs)\n\n\t\t\t\telif name == 'except':\n\t\t\t\t\tself._current.start_exception(attrs)\n\n\t\t\t\telif name == 'suite':\n\t\t\t\t\tpass\n\n\t\t\t\telse:\n\t\t\t\t\traise Exception('Unknown Keyword: ' + name)\n\n\t\tdef endElement(self, name):\n\t\t\t\tif name == 'test':\n\t\t\t\t\tself._tests.append(self._current)\n\t\t\t\t\tself._current = None\n\n\t\t\t\telif name == 'search':\n\t\t\t\t\tself._current.end_search()\n\n\t\t\t\telif name == 'except':\n\t\t\t\t\tself._current.end_exception()\n\n\t\tdef __iter__(self):\n\t\t\treturn iter(self._tests)\n\nclass commandlet(object):\n\n\t_commandlets = {}\n\n\tdef __init__(self, cmd):\n\t\tself._cmd = cmd\n\n\t\tif cmd in commandlet._commandlets:\n\t\t\traise Exception('Duplicate command name' + cmd)\n\n\t\tcommandlet._commandlets[cmd] = None\n\n\tdef __call__(self, cls):\n\t\tcommandlet._commandlets[self._cmd] = cls\n\t\treturn cls\n\n\t@staticmethod\n\tdef get():\n\t\treturn commandlet._commandlets\n\n\t@staticmethod\n\tdef set(cmdlets):\n\t\tcommandlet._commandlets = cmdlets\n\n@commandlet(\"print\")\nclass PrintCommand(object):\n\t'''\n\tDumps the contents of the init.rc file to stdout\n\t'''\n\n\tdef generate_options(self, group_parser):\n\t\tgroup_parser.add_argument('--lineno', action='store_true', help='Dump line numbers with keywords')\n\n\tdef __call__(self, init_parser, args):\n\t\tinit_parser.write(lineno=args['lineno'])\n\n@commandlet(\"search\")\nclass SearchCommand(object):\n\t'''\n\tSearches the init.rc for the specified section for a specified keyword regex.\n\t'''\n\n\tdef __init__(self):\n\t\tself._opts = None\n\n\tdef generate_options(self, group_parser):\n\n\t\toptions = self._gen_opts()\n\n\t\tfor opt in options:\n\t\t\targs = opt[0]\n\t\t\targs = [ args ] if isinstance(args, str) else args\n\t\t\tkwargs = opt[1] if len(opt) == 2 else {}\n\t\t\tgroup_parser.add_argument(*args, **kwargs)\n\n\tdef __call__(self, init_parser, args):\n\n\t\t# Get the option map and filter it for the selected\n\t\t# section\n\n\t\tsection_name = args['section']\n\n\t\tlazy_search = False\n\t\tif 'lazy' in args:\n\t\t\tlazy_search = args['lazy']\n\t\t\tdel args['lazy']\n\n\t\ttidy = False\n\t\tif 'tidy' in args:\n\t\t\ttidy = args['tidy']\n\t\t\tdel args['tidy']\n\n\t\tlineno = False\n\t\tif 'lineno' in args:\n\t\t\tlineno = args['lineno']\n\t\t\tdel args['lineno']\n\n\t\tdel args['section']\n\n\t\t# This option is not exposed externally to the commandline\n\t\tissilent = False\n\t\tif 'silent' in args:\n\t\t\tissilent = args['silent']\n\t\t\tdel args['silent']\n\n\t\tsection = InitParser.get_section(section_name)\n\n\t\t# validate all 'set' options are valid per section\n\t\t# If they are not set or are False (store_true action)\n\t\t# we remove them. 
This would be simpler if argparse supported\n\t\t# subsub parsers.\n\t\td = { k : v for k, v in args.items() if v != None }\n\t\tunknown = set(d.keys())\n\t\tvalid = set(section.get_keywords())\n\t\tinvalid = unknown - valid\n\n\t\tif len(invalid) > 0:\n\t\t\traise Exception(\"Invalid arguments found: \" + str(invalid))\n\n\t\t# A list of match objects\n\t\tfound = init_parser.search(section_name, d, lazy_search)\n\n\t\tif issilent:\n\t\t\treturn found\n\n\t\tfor m in found:\n\t\t\tm.write(lineno=lineno, tidy=tidy)\n\t\t\tsys.stdout.write('\\n')\n\n\tdef _gen_opts(self):\n\t\tif self._opts != None:\n\t\t\treturn self._opts\n\n\t\topts = []\n\t\tsections = InitParser.get_sectons()\n\t\ts = 'Searches a section given a section name {' + (','.join(sections.keys()) + '}')\n\t\topts.append(('--section', { 'help' : s, 'required' : True }))\n\t\topts.append(('--lazy', { 'action' : 'store_true', 'help' : 'The default is a greedy search, set this to force lazy searches.'}))\n\t\topts.append(('--tidy', { 'action' : 'store_true', 'help' : 'Set this flag to only print matching keywords for the section'}))\n\t\topts.append(('--lineno', { 'action' : 'store_true', 'help' : 'Print line numbers on matches'}))\n\n\t\tseen = {}\n\t\tfor n in sections:\n\t\t\ts = sections[n]\n\t\t\tfor k, t in s.get_keymap().iteritems():\n\t\t\t\tif k not in seen:\n\t\t\t\t\tseen[k] = True\n\n\t\t\t\t\th = 'argument is a valid regex. Multiple specifications of the option result in the last option specified used.'\n\t\t\t\t\tif isinstance(t, tuple):\n\t\t\t\t\t\tt = t[0]\n\n\t\t\t\t\tif isinstance(t, list):\n\t\t\t\t\t\th = 'argument is a valid regex. Multiple specifications of the option result in the logical and of all specified options.'\n\t\t\t\t\t\topts.append(('--' + k, { 'help' : 'Section: ' + k + '. ' + h, 'action' : 'append' }))\n\n\t\t\t\t\telif isinstance(t, bool):\n\t\t\t\t\t\th = 'true if specified. Multiple specifications of the option result in the last option specified used.'\n\t\t\t\t\t\topts.append(('--' + k , { 'help' : 'Section: ' + k + '. ' + h, 'action' : 'store_const', 'const' : True, 'dest' : k }))\n\t\t\t\t\t\topts.append(('--not' + k , { 'help' : 'Section: ' + k + '. ' + h, 'action' : 'store_const', 'const' : False, 'dest' : k}))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\topts.append(('--' + k , { 'help' : 'Section: ' + k + '. 
' + h, 'action' : 'store', 'dest' : k }))\n\n\t\tself._opts = opts\n\t\treturn opts\n\n@commandlet(\"verify\")\nclass VerifyCommand(object):\n\t'''\n\tVerifies the contents of the init.rc against a file of assertions and white-list exceptions\n\t'''\n\n\tdef generate_options(self, group_parser):\n\t\tgroup_parser.add_argument('--assert', help='Verifies an init.rc file against a list of rules', action='append', required=True)\n\t\tgroup_parser.add_argument('--gen', help='Generate a list of exceptions for tests.', action='store_true')\n\n\tdef __call__(self, init_parser, args):\n\t\tverifier = AssertParser()\n\t\tparser = xml.sax.make_parser()\n\t\tparser.setContentHandler(verifier)\n\n\t\tfor p in args['assert']:\n\t\t\twith open(p, 'r') as f:\n\t\t\t\tparser.parse(f)\n\n\t\tgenmode = False\n\t\tif 'gen' in args:\n\t\t\tgenmode = args['gen']\n\t\t\tdel args['gen']\n\n\t\tself._init_parser = init_parser\n\n\t\tfailed_tests = []\n\t\t# find them all!\n\t\tfor t in verifier:\n\t\t\te = t.get_exceptions()\n\t\t\ts = t.get_searches()\n\t\t\tv = self._violations_search(s, e)\n\t\t\tif len(v) > 0:\n\t\t\t\tt.set_violators(v)\n\t\t\t\tfailed_tests.append(t)\n\n\t\t# nothing failed/reportable\n\t\tif len(failed_tests) == 0:\n\t\t\treturn\n\n\t\tif not genmode:\n\t\t\tVerifyCommand._print(failed_tests)\n\t\t\tsys.exit(len(failed_tests))\n\n\t\telse:\n\t\t\tVerifyCommand._gen(failed_tests)\n\n\t@staticmethod\n\tdef _gen(failed_tests):\n\n\t\tkws = \" \\n\"\n\n\t\tfor t in failed_tests:\n\t\t\tsys.stderr.write('Failed test(' + t.getName() + '):\\n')\n\t\t\tfor violator in t.get_violators():\n\n\t\t\t\t# We print args + keyword hoping to avoid duplicate matches, but perhaps its best\n\t\t\t\t# to print the whole section here.\n\t\t\t\tsys.stderr.write(' \\n')\n\t\t\t\tsys.stderr.write(kws % ('args', violator.get_section().get_args()[0]))\n\n\t\t\t\tfor k, v in violator.get_sub_matches().iteritems():\n\t\t\t\t\tfor x in v:\n\t\t\t\t\t\tsys.stderr.write(kws %(k, x[0]))\n\n\t\t\t\tsys.stderr.write(' \\n')\n\t@staticmethod\n\tdef _print(failed_tests):\n\t\tfor t in failed_tests:\n\t\t\tsys.stderr.write('Failed test(' + t.getName() + '):\\n')\n\t\t\tfor match in t.get_violators():\n\t\t\t\tsubmatches = match.get_sub_matches()\n\t\t\t\tsys.stderr.write(match.get_section().get_header() + '\\n')\n\t\t\t\tfor k,v in submatches.iteritems():\n\t\t\t\t\tfor x in v:\n\t\t\t\t\t\tsys.stderr.write('\\t' + k + '(' + str(x[1]) + ') : ' + x[0])\n\t\t\t\t\t\tsys.stderr.write('\\n')\n\n\tdef _violations_search(self, search_args, exception_args):\n\n\t\t\t# we use a set to de-duplicate the results from\n\t\t\t# multiple searches\n\t\t\t# We are building sets of hash objects, this will call\n\t\t\t# the Match.__hash__() method.\n\t\t\tfound = set()\n\t\t\texcepts = set()\n\n\t\t\tfor s in search_args:\n\t\t\t\t# Set the internal search flag to silent so we get\n\t\t\t\t# a list back and don't print\n\t\t\t\ts['silent'] = True\n\t\t\t\tf = self._search(s)\n\t\t\t\tif f != None:\n\t\t\t\t\tfor x in f:\n\t\t\t\t\t\tif not self.filter(exception_args, x):\n\t\t\t\t\t\t\tfound.add(x)\n\n\t\t\treturn found - excepts\n\n\tdef filter(self, exception_args, found):\n\n\t\tfor e in exception_args:\n\t\t\tm =found.match(e)\n\t\t\tif m:\n\t\t\t\treturn found.filter(m)\n\n\tdef _search(self, args):\n\t\treturn commandlet.get()['search'](self._init_parser, args)\n\ndef main():\n\n\topt_parser = argparse.ArgumentParser(description='A tool for intelligent searching of Android init.rc files')\n\n\tsubparser = 
opt_parser.add_subparsers(help='commands')\n\n\tcommandlets = commandlet.get()\n\ttmp = {}\n\n\t# for each commandlet, instantiate and set up their options\n\tfor n, c in commandlets.iteritems():\n\t\tp = subparser.add_parser(n, help=c.__doc__)\n\t\tp.add_argument('files', help='The init.rc file(s) to search', nargs='+')\n\t\tp.set_defaults(which=n)\n\t\t# Instantiate\n\t\tc = c()\n\t\ttmp[n] = c\n\n\t\topt_gen = getattr(c, 'generate_options', None)\n\t\tif callable(opt_gen):\n\t\t\t# get group help\n\t\t\tg = p.add_argument_group(n + ' options')\n\t\t\t# get args\n\t\t\tc.generate_options(g)\n\n\t# reassign constructed commandlets\n\tcommandlet.set(tmp)\n\n\targs = opt_parser.parse_args()\n\n\tinit_parser = InitParser(args.files)\n\tinit_parser.parse()\n\n\td = vars(args)\n\twhich = d['which']\n\n\t# drop options we added to not confuse commandlets\n\tdel d['files']\n\tdel d['which']\n\n\tcommandlet.get()[which](init_parser, d)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"isearch.py","file_name":"isearch.py","file_ext":"py","file_size_in_byte":22444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"338512358","text":"x = \"东京(成田)\"\r\n\r\ng = {\r\n\r\n}\r\n\r\ns = open(\"arrival_table.tsv\", encoding=\"utf-8\")\r\nfor l in s.readlines():\r\n number, depart_time, arrive_time, depart_airport, arrive_airport = l.strip().split(\"\\t\")\r\n \r\n if depart_airport == x:\r\n if depart_time not in g:\r\n g[depart_time] = {}\r\n if \"depart\" not in g[depart_time]:\r\n g[depart_time][\"depart\"] = []\r\n g[depart_time][\"depart\"].append(number)\r\n elif arrive_airport == x:\r\n if arrive_time not in g:\r\n g[arrive_time] = {}\r\n if \"arrive\" not in g[arrive_time]:\r\n g[arrive_time][\"arrive\"] = []\r\n g[arrive_time][\"arrive\"].append(number)\r\n\r\nimport json\r\njson.dump(g, open(\"gate_usage.json\", \"w\", encoding=\"utf-8\"), indent=4, sort_keys=True, ensure_ascii=False)","sub_path":"rail_data/gate_arrange.py","file_name":"gate_arrange.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"294344130","text":"N = int(input())\r\nxmin = ymin = float('inf')\r\nxmax = ymax = 0\r\np = []\r\nfor i in range(N):\r\n x,y = map(int, input().split())\r\n if x > y : \r\n x,y= y,x\r\n p.append((x, y))\r\n if x < xmin:\r\n xmin = x\r\n elif x > xmax:\r\n xmax = x\r\n if y < ymin:\r\n ymin = y\r\n elif y > ymax:\r\n ymax = y\r\nret = (ymax-ymin) * (xmax-xmin)\r\n\r\n\r\np.sort()\r\ndx = p[-1][0] - p[0][0]\r\nymin = p[0][0]\r\ntymin = float('inf')\r\nfor i in range(N-1):\r\n # print(i, dx, (xmax, xmin), end=' ==> ')\r\n tymin = min(tymin, p[i][1])\r\n xmax = max(xmax, p[i][1])\r\n xmin = min(tymin, p[i+1][0])\r\n dx = min(dx, xmax - xmin)\r\n if tymin < p[i+1][0]:\r\n break\r\n # print(i, dx, (xmax, xmin))\r\n\r\nprint(min(ret, (ymax-ymin) * dx))","sub_path":"Source Codes/AtCoder/arc073/C/1254659.py","file_name":"1254659.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"384590651","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport warnings\n\nfrom lxml import etree\nfrom typing import Union, List, Optional, Any\n\nfrom openlego.utils.general_utils import parse_string\nfrom openlego.utils.xml_utils import value_to_xml\n\n\ndir_path = 
os.path.dirname(os.path.abspath(__file__))\nxsd_file_path = os.path.join(dir_path, 'partials.xsd')\nxsi_schema_location = 'file:///' + xsd_file_path\n\nschema = etree.XMLSchema(file=xsi_schema_location)\nparser = etree.XMLParser(schema=schema)\n\n\nclass Partials(object):\n\n def __init__(self, file=None):\n # type: (Optional[str]) -> None\n \"\"\"Initialize `Partials` object.\n\n Parameters\n ----------\n file : str, optional\n Path to the partials XML file to initialize from.\n \"\"\"\n super(Partials, self).__init__()\n\n if file is None:\n self._tree = etree.ElementTree(etree.Element('partials'), parser=parser) # type: etree._ElementTree\n else:\n self._tree = etree.parse(file, parser)\n\n @property\n def _elem_root(self):\n # type: () -> etree._Element\n \"\"\"Root `_Element` of the partials XML file.\"\"\"\n return self._tree.getroot()\n\n def get_partials(self, src=None):\n # type: (Optional[src]) -> dict\n \"\"\"Get a dictionary with the partials stored in the XML file.\n\n Parameters\n ----------\n src : str, optional\n Name of the source parameter to get the partials from\n\n Returns\n -------\n dict\n In the form partials['from_param_name']['to_param_name'] = sensitivity_value if no `src` is given, or\n in the form partials['to_param_name'] = sensitivity_value if `src` is given.\n \"\"\"\n partials = dict()\n\n if src is not None:\n elem_param = self._tree.xpath('/partials/parameter[uid=\"{}\"]'.format(src))\n if len(elem_param):\n for elem_partial in elem_param[1]:\n param = elem_partial[0].text\n value = parse_string(elem_partial[1].text)\n partials.update({param: value})\n else:\n for elem_param in self._elem_root:\n uid = elem_param[0].text\n\n if uid not in partials:\n partials.update({uid: dict()})\n\n for elem_partial in elem_param[1]:\n param = elem_partial[0].text\n if len(elem_partial) > 1:\n value = parse_string(elem_partial[1].text)\n else:\n value = 0.\n partials[uid].update({param: value})\n\n return partials\n\n def declare_partials(self, src, tgt, val=None):\n # type: (str, Union[str, List[str]], Optional[Any]) -> None\n \"\"\"Declare a set of partials that is provided.\n\n Parameters\n ----------\n src : str\n Name of the source parameter.\n\n tgt : str or Iterable[str]\n Name(s) of target parameters.\n\n val : any, optional\n Optional value(s) of partials.\n\n Notes\n -----\n If `val` is given and `tgt` is a list, `val` should have the same length as `tgt`.\n \"\"\"\n if not isinstance(tgt, list):\n tgt = [tgt]\n if val is not None:\n val = [val]\n\n elem_root = self._elem_root\n\n x_param = \"/partials/parameter[uid='{}']\".format(src)\n elem_param = self._tree.xpath(x_param)\n\n if not len(elem_param):\n elem_param = etree.SubElement(elem_root, 'parameter')\n elem_param_uid = etree.SubElement(elem_param, 'uid')\n elem_param_uid.text = src\n\n elem_partials = etree.SubElement(elem_param, 'partials')\n else:\n elem_partials = elem_param[0][1]\n\n for i, t in enumerate(tgt):\n x_partial = '/'.join([x_param, \"partials/partial[uid='{}']\"]).format(t)\n elem_partial = self._tree.xpath(x_partial)\n\n if not len(elem_partial):\n elem_partial = etree.SubElement(elem_partials, 'partial')\n elem_param_uid = etree.SubElement(elem_partial, 'uid')\n elem_param_uid.text = t\n else:\n warnings.warn(\n 'Partial from {} to {} is defined more than once. 
Last occurrence take precedence.'\n .format(src, t))\n\n if val is not None:\n elem_value = etree.SubElement(elem_partial, 'value')\n value_to_xml(elem_value, val[i])\n\n def add_partials(self, partials):\n # type: (dict) -> None\n \"\"\"Add a set of partials to the XML file.\n\n Parameters\n ----------\n partials : dict\n Dictionary of the partials.\n \"\"\"\n for param_uid, param in partials.items():\n self.declare_partials(param_uid, param.keys(), param.values())\n\n def write(self, file):\n # type: (str) -> None\n \"\"\"Write the current state of the class to a partials XML file.\n\n Parameters\n ----------\n file : str\n Path of the file to write to.\n \"\"\"\n if not schema.validate(self._tree):\n raise RuntimeError('Something is wrong.. XML is not a valid partials file.')\n\n self._tree.write(file, encoding='utf-8', pretty_print=True, xml_declaration=True)\n\n def get_string(self):\n # type: () -> str\n \"\"\"Return the current state of the class as a partials XML string.\n\n Returns\n -------\n str\n String representation of a partials XML file.\n \"\"\"\n if not schema.validate(self._tree):\n raise RuntimeError('Something is wrong.. XML is not a valid partials file.')\n\n return etree.tostring(self._tree, encoding='utf-8', pretty_print=True, xml_declaration=True)","sub_path":"openlego/partials/partials.py","file_name":"partials.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"376790314","text":"#!/usr/bin/python3\nfrom MCP23017 import MCP23017\nimport logging\nlogging.basicConfig()\nlogging.getLogger( \"MCP23017\" ).setLevel( logging.DEBUG )\n\ndef handler(string):\n print(string)\n\nchips = [MCP23017(0x20, {'A': 17}),\n MCP23017(0x21, {'A': 27})]\n\nfor chip in chips:\n for i in range(0x1B):\n byte = chip.read(i)\n","sub_path":"raspi/src/read_chip.py","file_name":"read_chip.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"193518814","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 27 20:45:11 2018\n\n@author: anthonypamart\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.measure import label,regionprops\nfrom skimage import measure\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.spatial import distance\n\n\n##Fonction pour faire des plot en 3D\ndef plot_3d(image, threshold=-300):\n \n # Position the scan upright, \n # so the head of the patient would be at the top facing the camera\n p = image.transpose(2,1,0)\n p = p[:,:,::-1]\n \n verts, faces, x, y = measure.marching_cubes(p, threshold)\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\n mesh = Poly3DCollection(verts[faces], alpha=0.7)\n face_color = [0.45, 0.45, 0.75]\n mesh.set_facecolor(face_color)\n ax.add_collection3d(mesh)\n\n ax.set_xlim(0, p.shape[0])\n ax.set_ylim(0, p.shape[1])\n ax.set_zlim(0, p.shape[2])\n\n plt.show()\n \n\n##Applique le nodule_mask au lung_img et renvoie donc le nodule_segmented\ndef get_nodule_segmented(nodule_mask,lung_img):\n nodule_segmented = lung_img.copy()\n for i in range(0,len(nodule_segmented)):\n for j in range(0,len(nodule_segmented)):\n if(nodule_mask[i][j]==0.0):\n nodule_segmented[i][j]=0.0\n return nodule_segmented\n\n##Prend le nodule mask et renvoie les coordonnées des centres des 
nodules présents et le nb de nodules présents\ndef get_centers_nodules(nodule_mask):\n label_scan = label(nodule_mask)\n center_full = [r.centroid for r in regionprops(label_scan)]\n nb_nodules = len(center_full)\n return center_full, nb_nodules\n\n##Pour récupérer le path des lung_img pour le patient dont on précise l'ID\n##Attention, si l'INPUT FOLDER change, il faut faire varier les indices dans la condition if pour bien toujours de récupérer l'ID du patient à partir de images_path_lung_img[i]\ndef get_patients_path_lung_img(images_path_lung_img, ID='1.3.6.1.4.1.14519.5.2.1.6279.6001.137763212752154081977261297097'):\n patients_path_lung_img = []\n for i in range(0,len(images_path_lung_img)):\n #if(images_path_lung_img[i][67:131]==id_patients[0]):\n if(images_path_lung_img[i][67:131]==ID):\n patients_path_lung_img.append(images_path_lung_img[i])\n patients_path_lung_img.sort()\n return patients_path_lung_img\n\n##Pour récupérer le path des nodule_mask pour le patient dont on précise l'ID\ndef get_patients_path_nodule_mask(images_path_nodule_mask, ID='1.3.6.1.4.1.14519.5.2.1.6279.6001.137763212752154081977261297097'):\n patients_path_nodule_mask = []\n for i in range(0,len(images_path_nodule_mask)):\n if(images_path_nodule_mask[i][67:131]==ID):\n patients_path_nodule_mask.append(images_path_nodule_mask[i])\n patients_path_nodule_mask.sort()\n return patients_path_nodule_mask\n\n\n##On met en entrée le path des nodule_mask du patient et on a en sortie un np array 3D avec le nodule mask (2D)\n##de toutes les slices (ce qui ajoute une 3e dimension)\ndef get_nodule_mask_3D(patients_path_nodule_mask, dim_x=512, dim_y=512):\n nodule_mask_3D = np.zeros((dim_x, dim_y, len(patients_path_nodule_mask)))\n for i in range(0,len(patients_path_nodule_mask)):\n temp = np.load(patients_path_nodule_mask[i]).reshape(512,512)\n \n for k in range(0, temp.shape[0]):\n for j in range(0, temp.shape[1]):\n if(temp[k][j] < 0.1 or i > 500 or j >500):\n temp[k][j] = 0\n else:\n temp[k][j] = 1\n \n nodule_mask_3D[:,:,i] = temp\n \n return nodule_mask_3D\n\n##On met en entrée le path des lung_img du patient et on a en sortie un np array 3D avec la lung_img (2D)\n##de toutes les slices (ce qui ajoute une 3e dimension)\ndef get_lung_img_3D(patients_path_lung_img, dim_x=512, dim_y=512):\n lung_img_3D = np.zeros((dim_x, dim_y, len(patients_path_lung_img)))\n for i in range(0,len(patients_path_lung_img)):\n lung_img_3D[:,:,i] = np.load(patients_path_lung_img[i]) ##Pas ouf de loader à chaque fois ??\n return lung_img_3D\n\n##On met en entrée le path des lung_img du patient et le path des nodule_mask et on a en sortie un np array 3D\n##avec le nodule segmenté (2D) de toutes les slices (ce qui rajoute une 3e dimension)\ndef get_nodule_segmented_3D(patients_path_nodule_mask, patients_path_lung_img, dim_z = 32, dim_x =512, dim_y=512):\n nodule_segmented_3D = np.zeros((dim_x, dim_y, dim_z))\n \n a= min(len(patients_path_lung_img), dim_z)\n for i in range(0,a):\n nodule_segmented_3D[:,:,i] = get_nodule_segmented(get_nodule_mask_3D(patients_path_nodule_mask)[:,:,i],get_lung_img_3D(patients_path_lung_img)[:,:,i])\n return nodule_segmented_3D\n\n##On stock les center_full et les nb_nodules dans des listes center_full_3D et nb_nodules_3D\ndef get_center_nodules_3D(patients_path_nodule_mask):\n center_full_3D = []\n nb_nodules_3D = []\n\n for i in range(0, len(patients_path_nodule_mask)):\n nodule_mask_3D = get_nodule_mask_3D(patients_path_nodule_mask)\n center_full, nb_nodules = get_centers_nodules(nodule_mask_3D[:,:,i])\n 
center_full_3D.append(center_full)\n nb_nodules_3D.append(nb_nodules) \n return center_full_3D, nb_nodules_3D\n\n\n##A partir des paths vers lesquels on trouve lung_img et mask_nodule et de l'ID du patient, on récupère un npdule 3D de taille 32x32x32\ndef npz_to_cnn(images_path_lung_img, images_path_nodule_mask, ID, dimx_nodule = 32, dimy_nodule = 32, dimz_nodule = 32):\n patients_path_lung_img = get_patients_path_lung_img(images_path_lung_img, ID)\n patients_path_nodule_mask = get_patients_path_nodule_mask(images_path_nodule_mask, ID)\n nodule_segmented_3D = get_nodule_segmented_3D(patients_path_nodule_mask, patients_path_lung_img, dimz_nodule)\n center_full_3D, nb_nodules_3D = get_center_nodules_3D(patients_path_nodule_mask)\n a = min(len(center_full_3D), dimz_nodule)\n \n first_nodule_3D = np.zeros((dimx_nodule, dimy_nodule, dimz_nodule)) \n \n #for i in range(0,len(nb_nodules_3D)):\n for i in range(0,a):\n first_nodule_3D[:,:,i] = nodule_segmented_3D[int(center_full_3D[i][-1][0])-dimx_nodule/2:int(center_full_3D[i][-1][0])+dimx_nodule/2,int(center_full_3D[i][-1][1])-dimy_nodule/2:int(center_full_3D[i][-1][1])+dimy_nodule/2,i]\n return first_nodule_3D, center_full_3D\n","sub_path":"Python/Notebook/Nodule_Segmentation.py","file_name":"Nodule_Segmentation.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"107695902","text":"# This networking code has been written in order to simplify the creation \r\n# of your tictactoe games. You are free to use it but you must report if \r\n# you used this code instead of writting your own inside your group report\r\n# and you must leave this header intact.\r\n# David\r\n\r\nimport time, socket, select\r\n\r\nclass Server:\r\n def __init__(self, port=12346):\r\n self.__connections = {}\r\n self.__port = port\r\n\r\n self.__server = socket.socket() # Create a socket object\r\n self.__uid = 0 # uid is a unique number to give to each client\r\n\r\n self.__server.bind(('', self.__port)) # Bind to the port\r\n self.__server.listen(5) # Now wait for client connection.\r\n\r\n def port(self):\r\n \"\"\" returns the port that the server is running on \"\"\"\r\n return self.__port\r\n\r\n def shutdown(self):\r\n \"\"\" make sure you call this before your program exits \"\"\"\r\n\r\n for c, data in self.__connections.items():\r\n data[\"socket\"].close()\r\n\r\n self.__server.shutdown(1)\r\n self.__server.close()\r\n\r\n def num_clients(self):\r\n \"\"\" return the number of clients currently connected \"\"\"\r\n \r\n return len(self.__connections)\r\n\r\n def send_message(self, msg, client=None):\r\n \"\"\" schedule a message to be sent next time poll() is run\r\n if no client is supplied then message will be sent to all connected clients \"\"\"\r\n\r\n if client == None:\r\n for c in self.__connections:\r\n self.__connections[c]['sendbuffer'].append(msg)\r\n\r\n elif client in self.__connections:\r\n self.__connections[client]['sendbuffer'].append(msg)\r\n\r\n else:\r\n raise self.Client('No such client')\r\n\r\n def __list_of_sockets(self):\r\n \"\"\" return a list of all the open client connections \"\"\"\r\n\r\n return [ self.__connections[i]['socket'] for i in self.__connections ]\r\n\r\n def poll(self):\r\n \"\"\" must be called reguarly, accepts new connections, handles dicsonnections, sent scheduled messages and \r\n recieves new messages.\r\n returns 3 lists, messages recieved, new clients and disconnected clients \"\"\"\r\n\r\n connections = 
self.__list_of_sockets()\r\n read, write, error = select.select( connections+[self.__server], connections, connections, 0 )\r\n\r\n messages, connected, disconnected = [], [], []\r\n\r\n # ====== process all the connections that had errors ======\r\n for conn in error:\r\n print( \"error\", conn )\r\n\r\n # ====== process all the connections that we are able to send data to ===\r\n for uid, data in self.__connections.items():\r\n if data['socket'] in write: # if this is a socket that is ready to get some data\r\n while data['sendbuffer'] != []: # while we have some data to send\r\n msg = data['sendbuffer'][0]\r\n\r\n try:\r\n data['socket'].send( \"{}\\n\".format(msg).encode('utf8') )\r\n data['sendbuffer'].pop(0)\r\n except:\r\n break\r\n\r\n # ====== process all the connections that are trying to send us data ===\r\n for conn in read:\r\n if conn is self.__server: # new client connecting\r\n c, addr = conn.accept()\r\n \r\n self.__connections[self.__uid] = {'socket':c, 'address':addr, 'sendbuffer':[], 'recvbuffer':\"\"} # add to list of open self.__connections\r\n connected.append(self.__uid)\r\n self.__uid += 1\r\n\r\n else:\r\n msgbytes = conn.recv(1024)\r\n\r\n for uid, data in self.__connections.items():\r\n if data['socket'] == conn:\r\n if not msgbytes: # treat empty message as a disconnection\r\n disconnected.append( uid )\r\n\r\n else:\r\n \"\"\" for everything else only consider a message complete once a newline character has been recieved \"\"\"\r\n data['recvbuffer'] += msgbytes.decode('utf8')\r\n\r\n msgs = data['recvbuffer'].split('\\n')\r\n for msg in msgs[:-1]:\r\n messages.append( (uid,msg) )\r\n\r\n data['recvbuffer'] = msgs[-1]\r\n\r\n break\r\n\r\n # ====== remove any clients that have disconnected from the connections store ===\r\n for uid in disconnected:\r\n self.__connections[uid][\"socket\"].close()\r\n self.__connections.pop(uid)\r\n\r\n return messages, connected, disconnected\r\n\r\nif __name__ == '__main__':\r\n s = Server()\r\n\r\n try:\r\n print( \"Server running on port {}\".format( s.port() ) )\r\n\r\n while True:\r\n messages, connected, disconnected = s.poll()\r\n\r\n for client, message in messages: \r\n if (message[0:15] == \"SENDING X VALUE\"):\r\n print( \"Client {} sent \\\"{}\\\"\".format( client, message ) )\r\n s.send_message(\"Server received X Value from client \" + str(client) + \" for button \" + message[27:28])\r\n s.poll()\r\n elif (message[0:15] == \"SENDING O VALUE\"):\r\n print( \"Client {} sent \\\"{}\\\"\".format( client, message ) )\r\n s.send_message(\"Server received O Value from client \" + str(client) + \" for button \" + message[27:28])\r\n s.poll()\r\n \r\n for client in connected:\r\n print( \"Client {} connected\".format(client) )\r\n s.send_message( \"Hello client {}\".format(client) )\r\n\r\n for client in disconnected:\r\n print( \"Client {} disconnected\".format(client) )\r\n \r\n time.sleep(1)\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n finally:\r\n print( \"Shutdown\" )\r\n s.shutdown()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"317580616","text":"import datetime\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\n\nfrom spotify_etl import run_spotify_etl\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.datetime(2021, 6, 17),\n 'email': 
['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': datetime.timedelta(minutes=1)\n}\n\ndag = DAG(\n 'spotify_dag',\n default_args=default_args,\n description='SpotifyDAG with ETL',\n schedule_interval=datetime.timedelta(days=1),\n)\n\ndef just_a_function():\n print(\"Service ON\")\n\nrun_etl = PythonOperator(\n task_id='whole_spotify_etl',\n python_callable=run_spotify_etl,\n dag=dag,\n)\n\nrun_etl","sub_path":"api/dags/spotify_dag.py","file_name":"spotify_dag.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"151241712","text":"import argparse\nimport torch\nimport re\nimport cv2\nimport logging\nimport pathlib\nimport traceback\nimport numpy as np\nfrom model.model import FOTSModel\nfrom utils.bbox import Toolbox\n\nlogging.basicConfig(level=logging.DEBUG, format='')\n\n\ndef load_model(model_path, with_gpu, mode):\n logger.info(\"Loading checkpoint: {} ...\".format(model_path))\n checkpoints = torch.load(model_path)\n if not checkpoints:\n raise RuntimeError('No checkpoint found.')\n try:\n state_dict = checkpoints['state_dict']\n except KeyError:\n state_dict = checkpoints\n model = FOTSModel(mode)\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict, strict=False)\n if with_gpu:\n model = model.cuda()\n return model\n\n\ndef main(args:argparse.Namespace):\n model_path = args.model\n input_dir = args.input_dir\n output_dir = args.output_dir\n gt_dir = args.gt_dir\n with_image = True if output_dir else False\n with_gpu = True if torch.cuda.is_available() else False\n\n model = load_model(model_path, with_gpu, {'mode':'recog_only'})\n\n for image_fn in input_dir.glob('*.jpg'):\n gt = gt_dir / 'gt_{}.txt'.format(image_fn.stem)\n bboxes = []\n texts = []\n recognizable_bboxes = []\n recognizable_texts = []\n with gt.open(mode='r', encoding='utf-8') as f:\n for line in f:\n text = line.strip('\\ufeff').strip('\\xef\\xbb\\xbf').strip().split(',')\n x1, y1, x2, y2, x3, y3, x4, y4 = list(map(int, text[:8]))\n bbox = [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]\n transcript = text[8]\n bboxes.append(bbox)\n texts.append(transcript)\n bboxes = np.array(bboxes)\n\n filter_texts = [re.sub('[^0-9a-zA-Z]', '', word) for word in texts]\n text_tags = [True if(text == '###' or text == '') else False for text in filter_texts] # ignore '###'\n for bbox, tag, text in zip(bboxes, text_tags, texts):\n if not tag:\n recognizable_bboxes.append(bbox)\n recognizable_texts.append(text)\n recognizable_bboxes = np.array(recognizable_bboxes)\n try:\n with torch.no_grad():\n im, output_text = Toolbox.recognize(image_fn, model, recognizable_bboxes, with_image, None, with_gpu)\n # for index in range(len(bboxes)):\n # cv2.polylines(im[:, :, ::-1], [bboxes[index].astype(np.int32).reshape((-1, 1, 2))], True,\n # color=(255, 255, 0), thickness=1)\n if output_dir:\n img_path = output_dir / '{}'.format(image_fn.name)\n cv2.imwrite(img_path.as_posix(), im[:, :, ::-1])\n Toolbox.save_bbox_text(img_path, recognizable_bboxes, output_text, output_dir)\n except Exception as e:\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n logger = logging.getLogger()\n\n parser = argparse.ArgumentParser(description='Model eval')\n parser.add_argument('-m', '--model', default=None, type=pathlib.Path, required=True,\n help='path to model')\n parser.add_argument('-o', '--output_dir', default=None, type=pathlib.Path,\n help='output dir for drawn images')\n 
parser.add_argument('-i', '--input_dir', default=None, type=pathlib.Path, required=False,\n help='dir for input images')\n parser.add_argument('-g', '--gt_dir', default=None, type=pathlib.Path, required=False,\n help='dir for input images ground truth')\n args = parser.parse_args()\n main(args)\n\n\n\n\n\n\n\n\n\n","sub_path":"only_recog.py","file_name":"only_recog.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95412095","text":"# coding=utf8\nimport telepot\nimport sys\nfrom datetime import datetime\n\nfrom pluginloader import PluginLoader\n\n\nclass HelpPlugin():\n def __init__(self, commands):\n self.commands = commands\n\n def reply(self, message):\n return '\\n'.join([k + \": \" + v for k,v in self.commands.items()])\n\n\nclass TelegramBot(telepot.Bot):\n restricted = False\n allowedusers = {}\n botinfo = None\n plugins = {}\n useplugins = False\n commands = {}\n\n def __init__(self, config, logger):\n super(TelegramBot, self).__init__(config.get('telegrambot', 'token'))\n self.logger = logger\n\n self.restricted = config.getboolean('telegrambot', 'restrict_contacts')\n self.allowedusers = config.get('telegrambot', 'allowed_contacts').split(',')\n self.callbackusers = config.get('telegrambot', 'callback_contacts').split(',')\n self.botinfo = self.getMe()\n\n self.logger.info('Bot \"' + self.botinfo['first_name'] + '\" initialized. Bot id: ' + str(self.botinfo['id']))\n self.logger.info(\"Listening...\")\n self.useplugins = config.getboolean('telegrambot', 'loadplugins')\n if (self.useplugins):\n pluginloader = PluginLoader(self.logger)\n self.plugins = pluginloader.loadPlugins(self.callback)\n self.logger.info(\"Plugins are loaded\")\n for k,v in self.plugins.iteritems():\n if (k.startswith('/')):\n self.commands[k] = v.getdescription(k)\n self.plugins['/help'] = HelpPlugin(self.commands)\n self.logger.info(\"Help is: \" + str(self.commands))\n\n\n\n def handle(self, msg):\n self.logger.info(\"Incoming message: \" + msg['text'])\n if self.restricted and str(msg['from']['id']) not in self.allowedusers:\n # outcomingmessage = report_unknown_contact()\n # self.sendMessage(msg['chat']['id'], outcomingmessage)\n self.logger.error('Unknown contact: ' + str(msg['from']['first_name']) + ' id: ' + str(msg['from']['id']))\n else:\n command = msg['text'].split(\" \")[0]\n self.logger.debug(\"Command \" + command + \" found:\" + str(command in self.plugins))\n if (self.useplugins and command in self.plugins):\n outcomingmessage = self.plugins[command].reply(msg)\n self.sendMessage(msg['chat']['id'], outcomingmessage)\n\n\n def callback(self, message):\n for user in self.callbackusers:\n self.sendMessage(user, message)\n\n def sendMessage(self, chat_id, text,\n parse_mode=None, disable_web_page_preview=None,\n disable_notification=None, reply_to_message_id=None, reply_markup=None):\n self.logger.info(\"Outcoming message to \" + str(chat_id) + \" : \" + repr(text))\n super(TelegramBot, self).sendMessage(chat_id, text)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"577554589","text":"import networkx as nx\nimport matplotlib.pyplot as plot\n\nA=nx.Graph()\nA.add_edges_from([('R1','Sw1'),('Sw1','Sw2'),\n ('Sw1','Sw3'),('Sw2','Pc2'),('Sw2','Pc3'),\n 
('Sw3','Pc4'),('Sw3','Pc5')])\n\nsw=['R1','Sw1','Sw2','Sw3']\npc=['Pc2','Pc3','Pc4','Pc5']\nswc=[('R1','Sw1'),('Sw1','Sw2'),('Sw1','Sw3')]\npcc=[('Sw2','Pc2'),('Sw2','Pc3'),('Sw3','Pc4'),('Sw3','Pc5')]\nlista=['Sw2','Pc2','Pc3','Sw3','Pc4','Pc5','R1'],['Sw1']\n\nposicion=nx.spring_layout(A, k=1, iterations=300, threshold=0.0001, weight='distans', scale=0.5)\n\nnx.draw_networkx_nodes(A,posicion, node_size=800, node_shape='s', nodelist=sw, node_color='#c9b323')\nnx.draw_networkx_nodes(A,posicion, node_size=800, node_shape='o', nodelist=pc, node_color='#c68282')\nnx.draw_networkx_edges(A,posicion, width=4, edgelist=swc,style='dashed',edge_vmax=1, edge_vmin=1)\nnx.draw_networkx_edges(A,posicion, width=2, edgelist=pcc)\nnx.draw_networkx_labels(A,posicion, font_size=11,font_family='arial')\n\nplot.axis('off')\nplot.savefig(\"Graf1_spring_layout.eps\")\nplot.show(A)\n","sub_path":"Tarea_2_Retificada/Grafo1.py","file_name":"Grafo1.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"430372780","text":"import numpy as np\nimport os\n\nfrom astropy import table\nfrom astropy.table import Table, Column\nfrom astropy.io import ascii\nfrom astropy.time import Time\n\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord,EarthLocation\n\nDEIMOS_DROPBOX = '/Users/mgeha/Dropbox/DEIMOS/'\n\n\n######################################################\n# HELIOCENTRIC CORRECTION PER MASK\ndef zspec_helio(zspec):\n i=0\n t = Time(zspec['MJD'],format='mjd')\n m=zspec['RA'] != 0\n r = np.median(zspec['RA'][m])\n d = np.median(zspec['DEC'][m])\n sc = SkyCoord(r,d, unit=(u.deg,u.deg))\n\n keck = EarthLocation.from_geodetic(lat=19.8283*u.deg, lon=-155.4783*u.deg, height=4160*u.m)\n heliocorr = sc.radial_velocity_correction('heliocentric',obstime=t, location=keck)\n\n vhelio = heliocorr.to(u.km/u.s) * (u.s/u.km)\n\n return vhelio\n\n######################################################\n# READ DEIMOS OBJECT GOOGLE DOCUMENT\ndef deimos_google():\n key = '1V2aVg1QghpQ70Lms40zNUjcCrycBF2bjgs-mrp6ojI8'\n gid=1906496323\n url = 'https://docs.google.com/spreadsheets/d/{0}/export?format=csv&gid={1}'.format(key, gid)\n masklist = Table.read(url, format='csv')\n\n gid =0\n url = 'https://docs.google.com/spreadsheets/d/{0}/export?format=csv&gid={1}'.format(key, gid)\n objlist = ascii.read(url, format='csv')\n \n return objlist,masklist\n\n######################################################\n# READ DEIMOS OBJECT GOOGLE DOCUMENT\ndef deimos_google_M31():\n key = '1V2aVg1QghpQ70Lms40zNUjcCrycBF2bjgs-mrp6ojI8'\n gid=237917397\n url = 'https://docs.google.com/spreadsheets/d/{0}/export?format=csv&gid={1}'.format(key, gid)\n masklist = Table.read(url, format='csv')\n\n gid =180140955\n url = 'https://docs.google.com/spreadsheets/d/{0}/export?format=csv&gid={1}'.format(key, gid)\n objlist = ascii.read(url, format='csv')\n \n return objlist,masklist\n\n\n######################################################\n# FIX BAD RA/DEC VALUES IN ZSPEC FILES\ndef fix_zspec(zspec_file):\n \n\n z=Table.read(zspec_file)\n #print(z['DEC'])\n dnew = []\n for dec in z['DEC']:\n d=dec.split(':')\n print(dec)\n if (d[0].strip() != '0.0'):\n if (int(d[1]) > 40):\n d[0]='-31'\n if (int(d[1]) < 30):\n d[0]='-32'\n dnew.append(d[0]+':'+d[1]+':'+d[2])\n\n else:\n dnew.append(dec)\n \n z['DEC'] = dnew\n\n z.write('new.fits',format='fits')\n \n\n######################################################\n# FILL A COLUMN\ndef filled_column(name, 
fill_value, size):\n \"\"\"\n Tool to allow for large strings\n \"\"\"\n return Column([fill_value]*int(size), name)\n\n\n######################################################\ndef calc_na_EW(spec,wvl,ivar,z,plot=False):\n\n wave = wvl / (1.0+z) \n\n # 21AA window centered on 8190AA\n wline = [8178., 8200.5]\n wred = [8203., 8230.]\n wblue = [8155., 8175.]\n waver = (wred[0] + wred[1])/2.\n waveb = (wblue[0] + wblue[1])/2.\n\n mnaI = (wave > wline[0]) & (wave < wline[1])\n mred = (wave > wred[0]) & (wave < wred[1])\n mblue = (wave > wblue[0]) & (wave < wblue[1])\n\n # INITIALIZE QUANTITIES\n na_EW = 0\n na_EW_err = -99\n\n # DETERMINE WEIGHTED MEAN OF BLUE/RED PSEUDO-CONTINUUM BAND\n # DON\"T CALCULATE IF DATA DOESN\"T EXIST\n if (np.sum(mblue) != 0) & (np.sum(mred) != 0): \n sum1 = np.sum(spec[mblue] * ivar[mblue]**2 )\n sum2 = np.sum( ivar[mblue]**2 )\n bcont = sum1 / sum2\n\n sum1 = np.sum(spec[mred] * ivar[mred]**2 )\n sum2 = np.sum( ivar[mred]**2 )\n rcont = sum1 / sum2\n\n\n # DEFINE CONTINUUM LINE BETWEEN RED/BLUE PASSBAND (y=mx+b)\n mline = (rcont - bcont) / (waver - waveb)\n bline = rcont - (mline * waver)\n continuum = (mline * wave) + bline\n\n \n # CALCULATE DELTA LAMBAs, ASSUME NON-LINEAR BINNING\n dlambda = np.zeros(np.size(wave))\n for i,item in enumerate(wave):\n if (i != np.size(wave)-1):\n dlambda[i] = wave[i+1]-wave[i] \n \n # CALCULATE NaI EW\n na_EW = np.sum((1 - (spec[mnaI] / continuum[mnaI])) * dlambda[mnaI])\n na_EW_var = np.sum(1./np.sum(ivar[mnaI] * (continuum[mnaI]/dlambda[mnaI])))\n na_EW_err = np.sqrt(na_EW_var)\n \n\n # PLOT IF REQUESTED\n if (plot == 1):\n plt.figure()\n na = [8183,8195]\n plt.axvline(na[0],color='r')\n plt.axvline(na[1],color='r')\n plt.title(na_EW)\n plt.plot(wave,spec)\n plt.plot(wave,continuum)\n plt.xlim(8170,8230)\n\n return na_EW, na_EW_err\n\n\n######################################################\n# ROUGH MEMEBERSHIP\ndef membership_CMD(zspec,obj):\n\n # GET ISOCHRONE PROPERTIES \n EBV = obj['EBV_SF11']\n dist= obj['Dist_kpc']\n iso = obj['iso_guess']\n\n r,gr = plot_isochrone_padova(dist*1e3,EBV,iso)\n\n r_hb,gr_hb = plot_isochrone_HB(dist*1e3,EBV) \n\n \n dmin = []\n emin = []\n for star in zspec:\n err = np.sqrt(star['GMAG_ERR']**2 + star['RMAG_ERR']**2)\n\n d = (r - star['RMAG'])**2 + (gr - (star['GMAG'] - star['RMAG']))**2\n d2 = (r_hb - star['RMAG'])**2 + (gr_hb - (star['GMAG'] - star['RMAG']))**2\n\n tmp = np.min(d)\n tmp2=np.min(d2) \n if tmp2 < tmp:\n tmp=tmp2\n dmin.append(tmp) \n emin.append(err)\n \n emin = np.array(emin)\n m = (emin > 1) \n emin[m] = 0\n m = (emin > 0.3) & (star['RMAG_ERR'] < 22) #. account for bad errors in UMa2 \n emin[m] = 0\n \n #*********************************\n # CMD THRESHOLD == 0.1 PLUS ERRORS\n mem = np.array(dmin) < 0.1 + emin # SET THRESHOLD PLUS PHOTOMETRIC ERROR\n \n return mem\n\n\n\n######################################################\ndef plot_isochrone_padova(dist,EBV,iso):\n\n iso = ascii.read(DEIMOS_DROPBOX+'/Photometry/isochrones/iso_t12_z'+str(iso)+'.dat')\n\n #A(g)/(E(B-V)) = 3.793 \n Ag = 3.793 * EBV\n Ar = 2.751 * EBV\n\n r_iso = iso['rmag'] + 5.*np.log10(dist) - 5. 
+ Ar\n gr_iso = iso['gmag'] - iso['rmag'] + (Ag - Ar)\n \n return r_iso,gr_iso\n\n\n######################################################\ndef plot_isochrone_HB(dist,EBV):\n\n iso = Table.read(DEIMOS_DROPBOX+'/Photometry/isochrones/M92_fiducial.dat',format='ascii',guess=False)\n hb = iso['typ'] == 1\n \n #A(g)/(E(B-V)) = 3.793 \n Ag = 3.793 * EBV\n Ar = 2.751 * EBV\n\n r_iso = iso['rmag'][hb] + 5.*np.log10(dist) - 5. + Ar\n gr_iso = iso['gmr'][hb] + (Ag - Ar) + 0.2\n \n return r_iso,gr_iso \n","sub_path":"dev_algorithms/flexure/Geha_flexure/deimos_tools.py","file_name":"deimos_tools.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"532994340","text":"import GMatElastoPlastic as gmat\nimport GooseMPL as gplt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use(['goose', 'goose-latex'])\n\n# tensor operations\n\nddot42 = lambda A4,B2: np.einsum('ijkl,lk->ij',A4,B2)\nddot22 = lambda A2,B2: np.einsum('ij,ji',A2,B2)\nnorm = lambda A2 : np.abs(np.einsum('ij,ji',A2,A2))\n\n# define material model\n\nmat = gmat.Cartesian3d.LinearHardening(10.0, 1.0, 0.1, 0.2)\n\n# pre-loading\n\nninc = 301\n\nfor igamma, gamma in enumerate(np.linspace(0.0, 0.1, ninc)):\n\n mat.increment()\n\n Eps0 = np.array([\n [ 0.0, gamma, 0.0],\n [gamma, 0.0, 0.0],\n [ 0.0, 0.0, 0.0],\n ])\n\n Sig0, C4 = mat.Tangent(Eps0)\n\n# consistency check\n\nx = np.logspace(-16,0,100)\ny = np.zeros(x.shape)\n\nfor i in range(len(x)):\n\n dEps = np.random.random((3,3)) * x[i]\n dEps = 0.5 * (dEps + dEps.T)\n\n Sig = mat.Stress(Eps0 + dEps)\n\n dSig = Sig - Sig0\n\n y[i] = norm(dSig - ddot42(C4, dEps)) / norm(dSig)\n\n# plot result\n\nfig, ax = plt.subplots()\n\nax.plot(x, y, color='r', label=r'measurement')\n\nax.set_xscale('log')\nax.set_yscale('log')\n\nax.set_xlim([1e-18, 1e0])\nax.set_ylim([1e-18, 1e0])\n\nax.set_xlabel(r'$|| \\delta \\bm{\\varepsilon} ||$')\nax.set_ylabel(r'$\\eta$')\n\ngplt.plot_powerlaw(-2, 0.0, 1.0, 0.5, axis=ax, units='relative', color='k', linewidth=1,\n label=r'rounding error: $|| \\delta \\bm{\\varepsilon} ||^{-2}$')\n\ngplt.plot_powerlaw(+2, 0.5, 0.0, 0.5, axis=ax, units='relative', color='k', linewidth=1, linestyle='--',\n label=r'linearisation error: $|| \\delta \\bm{\\varepsilon} ||^{+2}$')\n\nax.legend()\n\nplt.savefig('consistency.pdf')\nplt.show()\n","sub_path":"docs/examples/consistency.py","file_name":"consistency.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"49196287","text":"aa_list = ['F','F','L','L','S','S','S','S','Y','Y','-','-','C','C','-','W','L','L','L','L','P','P','P','P','H','H','Q','Q','R','R','R','R','I','I','I','M','T','T','T','T','N','N','K','K','S','S','R','R','V','V','V','V','A','A','A','A','D','D','E','E','G','G','G','G'] # list of aa's in order\n\nn = \"UCAG\" # POSSIBLE NUCLEOTIDES\n\ndef codon_list(n): # function to develop every combination of codon triplets in order to correspond with aa_list\n codon_tabl =[]\n for i in range(len(n)):\n for x in range(0,len(n),1):\n for y in range(0,len(n),1):\n \n codon_tabl.append(n[i] + n[x] + n[y])\n return codon_tabl\n\ndef create_dic(input_list,output_list): #function which links two lists into a dictionary;\n \n d = {}\n \n for i in range(0,len(input_list),1):\n \n d[input_list[i]] = output_list[i]\n return(d)\n\ncodon_table = codon_list(n) # creating a variable to store this the newly developed codon list\n\ntrans_dic = 
create_dic(codon_table,aa_list) # creating a dictionary of codon triplets corresponding to the corresponding amino acid \n\nprint(trans_dic)\n","sub_path":"translation_dictionary.py","file_name":"translation_dictionary.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349906551","text":"#!/usr/bin/python\n\nfrom time import time, sleep\nimport subprocess\n\nclass TestPopen():\n def test0(self):\n proc = subprocess.Popen([\"echo\", \"aaaa\"],\n stdout=subprocess.PIPE)\n out, err = proc.communicate()\n print(\"p said: \" + out.decode(\"UTF-8\"))\n\n def test1(self):\n proc = subprocess.Popen([\"sleep\", \"0.1\"])\n while proc.poll() is None:\n print(time())\n print(\"do some of my own work\")\n print(\"child exits as %d\" % proc.poll())\n\n def run_sleep(self, period):\n return subprocess.Popen([\"sleep\", str(period)])\n \n def test3(self):\n processes = []\n for _ in range(100):\n processes.append(self.run_sleep(0.2))\n\n start = time()\n for p in processes:\n p.communicate()\n print(\"cost %f\", (time() - start))\n \n def test4(self):\n start = time()\n sleep(1)\n print(\"cost %f\", (time() - start))\n\ndef run_sleep(period):\n proc = subprocess.Popen([\"sleep\", str(period)])\n return proc\n\ndef test3():\n start = time()\n processes = []\n print(list(range(10)))\n for _ in range(10):\n processes.append(run_sleep(0.2))\n\n for p in processes:\n p.communicate()\n print(\"cost %f\", (time() - start))\n\ndef test5():\n proc = subprocess.Popen([\"ls\"],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n proc.stdout.flush()\n out, err = proc.communicate()\n proc = subprocess.Popen([\"wc\", \"-l\"],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n proc.stdin.write(out)\n proc.stdin.flush()\n out, err = proc.communicate()\n\n","sub_path":"python/effective_python_59_ways/try_subprocess.py","file_name":"try_subprocess.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"648713011","text":"# encoding: utf-8\n'''\nCreated on 2017年9月5日\n\n@author: Blair Chan\n'''\n\nfrom constant import LANDMARK_START_NUM\nfrom constant import LANDMARK_SIZE_LIMIT_MAX\n\ndef get_landmark_hits(landmark_index, landmark_doc_type, esHelper):\n query = {\n \"from\": LANDMARK_START_NUM,\n \"size\": LANDMARK_SIZE_LIMIT_MAX,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"must_not\": [\n {\n \"term\": {\n \"status\": 1\n }\n }\n ],\n \"should\": []\n }\n },\n \"sort\": [\n {\n \"landmark_id\": {\n \"order\": \"asc\"\n }\n }\n ]\n }\n\n landmark_docs = esHelper.search_hits_hits(index=landmark_index, doc_type=landmark_doc_type, body=query)\n return landmark_docs\n","sub_path":"service_lq_offline-part/service_lq_offline-part/service/landmark_service.py","file_name":"landmark_service.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"101659019","text":"import json\nimport os\nfrom pprint import pprint\n\n__db_location__ = \"db\"\n__item_folder__ = f\"{__db_location__}/item\"\n__item__last_id__ = f\"{__db_location__}/item_id.db\"\n\n\nclass Item():\n def __init__(self) :\n if os.path.exists(__item__last_id__):\n with open(__item__last_id__, \"r\") as last_id_f:\n self.last_id = int(last_id_f.readline())\n else:\n self.last_id = 0\n\n def __repr__(self):\n return 
f\"id:{self.id},name:{self.name},price:{self.price},sellingPrice:{self.selling_price},qty:{self.qty}\"\n\n\n def __str__(self):\n return f\"id:{self.id},name:{self.name},price:{self.price},sellingPrice:{self.selling_price},qty:{self.qty}\"\n\n \n def save(self):\n id = self.last_id+1\n\n # Save database item\n _data_ = {\n \"id\": id,\n \"name\": self.name,\n \"price\": self.price,\n \"sellingPrice\": self.selling_price,\n \"qty\":self.qty\n }\n with open(f\"{__item_folder__}/{id}.db\", \"w\") as item_file:\n json.dump(_data_, item_file)\n\n # Save next id\n self.last_id += 1\n with open(__item__last_id__, \"w\") as f:\n f.write(str(self.last_id))\n\n def __get_item_by_path(item, path):\n with open(path, \"r\") as item_file:\n _data_ = json.load(item_file)\n item.id = _data_[\"id\"]\n item.name = _data_[\"name\"]\n item.price = _data_[\"price\"]\n item.selling_price = _data_[\"sellingPrice\"]\n item.qty = _data_[\"qty\"]\n \n def all_items(self):\n item_file_names = os.listdir(__item_folder__)\n items = []\n for item_file_name in item_file_names:\n item = Item()\n Item.__get_item_by_path(\n item, f\"{__item_folder__}/{item_file_name}\")\n items.append(item)\n return items\n\n def get_by_item_id(self,id):\n Item.__get_item_by_path(self, f\"{__item_folder__}/{id}.db\")\n\n def delete_item(self,id):\n if os.path.exists(f\"{__item_folder__}/{id}.db\"):\n os.remove(f\"{__item_folder__}/{id}.db\")\n print(\"**\",id,\"deleted\",\"**\")\n else:\n print(\"The item does not exist\")\n\n\ndef create_item(name,price,selling_price,qty):\n item = Item()\n item.name = name\n item.price = price\n item.selling_price = selling_price\n item.qty = qty\n item.save()\n print(\"** item added successfully..!**\")\n\ndef get_all_items():\n item = Item()\n items = item.all_items()\n print('----------------------------------------------------------------------------------------------------------------------')\n print('| items |')\n print('----------------------------------------------------------------------------------------------------------------------')\n pprint(items)\n print('----------------------------------------------------------------------------------------------------------------------')\n\ndef item_view_by_id(id):\n item = Item()\n item.id = id\n item.get_by_item_id(id)\n print('----------------------------------------------------------------------------------------------------------------------')\n print('| items |')\n print('----------------------------------------------------------------------------------------------------------------------')\n print(item.id, item.name, item.price, item.selling_price, item.qty)\n print('----------------------------------------------------------------------------------------------------------------------')\n\ndef item_get_by_id(id):\n item = Item()\n item.id = id\n item.get_by_item_id(id)\n return item\n\n\ndef item_delete(id):\n item = Item()\n item.id = id\n item.delete_item(id)\n\ndef item_update(id,qty):\n items = item_get_by_id(id)\n item_dict = vars(items)\n new_qty = str(int(item_dict[\"qty\"]) + int(qty))\n\n with open(\"db/item/\"f\"{items.id}.db\", \"r\") as jsonFile:\n data = json.load(jsonFile)\n\n data[\"qty\"] = new_qty\n\n with open(\"db/item/\"f\"{items.id}.db\", \"w\") as jsonFile:\n json.dump(data, jsonFile)\n return True\n ","sub_path":"item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"529523507","text":"# -*- coding: utf-8 
-*-\r\n\r\n\"\"\"\r\nThis code is for IRLS-DNN model test\r\nE-mail: guokai_liu@163.com\r\n\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom next import Nextbacth\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\r\n\r\n#%% define the dense layer\r\ndef add_layer(inputs, in_size, out_size, n_layer, activation_function=None):\r\n # add one more layer and return the output of this layer\r\n layer_name = 'layer%s' % n_layer\r\n with tf.name_scope(layer_name):\r\n with tf.name_scope('weights'):\r\n Weights = tf.Variable(tf.random_normal([in_size, out_size],stddev=0.1), name='W')\r\n tf.summary.histogram(layer_name + '/weights', Weights)\r\n with tf.name_scope('biases'):\r\n biases = tf.Variable(tf.zeros([1, out_size]), name='b')\r\n tf.summary.histogram(layer_name + '/biases', biases)\r\n with tf.name_scope('Wx_plus_b'):\r\n Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)\r\n if activation_function is None:\r\n outputs = Wx_plus_b\r\n else:\r\n outputs = activation_function(Wx_plus_b, )\r\n tf.summary.histogram(layer_name + '/outputs', outputs)\r\n return outputs\r\n\r\n#%% Make up some real data\r\n \r\n# make some meshgrid data\r\nsep = 0.05\r\nX = np.arange(-1, 1, sep)\r\nY = np.arange(-1, 1, sep)\r\nX, Y = np.meshgrid(X, Y)\r\nZ = X**2 + Y**2\r\n\r\n# sequencial data\r\nx = X.reshape(-1)\r\ny = Y.reshape(-1)\r\nz = Z.reshape(-1)\r\n\r\n# make training data for pure and noisy condition\r\nx_data = np.stack((x,y),axis=1).astype('float32')\r\ny_data = z.reshape((-1,1)).astype('float32')\r\n\r\n# make training noisy data\r\ny_data_noise = y_data.copy()\r\nnoise_level = 0.1\r\nnoise_num = int(1600*noise_level)\r\nnoise = np.random.randint(5,10,noise_num)\r\nindex = np.random.randint(1,1600,noise_num)\r\ny_data_noise[index]=noise.reshape((-1,1))\r\n\r\n\r\n\r\n# filter the noisy data and left the pure data\r\nfil = y_data==y_data_noise\r\nfil_index = [idx for idx,item in enumerate(fil) if fil[idx]==True]\r\nx_data_pure = x_data[fil_index]\r\ny_data_pure = y_data[fil_index]\r\n\r\n#%%\r\n# define placeholder for inputs to network\r\nbatch_size = 200\r\nsample_size = len(x_data)\r\ninput_dim = 2\r\n\r\nwith tf.name_scope('inputs'):\r\n xs = tf.placeholder(tf.float32, [None, input_dim], name='x_input')\r\n ys = tf.placeholder(tf.float32, [None, 1], name='y_input')\r\n lr = tf.placeholder(tf.float32)\r\n sw = tf.placeholder(tf.float32,shape=[batch_size],name ='sample_weight')\r\n\r\n# add hidden layer-1\r\nl1 = add_layer(xs, input_dim, 100, n_layer=1, activation_function=tf.nn.relu)\r\n# add hidden layer-2\r\nl2 = add_layer(l1, 100, 50, n_layer=2, activation_function=tf.nn.relu)\r\n# add hidden layer-3\r\nl3 = add_layer(l2, 50, 25, n_layer=3, activation_function=tf.nn.relu)\r\n# add output layer\r\nprediction = add_layer(l3, 25, 1, n_layer=4, activation_function=None)\r\n\r\n# the error between prediciton and real data\r\nwith tf.name_scope('loss'):\r\n# loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),\r\n# reduction_indices=[1]))\r\n diff = tf.abs(ys-prediction)\r\n weighted_diff = tf.multiply(diff,sw)\r\n# loss = tf.reduce_mean(diff)\r\n loss = tf.reduce_mean(weighted_diff)\r\n tf.summary.scalar('loss', loss)\r\n\r\nwith tf.name_scope('train'):\r\n train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)\r\n\r\n#%%\r\n# traininig process\r\ndef 
training_process(input_data,output_data,IRLS=True,iteration_num=1000):\r\n with tf.Session() as sess: \r\n init = tf.global_variables_initializer()\r\n sess.run(init) \r\n sn = Nextbacth(np.arange(len(input_data)))\r\n \r\n # initialize the sample weights as ones for all data \r\n SAMPLE_WEIGHT = np.ones(len(input_data))\r\n \r\n \r\n for i in range(iteration_num):\r\n # update learninig weight\r\n max_learning_rate = 0.2\r\n min_learning_rate = 0.05\r\n decay_speed = 500.0\r\n learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)\r\n # update batch data\r\n label = sn.next_batch(batch_size)\r\n # update sample weight\r\n batch_sample_weight = SAMPLE_WEIGHT[label]\r\n \r\n # sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\r\n _,l,a,b= sess.run([train_step,loss,ys,prediction], \r\n feed_dict={xs: input_data[label], \r\n ys:output_data[label],\r\n lr:learning_rate,\r\n sw:batch_sample_weight})\r\n \r\n \r\n # update sample weight\r\n if i % 8 == 0 and i!=0:\r\n print('The %d batch loss is:%f'%(i,l))\r\n pre = sess.run(prediction,feed_dict={xs: input_data, ys: output_data,lr:learning_rate})\r\n res = np.abs(pre - output_data)\r\n res_mean = np.mean(res)\r\n res_std = np.std(res)\r\n \r\n# if IRLS==True:\r\n# for idx,item in enumerate(res):\r\n# SAMPLE_WEIGHT[idx] = 1 if item<=res_mean+2*res_std and item>=res_mean-2*res_std else SAMPLE_WEIGHT[idx]*0\r\n \r\n if IRLS==True:\r\n for idx,item in enumerate(res):\r\n SAMPLE_WEIGHT[idx] = np.exp(-item)\r\n return pre,SAMPLE_WEIGHT\r\n#%% calculate the prediction under different condtions\r\npre_pure_IRLS,sw_Pure = training_process(input_data=x_data_pure,output_data=y_data_pure,IRLS=True,iteration_num=10000)\r\n#%%\r\npre_noise_None,sw_None = training_process(input_data=x_data,output_data=y_data_noise,IRLS=False,iteration_num=10000)\r\n#%%\r\npre_noise_IRLS,sw_IRLS = training_process(input_data=x_data,output_data=y_data_noise,IRLS=True,iteration_num=10000)\r\n\r\n#%% plot the results for analysis\r\n#------------------------------------------------------------------------------\r\n# create a figure\r\nfig = plt.figure(figsize=(18,12))\r\nfig.patch.set_facecolor('white')\r\n\r\n# plot the distribution of the ground truth data\r\nax = fig.add_subplot(231, projection='3d')\r\nax.scatter(x,y,z)\r\nax.title.set_text('The Ground Truth')\r\nax.set_zlim(-1, 10)\r\n\r\n# plot the distribution of the noisy data\r\nax = fig.add_subplot(232, projection='3d')\r\nax.scatter(x,y,y_data_noise)\r\nax.title.set_text('The Noisy Data')\r\nax.set_zlim(-1, 10)\r\n\r\n# plot trained model with pure data using IRLS\r\nax = fig.add_subplot(233, projection='3d')\r\nax.scatter(x_data_pure[:,0],x_data_pure[:,1],pre_pure_IRLS)\r\nax.title.set_text('Model with Pure Data')\r\nax.set_zlim(-1, 10)\r\n\r\n# plot trained model with noisy data without using IRLS\r\nax = fig.add_subplot(234, projection='3d')\r\nax.scatter(x_data[:,0],x_data[:,1],pre_noise_None)\r\nax.title.set_text('Model with Noisy Data without using IRLS')\r\nax.set_zlim(-1, 10)\r\nfig.tight_layout()\r\n\r\n# plot trained model with noisy data using IRLS\r\nax = fig.add_subplot(235, projection='3d')\r\nax.scatter(x_data[:,0],x_data[:,1],pre_noise_IRLS)\r\nax.title.set_text('Model with Noisy Data using IRLS')\r\nax.set_zlim(-1, 10)\r\nfig.tight_layout()\r\n#------------------------------------------------------------------------------","sub_path":"IRLS_MLP/Two Variables for Regression.py","file_name":"Two Variables for 
Regression.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"261377785","text":"from pathfinding import *\n\n\n# Cost of travelling through a cell\ndef cost(cell, board):\n cell_type = board[cell[0]][cell[1]]\n\n if cell_type == SOURCE_CELL:\n return 0\n elif cell_type == GOAL_CELL:\n return 0\n elif cell_type == WATER_CELL:\n return 100\n elif cell_type == MOUNTAIN_CELL:\n return 50\n elif cell_type == FOREST_CELL:\n return 10\n elif cell_type == GRASSLAND_CELL:\n return 5\n elif cell_type == ROAD_CELL:\n return 1\n elif cell_type == OPEN_CELL:\n return 1\n else:\n return 999999 # Unknown cell, make it very expensive to cross\n\n\nfor i in range(1, 5):\n board = read('boards/board-2-' + str(i) + '.txt')\n source, goal = find_source_goal(board)\n algo_status = a_star(board, source, goal, cost=cost)\n\n # In task A.2 we shall only show the shortest path cells,\n # so remove the open and closed statuses\n for rownum in range(len(algo_status)):\n for colnum in range(len(algo_status[0])):\n if algo_status[rownum][colnum] != STATUS_SHORTEST_PATH:\n algo_status[rownum][colnum] = None\n\n output = draw(board, algo_status)\n output.save('visualizations/A.2.2-board-2-' + str(i) + '-solution.png')\n print('Saving visualizations/A.2.2-board-2-' + str(i) + '-solution.png')","sub_path":"assignment3/a_2.py","file_name":"a_2.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432079960","text":"import maya.OpenMayaUI as omui\nfrom PySide2 import QtWidgets, QtCore\nfrom shiboken2 import wrapInstance\n\n\ndef maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)\n\n\nclass SmartSaveUI(QtWidgets.QDialog):\n\n def __init__(self):\n super(SmartSaveUI, self).__init__(parent=maya_main_window())\n self.setWindowTitle(\"Smart Save\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)\n self.create_widgets()\n self.create_layout()\n\n def create_widgets(self):\n self.title_lbl = QtWidgets.QLabel(\"Smart Save\")\n self.title_lbl.setStyleSheet(\"font: bold 20px\")\n self.dir_lbl = QtWidgets.QLabel(\"Directory\")\n self.dir_linedit = QtWidgets.QLineEdit()\n self.incandsave_btn = QtWidgets.QPushButton(\"Incremenet and Save\")\n self.browse_btn = QtWidgets.QPushButton(\"Browse...\")\n self.save_btn = QtWidgets.QPushButton(\"Save\")\n self.cancel_btn = QtWidgets.QPushButton(\"Cancel\")\n self.desc_lbl = QtWidgets.QLabel(\"Descriptor\")\n self.desc_linedit = QtWidgets.QLineEdit(\"main\")\n self.version_lbl = QtWidgets.QLabel(\"Version\")\n self.version_spinbox = QtWidgets.QSpinBox()\n self.version_spinbox.setValue(1)\n self.ext_lbl = QtWidgets.QLabel(\"Extension\")\n self.ext_linedit = QtWidgets.QLineEdit(\"ma\")\n\n def create_layout(self):\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.addWidget(self.title_lbl)\n\n self.dir_lay = QtWidgets.QHBoxLayout()\n self.dir_lay.addWidget(self.dir_lbl)\n self.dir_lay.addWidget(self.dir_linedit)\n self.dir_lay.addWidget(self.browse_btn)\n\n self.desc_lay = QtWidgets.QHBoxLayout()\n self.desc_lay.addWidget(self.desc_lbl)\n self.desc_lay.addWidget(self.desc_linedit)\n\n self.version_lay = QtWidgets.QHBoxLayout()\n self.version_lay.addWidget(self.version_lbl)\n self.version_lay.addWidget(self.version_spinbox)\n\n self.ext_lay = 
QtWidgets.QHBoxLayout()\n self.ext_lay.addWidget(self.ext_lbl)\n self.ext_lay.addWidget(self.ext_linedit)\n\n self.desc_lay = QtWidgets.QHBoxLayout()\n self.desc_lay.addWidget(self.desc_lbl)\n self.desc_lay.addWidget(self.desc_linedit)\n\n self.bottom_btn_lay = QtWidgets.QHBoxLayout()\n self.bottom_btn_lay.addWidget(self.incandsave_btn)\n self.bottom_btn_lay.addWidget(self.save_btn)\n self.bottom_btn_lay.addWidget(self.cancel_btn)\n\n self.main_layout.addLayout(self.dir_lay)\n self.main_layout.addLayout(self.desc_lay)\n self.main_layout.addLayout(self.version_lay)\n self.main_layout.addLayout(self.ext_lay)\n self.main_layout.addStretch()\n self.main_layout.addLayout(self.bottom_btn_lay)\n self.setLayout(self.main_layout)","sub_path":"src/smartsaveui.py","file_name":"smartsaveui.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"297757188","text":"from . import base\n\n\ndef fetch_electricity(data_home=None):\n \"\"\"Returns a stream containing a day of electricity prices in New South Wales.\n\n Parameters:\n data_home (str): The directory where you wish to store the data.\n\n Yields:\n tuple: A pair (``x``, ``y``) where ``x`` is a dict of features and ``y`` is the target.\n\n References:\n 1. `SPLICE-2 Comparative Evaluation: Electricity Pricing `_\n\n \"\"\"\n\n name = 'electricity'\n url = 'https://maxhalford.github.io/files/datasets/electricity.zip'\n\n return base.fetch_csv_dataset(\n data_home=data_home,\n url=url,\n name=name,\n target_name='class',\n types={\n 'date': float,\n 'day': int,\n 'period': float,\n 'nswprice': float,\n 'nswdemand': float,\n 'vicprice': float,\n 'vicdemand': float,\n 'transfer': float,\n 'class': lambda x: True if x == 'UP' else False\n }\n )\n","sub_path":"creme/datasets/electricity.py","file_name":"electricity.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"337266497","text":"# Import all dependencies required to run the application \n\n# The OS import is used to connect to the heroku environment to get the environment variables for database connection\nimport os\nimport json\n# import io\n\n# Pandas is required in order to read the sql queries into dataframes for conversion to JSON for plotting\nimport pandas as pd\nimport numpy as np\n\n# SqlAlchemy is needed to make the connection to the database and actually pull information from it with the engine postgres URL\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\n\n# Psycorp2 is required in order to connect the the postgres database, whereas pymysql would be used for MySQL connections\nimport psycopg2\n\n# Flask is used to actually deploy our application and render the html files for the webpage views the user sees\nfrom flask import Flask, jsonify, render_template, url_for, json, request\nfrom flask_sqlalchemy import SQLAlchemy\nimport pickle\nfrom sklearn.cluster import KMeans\n\n# In order to work on our project both locally and in the cloud the following code tells it to either use the config file or search heroku\n# for the environment variables \nIS_HEROKU = False\n\nif('IS_HEROKU' in os.environ):\n IS_HEROKU = True\n\nif (IS_HEROKU):\n remote_esg_host = os.environ['remote_esg_host']\n remote_db_port = os.environ['remote_db_port']\n remote_esg_dbname = os.environ['remote_esg_dbname']\n remote_esg_dbuser = 
os.environ['remote_esg_dbuser']\n remote_esg_dbpwd = os.environ['remote_esg_dbpwd']\n API_KEY = os.environ['mapboxkey']\nelse:\n from config import remote_esg_host, remote_db_port, remote_esg_dbname, remote_esg_dbuser, remote_esg_dbpwd \n\nengine = create_engine(f\"postgres://{remote_esg_dbuser}:{remote_esg_dbpwd}@{remote_esg_host}:{remote_db_port}/{remote_esg_dbname}\")\nconn = engine.connect()\n\n# Initialize Flask application\napp = Flask(__name__)\nmodel = pickle.load(open(\"model.pkl\", 'rb'))\n\n# Set up SQL Alchemy connection and classes\nBase = automap_base() # Declare a Base using `automap_base()`\nBase.prepare(engine, reflect=True) # Use the Base class to reflect the database tables\nBase.classes.keys() # Print all of the classes mapped to the Base\n# ClientInfo = Base.classes.client_info # Assign the client_info class (table) to a variable called `ClientInfo`\nsession = Session(engine) # Create a session\nprint(Base.classes.keys())\n\n# Develop flask routes for each page and then the routes for the database info to feed the plots in our js files\n\n@app.route(\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n@app.route(\"/company_search\")\ndef company_search():\n \"\"\"Return the company search page.\"\"\"\n return render_template(\"company_search.html\")\n\n@app.route(\"/data_table\")\ndef data_table():\n \"\"\"Return the data table.\"\"\"\n return render_template(\"data_table.html\")\n\n@app.route(\"/table_iframe\")\ndef data_iframe():\n \"\"\"Return the table iframe.\"\"\"\n return render_template(\"table.html\")\n\n@app.route(\"/deep_dive\")\ndef deep_dive():\n \"\"\"Return the deep_dive info for industry/sector/funnel,etc.\"\"\"\n return render_template(\"deep_dive.html\")\n\n@app.route(\"/esg_breakdown\")\ndef esg_breakdown():\n \"\"\"Return the history and breakdown page.\"\"\"\n return render_template(\"esg_breakdown.html\")\n\n@app.route(\"/recommendations\")\ndef recommendations():\n \"\"\"Return the recommendations page.\"\"\"\n return render_template(\"recommendations.html\")\n\n\n@app.route('/output',methods=[\"POST\"])\ndef get_output():\n json = request.get_json()\n # input_data = pd.DataFrame(json[\"data\"])\n input_data = [[json[\"data\"]['E'],json[\"data\"]['S'],json[\"data\"]['G']]]\n # input_data = json[\"data\"]['S']\n # input_data.append(json[\"data\"]['E'])\n # input_data.append(json[\"data\"]['S'])\n # input_data.append(json[\"data\"]['G'])\n # model_input = np.array(input_data)\n # print(model_input)\n # with open(\"model.pkl\", 'rb') as file:\n # model = pickle.load(open(\"model.pkl\", 'rb'))\n model_predict = model.predict(input_data)\n model_predict_tuple = tuple(model_predict)\n # mdl_array = [model_output]\n # model_array = np.array([model_predict]) \n # print(model_predict) \n # print(model_predict_tuple)\n # print(3)\n # print(model_array)\n model_predict_array = np.array([model_predict_tuple])\n # numpyData = {\"array\": model_predict_array}\n # print(numpyData)\n model_predict_df = pd.DataFrame(model_predict_array,index=['Result'], columns=['Output'])\n model_predict_JSON = model_predict_df.to_json(orient='index')\n print(model_predict_JSON)\n return jsonify(model_predict_JSON)\n\n@app.route(\"/mapboxkey\", methods=[\"GET\", \"POST\"])\ndef mapbox():\n \"\"\"Return the recommendations page.\"\"\"\n if request.method == \"POST\":\n return 200\n\n else:\n return json.dumps(API_KEY)\n\n\n\n# @app.route('/api/data/esg')\n# def get_esg_data():\n# conn = engine.connect()\n\n# esg_df = 
pd.read_sql(\"SELECT * FROM woke_investing\", conn)\n\n# conn.close()\n\n# return esg_df.to_json(orient='records')\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"ESG/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"442818445","text":"def squares( n ) :\n \"\"\"Returns a list with the members of the squares sequence up to 'n'\"\"\"\n s = []\n\n for i in range(1, n+1) :\n s.append(i*i)\n\n return s\n\n\nif __name__ == '__main__' :\n from sys import argv\n print(squares(int(argv[1])))\n\n","sub_path":"modules/dmath/series/squares.py","file_name":"squares.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"298219973","text":"import unittest\nfrom datetime import timedelta\n\nfrom eve.utils import config\n\nfrom eve_mongoengine import get_utc_time\nfrom tests import (\n BaseTest,\n HawkeyDoc,\n)\n\n\nclass TestHttpPost(BaseTest, unittest.TestCase):\n def test_post_with_pre_save_hook(self):\n self.__class__.app.config.update(\n {\n \"LAST_UPDATED\": \"updated_at\",\n \"DATE_CREATED\": \"created_at\",\n }\n )\n self.__class__.ext.last_updated = \"updated_at\"\n self.__class__.ext.date_created = \"created_at\"\n self.__class__.ext.add_model(\n HawkeyDoc,\n resource_methods=[\"GET\", \"POST\", \"DELETE\"],\n item_methods=[\"GET\", \"PATCH\", \"PUT\", \"DELETE\"],\n )\n # resulting etag has to match (etag must be computed from\n # modified data, not from original!)\n HawkeyDoc.objects.delete()\n data = {\"a\": \"hey\"}\n response = self.client.post(\n \"/hawkeydoc/\", data='{\"a\": \"hey\"}', content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 201)\n resp_json = response.get_json()\n self.assertEqual(resp_json[config.STATUS], \"OK\")\n etag = resp_json[config.ETAG]\n\n # verify etag\n resp = self.client.get(\"/hawkeydoc/%s\" % resp_json[\"_id\"])\n self.assertEqual(etag, resp.get_json()[config.ETAG])\n\n HawkeyDoc(a=\"a\").save()\n queryset = HawkeyDoc.objects()\n for document in queryset:\n self.assertNotEqual(document.created_at, None)\n\n # test bulk insert no signal\n docs = HawkeyDoc.objects.insert([HawkeyDoc(a=\"a\")])\n for document in docs:\n self.assertEqual(document.created_at, None)\n\n # cleanup\n HawkeyDoc.objects.delete()\n\n def test_client_supplied_dates(self):\n yesterday = get_utc_time() - timedelta(days=1)\n doc = HawkeyDoc(a=\"hello\", created_at=yesterday, updated_at=yesterday).save(\n validate=False\n )\n self.assertEqual(doc.updated_at, yesterday)\n","sub_path":"tests/test_post_with_custom_date_created.py","file_name":"test_post_with_custom_date_created.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"567549505","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Create the vectors p and t\nt = np.arange(0,100,0.0001)\np = np.exp(-2*t) + np.exp(-3*t) + np.exp(-3*t/2) + np.exp(-5*t/6) + np.exp(-29*t/6) - np.exp(-5*t/2) - np.exp(-23*t/6) - np.exp(-4*t) - np.exp(-11*t/6)\n\n# Create the plot\nplt.plot(t,p)\n\nplt.xlabel('time')\nplt.ylabel('probability')\n\n# Show the 
plot\nplt.show()\n","sub_path":"abhiga:GeorgiaTech-ISYE6420-BayesianStatistics/Homework1/plotPvsTat0to100.py","file_name":"plotPvsTat0to100.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"636803441","text":"#!/bin/python\n# section 5, 24: sorting into order when there is no easy way to tell\nimport random\ndef get_data(x=100000):\n\t# generating the data [0, x).\n\t# return data, a tuple of (prev, next)\n\td = [i for i in range(x)]\n\trandom.shuffle(d)\n\tl = [(d[i], d[i+1]) for i in range(x-1)]\n\treturn d, l\n\t\ndef sort_data(l):\n\t# sort the list of tuples and return the original list\n\tlg = []\t\t# list of (i, x), always sorted by the second one\n\t# loop to the power of 2, log(x)\n\tstep = 1\n\twhile len(l)>0:\n\t\tl1 = l\n\t\tl2 = l.copy()\n\t\tl1.sort(key=(lambda i: i[0]))\t# sort on first token\n\t\tl2.sort(key=(lambda i: i[1]))\t# sort on second token\n\t\tl3 = []\n\t\t# inner loop, scan the two lists\n\t\tind1 = ind2 = indg = 0\n\t\twhile 1:\n\t\t\t# on the second list\n\t\t\tif ind1 >= len(l1) or l2[ind2][1] < l1[ind1][0]:\t# [first check ind1] append new one\n\t\t\t\tif len(lg) == 0:\t# create the first one\n\t\t\t\t\tlg.append((len(l)-2, l2[ind2][0]))\n\t\t\t\t\tlg.append((len(l)-1, l2[ind2][1]))\n\t\t\t\telse:\n\t\t\t\t\t#print(indg, ind2, lg, l2)\n\t\t\t\t\t#assert lg[indg][1] == l2[ind2][1]\n\t\t\t\t\twhile lg[indg][1] != l2[ind2][1]:\n\t\t\t\t\t\tindg += 1\n\t\t\t\t\tlg.append((lg[indg][0]-step, l2[ind2][0]))\n\t\t\t\t\tindg += 1\n\t\t\t\tind2 += 1\n\t\t\telif l2[ind2][1] == l1[ind1][0]:\t# find a larger-step one\n\t\t\t\tl3.append((l2[ind2][0], l1[ind1][1]))\n\t\t\t\tind1 += 1\n\t\t\t\tind2 += 1\n\t\t\telif l2[ind2][1] > l1[ind1][0]: # skip the head of l1\n\t\t\t\tind1 += 1\n\t\t\t# break when out of l2\n\t\t\tif ind2 >= len(l2):\n\t\t\t\tbreak\n\t\t# the post-process\n\t\tl = l3\n\t\tstep *= 2\n\t\tlg.sort(key=(lambda i: i[1]))\n\t# final sort\n\tlg.sort(key=(lambda i: i[0]))\n\treturn [i[1] for i in lg]\n\nif __name__ == \"__main__\":\n\tfor i in range(2):\n\t\td, l = get_data()\n\t\tr = sort_data(l)\n\t\tprint(d==r)\n","sub_path":"art/5.0.24.py","file_name":"5.0.24.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"181811635","text":"import config as cf\ndef convert_to_txt(filename: str):\n import csv\n num = filename[-6:-4]\n csv_file = filename\n txt_file = cf.base_dir+'/converted_data/full_prepared{0}.txt'.format(num)\n with open(txt_file, \"w\") as my_output_file:\n with open(csv_file, \"r\") as my_input_file:\n [my_output_file.write(\" \".join(row)+'\\n') for row in csv.reader(my_input_file)]\n my_output_file.close()\n print(txt_file)\n return txt_file # 'E:/exp/converted_data/full_prepared{0}.txt'.format(num)\n","sub_path":"DataPreparation_real/to_txt.py","file_name":"to_txt.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"131267182","text":"import os\nfrom collections import defaultdict\nfrom html import unescape\nfrom typing import Optional, Iterable, Union, Any, List\n\nfrom junitparser import Element, JUnitXml, TestCase, TestSuite\n\nfrom publish.unittestresults import ParsedUnitTestResults, UnitTestCase, ParseError\n\n\ndef get_results(results: Union[Element, List[Element]]) -> List[Element]:\n \"\"\"\n Returns the results with the most severe state.\n For 
example: If there are failures and succeeded tests, returns only the failures.\n \"\"\"\n if isinstance(results, List):\n d = defaultdict(list)\n for result in results:\n if result:\n d[get_result(result)].append(result)\n\n for state in ['error', 'failure', 'success', 'skipped']:\n if state in d:\n return d[state]\n return []\n\n return [results]\n\n\ndef get_result(results: Union[Element, List[Element]]) -> str:\n \"\"\"\n Returns the result of the given results.\n All results are expected to be of the same state.\n :param results:\n :return:\n \"\"\"\n if isinstance(results, List):\n return get_result(results[0]) if results else 'success'\n return results._tag if results else 'success'\n\n\ndef get_message(results: Union[Element, List[Element]]) -> str:\n \"\"\"\n Returns an aggregated message from all given results.\n :param results:\n :return:\n \"\"\"\n if isinstance(results, List):\n messages = [result.message\n for result in results\n if result and result.message]\n message = '\\n'.join(messages) if messages else None\n else:\n message = results.message if results else None\n return unescape(message) if message is not None else None\n\n\ndef get_content(results: Union[Element, List[Element]]) -> str:\n \"\"\"\n Returns an aggregated content form all given results.\n :param results:\n :return:\n \"\"\"\n if isinstance(results, List):\n contents = [result._elem.text\n for result in results\n if result is not None and result._elem is not None and result._elem.text is not None]\n content = '\\n'.join(contents) if contents else None\n else:\n content = results._elem.text \\\n if results and results._elem and results._elem.text is not None else None\n return unescape(content) if content is not None else None\n\n\ndef parse_junit_xml_files(files: Iterable[str]) -> ParsedUnitTestResults:\n \"\"\"Parses junit xml files and returns aggregated statistics as a ParsedUnitTestResults.\"\"\"\n def parse(path: str) -> Union[str, Any]:\n if not os.path.exists(path):\n return FileNotFoundError(f'File does not exist.')\n if os.stat(path).st_size == 0:\n return Exception(f'File is empty.')\n\n try:\n return JUnitXml.fromfile(path)\n except BaseException as e:\n return e\n\n parsed_files = [(result_file, parse(result_file))\n for result_file in files]\n junits = [(result_file, junit)\n for result_file, junit in parsed_files\n if not isinstance(junit, BaseException)]\n errors = [ParseError.from_exception(result_file, exception)\n for result_file, exception in parsed_files\n if isinstance(exception, BaseException)]\n\n suites = [(result_file, suite)\n for result_file, junit in junits\n for suite in (junit if junit._tag == \"testsuites\" else [junit])]\n suite_tests = sum([suite.tests for result_file, suite in suites])\n suite_skipped = sum([suite.skipped for result_file, suite in suites])\n suite_failures = sum([suite.failures for result_file, suite in suites])\n suite_errors = sum([suite.errors for result_file, suite in suites])\n suite_time = int(sum([suite.time for result_file, suite in suites]))\n\n def int_opt(string: Optional[str]) -> Optional[int]:\n try:\n return int(string) if string else None\n except ValueError:\n return None\n\n def get_cases(suite: TestSuite) -> List[TestCase]:\n \"\"\"\n JUnit seems to allow for testsuite tags inside testsuite tags, potentially at any depth.\n https://llg.cubic.org/docs/junit/\n\n This skips all inner testsuite tags and returns a list of all contained testcase tags.\n \"\"\"\n suites = list(suite.iterchildren(TestSuite))\n cases = 
list(suite.iterchildren(TestCase))\n return [case\n for suite in suites\n for case in get_cases(suite)] + cases\n\n cases = [\n UnitTestCase(\n result_file=result_file,\n test_file=case._elem.get('file'),\n line=int_opt(case._elem.get('line')),\n class_name=case.classname,\n test_name=case.name,\n result=get_result(results),\n message=get_message(results),\n content=get_content(results),\n time=case.time\n )\n for result_file, suite in suites\n for case in get_cases(suite)\n if case.classname is not None or case.name is not None\n for results in [get_results(case.result)]\n ]\n\n return ParsedUnitTestResults(\n files=len(parsed_files),\n errors=errors,\n # test state counts from suites\n suites=len(suites),\n suite_tests=suite_tests,\n suite_skipped=suite_skipped,\n suite_failures=suite_failures,\n suite_errors=suite_errors,\n suite_time=suite_time,\n # test cases\n cases=cases\n )\n","sub_path":"python/publish/junit.py","file_name":"junit.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"144318094","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Import dataset and store it in two separate vectors\ndataset = pd.read_csv('./B-Regressions/PolinomialLinearRegression/data/Position_Salaries.csv')\nx = dataset.iloc[:, :-1].values #Matrix of features\ny = dataset.iloc[:, 4].values #Dependent variables set\n\n#Encoding non numerical data into numbers\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nle_x = LabelEncoder()#Associates country names to numerical labels\nx[:, 3] = le_x.fit_transform(x[:, 3])#Applies the transformation\n\nohe = OneHotEncoder(categorical_features=[3])#Dummy variables, prevents the algorithm to order the data as for ex France > Germany\nx = ohe.fit_transform(x).toarray()\n\n#Avoiding Dummy variable Trap\nx = x[:, 1:]#Removes the first column of x\n\n#Split the dataset into Training set and a Test set\nfrom sklearn.cross_validation import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)\n\n#Feature scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_x = StandardScaler()\nx_train = sc_x.fit_transform(x_train)\nx_test = sc_x.transform(x_test)\nprint(x_train)\nprint(x_test)\"\"\"\n\n#Fitting Multiple Linear Regression to the training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train, y_train)\n\n#Predicting the Test set results\ny_pred = regressor.predict(x_test)\n\n#Building the optimal model using backward elimintation\nimport statsmodels.formula.api as sm\n\n#Add columns of 1s to simulate the constant b0 correspondent to the following notation y = b0 + b1x1 + bnxn\nx = np.append(arr = np.ones((50,1)).astype(int), values = x, axis = 1) #add a column of ones axis=1(adds a column)/axis=0(adds a row)\n\n#Starting backard elimination by hand\n#==========================================================================================================================\nx_opt = x[:, [0,1,2,3,4,5]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n\n#Removing index with highest p-value\nx_opt = x[:, [0,1,3,4,5]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = 
x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n\n#Removing index with highest p-value\nx_opt = x[:, [0,1,3,4,5]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n\n#Removing index with highest p-value\nx_opt = x[:, [0,3,4,5]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n\n#Removing index with highest p-value\nx_opt = x[:, [0,3,5]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n\n#Removing index with highest p-value\nx_opt = x[:, [0,3]] #Creating new matrix of optimized features\nregressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\nprint(regressor_OLS.summary())#Evaluates the multiples linear regression model in therms of p value and other parameters\n#==========================================================================================================================\n","sub_path":"B-Regressions/MultipleLinearRegression/MultipleLinearRegressionTemplate.py","file_name":"MultipleLinearRegressionTemplate.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"284950986","text":"\"\"\"\nDRS De-Registration Comments Model package.\nCopyright (c) 2018 Qualcomm Technologies, Inc.\n All rights reserved.\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the\n limitations in the disclaimer below) provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided with the distribution.\n * Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior written permission.\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY\n THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom app import db\nfrom sqlalchemy import desc\n\n\nclass DeRegComments(db.Model):\n \"\"\"Database model for deregcomments table.\"\"\"\n __tablename__ = 'deregcomments'\n\n id = db.Column(db.Integer, primary_key=True)\n step = db.Column(db.String(20), nullable=False)\n comment = db.Column(db.Text, nullable=False)\n user_id = db.Column(db.String(64), nullable=False)\n user_name = db.Column(db.String(64), nullable=False)\n added_at = db.Column(db.DateTime, server_default=db.func.now())\n\n status = db.Column(db.Integer, db.ForeignKey('status.id'))\n de_reg_details_id = db.Column(db.Integer, db.ForeignKey('deregdetails.id', ondelete='CASCADE'))\n\n def __init__(self, section, comment, user_id, user_name, status, request_id):\n \"\"\"Constructor\"\"\"\n self.step = section\n self.comment = comment\n self.user_id = user_id\n self.user_name = user_name\n self.status = status\n self.de_reg_details_id = request_id\n\n @classmethod\n def add(cls, section, comment, user_id, user_name, status, request_id):\n \"\"\"Method to add new comment.\"\"\"\n comment = cls(section, comment, user_id, user_name, status, request_id)\n try:\n db.session.add(comment)\n except Exception:\n db.session.rollback()\n raise Exception\n\n @staticmethod\n def get_all_by_regid(request_id):\n \"\"\"Method to get all comments by request id.\"\"\"\n return DeRegComments.query.order_by(desc(DeRegComments.added_at))\\\n .filter_by(de_reg_details_id=request_id).all()\n\n @staticmethod\n def get_all_by_section_type(request_id, section_type):\n \"\"\"Method to get section data by section type.\"\"\"\n return DeRegComments.query.order_by(desc(DeRegComments.added_at))\\\n .filter_by(de_reg_details_id=request_id).filter_by(step=section_type).all()\n","sub_path":"app/api/v1/models/deregcomments.py","file_name":"deregcomments.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496662087","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\n\n\nclass TestStaticStorage(object):\n def __init__(self, *args, **kwargs):\n self.__dict__['args_'] = args\n self.__dict__['kwargs_'] = kwargs\n self.__dict__['storages_'] = {}\n\n def reset_storage(self):\n cur_key = settings.STATICFILES_STORAGE_TEST\n self.__dict__['storages_'].pop(cur_key)\n\n def __getattr__(self, key):\n cur_key = settings.STATICFILES_STORAGE_TEST\n storage = self.__dict__['storages_'].get(cur_key)\n if storage is None:\n Storage = import_string(cur_key)\n storage = Storage(*self.__dict__['args_'], **self.__dict__['kwargs_'])\n self.__dict__['storages_'][cur_key] = storage\n return getattr(storage, 
key)\n","sub_path":"tests/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"247762727","text":"from scrape_linkedin import scrape_in_parallel, Scraper\nfrom scrape_linkedin.utils import HEADLESS_OPTIONS\nimport time\nimport json\nimport sys\nimport os\n\n\ndef parallel():\n users = ['austinoboyle', 'seancahalan', 'alexandre-granzer-guay-8b8378b2',\n 'nicole-odenwald-45989594', 'swishgoswami', 'tang-david',\n 'y-hung-tam-3943a636', 'delaramayazdi'\n ]\n with open('hrefs.json', 'r') as user_data:\n users = json.load(user_data)[0:8]\n data = {}\n scraper = Scraper(driver_options=HEADLESS_OPTIONS)\n for user in users:\n data[user] = scraper.scrape(user=user).to_dict()\n with open('../out2.json', 'w') as out:\n json.dump(data, out)\n\n\nif __name__ == '__main__':\n test_parallel()\n","sub_path":"test/Parallel.py","file_name":"Parallel.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"244900871","text":"from person.models import Person, RequestEntry\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom person.forms import UpdatePerson\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseBadRequest\nimport json\n\n\ndef index(request):\n person = get_object_or_404(Person)\n return render(request, 'index.htm', {'person': person})\n\n\ndef requests(request, sort=\"date_desc\"):\n if sort == \"date_desc\":\n last_requests = RequestEntry.objects.all().order_by('-date')[:10]\n elif sort == \"date_asc\":\n last_requests = RequestEntry.objects.all().order_by('date')[:10]\n elif sort == \"prior_asc\":\n last_requests = RequestEntry.objects.all().order_by('priority')[:10]\n elif sort == \"prior_desc\":\n last_requests = RequestEntry.objects.all().order_by('-priority')[:10]\n return render(request, 'requests.htm', {'last_requests': last_requests})\n\n\n@login_required\ndef edit(request, pk=\"1\"):\n \"\"\"\n Edit person data\n \"\"\"\n context = {}\n person = get_object_or_404(Person, pk=pk)\n if request.method == 'POST':\n form = UpdatePerson(request.POST, request.FILES, instance=person)\n if form.is_valid():\n form.save()\n if request.is_ajax():\n return HttpResponse('OK')\n else:\n return redirect(reverse('index'))\n else:\n if request.is_ajax():\n errors_dict = {}\n if form.errors:\n for error in form.errors:\n e = form.errors[error]\n errors_dict[error] = unicode(e)\n return HttpResponseBadRequest(json.dumps(errors_dict))\n else:\n return redirect('edit')\n\n else:\n form = UpdatePerson(instance=person)\n\n context['form'] = form\n context['person'] = person\n return render(request, 'edit.htm', context)\n","sub_path":"django_hello_world/person/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"344574975","text":"import os, xmltodict, json, datetime, urllib\ntry:\n\tfrom urllib.request import urlretrieve\nexcept ImportError:\n\tfrom urllib import urlretrieve\nfrom os import walk, path\nfrom flask import render_template, request\nfrom flask import current_app as app\nfrom models.Entities import Movie, People, Genre, Country\nfrom models.DBManager import DBFactory\nfrom services.allocine import Allocine\nfrom services.mapper import 
movieMapper\n\ndef people_list():\n\tentries = People.query.order_by(People.name).all()\n\treturn render_template(\"people.html\", title=\"My People list\", current='home', entries=entries)\n\ndef movie_list():\n\tentries = Movie.query.order_by(Movie.title).all()\n\treturn render_template(\"index.html\", title=\"My Movie list\", current='home', entries=entries)\n\ndef edit_movie(id=None):\n\tmovie = None\n\tif (id is not None):\n\t\tmovie = Movie.query.get(id)\n\t\tposter = '/static/thumbs/'+id+'.png'\n\tif (movie is None):\n\t\tmovie = Movie()\n\t\tposter = '/static/img/default.png'\n\treturn render_template(\"edit-movie.html\", title=\"Edit Movie\", movie=movie, poster=poster, countries=Country.query.all(), current='unregistered')\n\ndef post_movie(id=None):\n\ttitle = request.form[\"title\"]\n\tlink = request.form[\"link\"]\n\tposter = request.form[\"poster\"]\n\tactors = request.form[\"actors\"].split(',')\n\tdirectors = request.form[\"directors\"].split(',')\n\tgenres = request.form[\"genres\"].split(',')\n\tcountries = request.form.getlist(\"countries\")\n\treleaseDate = datetime.datetime.strptime(request.form[\"date\"], '%Y-%m-%d')\n\truntime = int(request.form[\"runtime\"])\n\tsynopsis = request.form[\"synopsis\"]\n\tfilePath = request.form[\"filePath\"]\n\tcontentType = request.form[\"contentType\"]\n\tpressRating = float(request.form[\"pressRating\"])\n\tpublicRating = float(request.form[\"publicRating\"])\n\tmovie = None\n\tif (id is not None):\n\t\tmovie = Movie.query.get(id)\n\tif (movie is None):\n\t\tmovie = Movie()\n\tmovie.title = title\n\tmovie.link = link\n\tmovie.runtime = runtime\n\tmovie.release_date = releaseDate\n\tmovie.synopsis = synopsis\n\tmovie.press_rating = pressRating\n\tmovie.public_rating = publicRating\n\tmovie.file_path = filePath\n\tmovie.content_type = contentType\n\tfor a in actors:\n\t\tf, n = a.strip().rsplit(' ', 1)\n\t\tp = People(f, n)\n\t\to = p.find()\n\t\tif (o is None):\n\t\t\tmovie.actors.append(p)\n\t\t\tDBFactory.getSession().add(p)\n\t\telse:\n\t\t\tmovie.actors.append(o)\n\tfor d in directors:\n\t\tf, n = d.strip().rsplit(' ', 1)\n\t\tp = People(f, n)\n\t\to = p.find()\n\t\tif (o is None):\n\t\t\tmovie.directors.append(p)\n\t\t\tDBFactory.getSession().add(p)\n\t\telse:\n\t\t\tmovie.directors.append(o)\n\tfor g in genres:\n\t\tp = Genre(g.strip())\n\t\to = p.find()\n\t\tif (o is None):\n\t\t\tmovie.genres.append(p)\n\t\t\tDBFactory.getSession().add(p)\n\t\telse:\n\t\t\tmovie.genres.append(o)\n\tfor c in countries:\n\t\tp = Country(c.strip())\n\t\to = p.find()\n\t\tif (o is None):\n\t\t\tmovie.countries.append(p)\n\t\t\tDBFactory.getSession().add(p)\n\t\telse:\n\t\t\tmovie.countries.append(o)\n\tDBFactory.getSession().add(movie)\n\ttry:\n\t\tDBFactory.getSession().commit()\n\t\tif (poster != ''):\n\t\t\turlretrieve(poster, './static/thumbs/'+str(movie.id)+'.png')\n\texcept:\n\t\tDBFactory.getSession().rollback()\n\t\traise\n\treturn movie\n\ndef file_list(filespath, page=None):\n\t#TODO: recursively add subfolders\n\tpage_size = 10\n\tif (page is None or int(page) < 1):\n\t\tpage = 1\n\tstart = ( int(page) - 1 ) * page_size\n\tfiles = []\n\ti = 0\n\tmax = 0\n\tfor (dirpath, dirnames, filenames) in walk(filespath):\n\t\tfor filename in sorted(filenames):\n\t\t\tif (i > int(start) and i <= int(start+page_size)):\n\t\t\t\tfiles.append((filename, path.splitext(filename)[0]))\n\t\t\ti = i + 1\n\t\t\tmax = max + 1\n\t\tbreak\n\tmax_pages = (max + page_size // 2) // page_size + 1\n\treturn render_template(\"files.html\", title=\"My File list\", 
current='unregistered', entries=files, page=int(page), max_pages=max_pages)\n\ndef retrieve_list(query, filename):\n\ta = Allocine(app.config['infodb']['allocine']['partner_key'], app.config['infodb']['allocine']['secret_key'])\n\tentries = a.search(query)\n\treturn render_template(\"retrieves.html\", title=\"My Retrieve list\", current='unregistered', entries=entries, filename=filename)\n\ndef retrieve_info(query, filename):\n\ta = Allocine(app.config['infodb']['allocine']['partner_key'], app.config['infodb']['allocine']['secret_key'])\n\taMovie = a.movie(query)\n\tmovie = Movie()\n\tmovieMapper(aMovie, movie)\n\tposter = ''\n\tif 'poster' in aMovie:\n\t\tif 'href' in aMovie['poster']:\n\t\t\tposter = aMovie['poster']['href']\n\tmovie.file_path = filename\n\text = os.path.splitext(filename)[1][1:]\n\tif (ext in app.config['mapping']['extensions']):\n\t\tmovie.content_type = app.config['mapping']['extensions'][ext]\n\treturn render_template(\"edit-movie.html\", title=\"My Movie info\", current='unregistered', movie=movie, new_poster=poster, poster=poster, countries=Country.query.all())\n\ndef delete_file(filename):\n\treturn os.remove(filename)\n\ndef delete_movie(movie_id):\n\tmovie = Movie.query.get(movie_id)\n\tDBFactory.getSession().delete(movie)\n\ttry:\n\t\treturn DBFactory.getSession().commit()\n\texcept:\n\t\tDBFactory.getSession().rollback()\n\t\traise","sub_path":"controllers/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"475384985","text":"from constants import StructureTypes\nfrom file_handler import FileHandler\nfrom annotations_directory_item import AnnotationsDirectoryItem\nfrom class_data import ClassData\nfrom utilities import decode_uleb128, put_encoded_array\nfrom utilities import encode_uleb128\nfrom utilities import get_type_list\nfrom utilities import put_type_list\nfrom utilities import get_encoded_array\n\n\nclass DexParser:\n # TODO Try another .dex file\n\n def __init__(self):\n\n self.header = {\n 'magic': None,\n 'checksum': None,\n 'signature': None,\n 'file_size': None,\n 'header_size': None,\n 'endian_tag': None,\n 'link_size': None,\n 'link_off': None,\n 'map_off': None,\n 'string_size': None,\n 'string_off': None,\n 'type_size': None,\n 'type_off': None,\n 'proto_size': None,\n 'proto_off': None,\n 'field_size': None,\n 'field_off': None,\n 'method_size': None,\n 'method_off': None,\n 'class_defs_size': None,\n 'class_defs_off': None,\n 'data_size': None,\n 'data_off': None\n }\n\n self.map = {\n 'size': None,\n 'list': []\n }\n\n self.sections = {\n 'string_id_list': None,\n 'type_id_list': None,\n 'proto_id_list': None,\n 'field_id_list': None,\n 'method_id_list': None,\n 'class_def_items': None\n }\n\n def parse(self, file_in_name):\n FileHandler.file_in = open(file_in_name, 'rb')\n\n self.get_header_info()\n self.get_map_info()\n self.get_sections()\n\n FileHandler.file_in.close()\n\n def save(self, file_out_name):\n FileHandler.file_out = open(file_out_name, 'wb')\n\n self.put_header_info()\n self.put_map_info()\n self.put_sections()\n\n FileHandler.file_out.close()\n\n def get_header_info(self):\n self.header['magic'] = FileHandler.read_bytes(0, 8)\n self.header['checksum'] = FileHandler.read_bytes(8, 4, 'I')\n self.header['signature'] = FileHandler.read_bytes(12, 20)\n self.header['file_size'] = FileHandler.read_bytes(32, 4, 'I')\n self.header['header_size'] = FileHandler.read_bytes(36, 4, 'I')\n 
self.header['endian_tag'] = FileHandler.read_bytes(40, 4)\n\n # Getting sections offset\n self.header['link_size'] = FileHandler.read_bytes(44, 4, 'I')\n self.header['link_off'] = FileHandler.read_bytes(48, 4, 'I')\n self.header['map_off'] = FileHandler.read_bytes(52, 4, 'I')\n self.header['string_size'] = FileHandler.read_bytes(56, 4, 'I')\n self.header['string_off'] = FileHandler.read_bytes(60, 4, 'I')\n self.header['type_size'] = FileHandler.read_bytes(64, 4, 'I')\n self.header['type_off'] = FileHandler.read_bytes(68, 4, 'I')\n self.header['proto_size'] = FileHandler.read_bytes(72, 4, 'I')\n self.header['proto_off'] = FileHandler.read_bytes(76, 4, 'I')\n self.header['field_size'] = FileHandler.read_bytes(80, 4, 'I')\n self.header['field_off'] = FileHandler.read_bytes(84, 4, 'I')\n self.header['method_size'] = FileHandler.read_bytes(88, 4, 'I')\n self.header['method_off'] = FileHandler.read_bytes(92, 4, 'I')\n self.header['class_defs_size'] = FileHandler.read_bytes(96, 4, 'I')\n self.header['class_defs_off'] = FileHandler.read_bytes(100, 4, 'I')\n self.header['data_size'] = FileHandler.read_bytes(104, 4, 'I')\n self.header['data_off'] = FileHandler.read_bytes(108, 4, 'I')\n\n def put_header_info(self):\n FileHandler.write_bytes(self.header['magic'], 0)\n FileHandler.write_bytes(self.header['checksum'], 8, 'I')\n FileHandler.write_bytes(self.header['signature'], 12)\n FileHandler.write_bytes(self.header['file_size'], 32, 'I')\n FileHandler.write_bytes(self.header['header_size'], 36, 'I')\n FileHandler.write_bytes(self.header['endian_tag'], 40)\n\n # Putting sections offset\n FileHandler.write_bytes(self.header['link_size'], 44, 'I')\n FileHandler.write_bytes(self.header['link_off'], 48, 'I')\n FileHandler.write_bytes(self.header['map_off'], 52, 'I')\n FileHandler.write_bytes(self.header['string_size'], 56, 'I')\n FileHandler.write_bytes(self.header['string_off'], 60, 'I')\n FileHandler.write_bytes(self.header['type_size'], 64, 'I')\n FileHandler.write_bytes(self.header['type_off'], 68, 'I')\n FileHandler.write_bytes(self.header['proto_size'], 72, 'I')\n FileHandler.write_bytes(self.header['proto_off'], 76, 'I')\n FileHandler.write_bytes(self.header['field_size'], 80, 'I')\n FileHandler.write_bytes(self.header['field_off'], 84, 'I')\n FileHandler.write_bytes(self.header['method_size'], 88, 'I')\n FileHandler.write_bytes(self.header['method_off'], 92, 'I')\n FileHandler.write_bytes(self.header['class_defs_size'], 96, 'I')\n FileHandler.write_bytes(self.header['class_defs_off'], 100, 'I')\n FileHandler.write_bytes(self.header['data_size'], 104, 'I')\n FileHandler.write_bytes(self.header['data_off'], 108, 'I')\n\n def get_map_info(self):\n map_offset = self.header['map_off']\n map_size = FileHandler.read_bytes(self.header['map_off'], 4, 'I')\n map_offset += 4\n\n # Every record is 12 bytes and + 4 bytes for record count\n self.map['size'] = map_size\n\n for i in range(0, map_size):\n self.map['list'].append({\n 'type': StructureTypes(FileHandler.read_bytes(map_offset + i * 12, 2, 'H')),\n 'unused': FileHandler.read_bytes(map_offset + i * 12 + 2, 2, 'H'),\n 'size': FileHandler.read_bytes(map_offset + i * 12 + 4, 4, 'I'),\n 'offset': FileHandler.read_bytes(map_offset + i * 12 + 8, 4, 'I')\n })\n\n def put_map_info(self):\n map_offset = self.header['map_off']\n map_size = self.map['size']\n\n FileHandler.write_bytes(map_size, map_offset, 'I')\n map_offset += 4\n\n for i in range(0, map_size):\n FileHandler.write_bytes(self.map['list'][i]['type'].value, map_offset + i * 12, 'H')\n 
FileHandler.write_bytes(self.map['list'][i]['unused'], map_offset + i * 12 + 2, 'H')\n FileHandler.write_bytes(self.map['list'][i]['size'], map_offset + i * 12 + 4, 'I')\n FileHandler.write_bytes(self.map['list'][i]['offset'], map_offset + i * 12 + 8, 'I')\n\n def get_sections(self):\n self.sections['string_id_list'] = self.get_string_id_list()\n self.sections['type_id_list'] = self.get_type_id_list()\n self.sections['proto_id_list'] = self.get_proto_id_list()\n self.sections['field_id_list'] = self.get_field_id_list()\n self.sections['method_id_list'] = self.get_method_id_list()\n self.sections['class_def_items'] = self.get_class_def_items()\n\n def put_sections(self):\n self.put_string_id_list()\n self.put_type_id_list()\n self.put_proto_id_list()\n self.put_field_id_list()\n self.put_method_id_list()\n self.put_class_def_items()\n\n def get_string_id_list(self):\n \"\"\"\n\n String_id_item = {\n string_data_off (uint) : offset to string in data section\n string_data_item: {\n string_size (uleb128): size of this string, in UTF-16 code units\n data (ubyte[]): a series of MUTF-8 code units followed by a byte of value 0\n }\n }\n\n \"\"\"\n string_id_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_STRING_ID_ITEM)\n\n for i in range(0, items_count):\n string_data_off = FileHandler.read_bytes(section_offset + i * 4, 4, 'I')\n\n utf16_size, extra_offset = decode_uleb128(FileHandler.read_bytes(string_data_off, 4, 'HEX'))\n\n data_offset = string_data_off + extra_offset\n data = b''\n byte = b'FF'\n while byte != b'\\x00':\n byte = FileHandler.read_bytes(data_offset, 1)\n data += byte\n data_offset += 1\n\n string_id_list.append({\n 'string_data_off': string_data_off,\n 'string_data_item': {\n 'utf16_size': utf16_size,\n 'data': data[:len(data) - 1]\n }\n })\n\n return string_id_list\n\n def put_string_id_list(self):\n string_size = self.header['string_size']\n string_off = self.header['string_off']\n\n for i in range(0, string_size):\n string_data_off = self.sections['string_id_list'][i]['string_data_off']\n FileHandler.write_bytes(string_data_off, string_off + i*4, 'I')\n\n utf16_size, value_size = encode_uleb128(self.sections['string_id_list'][i]\n ['string_data_item']['utf16_size'])\n FileHandler.write_bytes(utf16_size, string_data_off, 'HEX')\n\n data = self.sections['string_id_list'][i]['string_data_item']['data']\n FileHandler.write_bytes(data, string_data_off + value_size)\n\n pass\n\n def get_type_id_list(self):\n \"\"\"\n\n type_id_item {\n descriptor_idx (uint): index in string_id_list to string describing type\n }\n\n \"\"\"\n\n type_id_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_TYPE_ID_ITEM)\n\n for i in range(0, items_count):\n type_id_list.append({\n 'descriptor_idx': FileHandler.read_bytes(section_offset + i * 4, 4, 'I')\n })\n\n return type_id_list\n\n def put_type_id_list(self):\n type_off = self.header['type_off']\n type_size = self.header['type_size']\n\n for i in range(0, type_size):\n FileHandler.write_bytes(self.sections['type_id_list'][i]['descriptor_idx'], type_off, 'I')\n type_off += 4\n\n def get_proto_id_list(self):\n \"\"\"\n\n proto_id_item {\n shorty_idx (uint): index in string_id_list to string describing prototype\n return_type_idx (uint): index in type_id_list to return type\n parameters_off (uint): offset to parameters structure in data section\n parameters (type_list): list of parameter types for this prototype (if parameters_off != 0, otherwise 0)\n 
}\n\n \"\"\"\n\n proto_id_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_PROTO_ID_ITEM)\n\n for i in range(0, items_count):\n parameters_off = FileHandler.read_bytes(section_offset + i * 12 + 8, 4, 'I')\n proto_id_list.append({\n 'shorty_idx': FileHandler.read_bytes(section_offset + i * 12, 4, 'I'),\n 'return_type_idx': FileHandler.read_bytes(section_offset + i * 12 + 4, 4, 'I'),\n 'parameters_off': parameters_off\n })\n\n parameters = get_type_list(parameters_off)\n\n proto_id_list[-1]['parameters'] = parameters\n\n return proto_id_list\n\n def put_proto_id_list(self):\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_PROTO_ID_ITEM)\n\n for i in range(0, items_count):\n parameters_off = self.sections['proto_id_list'][i]['parameters_off']\n FileHandler.write_bytes(self.sections['proto_id_list'][i]['shorty_idx'], section_offset + i*12, 'I')\n FileHandler.write_bytes(self.sections['proto_id_list'][i]['return_type_idx'], section_offset + i*12 + 4, 'I')\n FileHandler.write_bytes(parameters_off, section_offset + i*12 + 8, 'I')\n\n put_type_list(self.sections['proto_id_list'][i]['parameters'], parameters_off)\n\n def get_field_id_list(self):\n \"\"\"\n\n field_id_item {\n class_idx (ushort): index in type_id_list to field's class type\n type_idx (ushort): index in type_id_list to field type\n name_idx (uint): index in string_id_list to string describing field name\n }\n\n \"\"\"\n\n field_id_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_FIELD_ID_ITEM)\n\n for i in range(items_count):\n field_id_list.append({\n 'class_idx': FileHandler.read_bytes(section_offset + i * 8, 2, 'H'),\n 'type_idx': FileHandler.read_bytes(section_offset + i * 8 + 2, 2, 'H'),\n 'name_idx': FileHandler.read_bytes(section_offset + i * 8 + 4, 4, 'I')\n })\n\n return field_id_list\n\n def put_field_id_list(self):\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_FIELD_ID_ITEM)\n\n for i in range(items_count):\n FileHandler.write_bytes(self.sections['field_id_list'][i]['class_idx'], section_offset + i * 8, 'H')\n FileHandler.write_bytes(self.sections['field_id_list'][i]['type_idx'], section_offset + i * 8 + 2, 'H')\n FileHandler.write_bytes(self.sections['field_id_list'][i]['name_idx'], section_offset + i * 8 + 4, 'I')\n\n def get_method_id_list(self):\n \"\"\"\n\n method_id_item {\n class_idx (ushort): index in type_id_list to method class type\n type_idx (ushort): index in proto_id_list to method prototype\n name_idx (uint): index in string_id_list to string describing method name\n }\n\n \"\"\"\n\n method_id_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_METHOD_ID_ITEM)\n\n for i in range(items_count):\n method_id_list.append({\n 'class_idx': FileHandler.read_bytes(section_offset + i * 8, 2, 'H'),\n 'proto_idx': FileHandler.read_bytes(section_offset + i * 8 + 2, 2, 'H'),\n 'name_idx': FileHandler.read_bytes(section_offset + i * 8 + 4, 4, 'I')\n })\n\n return method_id_list\n\n def put_method_id_list(self):\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_METHOD_ID_ITEM)\n\n for i in range(items_count):\n FileHandler.write_bytes(self.sections['method_id_list'][i]['class_idx'], section_offset + i * 8, 'H')\n FileHandler.write_bytes(self.sections['method_id_list'][i]['proto_idx'], section_offset + i * 8 + 2, 'H')\n 
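# ---------------------------------------------------------------------------
# Illustrative sketch (assumed helper, not part of the original parser): the
# string_data_item handling above relies on ULEB128, the DEX format's
# variable-length unsigned integer encoding.  A minimal standalone
# decoder/encoder over raw bytes could look like this; the module's actual
# decode_uleb128/encode_uleb128 helpers may use a different signature and
# return convention.
def uleb128_decode(data):
    """Return (value, bytes_consumed) for a ULEB128-encoded unsigned int."""
    value = 0
    shift = 0
    for i, byte in enumerate(data):
        value |= (byte & 0x7F) << shift   # low 7 bits carry payload
        if byte & 0x80 == 0:              # high bit clear -> last byte
            return value, i + 1
        shift += 7
    raise ValueError("truncated ULEB128 value")


def uleb128_encode(value):
    """Encode a non-negative int as ULEB128 bytes."""
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)       # more bytes follow
        else:
            out.append(byte)
            return bytes(out)


# Round-trip check: 128 needs two bytes (0x80, 0x01).
assert uleb128_encode(128) == b"\x80\x01"
assert uleb128_decode(b"\x80\x01") == (128, 2)
# ---------------------------------------------------------------------------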
FileHandler.write_bytes(self.sections['method_id_list'][i]['name_idx'], section_offset + i * 8 + 4, 'I')\n\n def get_class_def_items(self):\n \"\"\"\n\n class_def_item {\n class_idx (uint): index in the type_id_list to class type\n access_flags (uint): access flags for the class (public, final, ...)\n superclass_index (uint): index into the type_id_list for the superclass type\n interfaces_off (uint): offset to the list of interfaces, implemented by this class\n source_file_idx (uint): index into string_id_list for the name of source file containig this class\n annotations_off (uint): offset to the annotation_directory_item for this class\n class_data_offset (uint): offset to the class_data_item for this class\n static_values_off (uint): offset to the list of initial values for static fields\n\n interfaces (type_list): list of interfaces in type_list format; 0 if interfaces_off = 0\n annotations (anotations_directory_item): directly annotations directory for this class;\n 0 if annotations_off = 0\n class_data (class_data_item): class data for this class\n static_values (encoded_array): list of initial values for static fields\n }\n\n \"\"\"\n\n class_def_list = []\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_CLASS_DEF_ITEM)\n\n for i in range(items_count):\n interfaces_off = FileHandler.read_bytes(section_offset + i * 32 + 12, 4, 'I')\n annotation_off = FileHandler.read_bytes(section_offset + i * 32 + 20, 4, 'I')\n class_data_off = FileHandler.read_bytes(section_offset + i * 32 + 24, 4, 'I')\n static_values_off = FileHandler.read_bytes(section_offset + i * 32 + 28, 4, 'I')\n\n class_def_list.append({\n 'class_idx': FileHandler.read_bytes(section_offset + i * 32, 4, 'I'),\n 'access_flags': FileHandler.read_bytes(section_offset + i * 32 + 4, 4, 'I'),\n 'superclass_idx': FileHandler.read_bytes(section_offset + i * 32 + 8, 4, 'I'),\n 'interfaces_off': interfaces_off,\n 'source_file_idx': FileHandler.read_bytes(section_offset + i * 32 + 16, 4, 'I'),\n 'annotation_off': annotation_off,\n 'class_data_off': class_data_off,\n 'static_values_off': static_values_off,\n })\n\n class_def_list[-1]['interfaces'] = get_type_list(interfaces_off)\n\n class_def_list[-1]['annotations'] = AnnotationsDirectoryItem(annotation_off)\n\n class_def_list[-1]['class_data'] = ClassData(class_data_off)\n\n class_def_list[-1]['static_values'] = get_encoded_array(static_values_off)[0]\n\n return class_def_list\n\n def put_class_def_items(self):\n\n items_count, section_offset = self.get_section_parameters_from_map(StructureTypes.TYPE_CLASS_DEF_ITEM)\n\n for i in range(items_count):\n FileHandler.write_bytes(self.sections['class_def_items'][i]['class_idx'],\n section_offset + i * 32, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['access_flags'],\n section_offset + i * 32 + 4, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['superclass_idx'],\n section_offset + i * 32 + 8, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['interfaces_off'],\n section_offset + i * 32 + 12, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['source_file_idx'],\n section_offset + i * 32 + 16, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['annotation_off'],\n section_offset + i * 32 + 20, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['class_data_off'],\n section_offset + i * 32 + 24, 'I')\n FileHandler.write_bytes(self.sections['class_def_items'][i]['static_values_off'],\n 
section_offset + i * 32 + 28, 'I')\n\n put_type_list(self.sections['class_def_items'][i]['interfaces'],\n self.sections['class_def_items'][i]['interfaces_off'])\n\n put_encoded_array(self.sections['class_def_items'][i]['static_values'],\n self.sections['class_def_items'][i]['static_values_off'])\n\n self.sections['class_def_items'][i]['annotations'].save(self.sections['class_def_items']\n [i]['annotation_off'])\n\n self.sections['class_def_items'][i]['class_data'].save(self.sections['class_def_items']\n [i]['class_data_off'])\n\n def get_section_parameters_from_map(self, section_type):\n for map_item in self.map['list']:\n if map_item['type'] == section_type:\n return map_item['size'], map_item['offset']\n","sub_path":"dex_parser.py","file_name":"dex_parser.py","file_ext":"py","file_size_in_byte":19965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"299502162","text":"\"\"\"\n{\n \"author\": \"Yucheng Huang\",\n \"difficulty\": \"medium\",\n \"link\": \"https://leetcode.com/problems/maximum-length-of-pair-chain/description/\",\n \"beats\": 0.0320,\n \"category\": [\"dynamic-programming\", \"greedy\"],\n \"tags\": [\"overlapping-intervals\"],\n \"questions\": []\n}\n\"\"\"\n\n\"\"\"\n思路:\n\t- 先对pairs排序\n\t- dp[i]表示pairs[:i+1]的最长pair chain的长度\n\"\"\"\n\nclass Solution:\n def findLongestChain(self, pairs):\n \"\"\"\n :type pairs: List[List[int]]\n :rtype: int\n \"\"\"\n if not pairs:\n return 0\n \n pairs.sort()\n dp = [1]*len(pairs)\n for i in range(1, len(pairs)):\n for j in range(i):\n if pairs[j][1] < pairs[i][0] and dp[i] < dp[j] + 1:\n dp[i] = dp[j]+1\n return dp[-1]\n","sub_path":"solutions/646.DP.py","file_name":"646.DP.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"89982198","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nimport heapq\n\n\nclass Heap:\n def __init__(self):\n self.sorted = []\n\n def push(self, item):\n heapq.heappush(self.sorted, item)\n\n def pop(self):\n return heapq.heappop(self.sorted)\n\n def __len__(self):\n return len(self.sorted)\n\n\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n sorted_nodes = Heap()\n ret = []\n\n for idx, list_node in enumerate(lists):\n if list_node is not None:\n sorted_nodes.push((list_node.val, idx))\n\n list_node = list_node.next\n lists[idx] = list_node\n\n while len(sorted_nodes):\n minimum_v, idx = sorted_nodes.pop()\n\n ret.append(minimum_v)\n\n list_node = lists[idx]\n if list_node is not None:\n sorted_nodes.push((list_node.val, idx))\n\n list_node = list_node.next\n lists[idx] = list_node\n\n return ret\n","sub_path":"solutions/merge_k_sorted_lists.py","file_name":"merge_k_sorted_lists.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315490469","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\n\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nimport os\nimport logging\n\nfrom flask import make_response, jsonify, request\nfrom flask_caching import Cache\n\nfrom crystal_toolkit import __version__ as ct_version\nfrom crystal_toolkit.core.mpcomponent import MPComponent\nfrom 
crystal_toolkit.helpers.layouts import *\nfrom crystal_toolkit.helpers.mprester import MPRester\nimport crystal_toolkit.components as ctc\n\nfrom pymatgen import Structure, Molecule\nfrom pymatgen.analysis.graphs import StructureGraph, MoleculeGraph\nfrom pymatgen import __version__ as pmg_version\n\nfrom json import loads\nfrom uuid import uuid4\nfrom urllib import parse\nfrom random import choice\nfrom ast import literal_eval\nfrom monty.serialization import loadfn\n\n# choose a default structure on load\nDEFAULT_MPIDS = loadfn(\"task_ids_on_load.json\")\n\n################################################################################\n# region SET UP APP\n################################################################################\n\nmeta_tags = [\n {\n \"name\": \"description\",\n \"content\": \"Crystal Toolkit allows you to import, view, analyze and transform \"\n \"crystal structures and molecules using the full power of the Materials \"\n \"Project.\",\n }\n]\n\nDEBUG_MODE = literal_eval(os.environ.get(\"CRYSTAL_TOOLKIT_DEBUG_MODE\", \"False\").title())\nMP_EMBED_MODE = literal_eval(\n os.environ.get(\"CRYSTAL_TOOLKIT_MP_EMBED_MODE\", \"False\").title()\n)\nENABLE_API = literal_eval(os.environ.get(\"CRYSTAL_TOOLKIT_ENABLE_API\", \"False\").title())\n\ncrystal_toolkit_app = dash.Dash(__name__, meta_tags=meta_tags)\ncrystal_toolkit_app.config[\"suppress_callback_exceptions\"] = True\ncrystal_toolkit_app.title = \"Crystal Toolkit\"\ncrystal_toolkit_app.scripts.config.serve_locally = True\n\nif not MP_EMBED_MODE:\n crystal_toolkit_app.config[\"assets_ignore\"] = r\".*\\.mpembed\\..*\"\n box_size = \"65vmin\"\nelse:\n # reduce zoom level and box size due to iframe on materialsproject.org\n ctc.StructureMoleculeComponent.default_scene_settings[\"defaultZoom\"] = 0.5\n box_size = \"50vmin\"\n\n\ncrystal_toolkit_app.server.secret_key = str(uuid4())\nserver = crystal_toolkit_app.server\n\n\n# endregion\n##########\n\n\n################################################################################\n# region SET UP CACHE\n################################################################################\n\nif os.environ.get(\"REDIS_URL\", \"\"):\n cache = Cache(\n crystal_toolkit_app.server,\n config={\n \"CACHE_TYPE\": \"redis\",\n \"CACHE_REDIS_URL\": os.environ.get(\"REDIS_URL\", \"\"),\n },\n )\nelif DEBUG_MODE:\n # disable cache in debug\n cache = Cache(crystal_toolkit_app.server, config={\"CACHE_TYPE\": \"null\"})\nelse:\n crystal_toolkit_app.logger.error(\n \"Failed to connect to Redis cache, falling back to file system cache.\"\n )\n cache = Cache(crystal_toolkit_app.server, config={\"CACHE_TYPE\": \"simple\"})\n\n# endregion\n\n\n################################################################################\n# region SET UP LOGGING\n################################################################################\n\nlogger = logging.getLogger(crystal_toolkit_app.title)\n\n# endregion\n\n\n################################################################################\n# region INSTANTIATE CORE COMPONENTS\n################################################################################\n\nctc.register_app(crystal_toolkit_app)\nctc.register_cache(cache)\n\nsupercell = ctc.SupercellTransformationComponent()\ngrain_boundary = ctc.GrainBoundaryTransformationComponent()\noxi_state = ctc.AutoOxiStateDecorationTransformationComponent()\nslab = ctc.SlabTransformationComponent()\nsubstitution = ctc.SubstitutionTransformationComponent()\n\ntransformation_component = 
ctc.AllTransformationsComponent(\n transformations=[supercell, slab, grain_boundary, oxi_state, substitution]\n)\n\nstruct_component = ctc.StructureMoleculeComponent()\nstruct_component.attach_from(transformation_component, origin_store_name=\"out\")\n\n# TODO: change to link to struct_or_mol ?\ndownload_component = ctc.DownloadPanelComponent(origin_component=struct_component)\n\nsearch_component = ctc.SearchComponent()\nupload_component = ctc.StructureMoleculeUploadComponent()\n\nrobocrys_component = ctc.RobocrysComponent(origin_component=struct_component)\nmagnetism_component = ctc.MagnetismComponent(origin_component=struct_component)\nxrd_component = ctc.XRayDiffractionPanelComponent(origin_component=struct_component)\npbx_component = ctc.PourbaixDiagramPanelComponent(origin_component=struct_component)\n\nsymmetry_component = ctc.SymmetryComponent(origin_component=struct_component)\nlocalenv_component = ctc.LocalEnvironmentPanel()\nlocalenv_component.attach_from(\n origin_component=struct_component, origin_store_name=\"graph\"\n)\n\nbonding_graph_component = ctc.BondingGraphComponent()\nbonding_graph_component.attach_from(struct_component, origin_store_name=\"graph\")\n# link bonding graph color scheme to parent color scheme\nbonding_graph_component.attach_from(\n struct_component,\n this_store_name=\"display_options\",\n origin_store_name=\"display_options\",\n)\n\n# favorites_component = ctc.FavoritesComponent()\n# favorites_component.attach_from(search_component, this_store_name=\"current-mpid\")\n\nif MP_EMBED_MODE:\n submit_snl_panel = ctc.SubmitSNLPanel(origin_component=struct_component)\n action_div = html.Div(\n [submit_snl_panel.panel_layout, download_component.panel_layout]\n )\nelse:\n action_div = html.Div([download_component.panel_layout])\n\npanels = [\n symmetry_component,\n bonding_graph_component,\n localenv_component,\n xrd_component,\n robocrys_component,\n]\n\nif MP_EMBED_MODE:\n mp_section = (html.Div(),)\nelse:\n\n bsdos_component = ctc.BandstructureAndDosPanelComponent(\n origin_component=search_component\n )\n # grain_boundary_panel = ctc.GrainBoundaryPanel(origin_component=search_component)\n xas_component = ctc.XASPanelComponent(origin_component=search_component)\n pd_component = ctc.PhaseDiagramPanelComponent(origin_component=struct_component)\n literature_component = ctc.LiteratureComponent(origin_component=struct_component)\n\n mp_panels = [\n pd_component,\n pbx_component,\n magnetism_component,\n xas_component,\n # bsdos_component,\n # grain_boundary_panel,\n literature_component,\n ]\n\n mp_section = (\n H3(\"Materials Project\"),\n html.Div([panel.panel_layout for panel in mp_panels], id=\"mp_panels\"),\n )\n\n\nbody_layout = [\n html.Br(),\n H3(\"Transform\"),\n html.Div([transformation_component.standard_layout]),\n html.Br(),\n H3(\"Analyze\"),\n html.Div([panel.panel_layout for panel in panels], id=\"panels\"),\n html.Br(),\n *mp_section,\n]\n\nSTRUCT_VIEWER_SOURCE = transformation_component.id()\n\n\nbanner = html.Div(id=\"banner\")\nif DEBUG_MODE:\n banner = html.Div(\n [\n html.Br(),\n MessageContainer(\n [\n MessageHeader(\"Warning\"),\n MessageBody(\n dcc.Markdown(\n \"This is a pre-release version of Crystal Toolkit and \"\n \"may not behave reliably. 
Please visit \"\n \"[https://viewer.materialsproject.org](https://viewer.materialsproject.org) \"\n \"for a stable version.\"\n )\n ),\n ],\n kind=\"warning\",\n ),\n ],\n id=\"banner\",\n )\n\napi_offline, api_error = True, \"Unknown error connecting to Materials Project API.\"\ntry:\n with MPRester() as mpr:\n api_check = mpr._make_request(\"/api_check\")\n if not api_check.get(\"api_key_valid\", False):\n api_error = (\n \"Materials Project API key not supplied or not valid, \"\n \"please set PMG_MAPI_KEY in your environment.\"\n )\n else:\n api_offline = False\nexcept Exception as exception:\n api_error = str(exception)\nif api_offline:\n banner = html.Div(\n [\n html.Br(),\n MessageContainer(\n [\n MessageHeader(\"Error: Cannot connect to Materials Project\"),\n MessageBody(api_error),\n ],\n kind=\"danger\",\n ),\n ],\n id=\"banner\",\n )\n\n\n# endregion\n\n\n################################################################################\n# region CREATE OTHER LAYOUT ELEMENTS\n################################################################################\n\n\nfooter = ctc.Footer(\n html.Div(\n [\n # html.Iframe(\n # src=\"https://ghbtns.com/github-btn.html?user=materialsproject&repo=crystaltoolkit&type=star&count=true\",\n # style={\n # \"frameborder\": False,\n # \"scrolling\": False,#\n # \"width\": \"72px\",\n # \"height\": \"20px\",\n # },\n # ),\n # html.Br(), Button([Icon(kind=\"cog\", fill=\"r\"), html.Span(\"Customize\")], kind=\"light\", size='small'),\n dcc.Markdown(\n f\"App created by [Crystal Toolkit Development Team](https://github.com/materialsproject/crystaltoolkit/graphs/contributors). \\n\"\n f\"Bug reports and feature requests gratefully accepted, please send them to [@mkhorton](mailto:mkhorton@lbl.gov). \\n\"\n f\"Powered by [The Materials Project](https://materialsproject.org), \"\n f\"[pymatgen v{pmg_version}](http://pymatgen.org) and \"\n f\"[Dash by Plotly](https://plot.ly/products/dash/). \"\n f\"Deployed on [Spin](http://www.nersc.gov/users/data-analytics/spin/).\"\n )\n ],\n className=\"content has-text-centered\",\n ),\n style={\"padding\": \"1rem 1rem 1rem\", \"background-color\": \"inherit\"},\n)\n\npanel_choices = dcc.Dropdown(\n options=[{\"label\": panel.title, \"value\": idx} for idx, panel in enumerate(panels)],\n multi=True,\n value=[idx for idx in range(len(panels))],\n id=\"panel-choices\",\n)\n\npanel_description = dcc.Markdown(\n [\n \"Crystal Toolkit offers various *panels* which each provide different ways \"\n \"of analyzing, transforming or retrieving information about a material using \"\n \"resources and tools available to The Materials Project. Some panels \"\n \"retrieve data or run algorithms on demand, so please allow some time \"\n \"for them to run. 
Explore these panels below.\"\n ],\n className=\"mpc-panel-description\",\n)\n\n\n# endregion\n\n\n################################################################################\n# region DEFINE MAIN LAYOUT\n################################################################################\n\nmaster_layout = Container(\n [\n dcc.Location(id=\"url\", refresh=False),\n MPComponent.all_app_stores(),\n # dcc.Store(storage_type=\"session\", id=\"session_store\"),\n banner,\n Section(\n [\n Columns(\n [\n Column(\n [\n struct_component.title_layout,\n html.Div(\n # [\n # html.A(\n # \"Documentation\",\n # href=\"https://docs.crystaltoolkit.org\",\n # )\n # ],\n # [favorites_component.button_layout],\n style={\"float\": \"right\"}\n ),\n ]\n )\n ]\n ),\n Columns(\n [\n Column(\n [\n # TODO: test responsiveness of layout on phone\n Box(\n struct_component.struct_layout,\n style={\n \"width\": box_size,\n \"height\": box_size,\n \"min-width\": \"300px\",\n \"min-height\": \"300px\",\n \"max-width\": \"600px\",\n \"max-height\": \"600px\",\n \"overflow\": \"hidden\",\n \"padding\": \"0.25rem\",\n \"margin-bottom\": \"0.5rem\",\n },\n ),\n html.Div(\n [\n html.Div(\n struct_component.legend_layout,\n style={\"float\": \"left\"},\n ),\n html.Div(\n [struct_component.screenshot_layout],\n style={\"float\": \"right\"},\n ),\n ],\n style={\n \"width\": box_size,\n \"min-width\": \"300px\",\n \"margin-bottom\": \"40px\",\n },\n ),\n ],\n narrow=True,\n ),\n Column(\n [\n Reveal(\n [\n search_component.standard_layout,\n upload_component.standard_layout,\n # favorites_component.favorite_materials_layout,\n ],\n title=\"Load Crystal or Molecule\",\n open=True,\n style={\"line-height\": \"1\"},\n id=\"load\",\n ),\n Reveal(\n [struct_component.options_layout],\n title=\"Display Options\",\n id=\"display-options\",\n ),\n action_div,\n # favorites_component.notes_layout,\n ],\n style={\"width\": box_size, \"max-width\": box_size},\n ),\n ],\n desktop_only=False,\n centered=False,\n ),\n Columns([Column(body_layout)]),\n ]\n ),\n Section(footer),\n ]\n)\n\ncrystal_toolkit_app.layout = master_layout\n\n\n# endregion\n\n\n################################################################################\n# region SET UP API ROUTES (to support creating viewer links in future)\n################################################################################\n\n\n@server.route(\"/version\", methods=[\"GET\"])\ndef get_version():\n return make_response(\n jsonify(\n {\n \"crystal_toolkit_version\": ct_version,\n \"crystal_toolkit_api_version\": 1,\n \"pymatgen_version\": pmg_version,\n }\n )\n )\n\n\ndef mson_to_token(mson, cache):\n\n # sanity check\n allowed_classes = [Structure, Molecule, StructureGraph, MoleculeGraph]\n allowed_mson = False\n if len(mson) > 1024 * 1024:\n # set a sensible size limit\n return {\"token\": None, \"error\": \"Request too large.\"}\n\n mson_dict = loads(mson)\n for cls in allowed_classes:\n if not allowed_mson:\n try:\n cls.from_dict(mson_dict)\n allowed_mson = True\n except:\n pass\n if not allowed_mson:\n return {\"token\": None, \"error\": \"Format not recognized.\"}\n\n token = str(uuid4())[0:6]\n # set to 1 week expiration by default\n cache.set(f\"crystal_toolkit_user_{token}\", mson, timeout=604_800)\n return {\"token\": token, \"error\": None}\n\n\ndef token_to_mson(token, cache):\n return cache.get(f\"crystal_toolkit_user_{token}\")\n\n\nif ENABLE_API:\n\n @server.route(\"/generate_token\", methods=[\"POST\"])\n def get_token():\n token = mson_to_token(request.json, cache)\n 
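# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original app): mson_to_token/token_to_mson
# implement a simple "serialized object -> short token -> cache" round trip.
# With a dict-backed stand-in for the Flask-Caching object (the real app uses
# Redis or a simple cache) and a toy payload in place of a pymatgen MSON
# string, the flow looks roughly like this; _DictCache and
# _token_round_trip_example are assumed names for the sketch only.
class _DictCache:
    """Minimal stand-in exposing the set/get subset of the cache used above."""

    def __init__(self):
        self._store = {}

    def set(self, key, value, timeout=None):  # timeout ignored in this sketch
        self._store[key] = value

    def get(self, key):
        return self._store.get(key)


def _token_round_trip_example():
    from json import dumps, loads
    from uuid import uuid4

    cache = _DictCache()
    # Toy payload standing in for the MSON string of a pymatgen object.
    mson = dumps({"@module": "pymatgen.core.structure", "@class": "Structure"})

    token = str(uuid4())[0:6]  # same short-token scheme as mson_to_token above
    cache.set(f"crystal_toolkit_user_{token}", mson, timeout=604_800)

    restored = loads(cache.get(f"crystal_toolkit_user_{token}"))
    assert restored["@class"] == "Structure"
    return token
# ---------------------------------------------------------------------------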
if token[\"error\"] is None:\n return make_response(jsonify(token), 200)\n else:\n return make_response(jsonify(token), 403)\n\n\n# endregion\n\n\n################################################################################\n# region SET UP CALLBACKS\n################################################################################\n\n\n@crystal_toolkit_app.callback(\n Output(search_component.id(\"input\"), \"value\"), [Input(\"url\", \"href\")]\n)\ndef update_search_term_on_page_load(href):\n if href is None:\n raise PreventUpdate\n pathname = str(parse.urlparse(href).path).split(\"/\")\n if len(pathname) <= 1:\n raise PreventUpdate\n elif not pathname[1]:\n return choice(DEFAULT_MPIDS)\n else:\n return pathname[1].replace(\"+\", \" \")\n\n\n@crystal_toolkit_app.callback(\n Output(search_component.id(\"input\"), \"n_submit\"),\n [Input(search_component.id(\"input\"), \"value\")],\n [State(search_component.id(\"input\"), \"n_submit\")],\n)\ndef perform_search_on_page_load(search_term, n_submit):\n # TODO: when multiple output callbacks are supported, should also update n_submit_timestamp\n if n_submit is None:\n return 1\n else:\n raise PreventUpdate\n\n\n@crystal_toolkit_app.callback(\n Output(\"url\", \"pathname\"), [Input(search_component.id(), \"data\")]\n)\ndef update_url_pathname_from_search_term(data):\n if data is None or \"mpid\" not in data:\n raise PreventUpdate\n return data[\"mpid\"]\n\n\n@crystal_toolkit_app.callback(\n Output(STRUCT_VIEWER_SOURCE, \"data\"),\n [Input(search_component.id(), \"data\"), Input(upload_component.id(), \"data\")],\n)\ndef master_update_structure(search_mpid, upload_data):\n\n if not search_mpid and not upload_data:\n raise PreventUpdate\n\n search_mpid = search_mpid or {}\n upload_data = upload_data or {}\n\n time_searched = search_mpid.get(\"time_requested\", -1)\n time_uploaded = upload_data.get(\"time_requested\", -1)\n\n if time_searched > time_uploaded:\n\n if search_mpid is None or \"mpid\" not in search_mpid:\n raise PreventUpdate\n\n with MPRester() as mpr:\n try:\n struct = mpr.get_task_data(search_mpid[\"mpid\"], \"structure\")[0][\n \"structure\"\n ]\n print(\"Struct from task.\")\n except:\n struct = mpr.get_structure_by_material_id(search_mpid[\"mpid\"])\n print(\"Struct from material.\")\n else:\n\n struct = MPComponent.from_data(upload_data[\"data\"])\n\n return MPComponent.to_data(struct.as_dict())\n\n\n# @crystal_toolkit_app.callback(\n# Output(struct_component.id(\"\"), \"\"),\n# [Input(transformation_component.id(\"\"), \"\")],\n# [State(struct_component.id(\"\"), \"\")]\n# )\n# def change_input_structure(transformation, current_state):\n# if transformation active and current state != input\n#\n\n# endregion\n\n\n################################################################################\n# region HANDLE PERSISTENT SETTINGS\n################################################################################\n\n# to_save_and_restore = [\n# # (struct_component.id(\"hide-show\"), \"values\"),\n# (struct_component.id(\"color-scheme\"), \"value\"),\n# # (struct_component.id(\"radius_strategy\"), \"value\"),\n# # (struct_component.id(\"draw_options\"), \"values\"),\n# # (struct_component.id(\"unit-cell-choice\"), \"value\"),\n# # (struct_component.id(\"repeats\"), \"value\"),\n# ]\n#\n## (\"display-options\", \"open\")]\n#\n# for (component_id, component_property) in to_save_and_restore:\n#\n# @app.callback(\n# Output(component_id, component_property),\n# [Input(\"session_store\", \"modified_timestamp\")],\n# 
[State(\"session_store\", \"data\")],\n# )\n# def load_data(modified_timestamp, saved_data):\n# key = f\"{component_id}_{component_property}\"\n# print(\"Saving: \", key)\n# print(\"Saved session data: \", saved_data)\n# if not saved_data or key not in saved_data:\n# raise PreventUpdate\n# return saved_data[key]\n#\n#\n# all_inputs = [\n# Input(component_id, component_property)\n# for component_id, component_property in to_save_and_restore\n# ]\n# all_keys = [\n# f\"{component_id}_{component_property}\"\n# for component_id, component_property in to_save_and_restore\n# ]\n#\n#\n# @app.callback(\n# Output(\"session_store\", \"data\"), all_inputs, [State(\"session_store\", \"data\")]\n# )\n# def load_data(property, saved_data):\n# key = f\"{component_id}_{component_property}\"\n# print(\"Saving: \", key)\n# saved_data = saved_data or {}\n# saved_data[key] = property\n# print(\"Saved session data: \", saved_data)\n# return saved_data\n\n# for idx, panel in enumerate(panels):\n# @app.callback(\n# Output(panel.id(\"panel\"), \"style\"),\n# [Input(\"panel-choices\", \"value\")]\n# )\n# def hide_show_panel(value):\n# if idx in value:\n# return {}\n# else:\n# return {\"display\": \"none\"}\n\n# endregion\n\n################################################################################\n# Run server :-)\n################################################################################\n\n\nif __name__ == \"__main__\":\n crystal_toolkit_app.run_server(debug=DEBUG_MODE, port=8050)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"431123021","text":"# Binary Classification using Gaussian Processes in GPyTorch\n# Dataset is a simulated 2D sinusoid \n# Uses discrete grid to sample points in 2D space\n\nfrom gpytorch.mlls.variational_elbo import VariationalELBO\nimport math\nimport torch\nimport torch.distributions as dist\nimport gpytorch\nfrom matplotlib import pyplot as plt\nimport os\nfrom scipy.optimize import minimize\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport sys\nimport time\n\nfrom gpytorch.models import AbstractVariationalGP\nfrom gpytorch.variational import CholeskyVariationalDistribution\nfrom gpytorch.variational import VariationalStrategy\n\ntorch.manual_seed(4);\n\nclass GPClassificationModel(AbstractVariationalGP):\n def __init__(self, train_x, lengthscale=None):\n variational_distribution = CholeskyVariationalDistribution(train_x.size(0))\n variational_strategy = VariationalStrategy(self, train_x, variational_distribution)\n super(GPClassificationModel, self).__init__(variational_strategy)\n self.mean_module = gpytorch.means.ConstantMean()\n if lengthscale is not None:\n \tself.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(lengthscale=lengthscale))\n else:\n \tself.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n\n def set_data(self, new_X):\n self.__init__(new_X);\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n return latent_pred\n\n\nclass VSGPClassificationModel(AbstractVariationalGP):\n def __init__(self, train_x):\n variational_distribution = CholeskyVariationalDistribution(train_x.size(0))\n variational_strategy = VariationalStrategy(self, train_x, variational_distribution, learn_inducing_locations=True)\n super(VSGPClassificationModel, 
self).__init__(variational_strategy)\n self.mean_module = gpytorch.means.ConstantMean()\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n return latent_pred\n\n\nclass GPClassifier:\n def __init__(self, sparse=False, n_dims=1):\n # Initialize model and likelihood\n \n self.likelihood = gpytorch.likelihoods.BernoulliLikelihood()\n self.likelihood.train();\n self.sparse = sparse;\n self.n_dims = n_dims\n self.X = None;\n self.y = None;\n\n self.training_time = 0.0;\n\n def fit(self, X, y, lengthscale=None):\n \n if self.sparse:\n perm = torch.randperm(X.size(0))\n idx = perm[:50]\n self.inducing_points = X[idx];\n self.inducing_points.sort();\n class_model = VSGPClassificationModel;\n else:\n self.inducing_points = X;\n class_model = GPClassificationModel;\n\n self.X = X;\n self.y = y;\n\n self.model = class_model(self.inducing_points, lengthscale);\n # Use the adam optimizer\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.1)\n\n # \"Loss\" for GPs - the marginal log likelihood\n # num_data refers to the amount of training data\n self.mll = VariationalELBO(self.likelihood, self.model, self.y.numel())\n\n self.train();\n\n def train(self, num_steps=50):\n\n start = time.time();\n for i in range(num_steps):\n # Zero backpropped gradients from previous iteration\n self.optimizer.zero_grad()\n # Get predictive output\n output = self.model(self.X)\n # Calc loss and backprop gradients\n loss = -self.mll(output, self.y)\n loss.backward()\n #print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.item()))\n self.optimizer.step()\n\n end = time.time()\n self.training_time += end - start;\n\n def update_post(self, X, y):\n if not isinstance(X, torch.Tensor):\n X = torch.tensor(X).float();\n\n if not isinstance(y, torch.Tensor):\n y = torch.tensor(y).float();\n\n if self.X is not None:\n #print(X.shape, self.X.shape)\n assert X.shape[1] == self.X.shape[1], 'Input shape does not match'\n\n else:\n self.fit(X, y);\n return 0;\n\n lengthscale = self.model.covar_module.base_kernel.lengthscale.item() \n self.fit(torch.cat((self.X, X), dim=0), torch.cat((self.y, y), dim=0), lengthscale=lengthscale);\n\n def forward(self, X):\n with torch.no_grad():\n pred_f = self.model(X);\n\n return pred_f;\n\n def predict(self, X, y=None, acc=True):\n\n if acc:\n assert y is not None, 'target labels required to calculate accuracy'\n\n self.model.eval()\n self.likelihood.eval()\n\n with torch.no_grad():\n # Test x are regularly spaced by 0.01 0,1 inclusive\n test_x = X\n # Get classification predictions\n pred_f = self.model(test_x);\n \n observed_pred = self.likelihood(pred_f)\n\n # Get the predicted labels (probabilites of belonging to the positive class)\n # Transform these probabilities to be 0/1 labels\n pred_labels = observed_pred.mean.ge(0.5).float()\n\n if acc:\n acc_pctg = (pred_labels == y).sum() * 100 / len(y);\n return pred_labels, acc_pctg\n else:\n return pred_labels\n\ndef RandSample(gpc, X):\n return torch.randperm(X.size(0))[0]\n\ndef plot_cmap(fig, gpc, X, axpoints, n, cmap):\n with torch.no_grad():\n post_f = gpc.forward(X);\n mean, var = post_f.mean, post_f.variance\n pcm = plt.pcolormesh(axpoints, axpoints, var.detach().numpy().reshape(n,n), cmap=cmap)#, vmin=0., vmax=0.5);\n fig.colorbar(pcm);\n\ndef plot_points(X, selected):\n train_x = X;\n plt.scatter(train_x[:,0].numpy(), 
train_x[:,1].numpy(), color='yellow', s=0.8, alpha=0.5);\n plt.scatter(selected[:,0].numpy(), selected[:,1].numpy(), color='black', s=7, alpha=0.8);\n plt.scatter(selected[-1][0], selected[-1][1], color='blue', s=10, alpha=1);\n\ndef UCB(gpc, X):\n with torch.no_grad():\n post_f = gpc.forward(X)\n\n idx = torch.argmax(post_f.variance);\n\n return idx;\n\nif __name__ == '__main__':\n cmap = plt.get_cmap('YlOrRd');\n fig = plt.figure(figsize=(12,6));\n\n num_steps = 30;\n\n n = 20\n train_x = torch.zeros(n ** 2, 2)\n train_x[:, 0].copy_(torch.linspace(-1, 1, n).repeat(n))\n train_x[:, 1].copy_(torch.linspace(-1, 1, n).unsqueeze(1).repeat(1, n).view(-1))\n train_y = torch.sign(torch.sin(train_x[:, 0] + 2*train_x[:, 1] * math.pi)).add(1).div(2);\n\n n = 100\n test_x = torch.zeros(n ** 2, 2)\n test_x[:, 0].copy_(torch.linspace(-1, 1, n).repeat(n))\n test_x[:, 1].copy_(torch.linspace(-1, 1, n).unsqueeze(1).repeat(1, n).view(-1))\n test_y = torch.sign(torch.sin(test_x[:, 0] + 2*test_x[:, 1] * math.pi)).add(1).div(2);\n \n \n \n gpc_list = [GPClassifier(sparse=False, n_dims=2), GPClassifier(sparse=False, n_dims=2)];\n gpc_names = ['UCB', 'Random Sample'];\n ac_funcs = [UCB, RandSample]\n next_idx = RandSample(gpc_list[0], train_x);\n\n next_x = [train_x[next_idx].unsqueeze(0), train_x[next_idx].unsqueeze(0)];\n next_y = [train_y[next_idx].unsqueeze(0), train_y[next_idx].unsqueeze(0)];\n \n selected = next_x.copy();\n\n pred_labels = [None, None]\n\n accs = [[],[]]\n\n for i in range(num_steps):\n for j, gpc in enumerate(gpc_list): \n if i != 0:\n selected[j] = torch.cat((selected[j], next_x[j]), dim=0);\n\n gpc.update_post(next_x[j], next_y[j])\n \n next_idx = ac_funcs[j](gpc, train_x);\n \n next_x[j], next_y[j] = train_x[next_idx].unsqueeze(0), train_y[next_idx].unsqueeze(0);\n\n if (i+1) % 5 == 0:\n pred_labels[j], acc = gpc.predict(test_x, test_y, acc=True)\n #print('{0}: Accuracy on test set, iteration {1}: {2:0.3f}%'.format(gpc_names[j],i+1, acc));\n accs[j].append(acc);\n \n plt.pause(1)\n plt.clf();\n plt.annotate('Points sampled: {}'.format(i + 1), xy=(0.5,0.90))#, xycoords='axes fraction')\n for j, gpc in enumerate(gpc_list):\n plt.subplot(1,2,j+1);\n\n plot_cmap(fig, gpc, test_x, torch.linspace(-1,1,n), n, cmap);\n plot_points(train_x, selected[j])\n \n plt.show(block=False);\n \n print('Time spent training gpc1: {0:0.3f} s'.format(gpc_list[0].training_time));\n print('Time spent training gpc2: {0:0.3f} s'.format(gpc_list[1].training_time));\n\n fig, ax = plt.subplots(1,2, figsize=(12, 6))\n ax = ax.flatten();\n for j in range(len(gpc_list)):\n ax[j].plot((np.array(list(range(len(accs[j])))) + 1)*5, accs[j]);\n ax[j].set_xlabel('Trials')\n ax[j].set_ylabel('Accuracy \\% on validation set');\n ax[j].set_title(gpc_names[j])\n ax[j].set_ylim([0,110]);\n ax[j].axhline(y=100, linestyle='--');\n\n\n fig, ax = plt.subplots(2,2, figsize=(12, 8))\n ax=ax.flatten();\n color1 = []\n color2 = [];\n color3 = []\n for i in range(len(pred_labels[0])):\n if test_y[i] == 1:\n color3.append('y');\n else:\n color3.append('r');\n\n if pred_labels[0][i] == 1:\n color1.append('y')\n else:\n color1.append('r')\n\n if pred_labels[1][i] == 1:\n color2.append('y')\n else:\n color2.append('r')\n \n \n # Plot data a scatter plot\n ax[0].scatter(test_x[:, 0].cpu(), test_x[:, 1].cpu(), color=color1, s=1)\n ax[2].scatter(test_x[:, 0].cpu(), test_x[:, 1].cpu(), color=color2, s=1)\n ax[1].scatter(test_x[:, 0].cpu(), test_x[:, 1].cpu(), color=color3, s=1)\n ax[3].scatter(test_x[:, 0].cpu(), test_x[:, 1].cpu(), 
color=color3, s=1)\n\n plt.show();\n","sub_path":"gpytorch/bin_classification_AL.py","file_name":"bin_classification_AL.py","file_ext":"py","file_size_in_byte":10151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"310779391","text":"import os\n\nos.environ['http_proxy'] = ''\n\nimport urllib.request\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\ndef get_lineups_from_url(page):\n sleep(0.5)\n soup = BeautifulSoup(page, \"html.parser\")\n\n players_list = get_players_lineup_list(soup)\n final_str = generate_string_for_lineups(players_list)\n\n return final_str\n\ndef get_players_lineup_list(soup):\n teams = soup.findAll(\"div\", class_=\"lineup-list\")\n home_players = teams[0].findAll(\"span\", class_=\"text-label player-name\")\n away_players = teams[1].findAll(\"span\", class_=\"text-label player-name\")\n\n players_list = []\n players_list.append([x.text.replace(\"\\n\", \"\").strip() for x in home_players])\n players_list.append([x.text.replace(\"\\n\", \"\").strip() for x in away_players])\n\n return players_list\n\ndef generate_string_for_lineups(l):\n final_str = \"\"\n\n for i in range(len(l[0])):\n final_str += \"{:20} {}\\n\".format(l[0][i], l[1][i])\n\n return final_str\n\n# if __name__ == \"__main__\":\n# get_lineups_from_url(\"https://fcbayern.com/en/matches/profis/bundesliga/2017-2018/fc-bayern-muenchen-sv-werder-bremen-19-01-2018-lineup\")","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"511550018","text":"import math\nimport torch\nimport torch.nn as nn\nfrom torch.nn import TransformerEncoderLayer, TransformerEncoder\n\nfrom settings import *\n\n\nclass EncoderTransformerModel(nn.Module):\n\n def __init__(self, num_classes):\n super().__init__()\n self.num_classes = num_classes\n self.max_seq_len = 1000\n self.ninp = 256\n self.nhid = 1024\n self.nlayers = 4\n self.nhead = 4\n self.pos_dropout = 0.1\n self.transformer_dropout = 0.1\n self.fc_in = nn.Linear(N_MELS, self.ninp)\n self.pos_encoder = PositionalEncoding(self.ninp, self.pos_dropout, self.max_seq_len)\n encoder_layer = TransformerEncoderLayer(self.ninp, self.nhead, self.nhid, self.transformer_dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layer, self.nlayers)\n self.decoder = nn.Linear(self.ninp, num_classes)\n self.dropout_layer = nn.Dropout(0.5)\n\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def get_padding_mask(self, lengths, padded_length, device):\n mask = torch.zeros(lengths.size(0), padded_length,\n device=device)\n for i in range(lengths.size(0)):\n mask[i, lengths[i]:] = 1\n return mask.bool()\n\n def get_nopeek_mask(self, size, device):\n return torch.full((size, size), float('-inf'), device=device).triu(1)\n\n def forward(self, src, lengths, device):\n src_mask = self.get_nopeek_mask(src.size(1), device)\n padding_mask = self.get_padding_mask(lengths, src.size(1), device)\n src = self.fc_in(src).transpose(0, 1) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n out = self.transformer_encoder(src=src, src_key_padding_mask=padding_mask)\n out = self.decoder(out)\n out = self.dropout_layer(out)\n return out.transpose(0, 1)\n\n\n# taken from https://pytorch.org/tutorials/beginner/transformer_tutorial.html\nclass PositionalEncoding(nn.Module):\n\n def 
__init__(self, d_model, dropout, max_len):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n","sub_path":"src/models/encoder_transformer.py","file_name":"encoder_transformer.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"281435300","text":"import os\ndef get_dose(file_name):\n doses=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25]\n file_number = int(file_name.split(sep='_')[3]) % 13\n return doses[file_number-1]\n\ndrugs = {\n 'chlorpromazine': '34.5',\n \"cisapride\": \"2.6\",\n 'diltiazem': '127.5',\n 'dofetilide': '2.1',\n 'mexiletine': '2503.2',\n 'ondansetron': '358.5',\n 'quinidine': \"842.9\",\n 'ranolazine': '1948.2',\n 'sotalol': \"2.1\",\n 'terfenadine': '9',\n 'verapamil': '45'\n}\npath = \"/Users/sofiestubo/Documents/CS/MSc/project/data_convertion/va_data-main/50-150%\"\nwith open(\"va_metrics-50-150.csv\", \"w\") as metrics:\n metrics.write(\"Param#,Peak Voltage,RMP,Max Upstroke Velocity,APD1,APD2,APD3,Tri90-40,CTD90,CTD50,CaTamp,CaTmax,CaiD,EMw,qNet,EAD,Depolarization,Index,GNa,GNaL,Gto,GKr,GKs,GK1,Gncx,Pnak,PCa,drug,cnet,dose\")\n metrics.write('\\n')\n print(os.listdir(path + '/' + 'sotalol'))\n for drug in drugs.keys():\n for file in os.listdir(path+'/'+drug):\n if file != (\".DS_Store\" or \".git\"):\n with open(path+'/'+drug+'/'+file) as f:\n print(f)\n lis = [line.split() for line in f]\n print (lis)\n dose = str(get_dose(f.name))\n print(dose)\n cnet = drugs.get(drug)\n for i in range(42, len(lis)):\n line = str(lis[i][0])\n metrics.writelines(line+','+drug+','+cnet+','+dose+ '\\n')\n f.close()\n metrics.close()\n\n\n\n\n","sub_path":"va_metrics.py","file_name":"va_metrics.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"235371999","text":"from followsuggester import FollowSuggester\n\nclass PHPStdClass:\n\tdef __init__(self, **kwargs):\n\t\tfor key in kwargs:\n\t\t\tsetattr( self, key, kwargs[key])\n\nuser = PHPStdClass(usr_id=1)\n\nfs = FollowSuggester(user, None)\nfs.expand_candidate_set([2])\n","sub_path":"follow/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"141475932","text":"#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom datetime import datetime\nfrom typing import List, 
Optional, Union\n\nfrom polyaxon import settings\nfrom polyaxon.connections.getter import get_connection_from_type\nfrom polyaxon.connections.reader import get_connection_type\nfrom polyaxon.env_vars.getters import get_artifacts_store_name\nfrom polyaxon.exceptions import PolyaxonStoresException\nfrom polyaxon.schemas.types import V1ConnectionType\nfrom polyaxon.utils.list_utils import to_list\nfrom polyaxon.utils.path_utils import create_tarfile, get_files_in_path, get_path\n\n\ndef get_artifacts_connection() -> Optional[V1ConnectionType]:\n store_name = get_artifacts_store_name()\n if store_name:\n return get_connection_type(store_name)\n if settings.AGENT_CONFIG:\n return settings.AGENT_CONFIG.artifacts_store\n return None\n\n\ndef validate_store(connection_type: V1ConnectionType):\n if not connection_type or not connection_type.is_artifact:\n raise PolyaxonStoresException(\"An artifact store type was not provided.\")\n\n\ndef list_files(\n subpath: str, filepath: str = None, connection_type: V1ConnectionType = None\n):\n connection_type = connection_type or get_artifacts_connection()\n\n validate_store(connection_type)\n\n store_path = get_path(connection_type.store_path, subpath)\n if filepath:\n store_path = get_path(store_path, filepath)\n\n store_manager = get_connection_from_type(connection_type=connection_type)\n\n try:\n results = store_manager.ls(store_path)\n results[\"files\"] = {f[0]: f[1] for f in results[\"files\"]}\n return results\n except Exception:\n raise PolyaxonStoresException(\n \"Run store path does not exists or bad configuration.\"\n )\n\n\ndef upload_file_or_dir(\n path_from: str,\n path_to: str,\n is_file: bool,\n workers: int = 0,\n last_time: datetime = None,\n connection_type: V1ConnectionType = None,\n exclude: List[str] = None,\n):\n connection_type = connection_type or get_artifacts_connection()\n\n validate_store(connection_type)\n store_manager = get_connection_from_type(connection_type=connection_type)\n\n if is_file:\n store_manager.upload_file(path_from, path_to, use_basename=False)\n else:\n store_manager.upload_dir(\n path_from,\n path_to,\n use_basename=False,\n workers=workers,\n last_time=last_time,\n exclude=exclude,\n )\n\n\ndef download_file_or_dir(\n path_from: str,\n path_to: str,\n is_file: bool,\n workers: int = 0,\n connection_type: V1ConnectionType = None,\n to_tar: bool = False,\n) -> Optional[str]:\n connection_type = connection_type or get_artifacts_connection()\n\n validate_store(connection_type)\n store_manager = get_connection_from_type(connection_type=connection_type)\n\n if is_file:\n store_manager.download_file(path_from, path_to, use_basename=False)\n else:\n store_manager.download_dir(\n path_from, path_to, use_basename=False, workers=workers\n )\n if not os.path.exists(path_to):\n return None\n if to_tar:\n return tar_dir(path_to)\n return path_to\n\n\ndef delete_file_or_dir(\n subpath: Union[str, List[str]],\n is_file: bool = False,\n workers: int = 0,\n connection_type: V1ConnectionType = None,\n):\n connection_type = connection_type or get_artifacts_connection()\n\n validate_store(connection_type)\n\n store_manager = get_connection_from_type(connection_type=connection_type)\n subpath = to_list(subpath, check_none=True)\n for sp in subpath:\n if is_file:\n store_manager.delete_file(sp)\n else:\n store_manager.delete(sp, workers=workers)\n\n\ndef tar_dir(download_path: str) -> str:\n outputs_files = get_files_in_path(download_path)\n tar_base_name = os.path.basename(download_path)\n tar_name = 
\"{}.tar.gz\".format(tar_base_name)\n target_path = get_path(settings.CLIENT_CONFIG.archive_root, tar_name)\n create_tarfile(files=outputs_files, tar_path=target_path, relative_to=download_path)\n return target_path\n","sub_path":"core/polyaxon/stores/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"402510122","text":"from django.contrib import admin\nfrom django.views.generic.base import TemplateView\nfrom django.urls import path\nfrom receipt import views\n\napp_name = 'receipt'\n\nurlpatterns = [\n path('create_receipt/',views.create_receipt,name='create_receipt'),\n path('request_receipt/',views.request_receipt,name='request_receipt'),\n path('request_receipt/distributor//',views.distributor_products,name='distributor_products'),\n path('request_receipt/company//',views.company_products,name='company_products'),\n path('receipt_product_detail/',views.receipt_product_detail,name='receipt_product_detail'),\n path('receipt_detail//',views.receipt_detail,name='receipt_detail'),\n path('receipt_list/',views.receipt_list,name='receipt_list'),\n path('receipt_stats',views.receipt_stats,name='receipt_stats'),\n path('access_denied/',views.access_denied,name='access_denied'),\n path('process_receipt/', views.process_receipt,name='process_receipt'),\n path('submit_receipt/', views.submit_receipt,name='submit_receipt')\n]","sub_path":"prod_dist - Copy/receipt/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440364417","text":"'''\nテンプレートマッチング\n\n2017/05/01\n\n(c) 2017 kapifuji\n'''\n# 注釈まだ\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nif __name__ == \"__main__\":\n grayImg = cv2.imread(\"image/nnss.png\", cv2.IMREAD_GRAYSCALE)\n templateImg = cv2.imread(\"save/nnss13.png\", cv2.IMREAD_GRAYSCALE)\n tempWidth, tempHeight = templateImg.shape[::-1]\n\n matchImg = cv2.matchTemplate(grayImg, templateImg, cv2.TM_CCOEFF_NORMED)\n\n '''\n minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(matchImg)\n\n topLeft = maxLoc\n bottomRight = (maxLoc[0] + tempWidth, maxLoc[1] + tempHeight)\n\n threeCImg = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)\n cv2.rectangle(threeCImg, topLeft, bottomRight, (0 ,255, 0), 3)\n '''\n\n threshold = 0.85\n loc = np.where(matchImg >= threshold)\n threeCImg = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)\n for pnt in zip(*loc[::-1]):\n cv2.rectangle(threeCImg, pnt, (pnt[0] + tempWidth, pnt[1] + tempHeight), (0 ,255, 0), 3)\n\n\n cv2.imshow(\"\", threeCImg)\n cv2.imwrite(\"save/nnssTemplateM.png\", threeCImg)\n\n key = cv2.waitKey(0)\n ","sub_path":"OpenCV Exercises/OpenCV Exercises/TemplateMatch.py","file_name":"TemplateMatch.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"561803212","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nalz = pd.read_csv(\"CCDSS.csv\")\n\n#print(alz)\n\nince = alz.loc[~alz[\"Incidence (Rate per 100)\"].str.contains(\"-\")]\nprev = alz.loc[~alz[\"Prevalence (%)\"].str.contains(\"-\")]\nmort = alz.loc[~alz[\"Mortality (Rate Ratio)\"].str.contains(\"-\")]\n\nnew_df = pd.DataFrame(alz[\"Area\"])\n\nnew_df[\"Incidence\"] = ince[\"Incidence (Rate per 100)\"].astype('float')\nnew_df[\"Prevalence\"] = prev[\"Prevalence (%)\"].astype('float')\nnew_df[\"Mortality\"] = 
mort[\"Mortality (Rate Ratio)\"].astype('float')\n\nprint(new_df.groupby([\"Area\"]).mean())\n#list(new_df.groupby([\"Area\"]).mean()[\"Incidence\"])\n#print(new_df[\"Area\"].sort_values().unique())\n\nfor i in new_df.columns[1:]:\n ticks = range(1, len(new_df.groupby([\"Area\"]))+1)\n height = list(new_df.groupby([\"Area\"]).mean()[i])\n tick_label = [\"AB\", \"BC\", \"CA\", \"MB\", \"NB\", \"NL\", \"NT,NU,YT\", \"NS\", \"ON\", \"PE\", \"QC\"]\n plt.bar(ticks, height, tick_label=tick_label, width=0.6)\n plt.title(\"Alzheimer's \" + i + \" by Province\")\n plt.ylabel(\"Population Percentage\")\n plt.show()\n\n'''\nalz_ince = {}\nalz_prev = {}\nalz_mort = {}\n\n#print(alz.columns)\n\nareas = list(set(alz[\"Area\"]))\nfor i in areas:\n alz_ince.setdefault(i, [])\n alz_prev.setdefault(i, [])\n alz_mort.setdefault(i, [])\n\nfor entries in alz.values:\n for keys in alz_ince.keys():\n if entries[0] == keys:\n if entries[6] == \"-\":\n pass\n else:\n alz_ince[keys].append(float(entries[6]))\n if entries[9] == \"-\":\n pass\n else:\n alz_prev[keys].append(float(entries[9]))\n if entries[12] == \"-\":\n pass\n else:\n alz_mort[keys].append(float(entries[12]))\n\nfor keys in alz_ince.keys():\n alz_ince[keys] = s.mean(alz_ince[keys])\n alz_prev[keys] = s.mean(alz_prev[keys])\n alz_mort[keys] = s.mean(alz_mort[keys])\n\n#print(alz_ince)\n#print(alz_prev)\n#print(alz_mort)\n\nshorthand = []\nfor short in alz_ince.keys():\n shorthand.append(short[:5])\n\nticks = range(1, len(alz_ince.keys())+1)\nheight = list(alz_ince.values())\ntick_label = shorthand\nplt.bar(ticks, height, tick_label=tick_label, width=0.6)\nplt.title(\"Alzheimer's Incidence by Province\")\nplt.ylabel(\"Population Percentage\")\nplt.show()\n\n\nticks = range(1, len(alz_prev.keys())+1)\nheight = list(alz_prev.values())\ntick_label = shorthand\nplt.bar(ticks, height, tick_label=tick_label, width=0.6)\nplt.title(\"Alzheimer's Prevalence by Province\")\nplt.ylabel(\"Population Percentage\")\nplt.show()\n\nticks = range(1, len(alz_mort.keys())+1)\nheight = list(alz_mort.values())\ntick_label = shorthand\nplt.bar(ticks, height, tick_label=tick_label, width=0.6)\nplt.title(\"Alzheimer's Mortality by Province\")\nplt.ylabel(\"Population Percentage\")\nplt.show()\n'''","sub_path":"RespiratoryDiseases/Alzheimers.py","file_name":"Alzheimers.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"415728287","text":"import numpy as np\nimport json\nimport os \nimport pandas as pd\n\n\n\ndef getPaths():\n posDir=\"normpos\"\n negDir=\"normneg\"\n positive_paths=[os.path.join(posDir,file) for file in os.listdir(posDir)]\n negative_paths=[os.path.join(negDir,file) for file in os.listdir(negDir)]\n \n return positive_paths, negative_paths\n\n\n \ndef saveShuffledSet(outTrain,outValid,negIter):\n \n nb_file=0\n \n pos_paths,neg_paths=getPaths()\n tot_p=len(pos_paths)\n tot_neg=len(neg_paths)\n p=0\n n=0\n negIter\n training_sample=np.array([np.empty([40,40,40])])\n \n# \n# while n 0:\n if taxable > 80000:\n tax_rate = 0.45\n sskcs = 13505\n elif taxable > 55000:\n tax_rate = 0.35\n sskcs = 5505\n elif taxable > 35000:\n tax_rate = 0.3\n sskcs = 2755\n elif taxable > 9000:\n tax_rate = 0.25\n sskcs = 1005\n elif taxable > 4500:\n tax_rate = 0.2\n sskcs = 555\n elif taxable > 1500:\n tax_rate = 0.1\n sskcs = 105\n elif taxable >0:\n tax_rate = 0.03\n sskcs = 0\n return taxable * tax_rate - sskcs\n else:\n return 0\n\ntry:\n account = {}\n for arg in 
sys.argv[1:]:\n people_id = arg.split(':')[0]\n salary = int(arg.split(':')[1])\n account[people_id] = (salary - tax(salary) - social_insurance(salary))\n for key, value in account.items():\n print(key + ':' + '{:.2f}'. format(value))\nexcept:\n print(\"Parameter Error\")\n","sub_path":"calculator_1.0.py","file_name":"calculator_1.0.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"215518984","text":"import numpy as np\n\nimport theano\nimport theano.tensor as T\n\nfloatX = theano.config.floatX\n\nfrom keras.layers.recurrent import Recurrent, LSTM\nfrom keras import backend as K\n\ntol = 1e-4\n\n\ndef _wta(X):\n M = K.max(X, axis=-1, keepdims=True)\n R = K.switch(K.equal(X, M), X, 0.)\n return R\n\n\ndef _update_controller(self, inp, h_tm1, M):\n x = T.concatenate([inp, M], axis=-1)\n if len(h_tm1) in [1, 2]:\n if hasattr(self.rnn, \"get_constants\"):\n BW, BU = self.rnn.get_constants(x)\n h_tm1 += (BW, BU)\n _, h = self.rnn.step(x, h_tm1)\n\n return h\n\n\ndef _circulant(leng, n_shifts):\n eye = np.eye(leng)\n shifts = range(n_shifts // 2, -n_shifts // 2, -1)\n C = np.asarray([np.roll(eye, s, axis=1) for s in shifts])\n return theano.shared(C.astype(theano.config.floatX))\n\n\ndef _renorm(x):\n return x / (x.sum(axis=1, keepdims=True))\n\n\ndef _softmax(x):\n wt = x.flatten(ndim=2)\n w = T.nnet.softmax(wt)\n return w.reshape(x.shape) # T.clip(s, 0, 1)\n\n\ndef _cosine_distance(M, k):\n dot = (M * k[:, None, :]).sum(axis=-1)\n nM = T.sqrt((M ** 2).sum(axis=-1))\n nk = T.sqrt((k ** 2).sum(axis=-1, keepdims=True))\n return dot / (nM * nk)\n\n\nclass NeuralTuringMachine(Recurrent):\n def __init__(self, output_dim, memory_size, shift_range=3,\n init='glorot_uniform', inner_init='orthogonal',\n input_dim=None, input_length=None, **kwargs):\n self.output_dim = output_dim\n self.n_slots = memory_size[1]\n self.m_length = memory_size[0]\n self.shift_range = shift_range\n self.init = init\n self.inner_init = inner_init\n self.input_dim = input_dim\n self.input_length = input_length\n self.u = None\n if self.input_dim:\n kwargs['input_shape'] = (self.input_length, self.input_dim)\n super(NeuralTuringMachine, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.u = input_shape\n input_leng, input_dim = input_shape[1:]\n # self.input = T.tensor3()\n\n self.rnn = LSTM(\n input_dim=input_dim + self.m_length,\n input_length=input_leng,\n output_dim=self.output_dim, init=self.init,\n forget_bias_init='zero',\n inner_init=self.inner_init)\n self.rnn.build(input_shape)\n self.M = theano.shared((.001 * np.ones((1,)).astype(floatX)))\n self.init_h = K.zeros((self.output_dim))\n self.init_wr = self.rnn.init((self.n_slots,))\n self.init_ww = self.rnn.init((self.n_slots,))\n\n # write\n self.W_e = self.rnn.init((self.output_dim, self.m_length)) # erase\n self.b_e = K.zeros((self.m_length))\n self.W_a = self.rnn.init((self.output_dim, self.m_length)) # add\n self.b_a = K.zeros((self.m_length))\n\n # get_w parameters for reading operation\n self.W_k_read = self.rnn.init((self.output_dim, self.m_length))\n self.b_k_read = self.rnn.init((self.m_length,))\n self.W_c_read = self.rnn.init((self.output_dim, 3)) # 3 = beta, g, gamma see eq. 5, 7, 9\n self.b_c_read = K.zeros((3))\n self.W_s_read = self.rnn.init((self.output_dim, self.shift_range))\n self.b_s_read = K.zeros((self.shift_range)) # b_s lol! 
not intentional\n\n # get_w parameters for writing operation\n self.W_k_write = self.rnn.init((self.output_dim, self.m_length))\n self.b_k_write = self.rnn.init((self.m_length,))\n self.W_c_write = self.rnn.init((self.output_dim, 3)) # 3 = beta, g, gamma see eq. 5, 7, 9\n self.b_c_write = K.zeros((3))\n self.W_s_write = self.rnn.init((self.output_dim, self.shift_range))\n self.b_s_write = K.zeros((self.shift_range))\n\n self.C = _circulant(self.n_slots, self.shift_range)\n\n self.trainable_weights = self.rnn.trainable_weights + [\n self.W_e, self.b_e,\n self.W_a, self.b_a,\n self.W_k_read, self.b_k_read,\n self.W_c_read, self.b_c_read,\n self.W_s_read, self.b_s_read,\n self.W_k_write, self.b_k_write,\n self.W_s_write, self.b_s_write,\n self.W_c_write, self.b_c_write,\n self.M,\n self.init_h, self.init_wr, self.init_ww]\n\n self.init_c = K.zeros((self.output_dim))\n self.trainable_weights = self.trainable_weights + [self.init_c, ]\n\n def _read(self, w, M):\n return (w[:, :, None] * M).sum(axis=1)\n\n def _write(self, w, e, a, M):\n Mtilda = M * (1 - w[:, :, None] * e[:, None, :])\n Mout = Mtilda + w[:, :, None] * a[:, None, :]\n return Mout\n\n def _get_content_w(self, beta, k, M):\n num = beta[:, None] * _cosine_distance(M, k)\n return _softmax(num)\n\n def _get_location_w(self, g, s, C, gamma, wc, w_tm1):\n wg = g[:, None] * wc + (1 - g[:, None]) * w_tm1\n Cs = (C[None, :, :, :] * wg[:, None, None, :]).sum(axis=3)\n wtilda = (Cs * s[:, :, None]).sum(axis=1)\n wout = _renorm(wtilda ** gamma[:, None])\n return wout\n\n def _get_controller_output(self, h, W_k, b_k, W_c, b_c, W_s, b_s):\n k = T.tanh(T.dot(h, W_k) + b_k) # + 1e-6\n c = T.dot(h, W_c) + b_c\n beta = T.nnet.relu(c[:, 0]) + 1e-4\n g = T.nnet.sigmoid(c[:, 1])\n gamma = T.nnet.relu(c[:, 2]) + 1.0001\n s = T.nnet.softmax(T.dot(h, W_s) + b_s)\n return k, beta, g, gamma, s\n\n def get_initial_states(self, X):\n batch_size = X.shape[0]\n init_M = self.M.dimshuffle(0, 'x', 'x').repeat(\n batch_size, axis=0).repeat(self.n_slots, axis=1).repeat(\n self.m_length, axis=2)\n init_M = init_M.flatten(ndim=2)\n\n init_h = self.init_h.dimshuffle(('x', 0)).repeat(batch_size, axis=0)\n init_wr = self.init_wr.dimshuffle(('x', 0)).repeat(batch_size, axis=0)\n init_ww = self.init_ww.dimshuffle(('x', 0)).repeat(batch_size, axis=0)\n init_c = self.init_c.dimshuffle(('x', 0)).repeat(batch_size, axis=0)\n return [init_M, T.nnet.softmax(init_wr), T.nnet.softmax(init_ww),\n init_h, init_c]\n\n @property\n def output_shape(self):\n input_shape = self.input_shape\n if self.return_sequences:\n return input_shape[0], input_shape[1], self.output_dim\n else:\n return input_shape[0], self.output_dim\n\n def call(self, x, mask=None):\n\n input_shape = self.u\n print(input_shape)\n if K._BACKEND == 'tensorflow':\n if not input_shape[1]:\n raise Exception('When using TensorFlow, you should define '\n 'explicitly the number of timesteps of '\n 'your sequences.\\n'\n 'If your first layer is an Embedding, '\n 'make sure to pass it an \"input_length\" '\n 'argument. Otherwise, make sure '\n 'the first layer has '\n 'an \"input_shape\" or \"batch_input_shape\" '\n 'argument, including the time axis. 
'\n 'Found input shape at layer ' + self.name +\n ': ' + str(input_shape))\n if self.stateful:\n initial_states = self.states\n else:\n initial_states = self.get_initial_states(x)\n constants = self.get_constants(x)\n preprocessed_input = self.preprocess_input(x)\n\n last_output, outputs, states = K.rnn(self.step, preprocessed_input,\n initial_states,\n go_backwards=self.go_backwards,\n mask=mask,\n constants=constants,\n unroll=self.unroll,\n input_length=input_shape[1])\n if self.stateful:\n self.updates = []\n for i in range(len(states)):\n self.updates.append((self.states[i], states[i]))\n\n if self.return_sequences:\n return outputs\n else:\n return last_output\n\n def step(self, x, states):\n M_tm1, wr_tm1, ww_tm1 = states[:3]\n # reshape\n M_tm1 = M_tm1.reshape((x.shape[0], self.n_slots, self.m_length))\n # read\n h_tm1 = states[3:]\n k_read, beta_read, g_read, gamma_read, s_read = self._get_controller_output(\n h_tm1[0], self.W_k_read, self.b_k_read, self.W_c_read, self.b_c_read,\n self.W_s_read, self.b_s_read)\n wc_read = self._get_content_w(beta_read, k_read, M_tm1)\n wr_t = self._get_location_w(g_read, s_read, self.C, gamma_read,\n wc_read, wr_tm1)\n M_read = self._read(wr_t, M_tm1)\n\n # update controller\n h_t = _update_controller(self, x, h_tm1, M_read)\n\n # write\n k_write, beta_write, g_write, gamma_write, s_write = self._get_controller_output(\n h_t[0], self.W_k_write, self.b_k_write, self.W_c_write,\n self.b_c_write, self.W_s_write, self.b_s_write)\n wc_write = self._get_content_w(beta_write, k_write, M_tm1)\n ww_t = self._get_location_w(g_write, s_write, self.C, gamma_write,\n wc_write, ww_tm1)\n e = T.nnet.sigmoid(T.dot(h_t[0], self.W_e) + self.b_e)\n a = T.tanh(T.dot(h_t[0], self.W_a) + self.b_a)\n M_t = self._write(ww_t, e, a, M_tm1)\n\n M_t = M_t.flatten(ndim=2)\n\n return h_t[0], [M_t, wr_t, ww_t] + h_t\n","sub_path":"Another_NTM.py","file_name":"Another_NTM.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"232728072","text":"import math\n\nfrom gennav.planners.graph_search_algorithms.astar import astar\nfrom gennav.planners.prm import PRM\nfrom gennav.planners.samplers import uniform_random_sampler as sampler\nfrom gennav.utils.planner import check_intersection\nfrom gennav.utils.planner import los_optimizer as path_optimizer\n\n\ndef test_prm_astar():\n general_obstacles_list = [\n [[(8, 5), (7, 8), (2, 9), (3, 5)], [(3, 3), (3, 5), (5, 5), (5, 3)]],\n [\n [(2, 10), (7, 10), (7, 1), (6, 1), (6, 6), (4, 6), (4, 9), (2, 9)],\n [(4, 0), (4, 5), (5, 5), (5, 0)],\n [(8, 2), (8, 7), (10, 7), (10, 2)],\n ],\n ]\n\n for obstacles in general_obstacles_list:\n obstacle_list = obstacles\n\n # Instatiate prm constructer object\n my_tree = PRM(sample_area=(-5, 15), sampler=sampler, r=5, n=20)\n graph = my_tree.construct(obstacle_list)\n # PRM.visualize_graph(graph,obstacle_list)\n start = (0, 0)\n end = (12, 10)\n\n min_dist = float(\"inf\")\n for node in graph.keys():\n dist = math.sqrt((node[0] - start[0]) ** 2 + (node[1] - start[1]) ** 2)\n if dist < min_dist and (\n not check_intersection([node, start], obstacle_list)\n ):\n min_dist = dist\n s = node\n\n min_dist = float(\"inf\")\n for node in graph.keys():\n dist = math.sqrt((node[0] - end[0]) ** 2 + (node[1] - end[1]) ** 2)\n if dist < min_dist and (not check_intersection([node, end], obstacle_list)):\n min_dist = dist\n e = node\n path = astar(graph, s, e)\n if len(path) > 1:\n optimized_path = path_optimizer(path, 
obstacle_list)\n # from gennav.utils.planner import visualize_path\n # visualize_path(optimized_path, obstacle_list)\n if len(optimized_path) > 1:\n assert check_intersection(optimized_path, obstacle_list) is False\n\n\ndef test_prm():\n general_obstacles_list = [\n [[(8, 5), (7, 8), (2, 9), (3, 5)], [(3, 3), (3, 5), (5, 5), (5, 3)]],\n [\n [(2, 10), (7, 10), (7, 1), (6, 1), (6, 6), (4, 6), (4, 9), (2, 9)],\n [(4, 0), (4, 5), (5, 5), (5, 0)],\n [(8, 2), (8, 7), (10, 7), (10, 2)],\n ],\n ]\n\n for obstacles in general_obstacles_list:\n obstacle_list = obstacles\n\n # Instatiate prm constructer object\n my_tree = PRM(sample_area=(-5, 15), sampler=sampler, r=5, n=50)\n graph = my_tree.construct(obstacle_list) # noqa: F841\n # PRM.visualize_graph(graph,obstacle_list)\n","sub_path":"tests/test_planner/prm_test.py","file_name":"prm_test.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"161515261","text":"from django.conf.urls import url\n\nfrom usaspending_api.download.v2 import views\n# from usaspending_api.download.v2.views.common import DownloadStatusViewSet, DownloadColumnsViewSet\n\n\nurlpatterns = [\n url(r'^awards', views.DownloadAwardsViewSet.as_view()),\n # url(r'^columns', DownloadColumnsViewSet.as_view()),\n url(r'^status', views.DownloadStatusViewSet.as_view()),\n url(r'^transactions', views.DownloadTransactionsViewSet.as_view()),\n]\n","sub_path":"usaspending_api/download/v2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"333736688","text":"import numpy\nimport cv2\n\n\nimgBGR = cv2.imread(\"images/tree.jpg\")\nb,g,r = cv2.split(imgBGR)\ncv2.imshow(\"BLUE\" , b)\ncv2.imshow(\"GREEN\" , g)\ncv2.imshow(\"RED\" , r)\n\n#cv2.namedWindow(\"IMAGETREE_RGB\" , cv2.WINDOW_NORMAL)\n\ncv2.imshow(\"IMAGETREE_RGB\" , imgBGR)\n\nimgHSV = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV)\nh,s,v = cv2.split(imgHSV)\ncv2.imshow(\"HUE\" , h)\ncv2.imshow(\"SATURATION\" , s)\ncv2.imshow(\"VALUE\" , v)\n\nimgGREY = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"GREY\" , imgGREY)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"rgbTOhsvTOgrey.py","file_name":"rgbTOhsvTOgrey.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"432335664","text":"from __future__ import absolute_import\n\nfrom appium import webdriver\nimport time\n\nfrom shishito.runtime.environment.shishito import ShishitoEnvironment\n\n\nclass ControlEnvironment(ShishitoEnvironment):\n \"\"\" Appium control environment. \"\"\"\n\n def call_browser(self, config_section):\n \"\"\" Start webdriver for given config section. Prepare capabilities for the webdriver. If saucelabs setting has value,\n webdriver will be connected to saucelabs. 
Otherwise appium_url setting will be used.\n\n :param str config_section: section in platform/environment.properties config\n :return: created webdriver\n \"\"\"\n\n # get browser capabilities\n capabilities = self.get_capabilities(config_section)\n saucelabs = self.shishito_support.get_opt('saucelabs')\n if saucelabs:\n remote_url = 'http://%s@ondemand.saucelabs.com:80/wd/hub' % saucelabs\n else:\n remote_url = self.shishito_support.get_opt('appium_url')\n\n # get driver\n return self.start_driver(capabilities, remote_url)\n\n def get_capabilities(self, config_section):\n \"\"\" Return dictionary of capabilities for specific config combination.\n\n :param str config_section: section in platform/environment.properties config\n :return: dict with capabilities\n \"\"\"\n\n get_opt = self.shishito_support.get_opt\n\n return {\n 'platformName': get_opt(config_section, 'platformName'),\n 'platformVersion': get_opt(config_section, 'platformVersion'),\n 'deviceName': get_opt(config_section, 'deviceName'),\n 'app': get_opt('app') or get_opt(config_section, 'app'),\n 'appiumVersion': get_opt(config_section, 'appiumVersion') or '1.6.5',\n 'autoAcceptAlerts': None or get_opt(config_section, 'autoAcceptAlerts') == 'true', # default False\n 'waitForQuiescence': None or get_opt(config_section, 'waitForQuiescence') == 'true', # default False\n }\n\n def get_pytest_arguments(self, config_section):\n \"\"\" Get environment specific arguments for pytest.\n\n :param config_section: section in platform/environment.properties config\n :return: dict with arguments for pytest or None\n \"\"\"\n\n pytest_args = {\n '--platformName': '--platformName=%s' % self.shishito_support.get_opt(config_section, 'platformName'),\n '--platformVersion': '--platformVersion=%s' % self.shishito_support.get_opt(config_section, 'platformVersion'),\n '--deviceName': '--deviceName=%s' % self.shishito_support.get_opt(config_section, 'deviceName'),\n '--autoAcceptAlerts': '--autoAcceptAlerts=%s' % self.shishito_support.get_opt(config_section, 'autoAcceptAlerts'),\n '--app': '--app=%s' % (self.shishito_support.get_opt('app') or self.shishito_support.get_opt(config_section, 'app'))\n }\n\n saucelabs = self.shishito_support.get_opt('saucelabs')\n if saucelabs:\n pytest_args['--saucelabs'] = '--saucelabs=%s' % saucelabs\n return pytest_args\n\n def start_driver(self, capabilities, remote_driver_url):\n \"\"\" Prepare selenium webdriver.\n\n :param capabilities: capabilities used for webdriver initialization\n :param remote_driver_url: url to which the driver will be connected\n \"\"\"\n\n driver = webdriver.Remote(\n command_executor=remote_driver_url,\n desired_capabilities=capabilities,\n )\n\n return driver\n","sub_path":"shishito/runtime/environment/appium.py","file_name":"appium.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"578272116","text":"#Count By Check\n#Suppose you want to count from some number start_num by \\\n# another number count_by until you hit a final number end_num, \\\n# and calculate break_num the way you did in the last quiz.\n\n#Now in addition, address what would happen if someone gives a \\\n# start_num that is greater than end_num. If this is the case,\\\n# set result to \"Oops! Looks like your start value is greater\\\n# than the end value. 
Please try again.\" Otherwise, set result\\\n# to the value of break_num.\n\n\n\ndef test(start_num):\n end_num = 20\n count_by =1\n break_num = 1\n if start_num <= end_num:\n while start_num < end_num:\n break_num += count_by\n print(break_num)\n if break_num == end_num:\n break_num += count_by\n break\n print(break_num)\n else :\n result = 'Oops! Looks like your start value is greater than the end value. Please try again.'\n print(result)\n\ntest(21)\n","sub_path":"while_example2.py","file_name":"while_example2.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"171481372","text":"import csv\nimport glob\nimport unicodedata\nfrom os.path import getsize\n\nimport requests\nfrom fuzzywuzzy import fuzz\n\n\n# Early and primative sanitizing of inputs\ndef verify_clean_zip( num ):\n\ttry:\n\t\tzip_tmp = int(num)\n\t\t# Min,Max of ranges shown by DoH - ZIP Code Definitions https://on.ny.gov/1RnUmTz\n\t\t# Below if to check length is proper and that zip is within NYC ranges\n\t\tif len(str(zip_tmp)) == 5 and (10001 <= zip_tmp and zip_tmp <= 11697):\n\t\t\treturn zip_tmp\n\t\telif (10001 > zip_tmp or zip_tmp > 11697):\n\t\t\tprint(\"BAD ZIP CODE NOT WITHIN NYC RANGE :\", zip_tmp)\n\t\telse:\n\t\t\tprint(\"BAD ZIP:\", zip_tmp, \" NOT ZIP CODE OF LENGTH 5: \", len(zip_tmp))\n\n\texcept Exception as e: # Cannot clean zips, FUBARd\n\t\tzip_tmp = int(str(num).strip().isnumeric())\n\t\tprint(\"BAD ZIP:\", zip_tmp, \" Original : \", num)\n\n\ndef attempt_borough_from_zip( zip ):\n\tzip = str(zip)\n\tf = open('data/borough_zip.csv')\n\tcsv_f = csv.reader(f)\n\tfor row in csv_f:\n\t\tif (zip == str(row[0]).strip()):\n\t\t\tprint(\"Cross-referenced bad borough:\", row)\n\t\t\treturn str(row[1])\n\tf.close()\n\treturn None\n\n# Multiple bombs dropped on misbehaving strings (and still more to come)\ndef clean_string( st ):\n\ts = st.lower().strip().replace(\"dof\", \"\").replace(\" \", \"\") # two spaces in the replace arg0\n\ttry:\n\t\ts = s.translate(str.maketrans(\"\", \"\", \",.-'\\\"():;+/?$°@\"))\n\texcept Exception as e:\n\t\tprint(\"TRANSLATE ERROR:\", st, s, e) #Rare to occur\n\ts = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n\treturn s\n\n\n# Fuzzy search to help detect words that are very similar, possible typos/redundancy can be found.\n# Threshhold at 75 similarity. 
Number of searches can be reduced if time allows.\ndef string_similarity( word_set ):\n\tword_set = list(word_set)\n\tprint(\"FUZZY LIST - CANDIDATES OVER THRESHHOLD\")\n\tfor s1 in word_set:\n\t\tfor s2 in word_set:\n\t\t\tif s1 != s2:\n\t\t\t\tproximity = fuzz.ratio(s1, s2)\n\t\t\t\tif proximity > 75:\n\t\t\t\t\tprint(proximity, \" == \", s1, \"-- \", s2)\n\tprint(\"\\n\\n\")\n\n\n# Write out functions, will find more use for them in staging and potential cache scenarios.\ndef write_set( zipset ):\n\twith open('nyc_zips.txt', 'w') as f:\n\t\tfor i in zipset:\n\t\t\tf.write(str(i) + \"\\n\")\n\n\ndef write_dict_to_csv( tmp_dict, out_f=\"output.csv\" ):\n\twith open(out_f, 'w') as f: # Just use 'w' mode in 3.x\n\t\tw = csv.DictWriter(f, tmp_dict.keys())\n\t\tw.writeheader()\n\t\tw.writerow(tmp_dict)\n\n\ndef download_jsons():\n\tfor i in range(100): # range 100 is arbitrarly high, break occurs at about 23~\n\t\t# URL takes advantage of 'floating timestamps' that query between two dates (2017) and only selects what is needed\n\t\turl = 'https://data.cityofnewyork.us/resource/fhrw-4uyv.json?$query=SELECT%20incident_zip,due_date,borough,complaint_type%20WHERE%20due_date%20between%20%272017-01-01T12:19:54.000%27%20and%20%272017-12-30T12:19:54.000%27%20LIMIT%2050000%20OFFSET%20'\n\t\t# Downloaded in increments because of 50k line limit, break upon smaller file increment\n\t\turl_offset = url + str(i * 50000)\n\t\tresponse = requests.get(url_offset, stream=True)\n\t\thandle = open('data/data' + str(i) + '.json', \"wb\")\n\t\tprint('Downloading data' + str(i) + '.json ...')\n\t\tfor chunk in response.iter_content(chunk_size=512):\n\t\t\tif chunk and len(chunk) > 3: # filter out keep-alive new chunks\n\t\t\t\thandle.write(chunk)\n\t\tif getsize('data/data' + str(i) + '.json') < (1024 * 1024 * 4): # Last increment if below 4mb (below 50k lines)\n\t\t\tbreak\n\tprint(\"Finished Download Increments\")\n\n\ndef get_jsons_list(): # Retrieve list of jsons, if none then download\n\tread_files = glob.glob(\"data/data*.json\")\n\tif not read_files:\n\t\tprint(\"Data is not found! Downloading JSONs\")\n\t\tdownload_jsons()\n\treturn glob.glob(\"data/data*.json\")\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"583378838","text":"from pyramid.view import view_config\nfrom pyramid.httpexceptions import HTTPFound\n\nfrom . 
import web\n\nfrom ..lib.misc import update_dict\nfrom ..lib.auto_format import action_ok, registered_formats\n\nfrom ..model import DBSession\nfrom ..model.model_tracks import Track, Tag, TrackTagMapping\nfrom ..model.actions import get_tag\nfrom ..templates.helpers import search_url, track_url\n\nimport re\nimport copy\nfrom sqlalchemy import func\nfrom sqlalchemy.sql import null\nfrom sqlalchemy.orm import joinedload, aliased\n\n\n#-------------------------------------------------------------------------------\n# Constants\n\n# A list of the sub tags allowed when browsing by specific tags\n# rather than overwelming the user with all possible tags, limit the browsing to a subset under known circumstances.\ntag_cats = {\n 'root' : ['category','vocalstyle','vocaltrack','lang'],\n 'category:anime' : ['from'],\n 'category:jdrama': ['from'],\n 'category:game' : ['from'],\n 'category:jpop' : ['artist','from'],\n}\n\n#-------------------------------------------------------------------------------\n\ndef search(request):\n \"\"\"\n The base call for API methods 'list' and 'tags'\n \n Uses URL, Query string and form input to query trackdb for trackid's that match tags and keyworkds\n \n returns action_ok dict\n - that is overlalyed by additional query data by 'list' and 'tags' API calls\n \n data {\n tags - tags involved in this query\n keywords - keywords involved in this query\n trackids - of all tracks returned by the tag/keyword search (used by calling methods querys)\n sub_tags_allowed - a list of tags that will be displayed for the next query (differnt catagorys may have differnt browsing patterns)\n }\n \"\"\"\n # Hack - remove any format tags from route match - idealy this would be done at the route level\n url = re.sub('|'.join(['\\.'+f for f in registered_formats()]),'',request.matchdict['tags'])\n \n try : tags = url.split('/')\n except: tags = []\n try : keywords = [keyword for keyword in re.findall(r'\\w+', request.params['keywords']) if keyword]\n except: keywords = []\n try : trackids = [trackid for trackid in re.findall(r'\\w+', request.params['trackids']) if trackid]\n except: trackids = []\n \n # Transform tag strings into tag objects # This involkes a query for each tag ... 
a small overhead\n # any tags that dont match are added as keywords\n tag_objs = []\n for tag in tags:\n tag_obj = get_tag(tag)\n if tag_obj:\n tag_objs.append(tag_obj)\n elif tag:\n keywords.append(tag)\n tags = tag_objs\n \n # If trackids not manually given in request - Get trackids for all tracks that match the tags and keywords\n if not trackids:\n trackids = DBSession.query(Track.id)\n for tag in tags:\n trackids = trackids.intersect( DBSession.query(Track.id).join(Track.tags).filter(Tag.id == tag.id ) )\n for keyword in keywords:\n trackids = trackids.intersect( DBSession.query(Track.id).join(Track.tags).filter(Tag.name.like('%%%s%%' % keyword)) )\n trackids = [trackid[0] for trackid in trackids.all()]\n \n # Limit sub tag categorys for last tag selected\n # and remove any selection of previous tags with the same parent\n try : sub_tags_allowed = copy.copy(tag_cats[str(tags[-1])])\n except: sub_tags_allowed = copy.copy(tag_cats['root'])\n for tag in tags:\n try : sub_tags_allowed.remove(tag.parent.name)\n except: pass\n \n return action_ok(\n data={\n 'tags' : [str(tag) for tag in tags],\n 'keywords': keywords,\n 'trackids': trackids,\n 'sub_tags_allowed': sub_tags_allowed,\n }\n )\n \n\n@view_config(route_name='search_tags')\n@web\ndef tags(request):\n \"\"\"\n Browse tacks by 'tag'\n \n if there is only one track then redirect to show the single track\n if the number of tracks being browsed is less then 15 then redirect to 'list'\n \n Get all tags from all the tracks trackid's provided and count the number of occurances.\n \n return search dict + sub_tags( a list of all tags with counts )\n \"\"\"\n action_return = search(request)\n\n tags = action_return['data']['tags']\n keywords = action_return['data']['keywords']\n sub_tags_allowed = action_return['data']['sub_tags_allowed']\n trackids = action_return['data']['trackids']\n \n # If html request then we want to streamline browsing and remove redundent extra steps to get to the track list or track\n\n \n if request.matchdict['format']=='html':\n # If there is only one track present - abort and redirect to single track view, there is no point in doing any more work\n if len(trackids)== 1:\n raise HTTPFound(location=track_url(trackids[0]))\n # If there is only a small list, we might as well just show them all\n if len(trackids)< 15:\n raise HTTPFound(location=search_url(tags=tags,keywords=keywords,route='search_list'))\n \n # Get a list of all the tags for all the trackids\n # Group them by tag name\n # only allow tags in the allowed list (there could be 100's of title's and from's), we just want the browsable ones\n alias_parent_tag = aliased(Tag)\n sub_tags = DBSession.query(Tag ,func.count(TrackTagMapping.tag_id)).\\\n join(TrackTagMapping).\\\n join(alias_parent_tag, Tag.parent).\\\n filter(TrackTagMapping.track_id.in_(trackids)).\\\n filter(alias_parent_tag.name.in_(sub_tags_allowed)).\\\n group_by('tag_1.id',alias_parent_tag.name,Tag.id).\\\n order_by(alias_parent_tag.name,Tag.name).\\\n options(joinedload(Tag.parent))\n \n # AllanC - RRRRRRRAAAAAAAAA!!!! Postgres creates an alias 'tag_1.id' under the hood, but wont actually return results unless it's in the group_by clause\n # it works without the tag_1.id in sqllite. 
So right now, the SQLLite version is broken with 'tag_1' and postgres dies without it.\n # is there a way to alias this properly?\n # tried alias's 'tag_1.id','tag_2.name'\n \n action_return['data'].update({\n 'sub_tags': [update_dict(tag.to_dict('full'),{'count':count}) for tag,count in sub_tags],\n })\n return action_return\n\n\n@view_config(route_name='search_list')\n@web\ndef list(request):\n \"\"\"\n Browse tacks by 'list'\n \n List all the tracks listed in trackids\n \n return search dict (see above) + tracks (a list of tracks with basic details)\n \"\"\"\n\n action_return = search(request)\n\n trackids = action_return['data']['trackids']\n\n tracks = DBSession.query(Track).\\\n filter(Track.id.in_(trackids)).\\\n options(\\\n joinedload(Track.tags),\\\n joinedload(Track.attachments),\\\n joinedload('tags.parent'),\\\n )\n \n action_return['data'].update({\n 'tracks' : [track.to_dict('full', exclude_fields='lyrics,attachments') for track in tracks],\n })\n return action_return\n\n","sub_path":"website/karakara/views/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":7358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533621106","text":"import kfserving\nimport json\nimport base64\nfrom PIL import Image\nimport io\nfrom model_handler import ModelHandler\nimport os\nimport argparse\nimport yaml\n\nDEFAULT_MODEL_NAME = \"model\"\n\nclass KFServingSampleModel(kfserving.KFModel):\n def __init__(self, name: str):\n super().__init__(name)\n self.name = name\n self.ready = False\n\n def load(self):\n self.infer_model = ModelHandler()\n self.ready = True\n\n def predict(self, request):\n if isinstance(request,str):\n request = json.loads(request)\n buf0 = io.BytesIO(base64.b64decode(request[\"image0\"].encode('utf-8')))\n buf1 = io.BytesIO(base64.b64decode(request[\"image1\"].encode('utf-8')))\n threshold = float(request.get(\"threshold\", 0.5))\n max_distance = float(request.get(\"max_distance\", 50))\n image0 = Image.open(buf0)\n image1 = Image.open(buf1)\n boxes0 = request[\"boxes0\"]\n boxes1 = request[\"boxes1\"]\n results = self.infer_model.infer(image0, boxes0,image1, boxes1, threshold, max_distance)\n return json.dumps(results)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\n parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\n args, _ = parser.parse_known_args()\n model = KFServingSampleModel(args.model_name)\n model.load()\n kfserving.KFServer(workers=1).start([model])\n","sub_path":"serverless/openvino/omz/intel/person-reidentification-retail-300/nuclio/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"278450949","text":"# --------------------------------------------------------\n# Focal loss\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Written by unsky https://github.com/unsky/\n# --------------------------------------------------------\n\n\"\"\"\nFocal loss \n\"\"\"\n\nfrom __future__ import print_function\nimport mxnet as mx\nimport numpy as np\n\nclass FocalLossOperator(mx.operator.CustomOp):\n def __init__(self, gamma, alpha,use_ignore,ignore_label):\n super(FocalLossOperator, self).__init__()\n self._gamma = gamma\n self._alpha = alpha \n self.use_ignore = use_ignore\n self.ignore_label = ignore_label\n # self.normalize = 
normalize\n self.normalize = True\n self.eps=1e-14\n # print('Focalloss params: ',self._gamma,self._alpha,self.use_ignore,self.ignore_label)\n\n def forward(self, is_train, req, in_data, out_data, aux):\n # print('----forward----')\n # for index,data in enumerate(in_data):\n # print('in_data[{}].shape:{}'.format(index,data.shape))\n # self.label = in_data[1]\n \n self.assign(out_data[0], req[0], in_data[0])\n\n def backward(self, req, out_grad, in_data, out_data, in_grad, aux):\n # print('----backward----')\n # for index,data in enumerate(in_data):\n # print('in_data[{}].shape:{}'.format(index,data.shape))\n # label = mx.nd.reshape(in_data[1], (0, 1, -1))\n # label = mx.nd.reshape(in_data[1], (-1, 1))\n label = in_data[1]\n # print('label:',label)\n # print('label.shape:',label.shape)\n # p = mx.nd.pick(in_data[0], label, axis=1, keepdims=True)\n # print('p.shape',p.shape)\n\n n_class = in_data[0].shape[1]\n # print('n_class:',n_class)\n\n p_t = in_data[0] \n # print('p_t.shape:',p_t.shape)\n\n a_t = (label > 0) * self._alpha + (label == 0) * (1 - self._alpha) # rescale background & foreground\n a_t = mx.nd.broadcast_axis(mx.nd.reshape(a_t,(-1,1)), axis=1, size=p_t.shape[1])\n # print('a_t[0]:',a_t[0])\n # print('a_t.shape:',a_t.shape)\n label_mask = mx.nd.one_hot(label, n_class, on_value=1, off_value=0)\n # print('label_mask.shape:',label_mask.shape)\n # p_t = p_t * label_mask\n # p_t += self.eps\n # print('p_t[0]:',p_t[0])\n dp_t = (-a_t) * label_mask * mx.nd.power(1-p_t, self._gamma-1.0) * (1-p_t-(self._gamma*p_t*mx.nd.log(mx.nd.maximum(p_t, self.eps)))) / p_t\n # print('dp_t.shape:',dp_t.shape)\n\n\n # u = 1 - p - (self._gamma * p * mx.nd.log(mx.nd.maximum(p, self.eps)))\n # v = 1 - p if self._gamma == 2.0 else mx.nd.power(1 - p, self._gamma - 1.0)\n # a = (label > 0) * self._alpha + (label == 0) * (1 - self._alpha) # rescale background & foreground\n # print('u.shape:',u.shape,'\\nv.shape:',v.shape,'\\na.shape:',a.shape)\n # gf = v * u * a.reshape((0,1))\n # print('gf.shape:',gf.shape)\n \n # label_ = mx.nd.reshape(label, (0, -1))\n # print('label.shape:',label.shape)\n\n # label_mask = mx.nd.one_hot(label, n_class, on_value=1, off_value=0)\n # print('label_mask:',label_mask)\n # label_mask = mx.nd.transpose(label_mask, (0, 2, 1))\n # label_mask = mx.nd.reshape(label_mask,(0, -1))\n # gf = mx.nd.reshape(gf,(0,))\n\n # print('label_mask.shape:',label_mask.shape)\n # print('gf.shape:',gf.shape)\n\n # g = (in_data[0] - label_mask) * gf\n # g = gf * (in_data[0] - label_mask)\n # g *= (label >= 0) #\n\n if self.normalize:\n # g /= mx.nd.sum(label > 0).asscalar()\n # print('dp_t[0] before norm:',dp_t[0])\n # dp_t /= mx.nd.sum(label > 0).asscalar()\n dp_t /= in_data[1].shape[0]\t\t# 128 \n # print('norm scale:',mx.nd.sum(label > 0).asscalar())\n # print('dp_t[0] after norm:',dp_t[0])\n \n\n self.assign(in_grad[0], req[0], dp_t)\n self.assign(in_grad[1], req[1], 0)\n # self.assign(in_grad[2], req[2], 0) \n\n # assert False, 'Debugging'\n\n@mx.operator.register('FocalLoss')\nclass FocalLossProp(mx.operator.CustomOpProp):\n def __init__(self, gamma,alpha,use_ignore,ignore_label):\n super(FocalLossProp, self).__init__(need_top_grad=False)\n # self.use_ignore = bool(use_ignore)\n self.use_ignore = False if use_ignore == 'False' else True\n print('use_ignore: ',self.use_ignore)\n self.ignore_label = int(ignore_label)\n\n self._gamma = float(gamma)\n self._alpha = float(alpha)\n\n def list_arguments(self):\n return ['data', 'labels']\n\n def list_outputs(self):\n return ['focal_loss']\n\n def 
infer_shape(self, in_shape):\n data_shape = in_shape[0]\n labels_shape = in_shape[1]\n out_shape = data_shape\n\n return [data_shape, labels_shape],[out_shape]\n\n def create_operator(self, ctx, shapes, dtypes):\n return FocalLossOperator(self._gamma,self._alpha,self.use_ignore,self.ignore_label)\n\n # def declare_backward_dependency(self, out_grad, in_data, out_data):\n # return []\n","sub_path":"mxnet-cubicle/obj-det/rcnn/rcnn/operator_py/focal_loss.py","file_name":"focal_loss.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"78675451","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\nclass StockInternalTransfer(models.TransientModel):\n _name = 'stock.internal.move'\n\n destination_location_id = fields.Many2one('stock.location', string=\"Internal Location\")\n \n\n# @api.multi\n def stock_transfer(self):\n '''\n To move receiving stock to an internal location \n '''\n picking = self.env['stock.picking'].browse(self.env.context.get('active_id', False))\n if picking and picking.picking_type_id and picking.picking_type_id.code == 'incoming':\n picking_type = self.env['stock.picking.type'].search([('code', '=', 'internal')], limit=1)\n for move in picking.move_lines:\n values = {\n 'product_id': move.product_id.id,\n 'partner_id': move.picking_id.partner_id.id,\n 'origin': move.picking_id and move.picking_id.origin or False,\n 'product_uom': move.product_uom.id,\n 'price_unit': move.price_unit,\n 'product_uom_qty': move.product_uom_qty,\n 'picking_type_id': picking_type.id,\n 'location_id': move.location_dest_id.id,\n 'location_dest_id': self.destination_location_id.id,\n 'name': move.product_id.name,\n 'quantity_done': move.quantity_done,\n }\n new_move = self.env['stock.move'].create(values)\n new_move._action_confirm()\n new_move._action_assign()\n new_move._action_done()\n if new_move.state != 'done':\n raise UserError(_('Currently mentioned quantities are not available at the location !'))\n picking.update({'is_transfer_done': True})\n\nStockInternalTransfer()\n","sub_path":"container_management/wizard/stock_internal_transfer.py","file_name":"stock_internal_transfer.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"167522212","text":"############ VARS ############\ninitialise = {}\ntoImport = {'youtube_dl':'','requests':''}\n########### SETUP ###########\nfor i in toImport:\n defult = False if toImport[i] != '' else True\n exec(str('from ' if defult == False else 'import ')+str(i)+str(' import ' + toImport[i] if defult == False else ''))\ndef colored(text, color):\n ColorCodes = {'black':'30','red':'31','yellow':'33','green':'32','blue':'34','cyan':'36','magenta':'35','white':'37','gray':'90','reset':'0'}\n return '\\033[' + ColorCodes[str(color).lower()] + 'm' + str(text) + \"\\033[0m\"\nfor i in initialise:\n exec(i + '=' + initialise[i])\n######### FUNCTIONS #########\ndef Intro():\n print(colored('Welcome to Connor\\'s Youtube Video Downloader!','green')+colored(' V0.1','magenta'))\n print(colored('\\nEnter Download Location (Leave Blank For Defult)','cyan'))\n \n print('\\n'+colored('Enter Video Link to start!','blue'))\ndef CheckLink(URL):\n if URL == False:\n quit()\n else:\n return URL\ndef URLparse(url):\n return url.split('?v=')[1]\ndef 
URLdataGet(URL):\n data = requests.get(URL)\n return str(data.content)\ndef YoutubeDownload(VideoID,Download):\n try:\n ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})\n with ydl:\n result = ydl.extract_info(VideoID,download=Download)\n return result\n except:\n return False\ndef GetUserLink():\n UserLink = input(colored('>>> ','green'))\n try:\n Link = str(UserLink)\n VideoData = YoutubeDownload(URLparse(Link),False)\n if VideoData != False:\n print(colored(\"Is this the video you want? \",'cyan')+colored(str(VideoData['title']),'magenta')+colored(' (Y/N)','blue'))\n UserInput = str(input(colored('>>> ','green'))).lower()\n if UserInput == 'y':\n return Link\n else:\n return False\n else:\n print(colored('Invalid Link...','red'))\n return GetUserLink()\n except:\n print(colored('Invalid Link...','red'))\n return GetUserLink()\n####### MAIN FUNCTION #######\ndef main():\n Intro()\n YoutubeDownload(URLparse(CheckLink(GetUserLink())),True)\nif __name__ == \"__main__\":\n main()","sub_path":"YouTubeVideoDownloader/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"349299247","text":"# 字典\r\n'''\r\n\r\n字典 dictionary ,在一些编程语言中也称为 hash , map ,是一种由键值对组成的数据结构。\r\n\r\n顾名思义,我们把键想象成字典中的单词,值想象成词对应的定义,那么——\r\n\r\n一个词可以对应一个或者多个定义,但是这些定义只能通过这个词来进行查询。\r\n'''\r\n\r\n# 基本操作\r\n# python 使用{} 或者dict() 来创造一个空的字典\r\n\r\na = {}\r\ntype(a)\r\n\r\na = dict()\r\ntype(a)\r\n\r\n# 有了dict 之后可以用索引键值的方法添加元素,也可以通过索引来查看元素的值\r\na['one'] = 'this is number 1'\r\na['two'] = 'this is number 2'\r\n\r\na\r\n\r\n# 查看键值\r\na['one']\r\n\r\n# 更新键值\r\na['one'] = 'this is number 1, too'\r\na['one']\r\n\r\n# 初始化字典\r\nb = {'one': 'this is number 1', 'two': 'this is number 2'}\r\nprint(a)\r\nprint(b)\r\n\r\n'''\r\n键必须是不可变的类型\r\n出于hash的目的,Python中要求这些键值对的键必须是不可变的,而值可以是任意的Python对象。\r\n\r\n一个表示近义词的字典\r\n'''\r\n\r\nsynonyms = {}\r\nsynonyms['mutable'] = ['changeable', 'variable', 'varying', 'fluctuating',\r\n 'shifting', 'inconsistent', 'unpredictable',\r\n 'inconstent', 'fickle', 'uneven', 'unstable', ' protean']\r\nsynonyms['immutable'] = ['fixed', 'set', 'rigid', 'inflexible',\r\n 'permanent', 'established', 'carved in stone']\r\nsynonyms\r\n\r\n# 定义四个字典\r\ne1 = {'mag': 0.05, 'width': 20}\r\ne2 = {'mag': 0.04, 'width': 25}\r\ne3 = {'mag': 0.05, 'width': 80}\r\ne4 = {'mag': 0.03, 'width': 30}\r\n# 以字典作为值传入新的字典\r\nevents = {500: e1, 760: e2, 3001: e3, 4180: e4}\r\nevents\r\n\r\n# 使用dict初始化字典\r\n\r\ninventory = dict(\r\n [('foozelator', 123),\r\n ('frombicator', 18),\r\n ('spatzleblock', 34),\r\n ('snitzelhogen', 23)])\r\ninventory\r\n\r\n# 利用索引直接更新对应键值\r\ninventory['frombicator'] += 1\r\ninventory\r\n\r\n# 有时,也可以使用元组作为键值,例如,可以用元组做键来表示从第一个城市飞往第二个城市航班数的多少\r\n\r\nconnections = {}\r\nconnections[('New York', 'Seattle')] = 100\r\nconnections[('Austin', 'New York')] = 200\r\nconnections[('New York', 'Austin')] = 400\r\n\r\n# 元组是有序的,因此 ('New York', 'Austin') 和 ('Austin', 'New York') 是两个不同的键\r\nprint(connections[('Austin', 'New York')])\r\nprint(connections[('New York', 'Austin')])\r\n\r\n\r\n# 字典方法\r\n'''\r\nget 方法\r\n之前已经见过,用索引可以找到一个键对应的值,但是当字典中没有这个键的时候,Python会报错,这时候可以使用字典的 get 方法来处理这种情况,其用法如下:\r\n\r\nd.get(key, default = None)\r\n\r\n返回字典中键 key 对应的值,如果没有这个键,返回 default 指定的值(默认是 None )。\r\n'''\r\na = {}\r\na[\"one\"] = \"this is number 1\"\r\na[\"two\"] = \"this is number 2\"\r\n\r\n# a['three'] # 报错\r\nprint(a.get('three'))\r\n# 指定默认参数\r\nprint(a.get('three', 'undefined'))\r\n\r\n'''\r\npop 方法删除元素\r\npop 
方法可以用来弹出字典中某个键对应的值,同时也可以指定默认参数:\r\n\r\n`d.pop(key, default = None)`\r\n\r\n删除并返回字典中键 key 对应的值,如果没有这个键,返回 default 指定的值(默认是 None )。\r\n'''\r\n\r\na\r\na.pop('two')\r\n# 弹出不存在的键值\r\na.pop('two', 'not exist')\r\n\r\n# del 函数可以用来删除字典中特定的键值对\r\ndel a[\"one\"]\r\na\r\n\r\n'''\r\nupdate方法更新字典\r\n之前已经知道,可以通过索引来插入、修改单个键值对,但是如果想对多个键值对进行操作,这种方法就显得比较麻烦,好在有 update 方法:\r\n\r\n`d.update(newd)`\r\n\r\n将字典newd中的内容更新到d中去。\r\n'''\r\nperson = {}\r\nperson['first'] = \"Jmes\"\r\nperson['last'] = \"Maxwell\"\r\nperson['born'] = 1831\r\nprint(person)\r\n\r\nperson_modifications = {'first': 'James', 'middle': 'Clerk'}\r\nperson.update(person_modifications)\r\nprint(person)\r\n\r\n'''\r\nin 查询字典中是否有该键\r\n'''\r\n\r\nbarn = {'cows': 1, 'dog': 5, 'cats': 3}\r\n'chickens' in barn\r\n'cows' in barn\r\n\r\n\r\n'''\r\nkeys 方法,values 方法和items 方法\r\nd.keys()\r\n\r\n返回一个由所有键组成的列表;\r\nd.values()\r\n\r\n返回一个由所有值组成的列表;\r\nd.items()\r\n\r\n返回一个由所有键值对元组组成的列表;\r\n'''\r\n\r\nbarn.keys()\r\nbarn.values()\r\nbarn.items()\r\n","sub_path":"var/notes/python/essentials/007-dictionaries.py","file_name":"007-dictionaries.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146821875","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/diehard/overlapping_permutations.py\n# Compiled at: 2018-02-25 03:28:02\n# Size of source mod 2**32: 1349 bytes\n\"\"\" overlapping_permutations.py \"\"\"\nfrom functools import wraps\n\n@wraps\ndef preprocess(func):\n \"\"\" Standardizes inputs\"\"\"\n\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n\ndef chunker(arr, batch_size, overlapping=False, complete=False):\n \"\"\" Chunk an array into smaller sequences\n\n complete:\n For only returning batches that have length n\n overlapping:\n For generating batches that overlap\n \"\"\"\n skip = batch_size\n if overlapping:\n skip = 1\n start = 0\n end = len(arr)\n for i in range(start, end, skip):\n batch = arr[i:i + batch_size]\n if not complete or len(batch) == batch_size:\n yield batch\n else:\n break\n\n\n@preprocess\ndef overlapping_permutations(arr, cons=5):\n \"\"\" Analyze sequences of n consecutive real numbers. All possible\n orderings should occur with statistically equal probability\n\n PARAMETERS\n ----------\n arr: numpy.array\n 1D list\n\n RETURNS\n -------\n \"\"\"\n chunks = chunker(arr, cons, overlapping=True, complete=True)\n list(chunks)\n chunks = chunker(arr, cons, overlapping=True, complete=False)\n list(chunks)\n chunks = chunker(arr, cons, overlapping=False, complete=False)\n list(chunks)\n chunks = chunker(arr, cons, overlapping=False, complete=True)\n list(chunks)","sub_path":"pycfiles/diehard-0.0.1-py3.6/overlapping_permutations.cpython-36.py","file_name":"overlapping_permutations.cpython-36.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"264679112","text":"##Let d(n) be defined as the sum of proper divisors of n (numbers less than n\n## which divide evenly into n).\n##If d(a) = b and d(b) = a, where a b, then a and b are an amicable pair\n##and each of a and b are called amicable numbers.\n##\n##For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22,\n##44, 55 and 110; therefore d(220) = 284. 
The proper divisors of 284 are\n##1, 2, 4, 71 and 142; so d(284) = 220.\n##\n##Evaluate the sum of all the amicable numbers under 10000.\n\n# to-do\n# create a dictionary of values\n# original number : sum of proper divisors\n# for each new number, check if sum of proper divisors : original number\n# is in dictionary\n\nfrom time import time\nfrom math import sqrt\nt_start = time()\n\ndef euler21():\n def find_divisor_sum(n):\n total = 1 # already counts one as a divisor. Only works for numbers > 1\n root_n = sqrt(n)\n if root_n % 1 == 0:\n total += int(root_n)\n number = 2\n while number < root_n:\n # goes through integers until sqrt is reached and adds bottom and\n # top numbers to total\n if n % number == 0:\n total += number\n total += n / number\n number += 1\n return total\n\n\n def sum_amic(limit):\n count = 1\n a = 0\n total = 0\n for count in range(1, limit):\n b = find_divisor_sum(count)\n if count == find_divisor_sum(b) and count != b:\n total += count\n return total\n\n print(sum_amic(10000))\n \neuler21()\nprint(time() - t_start)\n","sub_path":"code/Python/old/Euler/p21_v3.py","file_name":"p21_v3.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"196288478","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils import unittest\nfrom django.test.client import Client\n\nimport pynag.Model\nimport adagios.settings\npynag.Model.cfg_file = adagios.settings.nagios_config\n\nclass TestObjectBrowser(unittest.TestCase):\n def testNagiosConfigFile(self):\n result = pynag.Model.ObjectDefinition.objects.all\n config = pynag.Model.config.cfg_file\n self.assertGreaterEqual(len(result),0, msg=\"Parsed nagios.cfg, but found no objects, are you sure this is the right config file (%s) ? 
\" % config )\n def testIndexPage(self):\n c = Client()\n response = c.get('/objectbrowser/')\n self.assertEqual(response.status_code, 200)","sub_path":"adagios/objectbrowser/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"172056115","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 9 10:56:12 2020\n\n@author: lenovo\n\"\"\"\n# import necessary libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n# set varibles for susceptible, infected, recovered\nN=10000\nS=9999\nI=1\nR=0\nSn=[S]\nIn=[I]\nRn=[R]\ntime=[0]\nbeta=0.3\ngamma=0.05\n# repeat 1000 times\nfor i in range(0,1000):\n # randomly find the newly infected people with the probability beta*I/N, 1 means infected, 0 means susceptible\n ni=np.random.choice(range(0,2),S,p=[1-beta*I/N,beta*I/N])\n # randomly find the newly recovered people with the probability gamma, 2 means recovered\n nr=np.random.choice(range(1,3),I,p=[1-gamma,gamma])\n # count the number of newly infected people\n nin=sum(ni==1)\n # count the number of newly recovered people\n nrn=sum(nr==2)\n # the number of susceptible people change to the original number minus the number of newly infected people\n S-=nin\n Sn.append(S)\n # the number of infected people change to the original number plus the number of newly infected people and minus the number of newly recovered people\n I=I+nin-nrn\n In.append(I)\n # the numeber of recovered people change to the original number plus the numebr of newly recovered people\n R+=nrn\n Rn.append(R)\n # record the time\n time.append(i+1)\n# make a plot for the number of susceptible, infected, and recovered people\nplt.figure(figsize=(6,4),dpi=150)\nplt.plot(time,Sn,'b',marker = ',',label='Susceptible')\nplt.plot(time,In,'r',marker=',',label='Infected')\nplt.plot(time,Rn,'g',marker=',',label='Recovered')\nplt.title('SIR model')\nplt.xlabel('time')\nplt.ylabel('number of people')\nplt.legend()\nplt.savefig('SIR model',type='png')","sub_path":"Practical13/SIR.py","file_name":"SIR.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"116148848","text":"\"\"\"\nЗапросите у пользователя значения выручки и издержек фирмы. Определите, с каким финансовым результатом\nработает фирма (прибыль — выручка больше издержек, или убыток ��� издержки больше выручки).\nВыведите соответствующее сообщение. Если фирма отработала с прибылью, вычислите рентабельность\nвыручки (соотношение прибыли к выручке). 
Далее запросите численность сотрудников фирмы\nи определите прибыль фирмы в расчете на одного сотрудника.\n\"\"\"\n\nproceeds = float(input('Введите значение выручки фирмы (в у.е.):\\n>>>'))\ncosts = float(input('Введите размер издержек фирмы (в у.е.):\\n>>>'))\nprofit = round(proceeds - costs, 2)\nloss = round(costs - proceeds, 2)\nprofitability = round(profit/proceeds, 2)\n\nif proceeds > costs:\n print(f'Финансовый результат - прибыль {profit} (у.е.)')\n print(f'Рентабельность выручки {profitability}')\n number_employees = int(input('Введите количество сотрудников фирмы:\\n>>>'))\n profit_employees = round(profit / number_employees, 2)\n print(f'Прибыль на одного сотрудника {profit_employees} (у.е.)')\nelse:\n print(f'Финансовый результат - убыток {loss} (у.е.)')\n","sub_path":"homeworks/les1/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"406692140","text":"# Libraries\n# Standard library\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport requests\nimport PIL.Image\nimport urllib\nimport cv2\nimport random\nimport matplotlib.pyplot as plt\nfrom urllib.error import HTTPError\nfrom tempfile import TemporaryFile\nfrom scipy import signal\nimport math\n\n\ndef url_to_image(url, dim):\n try:\n resp = urllib.request.urlopen(url)\n except HTTPError as err:\n print(\"Image {} is not loaded. Error {}\".format(url, err.code))\n else:\n img = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n img = cv2.resize(img, dim, interpolation=cv2.INTER_NEAREST)\n return img\n\n\ndef loadImageNetBuilding():\n page = requests.get(\n \"http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n14785065\") # ship synset\n print(page.content)\n # puts the content of the website into the soup variable, each url on a different line\n soup = BeautifulSoup(page.content, 'html.parser')\n str_soup = str(soup) # convert soup to string so it can be split\n urls = str_soup.splitlines()\n urls = urls[:-1]\n img_rows, img_cols = 96, 96\n input_shape = (img_rows, img_cols)\n\n img_matrixes = tuple(url_to_image(urls[idx], input_shape)\n for idx in range(3)) # range(len(urls)))\n\n np.savez(\"outfile.npz\", img_matrixes)\n data = np.load(\"outfile.npz\", allow_pickle=True)\n for idxFiles in data.files:\n arr = data[idxFiles]\n print(arr)\n\n\nclass Network(object):\n def __init__(self, sizes):\n self.sizes = sizes\n np.random.seed(123)\n self.weights = [np.random.rand(i, o)\n for i, o in zip(sizes[1:], sizes[:-1])]\n\n self.biases = [np.random.rand(d, 1)\n for d in sizes[1:]]\n\n def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):\n cost = []\n for ep in range(epochs):\n print(\"Epoch {} complete\".format(ep))\n np.random.seed(123)\n # random.shuffle(training_data)\n\n for k in range(0, len(training_data), mini_batch_size):\n mini_batch = training_data[k:k+mini_batch_size]\n self.update_mini_batch(mini_batch, eta)\n print(\"Mini-Batch {} complete\".format(k))\n\n cost.append(sum(np.array(calculate_cost(\n training_data, self.weights, self.biases)).flatten()))\n plt.plot(cost)\n plt.show()\n\n def update_mini_batch(self, mini_batch, eta):\n nuble_Ws = [np.zeros(w.shape) for w in self.weights]\n nuble_Bs = [np.zeros(b.shape) for b in self.biases]\n\n for x, y in mini_batch:\n bnws, bnbs = self.backprop(x, y)\n nuble_Ws = [nws+bnw for nws, bnw in zip(nuble_Ws, bnws)]\n nuble_Bs = [nbs+bbw for nbs, bbw in 
zip(nuble_Bs, bnbs)]\n\n self.weights = [w-(eta/len(mini_batch)*nw)\n for w, nw in zip(self.weights, nuble_Ws)]\n self.biases = [b-(eta/len(mini_batch)*nb)\n for b, nb in zip(self.biases, nuble_Bs)]\n\n def backprop(self, x, y):\n derivative_by_w = [np.zeros_like(w) for w in self.weights]\n derivative_by_b = [np.zeros_like(b) for b in self.biases]\n zz = []\n aa = [x]\n\n for w, b in zip(self.weights, self.biases):\n z = w@aa[-1] + b\n a = sigmoid(z)\n zz.append(z)\n aa.append(a)\n\n d = (aa[-1]-y)*sigmoid_prime(zz[-1])\n derivative_by_b[-1] += d\n derivative_by_w[-1] += d@aa[-2].T\n\n for l in reversed(range(1, len(self.sizes)-1)):\n d = self.weights[l].T@d*sigmoid_prime(zz[l-1])\n derivative_by_b[l-1] += d\n derivative_by_w[l-1] += d@aa[l-1].T\n\n return (derivative_by_w, derivative_by_b)\n\n\nclass ConvLayer(object):\n def __init__(self, inputLayer, filters, biases):\n self.nextLayerDim = Helper.get_nextLayerDim(\n inputLayer.shape[0], filters[0].shape[0], 0, 1)\n\n self.filters = filters\n self.inputLayer = inputLayer\n self.biases = biases\n\n def convolve2d(self, input3d, filter3d, mode):\n result = sum([signal.convolve2d(\n input3d[:, :, iterDeepDim],\n filter3d[:, :, iterDeepDim], mode)\n for iterDeepDim in range(3)])\n return result\n\n def run(self):\n output_collection = [self.convolve2d(self.inputLayer, f, mode='valid')\n for f, b in zip(self.filters, self.biases)]\n self.output = np.stack(output_collection)\n return self.output\n\n\nclass ActivationLayer(object):\n def __init__(self, inputLayer, activationLogic):\n self.inputLayer = inputLayer\n self.activationLogic = activationLogic\n\n def run(self):\n self.output = self.activationLogic.function(self.inputLayer)\n return self.output\n\n\nclass PoolLayer(object):\n def __init__(self, inputLayer, poolingLogic):\n self.inputLayer = inputLayer\n self.poolingLogic = poolingLogic\n\n def run(self):\n self.output = self.poolingLogic(self.inputLayer)\n return self.output\n\n\ndef MaxPooling(z):\n output_dim = z.shape[0]/2 if z.shape[0] % 2 == 0 else (z.shape[0]+1)/2\n output = np.zeros((output_dim, output_dim, z.shape[2]))\n for d in range(z.shape[2]):\n for i in range(z.shape[0]):\n for j in range(z.shape[0]):\n output[i, j, d] = z[i:i+2, j:j+2, d].max()\n return output\n\n\nclass ActivationLogicReLU(object):\n @staticmethod\n def function(z):\n return max(0, z)\n\n @staticmethod\n def function_prime(z):\n return 1 if z > 0 else 0\n\n\nclass ActivationLogicSoftplus(object):\n @staticmethod\n def function(z):\n return math.log(1+math.exp(z))\n\n @staticmethod\n def function_prime(z):\n return 1/(1+math.exp(-z))\n\n\nclass Helper(object):\n @staticmethod\n def get_nextLayerDim(inputDim, filterDim, padding, step):\n return (inputDim-filterDim+2*padding)/step+1\n\n\nloadImageNetBuilding()\n","sub_path":"src/muConv.py","file_name":"muConv.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"536447737","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2017-10-12 09:56 zq \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nEvalution for fog project.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nfrom torchvision import datasets, transforms, models\nimport torchvision\nimport time\nimport os\nfrom PIL import Image, ImageFile\n\nMODEL_NAME=\"./model_avgpool_best.pth.tar\"\n\nif os.path.exists(MODEL_NAME):\n model_weights = torch.load(MODEL_NAME)\nelse:\n raise IOError\n\ndata_transform = {\n \"test\": transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]), \n}\n\ndata_dir = '.'\nim_dataset = {\"test\": datasets.ImageFolder(os.path.join(data_dir, \"test\"), data_transform[\"test\"])}\ndataloader = {\"test\": torch.utils.data.DataLoader(im_dataset[\"test\"], batch_size=8, shuffle=False, num_workers=8)}\n\ndataset_sizes = {\"test\": len(im_dataset[\"test\"])}\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nuse_gpu = torch.cuda.is_available()\n#use_gpu = False\n\ndef eval(model):\n since = time.time()\n running_corrects = 0\n\n for ii, data in enumerate(dataloader[\"test\"]):\n (inputs, labels) = data\n if use_gpu:\n inputs = Variable(inputs.cuda())\n labels = Variable(labels.cuda())\n else:\n inputs, labels = Variable(inputs), Variable(labels)\n outputs = model(inputs)\n _, preds = torch.max(outputs.data, 1)\n running_corrects += torch.sum(preds == labels.data)\n\n final_acc = running_corrects / ((ii+1)*4)\n print (\" | Accuracy: {}\".format(final_acc))\n print (\" | Consume time {}s\".format(time.time() - since))\n\nmodel = models.resnet18(pretrained=True)\n\nclass novelmodel(nn.Module):\n def __init__(self):\n super(novelmodel, self).__init__()\n self.features = nn.Sequential(\n *list(model.children())[:-2]\n )\n self.conv1 = torch.nn.Conv2d(512, 3, kernel_size=(1, 1), stride=2)\n self.avgpool = torch.nn.AvgPool2d(4)\n def forward(self, x):\n #print (\"Feature size: {}\".format(x.size()))\n x = self.features(x)\n x = self.conv1(x)\n #print (\"Conv1 size: {}\".format(x.size()))\n x = self.avgpool(x)\n #x = F.max_pool2d(x, kernel_size=x.size()[2:])\n x = x[:, :, 0, 0]\n return x\n\nmodel = novelmodel()\n\nmodel.load_state_dict(model_weights)\nif use_gpu:\n model = model.cuda()\n\nacc = eval(model)\n\n","sub_path":"eval_resnet18_avg.py","file_name":"eval_resnet18_avg.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"161996151","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\n\nimport ray\nimport ray.rllib.policy_gradient as pg\nimport ray.rllib.evolution_strategies as es\nimport ray.rllib.dqn as dqn\nimport ray.rllib.a3c as a3c\n\nparser = argparse.ArgumentParser(\n description=(\"Train a reinforcement learning agent.\"))\nparser.add_argument(\"--env\", required=True, type=str)\nparser.add_argument(\"--alg\", required=True, type=str)\nparser.add_argument(\"--config\", default=\"{}\", type=str)\nparser.add_argument(\"--upload-dir\", default=\"file:///tmp/ray\", type=str)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n ray.init()\n\n env_name = args.env\n if args.alg == \"PolicyGradient\":\n config = 
pg.DEFAULT_CONFIG.copy()\n config.update(json.loads(args.config))\n alg = pg.PolicyGradient(\n env_name, config, upload_dir=args.upload_dir)\n elif args.alg == \"EvolutionStrategies\":\n config = es.DEFAULT_CONFIG.copy()\n config.update(json.loads(args.config))\n alg = es.EvolutionStrategies(\n env_name, config, upload_dir=args.upload_dir)\n elif args.alg == \"DQN\":\n config = dqn.DEFAULT_CONFIG.copy()\n config.update(json.loads(args.config))\n alg = dqn.DQN(\n env_name, config, upload_dir=args.upload_dir)\n elif args.alg == \"A3C\":\n config = a3c.DEFAULT_CONFIG.copy()\n config.update(json.loads(args.config))\n alg = a3c.A3C(\n env_name, config, upload_dir=args.upload_dir)\n else:\n assert False, (\"Unknown algorithm, check --alg argument. Valid \"\n \"choices are PolicyGradientPolicyGradient, \"\n \"EvolutionStrategies, DQN and A3C.\")\n\n result_logger = ray.rllib.common.RLLibLogger(\n os.path.join(alg.logdir, \"result.json\"))\n\n while True:\n result = alg.train()\n\n # We need to use a custom json serializer class so that NaNs get\n # encoded as null as required by Athena.\n json.dump(result._asdict(), result_logger,\n cls=ray.rllib.common.RLLibEncoder)\n result_logger.write(\"\\n\")\n","sub_path":"python/ray/rllib/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"294336400","text":"import pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nfrom numpy import genfromtxt\nimport copy\nfrom part2 import EmissionParams\n\n# Global Variables ---\n\n#list of x(words) and y(labels)\nx = []\ny = []\nlengthDataSet = 0\nlsStates = []\nlengthStates = 0\nviterbiScoreTable = pd.DataFrame()\nviterbiStateTable = pd.DataFrame()\ntransParamsTable = pd.DataFrame()\nstopScore = 0\nstopState = ''\nsequence = []\ndf = pd.DataFrame()\n# ---\n\n# Import training data ---\ndef df_train(path):\n \n global x\n global y\n global lengthDataSet\n global lsStates\n global lengthStates\n global df\n\n trainingdata = open(path).read().split('\\n')\n #list of x(words) and y(labels)\n # x.append('')\n # y.append(\"START\")\n for i in range(len(trainingdata)):\n if trainingdata[i] != '':\n word = trainingdata[i].split(' ')[0]\n label = trainingdata[i].split(' ')[1]\n x.append(word)\n y.append(label)\n \n # x.append('')\n # y.append(\"STOP\")\n\n #creates dataframe of unique x rows and unique y columns\n df = pd.DataFrame(index = flatten(x), columns = flatten(y)).fillna(0)\n\n # Aggregate the counts\n for w,lbl in zip(x,y):\n df.at[w,lbl] = df.at[w,lbl] + 1\n #print(w,lbl)\n #print(df.at[w,lbl])\n\n # Sort output in ascending order\n df = df.sort_index(ascending=True)\n print(\"--- Data ingested into df ---\")\n # Store list of uniqe states (y)\n lsStates = sorted(list(flatten(y)))\n #print(lsStates)\n lengthStates = len(lsStates)\n temp = pd.DataFrame(lsStates)\n temp.to_pickle('lsStates')\n\n lengthDataSet = len(x)\n return df , x , y\n# ---\n\n\n# Import test data ---\ndef df_test(path):\n \n global x\n global lengthDataSet\n global lengthStates\n\n lsStates = pd.read_pickle('lsStates')\n lsStates = sorted(list(flatten(lsStates.values.tolist())))\n lengthStates = len(lsStates)\n #print(lsStates)\n\n trainingdata = open(path).read().split('\\n')\n\n #list of x(words) and y(labels)\n for i in range(len(trainingdata)): \n if trainingdata[i] != '':\n word = trainingdata[i].split(' ')[0]\n x.append(word)\n\n #creates dataframe of unique x rows and unique y columns\n df = 
pd.DataFrame(index = flatten(x), columns = lsStates).fillna(0)\n\n # Aggregate the counts\n for w,lbl in zip(x,y):\n df.at[w,lbl] = df.at[w,lbl] + 1\n\n # Sort output in ascending order\n df = df.sort_index(ascending=True)\n print(\"--- Data ingested into df ---\")\n lengthDataSet = len(x)\n\n return df , x , y\n# ---\n\n\n\n# Helper function - returns unique list of elements from d ---\ndef flatten(d):\n return {i for b in [[i] if not isinstance(i, list) else flatten(i) for i in d] for i in b}\n# ---\n\n\n# Populate the Transition Params Table ---\n# Input: No input. But need to run df first.\n# Output: transParamsTable stored in a pickle\ndef transParamsTable():\n\n global transParamsTable\n\n #rows = ['START']\n rows = []\n columns = []\n for label in flatten(y):\n rows.append(label)\n columns.append(label)\n \n\n transitionParamsTable = pd.DataFrame(index = rows, columns = columns).fillna(0)\n print(transitionParamsTable)\n labels = copy.deepcopy(y)\n #labels.append('STOP')\n #labels.insert(0,'START')\n\n nextLabel = 0\n\n for i in range(len(labels)):\n if nextLabel != 'START':\n label = labels[i]\n nextLabel = labels[i+1]\n transitionParamsTable.at[label,nextLabel] = transitionParamsTable.at[label,nextLabel] + 1\n #print(nextLabel)\n\n summation = transitionParamsTable.sum()\n summation = summation.sort_index(ascending=True)\n #print(summation)\n\n for col in transitionParamsTable.columns:\n transitionParamsTable[col] = transitionParamsTable[col] / summation[col]\n \n print(\"--- Transition Parameters Table populated ---\")\n print(transitionParamsTable.sort_index())\n (transitionParamsTable.sort_index()).to_pickle('transitionParamsTable')\n# ---\n\n\n# Pre-processing for Pi function. Creation of viterbi Tables\n# Input: NONE\n# Output: viterbiScoreTable & viterbiStateTable\ndef preProc():\n\n global viterbiScoreTable \n global viterbiStateTable \n\n lsStates = pd.read_pickle('lsStates')\n lsStates = sorted(list(flatten(lsStates.values.tolist())))\n\n # Creation of Score Table\n viterbiScoreTable = pd.DataFrame(index = lsStates, columns = x).fillna(0)\n \n # Creation of State Table\n viterbiStateTable = pd.DataFrame(index = lsStates, columns = x).fillna(0)\n \n print(\"--- Preprocessing Completed ---\")\n\n\n\n# Find score of specific node ---\n# Input: j - column number (int), u - row number(int), n - length of data y\n# Output: Changes made to viterbiScoreTable and viterbiStateTable\ndef pi(j,u,n):\n \n global viterbiScoreTable\n global viterbiStateTable\n global stopScore\n global stopState\n global lengthStates\n\n lsStates = pd.read_pickle('lsStates')\n lsStates = sorted(list(flatten(lsStates.values.tolist())))\n\n # To load pre-process transParamsTable\n transitionParamsTable = pd.read_pickle('transitionParamsTable')\n #print(transitionParamsTable)\n u_label = lsStates[u]\n j_label = x[j]\n print(\"-----------------------\")\n print(\"-----------------------\")\n print(\"--- Computing Score for j-label: {} & u-label: {}. 
j: {}] ---\".format(j_label,u_label, j))\n\n # STOP\n if j == (n+1):\n lsPi = []\n j_1 = x[j-1]\n for state in range(lengthStates):\n piVal = viterbiScoreTable.iloc[state, j-1] * transitionParamsTable.at[lsStates[state], u_label]\n lsPi.append(piVal)\n # To generate max score\n maxScore = max(lsPi)\n # Since we do not have space accomodated for STOP in our df\n stopScore = maxScore\n # To generate corresponding prevState\n indxState = lsPi.index(maxScore)\n maxState = lsStates[indxState]\n # Since we do not have space accomodated for STOP in our df\n stopState = maxState\n \n # j == 0\n elif j == 0:\n piVal = 1 * transitionParamsTable.at['START', u_label]\n print(\"--- Max Score: ------------\",piVal)\n viterbiScoreTable.iloc[u,j] = piVal\n viterbiStateTable.iloc[u,j] = 'START'\n\n \n # Everything else\n elif j > 0 and j < (n+1):\n j_1 = x[j-1]\n print(\"j - 1 label ---\",j_1)\n lsPi = []\n for state in range(len(lsStates)):\n em = EmissionParams(j_1, u_label, k = 0.5)\n #print('EM-----------:', em)\n piVal = viterbiScoreTable.iloc[state,j-1] * transitionParamsTable.at[lsStates[state], u_label] * em\n lsPi.append(piVal)\n \n # To generate max score\n maxScore = max(lsPi)\n # Since we do not have space accomodated for STOP in our df\n viterbiScoreTable.iloc[u,j] = maxScore\n \n # To generate corresponding prevState\n indxState = lsPi.index(maxScore)\n #print(indxState)\n maxState = lsStates[indxState]\n # Since we do not have space accomodated for STOP in our df\n viterbiStateTable.iloc[u,j] = maxState\n\n print(\"--- Max Score: ------------\",maxScore)\n print(\"--- Max State: ------------\",maxState)\n\n# ---\n\n\n\n# Parent function for score calculation\n# Input: End Node (Will always start from 0)\n# Output: Modifications made to viterbiScoreTable and viterbiStateTable\ndef parentPi(end):\n\n global viterbiScoreTable\n global viterbiStateTable\n global stopScore\n global stopState\n global lengthStates\n\n print(\"--- Length of States:\",lengthStates)\n print(\"--- Length of Data Set:\",lengthDataSet)\n\n # Preprocessed nec lengths\n for i in range(0,end):\n for j in range(0,lengthStates):\n pi(j = i, u = j, n = lengthDataSet)\n\n print(\"-----------------------\")\n print(\"-----------------------\")\n print(\"--- Score Table:\")\n print(viterbiScoreTable)\n print(\"-----------------------\")\n print(\"-----------------------\")\n print(\"--- State Table:\")\n print(viterbiStateTable)\n (viterbiScoreTable.sort_index()).to_pickle('viterbiScoreTable')\n (viterbiStateTable.sort_index()).to_pickle('viterbiStateTable')\n\n\n# Backtracking funciton \n# Input: Which word to backtrack from (3 for 'close')\n# Output: Series of sentiments\ndef backtrack(s):\n\n global sequence\n\n viterbiScoreTable = pd.read_pickle('viterbiScoreTable')\n viterbiStateTable = pd.read_pickle('viterbiStateTable')\n\n # NOTE: NEED SOME HELP VISUALISING THE INDEXING FOR THIS PART.\n s = s - 1\n\n if s < 0 and s > lengthDataSet:\n print(\"--- Please select an appropriate value to backtrack from ---\")\n\n if s >= 0 and s <= lengthDataSet:\n for i in range(0, (s+1)):\n \n i = s - i\n if s == (lengthDataSet - 1):\n # Need to integrate stopState variable\n sequence.insert(0, stopState)\n print(\"--- LAST ONE ---\")\n print(\"--- For '{}' maximum scoring label is '{}' with a score of '{}'. ---\".format(x[i], stopState, stopScore))\n \n\n # Gather index with maximum value. Convert the output to type str. Split the output. ->\n # -> Retrieve the 2nd entry of the string which is the index value. 
Do some string cleaning\n maximumScoreIndex = viterbiScoreTable.iloc[:, [i]].idxmax().to_string().split(' ')[1][1:]\n #print(maximumScoreIndex)\n #print(x[i])\n #print(\"--- For '{}' maximum scoring label is '{}' with a score of '{}'. ---\".format(x[i], maximumScoreIndex, viterbiScoreTable.at[maximumScoreIndex,x[i]].to_string().split('\\n')[0]))\n print(\"--- For '{}' maximum scoring label is '{}' with a score of '{}'. ---\".format(x[i], maximumScoreIndex, ' '))\n sequence.insert(0, maximumScoreIndex)\n\n sequence.insert(0, 'START')\n print('--- Generated Sequence ---')\n print(sequence)\n print('--- Storing Generated sequence as a Pickle ---')\n temp_df = pd.DataFrame(sequence)\n (temp_df).to_pickle('sequence')\n return None\n\n\n### ISSUE #5S:\n### NOTE: THE STATE TABLE SETS THE STATE TO B-ADJP WHENEVER THE SCORE IS 0. CAN CONSIDER USING A SMALL NUMBER INSTEAD OF 0 CALCULATIONS.\n### NOTE: SOME INDEXING ISSUES WITH BACKTRACK.\n### NOTE: SOMETIMES THE LOGS WILL SHOW 0 VALUE FOR SOME VERY LOW VALUES.\n### RESOLVED:\n### NOTE: NEED TO SORT OUT RUNNING VITERBI FOR TEST SET. IT WORKS WELL FOR TRAINING SET. ```Can't seem to enter elif j > 0 and j < (n+1):```\n\n\n# Execution Script ---\n# RUN ONCE FOR FILE CREATION. THEN COMMENT OUT.\ndf, x, y = df_train('./Data/EN/train')\ntransParamsTable()\n\n\n\n# Comment the following for the first run. Then uncomment it for all following runs.\n# NOTE: TAKE NOTE OF FILE PATH ENTERED HERE!!!\ndf, x, y = df_test('./Data/EN/train')\npreProc()\nparentPi(30)\n#backtrack(27)\n# seq = pd.read_pickle('sequence')\n# l = list(flatten(seq.values.tolist()))\n# print(l)\n# ---\n","sub_path":"part3_deprecated.py","file_name":"part3_deprecated.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591229267","text":"# import json\nimport re\nimport pprint\n\"\"\"\nto check file encoding, run: enca -L ru 2.txt\n\n{\"analysis\":\n [\n {\"lex\":\"белый\",\n \"gr\":\"A=(вин,ед,полн,муж,неод|им,ед,полн,муж)\"\n }],\n\"text\":\"белый\"\n}\n\"\"\"\n\n# concatenate files in dir?\n# apply mystem\ndef parse_mystem():\n in_file = open('sent2.txt', encoding='utf-8-sig').read().split('\\n')\n #readlines\n in_py = []\n for line in in_file:\n if len(line) > 0:\n # print('.', eval(line))\n in_py.append(eval(line))\n # print(in_py[:3], '\\n===')\n wordline = []\n i=0\n for line in in_py:\n word = line['text']\n if len(line['analysis']) > 0:\n # print(word)\n analysis = line['analysis']\n w_lex = []\n w_pos = []\n for guess in analysis:\n lex = guess['lex']\n w_lex.append(lex)\n gr = re.split(\"\\W+\", guess['gr'])\n pos = gr[0]\n w_pos.append(pos)\n # print(word, w_lex, w_pos)\n # return word, w_lex, w_pos\n wordline.append({'i': i, 'word': word, 'lex': w_lex, 'pos': w_pos})\n else:\n wordline.append({'i': i, 'word': word, 'lex': [], 'pos': []})\n i+=1\n return wordline\n\npp = pprint.PrettyPrinter()\nparsed = parse_mystem()\n# pp.pprint(parsed)\n# print('==========')\ndef grab_features(wordline):\n \"\"\"\n [ (dict(a=1,b=1,c=1), 'y'), ...]\n \"\"\"\n features = []\n for i in range(1, len(wordline)):\n w = wordline[i]\n prev = wordline[i-1]\n features.append(({'lex': w['lex'][0],\n 'pos': w['pos'][0],\n 'prev_lex': prev['lex'][0],\n 'prev_pos': prev['pos'][0]}))\n return features\n\n\npp.pprint(grab_features(parsed))\n# data = grab_features(parsed)\n\n\n\nimport csv\ndef foo():\n fieldnames = ['lex', 'pos', 'prev_lex', 'prev_pos']\n\n with open('fromdict.csv', 'w') as out_file:\n writer = 
csv.DictWriter(out_file, delimiter=',', fieldnames=fieldnames)\n # writer.writeheader()\n for row in data:\n writer.writerow(row)\n\n print('done')","sub_path":"mystem_prs.py","file_name":"mystem_prs.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"105147696","text":"# PyTorch modules\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom data import create_encoders, encode_with, parse_and_extract\nfrom modules import *\nfrom utils import train\n\n# Parse the training data and create the character/POS encoders\ntrain_data = parse_and_extract(\"UD_English-ParTUT/en_partut-ud-train.conllu\")\ndev_data = parse_and_extract(\"UD_English-ParTUT/en_partut-ud-dev.conllu\")\nchar_enc, pos_enc = create_encoders(train_data)\n\n# Encode the train set\nenc_train = encode_with(train_data, char_enc, pos_enc)\nenc_dev = encode_with(dev_data, char_enc, pos_enc)\n\nmodel = nn.Sequential(\n Map(nn.Sequential(\n nn.Embedding(char_enc.size()+1, 50, padding_idx=char_enc.size()),\n SimpleLSTM(50, 200),\n Apply(lambda xs: xs[-1]),\n )),\n SimpleBiLSTM(200, 200),\n nn.Linear(200, pos_enc.size())\n)\n\n# # Create the POS tagging model, based on character-level embeddings\n# model = nn.Sequential(\n# Map(nn.Embedding(char_enc.size()+1, 50, padding_idx=char_enc.size())),\n# Map(SimpleLSTM(50, 200)),\n# Map(Apply(lambda xs: xs[-1])),\n# SimpleBiLSTM(200, 200),\n# nn.Linear(200, pos_enc.size())\n# )\n\ndef accuracy(model, data):\n \"\"\"Calculate the accuracy of the model on the given dataset\n of (encoded) input/output pairs.\"\"\"\n correct, total = 0, 0\n for x, y in data:\n pred_y = torch.argmax(model(x), dim=1)\n correct += (y == pred_y).sum()\n total += len(y)\n return float(correct) / total\n\n# Report size of the training data\nprint(\"# Train size =\", len(enc_train))\nprint(\"# Dev size =\", len(enc_dev))\n\nloss = nn.CrossEntropyLoss()\ntrain(model, enc_train, enc_dev, loss, accuracy, epoch_num=10, learning_rate=0.001, report_rate=1)\n# => @1: loss(train)=1166.856, acc(train)=0.909, acc(dev)=0.873\n# => @2: loss(train)=435.297, acc(train)=0.944, acc(dev)=0.899\n# => @3: loss(train)=276.842, acc(train)=0.960, acc(dev)=0.904\n# => @4: loss(train)=189.120, acc(train)=0.974, acc(dev)=0.921\n# => @5: loss(train)=130.238, acc(train)=0.981, acc(dev)=0.920\n# => @6: loss(train)=94.067, acc(train)=0.984, acc(dev)=0.920\n# => @7: loss(train)=69.997, acc(train)=0.991, acc(dev)=0.918\n# => @8: loss(train)=47.860, acc(train)=0.991, acc(dev)=0.920\n# => @9: loss(train)=39.960, acc(train)=0.995, acc(dev)=0.913\n# => @10: loss(train)=30.968, acc(train)=0.995, acc(dev)=0.920\n\n# Inspect the first input sentence\n# for word, enc_word in zip(train_data[0][0], enc_train[0][0]):\n# print(word, \"=>\", enc_word)\n\n# xs = [enc_word for enc_word in enc_train[0][0]]","sub_path":"char/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"328779599","text":"#!/usr/bin/env python\nimport lib.xml2datacustom as xml2datacustom\nfrom lib.hephaestus_utils import ruler\nfrom lib.hephaestus_utils import get_port_mapping_dicts\nconfig = 'cache/routerconfigs/62.40.96.2_config'\nconfigdata = xml2datacustom.xml_jcfg2data(config)\nportmap, revportmap = get_port_mapping_dicts()\n\nruler(\"*\")\nprint(\"Using config\\t\\t: \" + config)\nnumfilters = 0\nfor filter in 
configdata.firewall.family.inet.filter:\n numfilters += 1\nprint(\"Number of filters\\t: \" + str(numfilters))\nruler(\"*\")\n\ndef is_terminating_term(term):\n if term.__contains__(\"then\"):\n thenaction = term.__getattr__(\"then\")\n if thenaction.__contains__(\"accept\") or thenaction.__contains__(\"reject\") or thenaction.__contains__(\"discard\"):\n return True\n else:\n return False\n\ndef is_active_term(term):\n if term.__contains__(\"inactive\"):\n return False\n else:\n return True\n\ndef skip_term(term, reason):\n print(ruler(\"*\") + \"\\n\\tIgnoring TERM:\\t \" + term.name + \"\\t\\t\" + reason + \"\\n\" + ruler(\"*\") + \"\\n\")\n\ndef handle_term(configdata, term):\n print(ruler(\"*\") + \"\\n\\tProcessing TERM:\\t \" + term.name + \"\\n\" + ruler(\"*\") + \"\\n\")\n\n if term.__contains__(\"from\"):\n root = term.__getattr__(\"from\")\n if root._attrs.__contains__(\"source_address\"):\n tree = root.__getattr__(\"source_address\")\n for node in tree:\n print(\"\\t\\tSource address\\t\\t: \" + node.__getattr__(\"name\"))\n elif root._attrs.__contains__(\"source_prefix_list\"):\n tree = root.__getattr__(\"source_prefix_list\")\n for node in tree:\n print(\"\\t\\tSource prefix-list\\t: \" + node.__getattr__(\"name\"))\n print(\"\\t\\t\\t\\tPrefixes:\")\n for list in configdata.policy_options.prefix_list:\n if node.__getattr__(\"name\") == list.__getattr__(\"name\"):\n for item in list.__getattr__(\"prefix_list_item\"):\n print(\"\\t\\t\\t\\t\\t* \" + item.__getattr__(\"name\"))\n elif root._attrs.__contains__(\"destination_address\"):\n tree = root.__getattr__(\"destination_address\")\n for node in tree:\n print(\"\\t\\tDestination address\\t: \" + node.__getattr__(\"name\"))\n elif root._attrs.__contains__(\"destination_prefix_list\"):\n tree = root.__getattr__(\"destination_prefix_list\")\n for node in tree:\n print(\"\\t\\tDestination prefix-list\\t: \" + node.__getattr__(\"name\"))\n print(\"\\t\\t\\t\\tPrefixes:\")\n for list in configdata.policy_options.prefix_list:\n if node.__getattr__(\"name\") == list.__getattr__(\"name\"):\n for item in list.__getattr__(\"prefix_list_item\"):\n print(\"\\t\\t\\t\\t\\t* \" + item.__getattr__(\"name\"))\n\n\n if root._attrs.__contains__(\"port\"):\n tree = root.__getattr__(\"port\")\n if not hasattr(tree, \"strip\"):\n for node in tree:\n if node.isdigit():\n try:\n print(\"\\t\\tPort\\t\\t\\t: \" + revportmap[node] + \" (\" + node + \")\")\n except:\n print(\"\\t\\tPort\\t\\t\\t: \" + node)\n else:\n try:\n print(\"\\t\\tPort\\t\\t\\t: \" + node + \" (\" + portmap[node] + \")\")\n except:\n print(\"\\t\\tPort\\t\\t\\t: \" + node)\n else:\n if node.isdigit():\n try: \n print(\"\\t\\tPort\\t\\t\\t: \" + revportmap[tree] + \" (\" + node + \")\")\n except:\n print(\"\\t\\tPort\\t\\t\\t: \" + tree)\n else:\n try:\n print(\"\\t\\tPort\\t\\t\\t: \" + tree + \" (\" + portmap[tree] + \")\")\n except:\n print(\"\\t\\tPort\\t\\t\\t: \" + tree)\n\n if root._attrs.__contains__(\"protocol\"):\n tree = root.__getattr__(\"protocol\")\n if not hasattr(tree, \"strip\"):\n for node in tree:\n print(\"\\t\\tProtocol\\t\\t: \" + node)\n else:\n print(\"\\t\\tProtocol\\t\\t: \" + tree)\n\n if term.__contains__(\"then\"):\n root = term.__getattr__(\"then\")\n if root._attrs.__contains__(\"accept\"):\n print(\"\\t\\tTerminating action\\t: ACCEPT\")\n elif root._attrs.__contains__(\"reject\"):\n print(\"\\t\\tTerminating action\\t: REJECT\")\n elif root._attrs.__contains__(\"discard\"):\n print(\"\\t\\tTerminating action\\t: DISCARD\")\n else:\n 
print(\"\\t\\tTerminating action\\t: NOT FOUND\")\n\n####\nfor filter in configdata.firewall.family.inet.filter:\n print(ruler(\"*\") + \"\\nFILTER\\t: \" + filter.name + \"\\n\" + ruler(\"*\") + \"\\n\")\n\n try:\n for term in filter.term:\n if is_active_term(term):\n if is_terminating_term(term):\n handle_term(configdata, term)\n else:\n skip_term(term, \"{non-terminating}\")\n else:\n skip_term(term, \"{inactive term}\")\n except:\n pass\n","sub_path":"fwtest.py","file_name":"fwtest.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496555708","text":"# -*- coding: utf-8 -*-\n'''Reads Photo SQLite database'''\n\n# Original work Copyright 2010 Google Inc.\n# Modified work Copyright 2014 Luke Hagan\n# Modified work Copyright 2017 Benjamín Valero\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Modifications to original source:\n#\n# 2014-06-04: retrieve keywords from iPhoto database using sqlite\n# 2017-01-14: retrieve all necessary data entirely from Photos SQLite database\n \nimport calendar\nimport datetime\nimport unicodedata\nimport os\nimport sys\nimport sqlite3\n\nimport tilutil.systemutils as su\n\n\nAPPLE_BASE = calendar.timegm((2001, 1, 1, 0, 0, 0, 0, 0, -1))\nAPPLE_BASE2 = datetime.datetime.fromtimestamp(calendar.timegm((2001, 1, 1, 0, 0, 0)))\n\n\ndef getappletime(value):\n '''Converts a numeric Apple time stamp into a date and time'''\n try:\n # datetime.datetime.fromtimestamp() takes only int, which limits it to 12/13/1901\n # as the earliest possible date. 
Use an alternate calculation for earlier dates.\n # This one however adjusts for daylight savings time, so summer times are off by an\n # hour from the time recorded in Photos.\n if APPLE_BASE + float(value) < -sys.maxint:\n return APPLE_BASE2 + datetime.timedelta(seconds=float(value))\n return datetime.datetime.fromtimestamp(APPLE_BASE + float(value))\n except (TypeError, ValueError) as _e:\n # bad time stamp in database, default to \"now\"\n return datetime.datetime.now()\n\n\ndef get_photos_library_file(library_dir):\n \"\"\"Locates the Photos Library.apdb file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_library_file = os.path.join(library_dir, \"database\", \"Library.apdb\")\n if os.path.exists(photos_library_file):\n return photos_library_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef get_photos_metaschema_file(library_dir):\n \"\"\"Locates the Photos metaSchema.db file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_metaschema_file = os.path.join(library_dir, \"database\", \"metaSchema.db\")\n if os.path.exists(photos_metaschema_file):\n return photos_metaschema_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef get_photos_imageproxies_file(library_dir):\n \"\"\"Locates the Photos ImageProxies.apdb file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_imageproxies_file = os.path.join(library_dir, \"database\", \"ImageProxies.apdb\")\n if os.path.exists(photos_imageproxies_file):\n return photos_imageproxies_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef read_apple_library(photos_library_dir):\n photos_dict = {}\n\n photos_metaschema_file = get_photos_metaschema_file(photos_library_dir)\n photos_imageproxies_file = get_photos_imageproxies_file(photos_library_dir)\n photos_library_file = get_photos_library_file(photos_library_dir)\n\n if photos_metaschema_file:\n # Library Version\n library_version = None\n conn1 = sqlite3.connect(photos_metaschema_file)\n c1 = conn1.cursor()\n c1.execute('select value from LiGlobals where keyPath is ?', (\"libraryCompatibleBackToVersion\",))\n for result in c1.fetchall():\n library_version = int(result[0])\n photos_dict['Application Version'] = library_version\n\n if photos_imageproxies_file:\n # Resources\n conn3 = sqlite3.connect(photos_imageproxies_file)\n c3 = conn3.cursor()\n c3.execute('select attachedModelId, resourceUuid, filename from RKModelResource '\n 'where attachedModelType = 2 and resourceType = 4')\n resources_dict = {}\n for result in c3.fetchall():\n attached_model_id = int(result[0])\n resource_dict = {}\n resource_dict['resource_uuid'] = result[1]\n resource_dict['filename'] = unicodedata.normalize(\"NFC\", result[2])\n resources_dict[attached_model_id] = resource_dict\n\n if photos_metaschema_file:\n # Folders\n conn2 = sqlite3.connect(photos_library_file)\n c2 = conn2.cursor()\n c2.execute('select uuid, modelId, name, folderPath from RKFolder '\n 'where folderType = 1 and isInTrash = 0 and isMagic = 0')\n folders_by_id = {}\n folders_by_uuid = {}\n for result in c2.fetchall():\n uuid = result[0]\n model_id = int(result[1])\n folder_dict = {}\n folder_dict['name'] = result[2]\n folder_dict['folderPath'] = result[3]\n folders_by_uuid[uuid] = folder_dict\n folders_by_id[model_id] = folder_dict\n\n # Albums\n c2 = 
conn2.cursor()\n c2.execute('select modelId, name, folderUuid, recentUserChangeDate'\n ' from RKAlbum where albumType = 1 and albumSubclass = 3'\n ' and isInTrash = 0 and isMagic = 0')\n albums = []\n albums_by_id = {}\n for result in c2.fetchall():\n album_id = int(result[0])\n album_data = {}\n album_data['AlbumName'] = unicodedata.normalize(\"NFC\", result[1])\n album_data['AlbumDate'] = getappletime(result[3])\n album_data['KeyList'] = []\n\n # Load folder path\n album_data['FolderPath'] = None\n album_folder_uuid = result[2]\n if album_folder_uuid in folders_by_uuid:\n album_folder = folders_by_uuid[album_folder_uuid]\n parent_folder_ids = album_folder['folderPath']\n folder_path = ''\n for folder_id in parent_folder_ids.split('/'):\n if folder_id and (int(folder_id) in folders_by_id):\n parent_folder = folders_by_id[int(folder_id)]\n folder_path = os.path.join(folder_path, parent_folder['name'])\n album_data['FolderPath'] = folder_path\n\n albums.append(album_data)\n albums_by_id[album_id] = album_data\n photos_dict['List of Albums'] = albums\n\n # Versions\n c2 = conn2.cursor()\n c2.execute('select modelId, name, imageDate, createDate from RKVersion where isInTrash = 0')\n versions_dict = {}\n for result in c2.fetchall():\n model_id = int(result[0])\n version_dict = {}\n version_name = None\n if result[1]:\n version_name = unicodedata.normalize(\"NFC\", result[1])\n version_dict['VersionName'] = version_name\n if result[2]:\n version_dict['VersionDate'] = getappletime(result[2])\n else:\n version_dict['VersionDate'] = getappletime(result[3])\n versions_dict[model_id] = version_dict\n\n # Masters\n c2 = conn2.cursor()\n c2.execute('select modelId, imagePath from RKMaster '\n 'where importComplete = 1 and isInTrash = 0')\n masters_dict = {}\n for result in c2.fetchall():\n model_id = int(result[0])\n master_dict = {}\n master_dict['ImagePath'] = unicodedata.normalize(\"NFC\", result[1])\n masters_dict[model_id] = master_dict\n\n # Images\n images = {}\n for master_id in masters_dict:\n image_data = {}\n\n master_dict = masters_dict[master_id]\n original_path = os.path.join(photos_library_dir, 'Masters', master_dict['ImagePath'])\n\n if master_id in resources_dict:\n resource_dict = resources_dict[master_id]\n resource_uuid = resource_dict['resource_uuid']\n folder1 = str(ord(resource_uuid[0]))\n folder2 = str(ord(resource_uuid[1]))\n filename = resource_dict['filename']\n image_data['ImagePath'] = os.path.join(photos_library_dir, 'resources', 'modelresources',\n folder1, folder2, resource_uuid, filename)\n image_data['OriginalPath'] = original_path\n else:\n image_data['ImagePath'] = original_path\n\n version_dict = versions_dict[master_id]\n image_data['Caption'] = version_dict['VersionName']\n image_data['ImageDate'] = version_dict['VersionDate']\n images[master_id] = image_data\n photos_dict['Master Image List'] = images\n\n # TODO Keywords\n photos_dict['List of Keywords'] = []\n\n # Album-Versions\n c2 = conn2.cursor()\n c2.execute('select albumId, versionId from RKAlbumVersion')\n for result in c2.fetchall():\n album_id = int(result[0])\n version_id = int(result[1])\n\n if album_id in albums_by_id:\n album_data = albums_by_id[album_id]\n album_data['KeyList'].append(version_id)\n\n return photos_dict\n","sub_path":"appledata/applexml.py","file_name":"applexml.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"229969965","text":"class Adam(object):\r\n \r\n \"\"\"Adam optimizer.\r\n 
Default parameters follow those provided in the original paper.\r\n # Arguments\r\n lr: float >= 0. Learning rate.\r\n beta_1: float, 0 < beta < 1. Generally close to 1.\r\n beta_2: float, 0 < beta < 1. Generally close to 1.\r\n epsilon: float >= 0. Fuzz factor.\r\n decay: float >= 0. Learning rate decay over each update.\r\n # References\r\n - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)\r\n \"\"\"\r\n\r\n def __init__(self, parameters, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.):\r\n \r\n self.iterations = 0\r\n self.lr = lr\r\n self.beta_1 = beta_1\r\n self.beta_2 = beta_2\r\n self.decay = decay\r\n self.epsilon = epsilon\r\n self.initial_decay = decay\r\n self.parameters = parameters\r\n\r\n def step(self, grads):\r\n \"\"\" params and grads are list of numpy arrays\r\n \"\"\"\r\n original_shapes = [x.shape for x in params]\r\n params = [x.flatten() for x in self.parameters]\r\n grads = [x.flatten() for x in grads]\r\n \r\n \r\n lr = self.lr\r\n if self.initial_decay > 0:\r\n lr *= (1. / (1. + self.decay * self.iterations))\r\n\r\n t = self.iterations + 1\r\n lr_t = lr * (np.sqrt(1. - np.power(self.beta_2, t)) /\r\n (1. - np.power(self.beta_1, t)))\r\n\r\n if not hasattr(self, 'ms'):\r\n self.ms = [np.zeros(p.shape) for p in params]\r\n self.vs = [np.zeros(p.shape) for p in params]\r\n \r\n ret = [None] * len(params)\r\n for i, p, g, m, v in zip(range(len(params)), params, grads, self.ms, self.vs):\r\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\r\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * np.square(g)\r\n p_t = p - lr_t * m_t / (np.sqrt(v_t) + self.epsilon)\r\n self.ms[i] = m_t\r\n self.vs[i] = v_t\r\n ret[i] = p_t\r\n \r\n self.iterations += 1\r\n \r\n for i in range(len(ret)):\r\n ret[i] = ret[i].reshape(original_shapes[i])\r\n \r\n self.parameters = ret","sub_path":"lib/optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"167905694","text":"\"\"\"\ncanvas.py\n\nThis is the Flatland (and not the cairo) Canvas class\n\"\"\"\nimport sys\nfrom flatland_exceptions import InvalidOrientation\nfrom diagram_layout_specification import DiagramLayoutSpecification as diagram_layout\nfrom connector_layout_specification import ConnectorLayoutSpecification as connector_layout\nfrom geometry_types import Rect_Size, Position\nfrom diagram import Diagram\nfrom tablet import Tablet\nfrom sheet import Sheet, Group\nfrom symbol import Symbol\n\n# All sheet and canvas related constants are kept together here for easy review and editing\npoints_in_cm = 28.3465\npoints_in_inch = 72\n\n\nclass Canvas:\n \"\"\"\n You can think of a Canvas as a sheet of paper, typically, not necessarily of a standard size\n such as A1, Tabloid or 8.5 x 11. 
It represents the total space where any drawing may occur.\n Typically, though, a margin is specified to preserve empty space along the edges of the Canvas.\n The margin can be set to zero all the way around if desired.\n\n Attributes\n\n - Sheet (str) -- A standard name such as letter and tabloid in the US or A2 in Europe to describe sheet size\n - Orientation (str) -- *portrait* or *landscape*\n - Size (Rect_Size) -- The size in points as a Rect_Size named tuple\n - Margin (Padding) -- The default amount of space surrounding a Node in a Cell\n - Diagram (obj) -- Instance of Diagram drawn on this Canvas\n - Tablet (obj) -- This is a proxy for the underlying graphics drawing context\n - Show_margin (boolean) -- Draw the margin? For diagnostic purposes only\n\n \"\"\"\n\n def __init__(self, diagram_type: str, presentation: str, notation: str, standard_sheet_name: str, orientation: str,\n drawoutput=sys.stdout.buffer, show_margin=False):\n \"\"\"\n Constructor\n\n :param diagram_type: A supported type of model diagram such as class, state machine, collaboration\n :param presentation: A predefined set of style specifications such as default, diagnostic, fullcolor\n :param notation: A supported notation such as xUML, Starr, Shlaer-Mellor\n :param standard_sheet_name: A US or international printer sheet size such as A1, tabloid, letter\n :param orientation: portrait or landscape\n :param drawoutput: A standard IO binary object obtained from sys\n :param show_margin: For diagnostics, show the canvas margin in the drawn output\n \"\"\"\n # Load layout specifications\n diagram_layout()\n connector_layout()\n\n self.Sheet = Sheet(standard_sheet_name) # Ensure that the user has specified a known sheet size\n if orientation not in ('portrait', 'landscape'):\n raise InvalidOrientation(orientation)\n self.Orientation = orientation\n # We want to convert all units, inch, mm, etc to points since that's all we use from here on\n factor = points_in_inch if self.Sheet.Group == Group.US else points_in_cm\n\n # Set point size height and width based on portrait vs. landscape orientation\n h, w = (self.Sheet.Size.height, self.Sheet.Size.width) if self.Orientation == 'landscape' else (\n self.Sheet.Size.width, self.Sheet.Size.height)\n self.Size = Rect_Size(\n height=int(round(h * factor)),\n width=int(round(w * factor))\n )\n self.Margin = diagram_layout.Default_margin\n self.Diagram = Diagram(self, diagram_type_name=diagram_type, presentation=presentation, notation_name=notation)\n # Load symbol data\n Symbol(diagram_type=self.Diagram.Diagram_type.Name, notation=self.Diagram.Notation)\n\n self.Tablet = Tablet(\n size=self.Size, output_file=drawoutput,\n # Drawing types include notation such as 'xUML class diagram' since notation affects the choice\n # of shape and text styles. 
An xUML class diagram association class stem is dashed, for example.\n drawing_type=' '.join([self.Diagram.Notation, diagram_type, 'diagram']), presentation=presentation\n )\n self.Show_margin = show_margin\n\n def render(self):\n \"\"\"\n Draw all content of this Canvas onto the Tablet\n \"\"\"\n if self.Show_margin:\n # Add the margin boundary rectangle to the Tablet\n # The margin rectangle represents the drawable area defined for our Canvas\n # and may be equal to or smaller than the Tablet area\n drawable_origin = Position(x=self.Margin.left, y=self.Margin.bottom)\n draw_area_height = self.Size.height - self.Margin.top - self.Margin.bottom\n draw_area_width = self.Size.width - self.Margin.left - self.Margin.right\n draw_area_size = Rect_Size(height=draw_area_height, width=draw_area_width)\n self.Tablet.add_rectangle(asset='margin', lower_left=drawable_origin, size=draw_area_size)\n\n # Now add all Diagram content to the Tablet\n self.Diagram.render()\n\n # Draw all added content and output a PDF using whatever graphics library is configured in the Tablet\n self.Tablet.render()\n\n def __repr__(self):\n return f'Sheet: {self.Sheet}, Orientation: {self.Orientation}, '\\\n f'Canvas size: h{self.Size.height} pt x w{self.Size.width} pt Margin: {self.Margin}'\n","sub_path":"Node Subsystem/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"527096778","text":"# -*- coding: utf-8 -*-\n# 签到\n\nfrom django.shortcuts import render, HttpResponse\nfrom django.views import View\nimport json\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom tssite.settings import NGINX_PREFIX\nfrom tssite.utils.my_logger import logger\nimport datetime\nfrom tssite.services.sign_service import SignService\nfrom tssite.services.user_service import UserService\nfrom tssite.utils.django_utils import parse_get_params\n\n\nclass SignView(View):\n def __init__(self):\n super(SignView, self).__init__()\n logger.info('SignView::__init__(self)')\n self._sign_serv = SignService()\n self._user_serv = UserService()\n\n def get(self, request):\n path = request.path\n logger.info('GET: ' + path)\n\n # 签到首页(大屏幕上显示)\n if '/sign/' == path:\n return render(request, 'sign/sign.html', {})\n\n # 员工手机上\n elif '/sign/commit_page/' == path:\n # 先获取传过来的uuid\n params = parse_get_params(request)\n _uuid = params['_uuid'][0]\n # 再把uuid传回去\n\n users_dict = self._user_serv.get_users_dict()\n\n return render(request, 'sign/commit_page.html', {'uuid': _uuid, 'users_dict': users_dict})\n\n # html加载完成后,ajax请求加载第一批\n elif '/sign/init_query/' == path:\n signed_queue = self._sign_serv.query_signed_record(method='init')\n signed_list = []\n while not signed_queue.empty():\n signed_list.append(signed_queue.get())\n\n return HttpResponse(json.dumps({'signed_list': signed_list}))\n\n # ajax取随后的员工,append\n elif '/sign/next_query/' == path:\n signed_queue = self._sign_serv.query_signed_record(method='next')\n signed_list = []\n while not signed_queue.empty():\n signed_list.append(signed_queue.get())\n return HttpResponse(json.dumps({'signed_list': signed_list}))\n\n # ajax请求加载qr_code\n elif '/sign/ajax_img_src/' == path:\n img_url = self._sign_serv.get_qr_img_url()\n return HttpResponse(json.dumps({'img_url': img_url}))\n\n else:\n logger.warning('404')\n return render(request, 'index.html', {})\n\n def post(self, request):\n path = request.path\n logger.info('POST: ' + path)\n\n # 提交签到数据\n if 
'/sign/commit/' == path:\n sign_info = dict()\n dt_now = str(datetime.datetime.now())[:19]\n sign_info['_id'] = request.POST.get('_id', '')\n sign_info['_uuid'] = request.POST.get('_uuid', '')\n sign_info['_name'] = request.POST.get('_name', '')\n sign_info['_dt_now'] = dt_now\n res = self._sign_serv.sign_commit(sign_info)\n return HttpResponse(json.dumps({'result': res}))\n","sub_path":"tssite/views/sign_view.py","file_name":"sign_view.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528974258","text":"T = int(input())\nfor z in range(T):\n print(\"Case #\", z + 1, \": \", sep='', end='')\n k, c, s = map(int, input().split())\n if (s < (k + c - 1) // c):\n print(\"IMPOSSIBLE\")\n continue\n kpows = [1] * (c + 1)\n for i in range(1, c + 1):\n kpows[i] = kpows[i - 1] * k\n curTile = 0\n for i in range(1, k + 1):\n curTile += (i - 1) * kpows[c - (i - 1) % c - 1]\n if (i % c == 0):\n print(curTile + 1, end=' ')\n curTile = 0\n # print(i, (i - 1) % c, kpows[(i - 1) % c])\n if (k % c):\n print(curTile + 1, end=' ')\n print()\n","sub_path":"solutions_5636311922769920_0/Python/thefacetakt/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"230545253","text":"import dxr.plugins\nimport os\n\n''' A DXR plugin that can highlight code-coverage lines.\n\n For the time being, the tool expects the file to be the results of running\n lcov in a result app.info in the root of the build directory; I don't want\n to play around with reading output from gcov or from .gcno/.gcda files\n manually. The upside of this approach is it allows you to run gcov from\n other machines. '''\n\ndef read_file(fd, path, filestruct):\n lines = filestruct.setdefault('lines', {})\n for line in fd:\n line = line.strip()\n if line == 'end_of_record':\n return\n instr, data = line.split(':')\n if instr == 'DA': # DA:,[,]\n data = data.split(',')\n lno, hits = int(data[0]), int(data[1])\n lines[lno] = lines.get(lno, 0) + hits\n elif instr in ['LH', 'LF']: # Hit/found -> we count these ourselves\n continue\n\ndef read_lcov(fd):\n all_data = {}\n for line in fd:\n line = line.strip()\n instr, data = line.split(':')\n if instr == 'TN': # TN:\n continue\n elif instr == 'SF': # SF:\n read_file(fd, data, all_data.setdefault(data, {}))\n return all_data\n\ndef post_process(srcdir, objdir):\n try:\n appinfo = open(os.path.join(objdir, \"app.info\"))\n except IOError:\n # No file? 
No gcov\n return {}\n try:\n blob = read_lcov(appinfo)\n finally:\n appinfo.close()\n\n return blob\n\ndef can_use(treecfg):\n # We need to have clang and llvm-config in the path\n return dxr.plugins.in_path('lcov')\n\ndef get_schema():\n return ''\nsqlify = dxr.plugins.default_sqlify\npre_html_process = dxr.plugins.default_pre_html_process\n\ndef get_line_annotations(blob, srcpath, treecfg):\n if srcpath in blob and 'lines' in blob[srcpath]:\n for line, hits in blob[srcpath]['lines'].iteritems():\n yield (line, { \"data-gcov-hits\": str(hits) })\n\ndef build_database(conn, srcdir, objdir, cache=None):\n return\n\nhtmlifier = {}\nfor f in ('.c', '.cc', '.cpp', '.h', '.hpp'):\n htmlifier[f] = {\n 'get_line_annotations': get_line_annotations,\n 'no-override': True\n }\n\ndef get_htmlifiers():\n return htmlifier\n\n__all__ = dxr.plugins.required_exports()\n","sub_path":"xref-tools/code-coverage/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"358928573","text":"from functools import wraps\n\nfrom authlib.oauth2 import OAuth2Error\nfrom authlib.oauth2.rfc6749 import MissingAuthorizationError\nfrom flask import request\nfrom flask_apispec import marshal_with\nfrom flask_apispec.views import MethodResource\nfrom flask_security import current_user\n\nfrom project import db\nfrom project.api.schemas import ErrorResponseSchema, UnprocessableEntityResponseSchema\nfrom project.oauth2 import require_oauth\n\n\ndef etag_cache(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n response = func(*args, **kwargs)\n response.add_etag()\n return response.make_conditional(request)\n\n return wrapper\n\n\ndef require_api_access(scope=None, operator=\"AND\", optional=False):\n def inner_decorator(func):\n def wrapped(*args, **kwargs): # see authlib ResourceProtector#__call__\n try: # pragma: no cover\n try:\n require_oauth.acquire_token(scope, operator)\n except MissingAuthorizationError as error:\n if optional:\n return func(*args, **kwargs)\n require_oauth.raise_error_response(error)\n except OAuth2Error as error:\n require_oauth.raise_error_response(error)\n except Exception as e:\n if not current_user or not current_user.is_authenticated:\n raise e\n return func(*args, **kwargs)\n\n return wrapped\n\n return inner_decorator\n\n\n@marshal_with(ErrorResponseSchema, 400, \"Bad Request\")\n@marshal_with(UnprocessableEntityResponseSchema, 422, \"Unprocessable Entity\")\nclass BaseResource(MethodResource):\n decorators = [etag_cache]\n\n def create_instance(self, schema_cls, **kwargs):\n instance = schema_cls().load(request.json, session=db.session)\n\n for key, value in kwargs.items():\n if hasattr(instance, key):\n setattr(instance, key, value)\n\n validate = getattr(instance, \"validate\", None)\n if callable(validate):\n validate()\n\n return instance\n\n def update_instance(self, schema_cls, instance):\n with db.session.no_autoflush:\n instance = schema_cls().load(\n request.json, session=db.session, instance=instance\n )\n\n validate = getattr(instance, \"validate\", None)\n if callable(validate):\n validate()\n\n return instance\n","sub_path":"project/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"234479352","text":"#2011-07-24\n#Coded by Argent Zerda\n\nfrom turtle import *;\nimport math;\n\ndef zcircle(radius=0,angle=360):\n 
print(heading());\n speed(\"fastest\");\n for n in range(angle//4):\n circle(radius*(0.5),4);\n left(60);\n forward(2);\n right(120);\n forward(2);\n left(60);\n print(heading());\n\ndef main():\n speed(\"fast\");\n begin_fill();\n color(\"red\");\n left(90);\n circle(60, 90);\n circle(60, 180);\n circle(-60, 90);\n left(180);\n circle(-60, 90);\n circle(60, 180);\n circle(60, 90);\n end_fill();\n done();\n\nmain();\n","sub_path":"random/heart.py","file_name":"heart.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"546104749","text":"from django.urls import path, include\nfrom .views import order_status_update_views, crud_pizza_record_views, views\n\napp_name = 'restaurantStaffApp'\n\nurlpatterns = [\n # For Restaurant Staffs Only\n\n # Order Status Update - Related\n path('', views.index, name='home_restaurant_staffs'),\n path('order-list/', order_status_update_views.restaurant_order_status_list, name='restaurantBackendOrderStatus'),\n path('order-status-update//', order_status_update_views.restaurant_order_status_update, name='orderStatusUpdate'),\n\n # Pizza Record CRUD - Related\n path('pizza/pizza-records/', crud_pizza_record_views.pizzaList, name='pizzaRecords'),\n path('pizza/create-pizza-record/', crud_pizza_record_views.createPizza, name='createPizzaRecord'),\n path('pizza/update-pizza-record//', crud_pizza_record_views.updatePizza, name='updatePizzaRecord'),\n path('pizza/delete-pizza-record//', crud_pizza_record_views.deletePizza, name='deletePizzaRecord'),\n]","sub_path":"pizzaProj/restaurant_staff/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"134268414","text":"\"\"\"\nvarious utility methods for halo package.\n\"\"\"\nfrom itertools import product\nimport numpy as np\n\nfrom halo import BASE_DIR, DATA_DIR, FIG_DIR\n\n#bin size data and settings depending on cutoff_bins param\n#(indices are for columns of datablock variable)\ncasbinfile = DATA_DIR + 'CAS_bins.npy'\nCAS_bins = np.load(casbinfile, allow_pickle=True).item()\ncentr_cas = (CAS_bins['upper'] + CAS_bins['lower'])/4. #diam to radius\n#print(centr_cas)\ndr_cas = CAS_bins['upper'] - CAS_bins['lower']\nnbins_cas = len(centr_cas)\nnbins_cas_with_cip = 7\n\ncdpbinfile = DATA_DIR + 'CDP_bins.npy'\nCDP_bins = np.load(cdpbinfile, allow_pickle=True).item()\ncentr_cdp = (CDP_bins['upper'] + CDP_bins['lower'])/4. #diam to radius\n#print(centr_cdp)\ndr_cdp = CDP_bins['upper'] - CDP_bins['lower']\nnbins_cdp = len(centr_cdp)\n\ncipbinfile = DATA_DIR + 'CIP_bins.npy'\nCIP_bins = np.load(cipbinfile, allow_pickle=True).item()\ncentr_cip = (CIP_bins['upper'] + CIP_bins['lower'])/4. #diam to radius\n#print(centr_cip)\ndr_cip = CIP_bins['upper'] - CIP_bins['lower']\nnbins_cip = len(centr_cip)\n\nlow_bin_cas = 6 #previous six cols are environmental vars\nhigh_bin_cas = low_bin_cas + nbins_cas\nlow_bin_cdp = 4 + high_bin_cas #extra +4 due to addition of LWC cols\nhigh_bin_cdp = low_bin_cdp + nbins_cdp\n\nlow_bin_cas_with_cip = low_bin_cas #same starting point as without cip \nhigh_bin_cas_with_cip = low_bin_cas_with_cip + nbins_cas_with_cip\nlow_bin_cip = 4 + high_bin_cas_with_cip #extra +4 due to addition of LWC cols\nhigh_bin_cip = low_bin_cip + nbins_cip\n\n#physical constants\nC_ap = 1005. 
#dry air heat cap at const P (J/(kg K))\nD = 0.23e-4 #diffus coeff water in air (m^2/s)\ng = 9.8 #grav accel (m/s^2)\nK = 2.4e-2 #therm conductivity of air (J/(m s K))\nL_v = 2501000. #latent heat of evaporation of water (J/kg)\nMm_a = .02896 #Molecular weight of dry air (kg/mol)\nMm_v = .01806 #Molecular weight of water vapour (kg/mol)\nR = 8.317 #universal gas constant (J/(mol K))\nR_a = R/Mm_a #Specific gas constant of dry air (J/(kg K))\nR_v = R/Mm_v #Specific gas constant of water vapour (J/(kg K))\nrho_w = 1000. #density of water (kg/m^3) \n\n#various series expansion coeffs - comment = page in pruppacher and klett\nsigma_coeffs = [75.93, 0.115, 6.818e-2, 6.511e-3, \\\n 2.933e-4, 6.283e-6, 5.285e-8] #130\nN_Re_regime2_coeffs = [-0.318657e1, 0.992696, -0.153193e-2, \\\n -0.987059e-3, -0.578878e-3, 0.855176e-4, \\\n -0.327815e-5] #417\nN_Re_regime3_coeffs = [-0.500015e1, 0.523778e1, -0.204914e1, \\\n 0.475294, -0.542819e-1, 0.238449e-2] #418\n\ndef get_ind_bounds(arr, minval, maxval, startind=0):\n \"\"\"\n Return: (imin, imax) where arr[imin] >= minval and arr[imax] <= maxval.\n Assumes arr is sorted smallest to largest (ie time series)\n Starts sorting at startind in arr, if specified\n \"\"\"\n i = startind\n while (arr[i] < minval) or (np.isnan(arr[i])):\n i += 1\n imin = i\n while (arr[i] < maxval) or (np.isnan(arr[i])):\n i += 1\n imax = i\n return(imin, imax)\n\ndef match_two_arrays(arr1, arr2):\n \"\"\"\n Return: (inds1, inds2) where arr1[inds1] = arr2[inds2].\n Assumes arr1 and arr2 are both sorted in the same order (ie time series)\n \"\"\"\n inds1 = []\n inds2 = []\n startind2 = 0\n for i1, x1 in enumerate(arr1):\n for i2, x2 in enumerate(arr2[startind2:]):\n if x1 == x2:\n inds1.append(i1)\n inds2.append(i2+startind2)\n startind2 = i2 + startind2 + 1\n break\n return(inds1, inds2)\n\ndef match_multiple_arrays(arrays):\n \"\"\"\n Return: [inds1, ... , indsN] where arr1[inds1] = ... = arrN[indsN].\n Assumes all arrays are sorted in the same order (ie time series)\n probably a better way to do this recursively but I never learned that shit xd\n \"\"\"\n inds = [[i for i in range(len(arrays[0]))]]\n for i, array in enumerate(arrays[:-1]):\n (inds1, inds2) = match_two_arrays([array[i] for i in inds[-1]], arrays[i+1])\n inds = [[indsj[i] for i in inds1] for indsj in inds]\n inds.append(inds2)\n return [np.array(indsarr) for indsarr in inds]\n\ndef calc_lwc(setname, setdata, envdata, cutoff_bins, change_cas_corr):\n \"\"\"\n\n Returns: (lwc_array, time_inds)\n -lwc_array = array of lwc values\n -time_inds = indices of setname array originally passed by user which \\\n match indexing of lwc_array (i.e. lwc_array is time-commensurate with \\\n setname[time_inds].\n \n update 1/20/20: if envdata is None-valued, return None's. 
For cleaner \\\n data processing flow.\n \"\"\"\n #return None values if envdata is None\n if envdata is None:\n return (None, None)\n\n #load particle diameters\n bin_dict = np.load(DATA_DIR + setname+ '_bins.npy', \\\n allow_pickle=True).item()\n \n #constants\n rho_ww = 1.e3 #pure liquid water density\n R_a = 287 #ideal gas const dry air\n \n #calculate air density using ideal gas law\n rho_air = envdata['stat_pres']/(R_a*envdata['stat_temp'])\n print(np.nanmean(rho_air))\n\n #calculate density of liquid water in atmosphere by summing over bins\n n = len(setdata['time'])\n sum_nconc_radcubed = np.zeros((n,))\n if setname in ['CAS', 'CIP']:\n sum_nconc_radcubed_with_cip = np.zeros((n,))\n print(setname, sum_nconc_radcubed.shape)\n i = 0\n for key in setdata.keys():\n if 'nconc' in key and 'tot' not in key:\n if cutoff_bins and bin_dict['lower'][i]<3.e-6:\n i += 1\n continue\n d_mean = (bin_dict['upper'][i] + bin_dict['lower'][i])/2.\n r = d_mean/2.\n if setname == 'CAS' and change_cas_corr:\n corr_factor = setdata['xi']*setdata['TAS']/setdata['PAS']\n else:\n corr_factor = np.ones((n,))\n sum_nconc_radcubed += np.power(r, 3.)*corr_factor*setdata[key]\n if setname == 'CAS' and bin_dict['lower'][i] < 25.e-6:\n sum_nconc_radcubed_with_cip += np.power(r, 3.)*corr_factor*setdata[key]\n if setname == 'CIP':\n sum_nconc_radcubed_with_cip += np.power(r, 3.)*corr_factor*setdata[key]\n i += 1\n rho_wat = rho_ww*4./3.*np.pi*sum_nconc_radcubed\n if setname in ['CAS', 'CIP']:\n rho_wat_with_cip = rho_ww*4./3.*np.pi*sum_nconc_radcubed_with_cip\n\n #match time values so we can combine set data with environmental data\n #(currently just rounding)\n (set_t_inds, env_t_inds) = match_two_arrays( \\\n np.around(setdata['time']), np.around(envdata['time']))\n print(len(set_t_inds), len(env_t_inds))\n if setname == 'CAS':\n return (rho_wat[set_t_inds]/rho_air[env_t_inds], \\\n rho_wat_with_cip[set_t_inds]/rho_air[env_t_inds], np.array(set_t_inds))\n else:\n return (rho_wat[set_t_inds]/rho_air[env_t_inds], \\\n None, np.array(set_t_inds))\n\ndef linregress(x, y=None):\n \"\"\"\n ~~copy pasta from scipy so I don't have to import the whole damn module~~\n Calculate a regression line\n This computes a least-squares regression for two sets of measurements.\n Parameters\n ----------\n x, y : array_like\n two sets of measurements. Both arrays should have the same length.\n If only x is given (and y=None), then it must be a two-dimensional\n array where one dimension has length 2. 
The two sets of measurements\n are then found by splitting the array along the length-2 dimension.\n Returns\n -------\n slope : float\n slope of the regression line\n intercept : float\n intercept of the regression line\n r-value : float\n correlation coefficient\n stderr : float\n Standard error of the estimate\n \"\"\"\n TINY = 1.0e-20\n if y is None: # x is a (2, N) or (N, 2) shaped array_like\n x = np.asarray(x)\n if x.shape[0] == 2:\n x, y = x\n elif x.shape[1] == 2:\n x, y = x.T\n else:\n msg = \"If only `x` is given as input, it has to be of shape (2, N) \\\n or (N, 2), provided shape was %s\" % str(x.shape)\n raise ValueError(msg)\n else:\n x = np.asarray(x)\n y = np.asarray(y)\n n = len(x)\n xmean = np.mean(x,None)\n ymean = np.mean(y,None)\n\n # average sum of squares:\n ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat\n r_num = ssxym\n r_den = np.sqrt(ssxm*ssym)\n if r_den == 0.0:\n r = 0.0\n else:\n r = r_num / r_den\n # test for numerical error propagation\n if (r > 1.0):\n r = 1.0\n elif (r < -1.0):\n r = -1.0\n\n df = n-2\n t = r*np.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))\n slope = r_num / ssxm\n intercept = ymean - slope*xmean\n sterrest = np.sqrt((1-r*r)*ssym / ssxm / df)\n return slope, intercept, r, sterrest\n\ndef get_datablock(adlrinds, casinds, cdpinds, adlrdata, casdata, cdpdata):\n \"\"\"\n Consolidate data for easier processing \n Format of output array (order of columns): time, temperature, vertical \\\n velocity, ADLR TAS, CAS TAS, CAS correction factor, number conc for cas \\\n bins (12 cols), LWC for cas (4 cols), number conc for cdp bins (15 cols), \\\n LWC for cdp (4 cols), pressure, altitude\n \"\"\"\n # extra fifteen columns: time, temperature, \n # vertical wind velocity, ADLR TAS, CAS TAS, \n # CAS corr factor, lwc for cas and cdp,\n # pressure, altitude\n datablock = np.zeros([len(adlrinds), 15 + nbins_cas + nbins_cdp])\n datablock[:, 0] = np.around(adlrdata['data']['time'][adlrinds])\n datablock[:, 1] = adlrdata['data']['stat_temp'][adlrinds]\n datablock[:, 2] = adlrdata['data']['vert_wind_vel'][adlrinds]\n datablock[:, 3] = adlrdata['data']['TAS'][adlrinds]\n datablock[:, 4] = casdata['data']['TAS'][casinds]\n datablock[:, 5] = casdata['data']['TAS'][casinds]\\\n\t\t\t/casdata['data']['PAS'][casinds]\\\n\t\t\t*casdata['data']['xi'][casinds]\n for i in range(nbins_cas):\n key = 'nconc_' + str(i+5)\n datablock[:, i+low_bin_cas] = casdata['data'][key][casinds]\n datablock[:, high_bin_cas] = casdata['data']['lwc']['00'][casinds]\n datablock[:, 1+high_bin_cas] = casdata['data']['lwc']['01'][casinds]\n datablock[:, 2+high_bin_cas] = casdata['data']['lwc']['10'][casinds]\n datablock[:, 3+high_bin_cas] = casdata['data']['lwc']['11'][casinds]\n for i in range(nbins_cdp):\n key = 'nconc_' + str(i+1)\n datablock[:, i+low_bin_cdp] = cdpdata['data'][key][cdpinds]\n datablock[:, high_bin_cdp] = cdpdata['data']['lwc']['00'][cdpinds]\n datablock[:, 1+high_bin_cdp] = cdpdata['data']['lwc']['01'][cdpinds]\n datablock[:, 2+high_bin_cdp] = cdpdata['data']['lwc']['10'][cdpinds]\n datablock[:, 3+high_bin_cdp] = cdpdata['data']['lwc']['11'][cdpinds]\n #just tackin on shit at the end at this point\n datablock[:, -2] = adlrdata['data']['stat_pres'][adlrinds]\n datablock[:, -1] = adlrdata['data']['alt_asl'][adlrinds]\n\n return datablock\n\ndef get_datablock_with_cip(adlrinds, casinds, cipinds, \\\n adlrdata, casdata, cipdata):\n \"\"\"\n Consolidate data for easier processing \n Format of output array (order of columns): time, temperature, vertical \\\n velocity, ADLR TAS, 
CAS TAS, CAS correction factor, number conc for cas \\\n bins (7 cols - up to 25um diam), partial LWC for cas (up to 25um diam) \\\n (4 cols), number conc for cip bins (19 cols - starting at 25um diam), \\\n LWC for cas+cip (full diam range) (4 cols), pressure\n \"\"\"\n # extra fifteen columns: time, temperature, \n # vertical wind velocity, ADLR TAS, CAS TAS, \n # CAS corr factor, lwc for cas and cas+cip, \\\n # pressure, altitude\n datablock = np.zeros([len(adlrinds), 16 + nbins_cas_with_cip + nbins_cip])\n datablock[:, 0] = np.around(adlrdata['data']['time'][adlrinds])\n datablock[:, 1] = adlrdata['data']['stat_temp'][adlrinds]\n datablock[:, 2] = adlrdata['data']['vert_wind_vel'][adlrinds]\n datablock[:, 3] = adlrdata['data']['TAS'][adlrinds]\n datablock[:, 4] = casdata['data']['TAS'][casinds]\n datablock[:, 5] = casdata['data']['TAS'][casinds]\\\n\t\t\t/casdata['data']['PAS'][casinds]\\\n\t\t\t*casdata['data']['xi'][casinds]\n for i in range(nbins_cas_with_cip):\n key = 'nconc_' + str(i+5)\n datablock[:, i+low_bin_cas_with_cip] = casdata['data'][key][casinds]\n #next 4 cols are total cas LWC (something supposed to be akin to cloud LWC)\n datablock[:, high_bin_cas_with_cip] = casdata['data']['lwc']['00'][casinds]\n datablock[:, 1+high_bin_cas_with_cip] = casdata['data']['lwc']['01'][casinds]\n datablock[:, 2+high_bin_cas_with_cip] = casdata['data']['lwc']['10'][casinds]\n datablock[:, 3+high_bin_cas_with_cip] = casdata['data']['lwc']['11'][casinds]\n for i in range(nbins_cip):\n key = 'nconc_' + str(i+1)\n datablock[:, i+low_bin_cip] = cipdata['data'][key][cipinds]\n #next 4 cols are total cas + cip LWC (minus overlap)\n datablock[:, high_bin_cip] = \\\n casdata['data']['lwc_with_cip']['00'][casinds] \\\n + cipdata['data']['lwc_with_cip']['00'][cipinds]\n datablock[:, 1+high_bin_cip] = \\\n casdata['data']['lwc_with_cip']['01'][casinds] \\\n + cipdata['data']['lwc_with_cip']['01'][cipinds]\n datablock[:, 2+high_bin_cip] = \\\n casdata['data']['lwc_with_cip']['10'][casinds] \\\n + cipdata['data']['lwc_with_cip']['10'][cipinds]\n datablock[:, 3+high_bin_cip] = \\\n casdata['data']['lwc_with_cip']['11'][casinds] \\\n + cipdata['data']['lwc_with_cip']['11'][cipinds]\n\n datablock[:, -2] = adlrdata['data']['stat_pres'][adlrinds]\n datablock[:, -1] = adlrdata['data']['alt_asl'][adlrinds]\n\n return datablock\n\ndef get_datablock_with_sharc(adlrinds, casinds, sharcinds, \\\n adlrdata, casdata, sharcdata):\n \"\"\"\n Consolidate data for easier processing \n Format of output array (order of columns): time, temperature, vertical \\\n velocity, ADLR TAS, CAS TAS, CAS correction factor, number conc for cas \\\n bins (12 cols), LWC for cas (4 cols), theta_v_adlr, theta_v_sharc, ss_sharc\n \"\"\"\n # extra 13 columns: time, temperature, \n # vertical wind velocity, ADLR TAS, CAS TAS, \n # CAS corr factor, lwc for cas, theta_v for adlr,\n # theta_v for sharc, ss\n datablock = np.zeros([len(adlrinds), 13 + nbins_cas])\n datablock[:, 0] = np.around(adlrdata['data']['time'][adlrinds])\n datablock[:, 1] = adlrdata['data']['stat_temp'][adlrinds]\n datablock[:, 2] = adlrdata['data']['vert_wind_vel'][adlrinds]\n datablock[:, 3] = adlrdata['data']['TAS'][adlrinds]\n datablock[:, 4] = casdata['data']['TAS'][casinds]\n datablock[:, 5] = casdata['data']['TAS'][casinds]\\\n\t\t\t/casdata['data']['PAS'][casinds]\\\n\t\t\t*casdata['data']['xi'][casinds]\n for i in range(nbins_cas):\n key = 'nconc_' + str(i+5)\n datablock[:, i+low_bin_cas] = casdata['data'][key][casinds]\n datablock[:, high_bin_cas] = 
casdata['data']['lwc']['00'][casinds]\n datablock[:, 1+high_bin_cas] = casdata['data']['lwc']['01'][casinds]\n datablock[:, 2+high_bin_cas] = casdata['data']['lwc']['10'][casinds]\n datablock[:, 3+high_bin_cas] = casdata['data']['lwc']['11'][casinds]\n datablock[:, 4+high_bin_cas] = adlrdata['data']['virt_potl_temp'][adlrinds]\n datablock[:, 5+high_bin_cas] = sharcdata['data']['virt_potl_temp'][sharcinds]\n datablock[:, 6+high_bin_cas] = sharcdata['data']['RH_w'][sharcinds] - 100\n return datablock\n\ndef get_nconc_vs_t(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns (nconc_cas, nconc_cdp)\n \"\"\"\n if cutoff_bins:\n cas_offset = 3\n cdp_offset = 2\n else:\n cas_offset = 0 \n cdp_offset = 0\n nconc_cas = []\n nconc_cdp = []\n for i, row in enumerate(datablock):\n if change_cas_corr:\n nconc_cas.append(np.sum(row[5]*row[(low_bin_cas+cas_offset):high_bin_cas]))\n else:\n nconc_cas.append(np.sum(row[(low_bin_cas+cas_offset):high_bin_cas]))\n nconc_cdp.append(np.sum(row[(low_bin_cdp+cdp_offset):high_bin_cdp]))\n return (np.array(nconc_cas), np.array(nconc_cdp))\n\ndef get_meanr_vs_t(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns (meanr_cas, meanr_cdp)\n \"\"\"\n if cutoff_bins:\n cas_offset = 3\n cdp_offset = 2\n else:\n cas_offset = 0\n cdp_offset = 0\n meanr_cas = []\n meanr_cdp = []\n for row in datablock:\n if change_cas_corr:\n meanr_cas.append(np.sum(row[5]*row[(low_bin_cas+cas_offset):high_bin_cas]\\\n *centr_cas[nbins_cas-(high_bin_cas-(low_bin_cas+cas_offset)):nbins_cas])\\\n /np.sum(row[5]*row[(low_bin_cas+cas_offset):high_bin_cas]))\n else:\n meanr_cas.append(np.sum(row[(low_bin_cas+cas_offset):high_bin_cas]\\\n *centr_cas[nbins_cas-(high_bin_cas-(low_bin_cas+cas_offset)):nbins_cas])\\\n /np.sum(row[(low_bin_cas+cas_offset):high_bin_cas]))\n meanr_cdp.append(np.sum(row[(low_bin_cdp+cdp_offset):high_bin_cdp]\\\n *centr_cdp[nbins_cdp-(high_bin_cdp-(low_bin_cdp+cdp_offset)):nbins_cdp])\\\n /np.sum(row[(low_bin_cdp+cdp_offset):high_bin_cdp]))\n return (np.array(meanr_cas), np.array(meanr_cdp))\n\ndef get_nconc_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns nconc from cas+cip data \n \"\"\"\n if cutoff_bins:\n cas_offset = 3\n else:\n cas_offset = 0 \n nconc = []\n for i, row in enumerate(datablock):\n if change_cas_corr:\n nconc.append(np.sum(row[5]*row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]))\n else:\n nconc.append(np.sum(row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]))\n return np.array(nconc)\n\ndef get_meanr_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns meanr from cas+cip data \n \"\"\"\n if cutoff_bins:\n cas_offset = 3\n else:\n cas_offset = 0\n meanr = []\n for row in datablock:\n if change_cas_corr:\n meanr.append((np.sum(row[5]*row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]\\\n * centr_cas[cas_offset:nbins_cas_with_cip]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]\\\n * centr_cip)) \\\n / (np.sum(row[low_bin_cip:high_bin_cip]) \\\n + np.sum(row[5]*row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip])))\n else:\n meanr.append((np.sum(row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]\\\n * centr_cas[cas_offset:nbins_cas_with_cip]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]\\\n * centr_cip)) \\\n / (np.sum(row[low_bin_cip:high_bin_cip]) \\\n + np.sum(row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip])))\n return 
(np.array(meanr))\n\ndef get_meanfr_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns meanr from cas+cip data \n \"\"\"\n T = datablock[:, 1]\n P = datablock[:, -2]\n rho_air = P/(R_a*T)\n eta = get_dyn_visc(T)\n sigma = sum([sigma_coeffs[i]*(T - 273)**i for i in \\\n range(len(sigma_coeffs))])*1.e-3\n N_Be_div_r3 = 32*rho_w*rho_air*g/(3*eta**2.) #pr&kl p 417\n N_Bo_div_r2 = g*rho_w/sigma #pr&kl p 418\n N_P = sigma**3.*rho_air**2./(eta**4.*g*rho_w) #pr&kl p 418\n\n if cutoff_bins:\n cas_offset = 3\n else:\n cas_offset = 0\n\n radii = np.concatenate(\n (centr_cas[cas_offset:high_bin_cas_with_cip], centr_cip))\n #print(radii.shape)\n u_term = np.array([get_u_term(r, eta, N_Be_div_r3, N_Bo_div_r2, \\\n N_P, P, rho_air, T) for r in radii])\n N_Re_vals = np.array([2*rho_air*r*u_term[j]/eta for j, r in enumerate(radii)])\n f_vals = np.array([get_vent_coeff(N_Re) for N_Re in N_Re_vals])\n #print(f_vals.shape)\n meanfr = []\n for j, row in enumerate(datablock):\n if change_cas_corr:\n meanfr.append((np.sum(row[5]*row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]\\\n * centr_cas[cas_offset:nbins_cas_with_cip] \\\n * f_vals[cas_offset:nbins_cas_with_cip, j]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]\\\n * f_vals[nbins_cas_with_cip:nbins_cas_with_cip+nbins_cip, j] \\\n * centr_cip)) \\\n / (np.sum(row[low_bin_cip:high_bin_cip]) \\\n + np.sum(row[5]*row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip])))\n else:\n meanfr.append((np.sum(row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip]\\\n * centr_cas[cas_offset:nbins_cas_with_cip] \\\n * f_vals[j][cas_offset:nbins_cas_with_cip]) \\\n + np.sum(row[low_bin_cip:high_bin_cip]\\\n * f_vals[j][nbins_cas_with_cip:nbins_cas_with_cip+nbins_cip] \\\n * centr_cip)) \\\n / (np.sum(row[low_bin_cip:high_bin_cip]) \\\n + np.sum(row[(low_bin_cas_with_cip+cas_offset):high_bin_cas_with_cip])))\n return (np.array(meanfr))\n\ndef get_full_ss_vs_t_with_cip_and_vent(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns ss using full temperature-dependent coefficients and assuming\n input datablock of the form returned by get_datablock_with_cip function.\n [redundant to ss_scatter_figsrc module at the moment which is not the \n best but whatever]\n \"\"\"\n meanfr = get_meanfr_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins)\n nconc = get_nconc_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins)\n \n T = datablock[:, 1]\n #print(T)\n w = datablock[:, 2]\n #print(w)\n rho_a = datablock[:, -2]/(R_a*T)\n #print(rho_a)\n A = g*(L_v*R_a/(C_ap*R_v)*1/T - 1)*1./R_a*1./T\n e_s = get_sat_vap_pres(T)\n B = rho_w*(R_v*T/e_s + L_v**2./(R_v*C_ap*rho_a*T**2.))\n F_d = rho_w*R_v*T/(D*e_s) \n F_k = (L_v/(R_v*T) - 1)*L_v*rho_w/(K*T)\n #print(B/(F_d + F_k))\n ss = A*w*(F_d + F_k)/(4*np.pi*nconc*meanfr*B)*100\n return (np.array(ss))\n\ndef get_full_ss_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns ss using full temperature-dependent coefficients and assuming\n input datablock of the form returned by get_datablock_with_cip function.\n [redundant to ss_scatter_figsrc module at the moment which is not the \n best but whatever]\n \"\"\"\n meanr = get_meanr_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins)\n nconc = get_nconc_vs_t_with_cip(datablock, change_cas_corr, cutoff_bins)\n \n T = datablock[:, 1]\n w = datablock[:, 2]\n rho_a = datablock[:, -2]/(R_a*T)\n A = g*(L_v*R_a/(C_ap*R_v)*1/T - 1)*1./R_a*1./T\n e_s = get_sat_vap_pres(T)\n B = rho_w*(R_v*T/e_s + L_v**2./(R_v*C_ap*rho_a*T**2.))\n 
F_d = rho_w*R_v*T/(D*e_s) \n F_k = (L_v/(R_v*T) - 1)*L_v*rho_w/(K*T)\n ss = A*w*(F_d + F_k)/(4*np.pi*nconc*meanr*B)*100\n return (np.array(ss))\n\ndef get_full_ss_vs_t(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns (ss_cas, ss_cdp) using full temperature-dependent coefficients\n [redundant to ss_scatter_figsrc module at the moment which is not the \n best but whatever]\n \"\"\"\n (meanr_cas, meanr_cdp) = get_meanr_vs_t(datablock, \\\n change_cas_corr, cutoff_bins)\n (nconc_cas, nconc_cdp) = get_nconc_vs_t(datablock, \\\n change_cas_corr, cutoff_bins)\n \n T = datablock[:, 1]\n w = datablock[:, 2]\n rho_a = datablock[:, -2]/(R_a*T)\n A = g*(L_v*R_a/(C_ap*R_v)*1/T - 1)*1./R_a*1./T\n e_s = get_sat_vap_pres(T)\n B = rho_w*(R_v*T/e_s + L_v**2./(R_v*C_ap*rho_a*T**2.))\n F_d = rho_w*R_v*T/(D*e_s) \n F_k = (L_v/(R_v*T) - 1)*L_v*rho_w/(K*T)\n ss_cas = A*w*(F_d + F_k)/(4*np.pi*nconc_cas*meanr_cas*B)*100\n ss_cdp = A*w*(F_d + F_k)/(4*np.pi*nconc_cdp*meanr_cdp*B)*100\n return (np.array(ss_cas), np.array(ss_cdp))\n\ndef get_ss_vs_t(datablock, change_cas_corr, cutoff_bins):\n \"\"\"\n Returns (ss_cas, ss_cdp) [redundant to ss_scatter_figsrc module at the\n moment which is not the best but whatever]\n \"\"\"\n (meanr_cas, meanr_cdp) = get_meanr_vs_t(datablock, \\\n change_cas_corr, cutoff_bins)\n (nconc_cas, nconc_cdp) = get_nconc_vs_t(datablock, \\\n change_cas_corr, cutoff_bins)\n\n T = datablock[:, 1]\n w = datablock[:, 2]\n A = g*(L_v*R_a/(C_ap*R_v)*1/T - 1)*1./R_a*1./T\n ss_cas = A*w/(4*np.pi*D*nconc_cas*meanr_cas)*100\n ss_cdp = A*w/(4*np.pi*D*nconc_cdp*meanr_cdp)*100\n \n return (np.array(ss_cas), np.array(ss_cdp))\n\ndef pad_lwc_arrays(dataset, change_cas_corr, cutoff_bins):\n lwc_t_inds = dataset['data']['lwc_t_inds']\n dataset_shape = np.shape(dataset['data']['time'])\n\n for cutoff_bins, change_cas_corr in product([True, False], repeat=2):\n booleankey = str(int(cutoff_bins)) \\\n + str(int(change_cas_corr)) \n padded_arr = np.empty(dataset_shape)\n padded_arr[:] = np.nan\n lwc_vals = dataset['data']['lwc'][booleankey]\n padded_arr[lwc_t_inds] = lwc_vals\n dataset['data']['lwc'][booleankey] = padded_arr\n\n return dataset\n\ndef pad_lwc_arrays_with_cip(dataset, change_cas_corr, cutoff_bins):\n lwc_t_inds = dataset['data']['lwc_t_inds']\n dataset_shape = np.shape(dataset['data']['time'])\n\n for cutoff_bins, change_cas_corr in product([True, False], repeat=2):\n booleankey = str(int(cutoff_bins)) \\\n + str(int(change_cas_corr)) \n padded_arr = np.empty(dataset_shape)\n padded_arr[:] = np.nan\n padded_arr_with_cip = np.empty(dataset_shape)\n padded_arr_with_cip[:] = np.nan\n lwc_vals = dataset['data']['lwc'][booleankey]\n lwc_vals_with_cip = dataset['data']['lwc_with_cip'][booleankey]\n padded_arr[lwc_t_inds] = lwc_vals\n padded_arr_with_cip[lwc_t_inds] = lwc_vals_with_cip\n dataset['data']['lwc'][booleankey] = padded_arr\n dataset['data']['lwc_with_cip'][booleankey] = padded_arr_with_cip\n\n return dataset\n\ndef get_sat_vap_pres(T):\n \"\"\"\n returns saturation vapor pressure in Pa given temp in K\n \"\"\"\n e_s = 611.2*np.exp(17.67*(T - 273)/(T - 273 + 243.5))\n return e_s\n\ndef get_u_term(r, eta, N_Be_div_r3, N_Bo_div_r2, N_P, pres, rho_air, temp):\n \"\"\"\n get terminal velocity for cloud / rain droplet of radius r given ambient\n temperature and pressure (from pruppacher and klett pp 415-419)\n \"\"\"\n if r <= 10.e-6:\n lam = 6.6e-8*(10132.5/pres)*(temp/293.15)\n u_term = (1 + 1.26*lam/r)*(2*r**2.*g*rho_w/9*eta)\n elif r <= 535.e-6:\n N_Be = N_Be_div_r3*r**3.\n X = 
np.log(N_Be)\n N_Re = np.exp(sum([N_Re_regime2_coeffs[i]*X**i for i in \\\n range(len(N_Re_regime2_coeffs))]))\n u_term = eta*N_Re/(2*rho_air*r)\n else:\n N_Bo = N_Bo_div_r2*r**2.\n X = np.log(16./3.*N_Bo*N_P**(1./6.))\n N_Re = N_P**(1./6.)*np.exp(sum([N_Re_regime3_coeffs[i]*X**i for i in \\\n range(len(N_Re_regime3_coeffs))]))\n u_term = eta*N_Re/(2*rho_air*r)\n return u_term\n\ndef get_dyn_visc(temp):\n \"\"\"\n get dynamic viscocity as a function of temperature (from pruppacher and\n klett p 417)\n \"\"\"\n eta = np.piecewise(temp, [temp < 273, temp >= 273], \\\n [lambda temp: (1.718 + 0.0049*(temp - 273) \\\n - 1.2e-5*(temp - 273)**2.)*1.e-5, \\\n lambda temp: (1.718 + 0.0049*(temp - 273))*1.e-5])\n return eta\n\ndef get_vent_coeff(N_Re):\n \"\"\"\n get ventilation coefficient (from pruppacher and klett p 541)\n \"\"\"\n f = np.piecewise(N_Re, [N_Re < 2.46, N_Re >= 2.46], \\\n [lambda N_Re: 1. + 0.086*N_Re, \\\n lambda N_Re: 0.78 + 0.27*N_Re**0.5])\n return f\n","sub_path":"src/halo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":28076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"92688236","text":"import unittest\nfrom SI507project5_code import *\n\n\nCACHE_TEST_FNAME = \"cache_contents_example.json\"\n\ntry:\n with open(CACHE_TEST_FNAME, 'r', encoding='UTF-8') as cache_test:\n test_json = cache_test.read()\n CACHE_TEST_DICTION = json.loads(test_json)\nexcept FileNotFoundError:\n CACHE_TEST_DICTION = {}\n\nTEXT_IDENT =\\\n \"HTTPS://API.TUMBLR.COM/V2/BLOG/NBCNEWS.TUMBLR.COM/POSTS\"\\\n + \"/?FILTER_TEXT_LIMIT_20_TYPE_TEXT\"\nPHOTO_IDENT = \\\n \"HTTPS://API.TUMBLR.COM/V2/BLOG/NBCNEWS.TUMBLR.COM/POSTS\"\\\n +\"/?FILTER_TEXT_LIMIT_20_TYPE_PHOTO\"\n\n\nclass PostTest(unittest.TestCase):\n def setUp(self):\n self.text_data = get_from_cache(\n TEXT_IDENT, CACHE_TEST_DICTION)['response']['posts']\n self.photo_data = get_from_cache(\n PHOTO_IDENT, CACHE_TEST_DICTION)['response']['posts']\n self.test_post1 = Post(self.text_data[0])\n self.test_text = TextPost(self.text_data[0])\n self.test_post2 = Post(self.photo_data[0])\n self.test_photo = PhotoPost(self.photo_data[0])\n\n def test_init(self):\n self.assertIsInstance(self.test_post1, Post,\n \"Post not init properly\")\n self.assertIsInstance(self.test_text, TextPost,\n \"TextPost not init properly\")\n self.assertIsInstance(self.test_photo, PhotoPost,\n \"PhotoPost not init properly\")\n\n def test_var(self):\n self.assertIsInstance(self.test_post1.blog_name, str,\n \"Post not init properly\")\n self.assertIsInstance(self.test_post1.post_id, int,\n \"Post not init properly\")\n self.assertIsInstance(self.test_post1.post_url, str,\n \"Post not init properly\")\n self.assertIsInstance(self.test_post1.post_type, str,\n \"Post not init properly\")\n self.assertIsInstance(self.test_post1.post_date, str,\n \"Post not init properly\")\n self.assertIsInstance(self.test_text.title, str,\n \"TextPost not init properly\")\n self.assertIsInstance(self.test_text.body, str,\n \"TextPost not init properly\")\n self.assertIsInstance(self.test_photo.photos, list,\n \"PhotoPost not init properly\")\n self.assertIsInstance(self.test_photo.caption, str,\n \"PhotoPost not init properly\")\n\n def test_str(self):\n self.assertNotEqual(str(self.test_post2), str(self.test_photo),\n \"PhotoPost string method not correct\")\n self.assertNotEqual(str(self.test_post1), str(self.test_text),\n \"TextPost string method not correct\")\n\n def tearDown(self):\n self.text_data = None\n 
self.photo_data = None\n self.test_post1 = None\n self.test_text = None\n self.test_photo = None\n\nclass ListTest(unittest.TestCase):\n\n def test_length(self):\n self.assertTrue(len(posts) <= 20,\n \"posts length not correct\")\n self.assertTrue(len(photo_posts) <= 20,\n \"photo_posts length not correct\")\n self.assertTrue(len(text_posts) <= 20,\n \"text_posts length not correct\")\n\n def test_type(self):\n self.assertIsInstance(posts[0], Post,\n \"posts instance not correct\")\n self.assertIsInstance(photo_posts[0], PhotoPost,\n \"photo_posts instance not correct\")\n self.assertIsInstance(text_posts[0], TextPost,\n \"text_posts instance not correct\")\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"SI507project5_tests.py","file_name":"SI507project5_tests.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"46639697","text":"import sqlite3\nfrom analysis.regression.data_prep import DataPreparation\nimport pandas as pd\nimport matplotlib\nmatplotlib.use\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom scipy.stats import shapiro\nfrom scipy.stats import anderson\nfrom scipy.stats import normaltest\nfrom scipy.stats import kstest\nimport statsmodels.stats.api as sms\nfrom statsmodels.compat import lzip\n#from analysis.regression.regression_dictsservice_regression_dicts import default_init_steps, service_steps\nfrom analysis.regression.data_prep import DataPreparation\nfrom sklearn.feature_selection import SelectKBest, f_regression\nimport numpy as np\nimport itertools\nimport math\n\ndct = {}\n\nconn = sqlite3.connect(\"../databases/taskrabbit_ny.db\")\n\nc = conn.cursor()\n\ndata_prep = DataPreparation()\n\nvars_important = {}\n\nservices = {\n 1: [\"avg_mood\", \"avg_vehicles\", \"per_white\"],\n 2: [\"per_male\", \"errors_per_word\", \"avg_sent_desc\"],\n 3: [\"per_male\", \"per_white\", \"errors_per_word\", \"avg_vehicles\"],\n 4: [\"per_white\", \"avg_sent_desc\", \"avg_vehicles\"],\n 6: [\"per_male\", \"avg_mood\", \"errors_per_word\", \"avg_vehicles\"],\n 7: [\"per_white\", \"avg_mood\", \"errors_per_word\", \"avg_vehicles\"],\n 8: [\"per_white\", \"avg_mood\", \"avg_vehicles\"],\n 29: [\"per_white\", \"avg_vehicles\"],\n 30: [\"avg_mood\", \"errors_per_word\", \"avg_sent_desc\"],\n 35: [\"per_male\", \"avg_mood\"],\n 51: [\"per_male\", \"per_white\", \"errors_per_word\"],\n 52: [\"per_male\", \"per_white\", \"avg_mood\", \"avg_sent_desc\", \"avg_vehicles\"]\n\n}\n\n\ndef mallow_cp(model, s2, N):\n return (model[\"result\"].ssr / s2) - N + model[\"num_vars\"] * 2\n\ndef plot_mallows(models):\n x, y = [], []\n fig1, ax1 = plt.subplots()\n fig1.set_size_inches(10, 10)\n for k in models:\n for model in models[k]:\n x.append(k)\n y.append(model[\"mallows\"])\n plt.scatter(x, y, alpha=0.60, color=(.2, .5, .8))\n ax1.set_xlabel('Number of predictors', fontsize=18)\n ax1.set_ylabel(\"Mallow's Cp\", fontsize=18)\n ax1.set_ylim([0, 10])\n x = np.linspace(*ax1.get_xlim())\n ax1.plot(x, x, color=(.2, .5, .8), alpha=0.8)\n\n # removing top and right borders\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n ax1.patch.set_alpha(0.7)\n\n ax1.xaxis.set_ticks_position('none')\n ax1.yaxis.set_ticks_position('none')\n\n # adds major gridlines\n ax1.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)\n ax1.spines['right'].set_color((.8, .8, .8))\n ax1.spines['top'].set_color((.8, .8, .8))\n 
ax1.grid('on', linestyle='dotted')\n ax1.tick_params(axis='both', which='major', labelsize=15)\n ax1.tick_params(axis='both', which='minor', labelsize=15)\n\n plt.savefig(\"scatter_mallows.png\")\n plt.show()\n\n\ndef get_all_subsets(X, y, mallows=True):\n combs = []\n results = []\n for i in range(1, len(X) + 1):\n els = [list(x) for x in itertools.combinations(X, i)]\n combs.extend(els)\n for comb in combs:\n model = sm.OLS(y, sm.add_constant(X[list(comb)]))\n result = model.fit()\n results.append({\"model\": model, \"result\": result, \"num_vars\": len(comb), \"vars\": X[list(comb)]})\n\n full_mse_res = sm.OLS(y, sm.add_constant(X)).fit().mse_resid\n acceptable_models = {}\n\n for model in results:\n not_acceptable = False\n for pvalue in model[\"result\"].pvalues:\n if pvalue > 0.05:\n not_acceptable = True\n break\n if not_acceptable:\n continue\n\n mallows_objective = model[\"num_vars\"]\n curr_mallows = mallow_cp(model, full_mse_res, X.shape[0])\n curr_min = None\n if model[\"num_vars\"] in acceptable_models and len(acceptable_models[model[\"num_vars\"]]) > 9:\n curr_min = acceptable_models[model[\"num_vars\"]][-1][\"mallows\"]\n\n model[\"mallows\"] = curr_mallows\n model[\"mallows_diff\"] = abs(curr_mallows - mallows_objective)\n if not curr_min is None:\n if model[\"mallows_diff\"] < abs(curr_min - mallows_objective):\n del acceptable_models[model[\"num_vars\"]][-1]\n acceptable_models[model[\"num_vars\"]].append(model)\n else:\n continue\n else:\n if not model[\"num_vars\"] in acceptable_models:\n acceptable_models[model[\"num_vars\"]] = []\n acceptable_models[model[\"num_vars\"]].append(model)\n\n acceptable_models[model[\"num_vars\"]] = \\\n sorted(acceptable_models[model[\"num_vars\"]], key=lambda k: k['mallows_diff'])\n\n curr_best = None\n for num_vars in acceptable_models:\n for model in acceptable_models[num_vars]:\n\n if curr_best is None:\n curr_best = model\n else:\n if curr_best[\"mallows_diff\"] > model[\"mallows_diff\"]:\n curr_best = model\n\n print(curr_best[\"result\"].summary())\n std = curr_best[\"model\"].exog.std(0)\n std[0] = 1\n tt = curr_best[\"result\"].t_test(np.diag(std))\n print(tt.summary())\n tt.summary_frame()\n\n fig = plt.figure(figsize=(12, 30))\n sm.graphics.plot_partregress_grid(curr_best[\"result\"])\n plt.savefig(\"resid_ny.png\")\n #plt.show()\n if False:\n fig, ax = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(12, 10))\n params = list(dict(curr_best[\"result\"].params).keys())\n\n n1 = math.floor(len(params)/2)\n n2 = math.floor(len(params) % 2)\n\n for i in range(2):\n for j in range(2):\n try:\n ax[i, j].scatter(curr_best[\"result\"].model.exog[:, i * 2 + j ], curr_best[\"result\"].resid)\n ax[i, j].set_xlabel(params[i*2 + j])\n ax[i, j].set_ylabel(\"resid\")\n ax[i, j].axhline(y=0, color=\"black\")\n except Exception:\n break\n plt.savefig(\"resid_sf.png\")\n plt.show()\n #fig = plt.figure(figsize=(12, 10))\n #fig = sm.graphics.plot_regress_exog(curr_best[\"result\"], \"per_white\", fig=fig)\n fig = sm.graphics.plot_partregress_grid(curr_best[\"result\"], fig=fig)\n fig.gca().set_title(\"\")\n plt.suptitle(\"\")\n plt.savefig(\"resid_ny.png\")\n #plt.show()\n\n stat, p = shapiro(curr_best[\"result\"].resid)\n print (\"Shapiro\")\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n stat, p = normaltest(curr_best[\"result\"].resid)\n print (\"D’Agostino’s\")\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n\n stat, p = kstest(curr_best[\"result\"].resid, 'norm')\n print(\"Kolmogorov-Smirnov\")\n print('Statistics=%.3f, p=%.3f' % 
(stat, p))\n #plot_mallows(acceptable_models)\n return curr_best[\"result\"].rsquared_adj\n\n for var in curr_best[\"vars\"]:\n coef = curr_best[\"result\"].params[var]\n pos_neg = \"pos\"\n if coef < 0:\n pos_neg = \"neg\"\n try:\n dct[var + \"_\" + pos_neg] += 1\n except Exception:\n dct[var + \"_\" + pos_neg] = 1\n\n\nadjusted_r2s = {}\n # exit()\nfor service in services.keys():\n #### COMBINATION #####\n print (service)\n rows = []\n for row in c.execute(\"SELECT * FROM service_location_data WHERE service_id = \" + str(service)):\n rows.append((row[2], row[3], row[4], row[5], row[6], row[7], row[8]))\n X = pd.DataFrame(rows, columns=[\"per_male\", \"per_white\", \"avg_mood\", \"errors_per_word\", \"avg_sent_desc\", \"avg_vehicles\", \"med_price\"])\n y = X[\"med_price\"]\n X = X.drop([\"med_price\"], axis=1)\n\n\n X_data_geog = data_prep.data_load(\"new_york\", c.execute(\"SELECT name FROM services WHERE service_id = \" + str(service)).fetchall()[0][0], True)\n #y = X_data_geog[\"Median service cost\"]\n X_data_geog = X_data_geog.drop([\"Median service cost\"], axis=1)\n #print (X_data_geog)\n\n\n X = pd.concat([X.reset_index(drop=True), X_data_geog.reset_index(drop=True)], axis=1)\n\n\n X = data_prep.scale_data(X)\n X = X[~X.isin([np.nan, np.inf, -np.inf]).any(1)]\n print (X)\n X = data_prep.calculate_vif_(X, 10)\n\n try:\n adjusted_r2s[service] = get_all_subsets(X, y)\n except Exception as e:\n print (e)\n pass\n #### ONLY TASKRABBIT\n '''\n print (service)\n\n rows = []\n for row in c.execute(\"SELECT * FROM service_location_data WHERE service_id = \" + str(service)):\n rows.append((row[2], row[3], row[4], row[5], row[6], row[7], row[8]))\n X = pd.DataFrame(rows, columns=[\"per_male\", \"per_white\", \"avg_mood\", \"errors_per_word\", \"avg_sent_desc\", \"avg_vehicles\", \"med_price\"])\n y = X[\"med_price\"]\n X = X.drop([\"med_price\"], axis=1)\n\n\n X = data_prep.scale_data(X)\n X = data_prep.calculate_vif_(X, 10)\n\n try:\n adjusted_r2s[service] = get_all_subsets(X, y)\n\n except Exception as e:\n print (e)\n pass\n\n '''\n #### ONLY CENSUS #####\n '''\n print (service)\n\n X_data_geog = data_prep.data_load(\"san_francisco\", c.execute(\"SELECT name FROM services WHERE service_id = \" + str(service)).fetchall()[0][0], True)\n y = X_data_geog[\"Median service cost\"]\n X_data_geog = X_data_geog.drop([\"Median service cost\"], axis=1)\n #print (X_data_geog)\n\n\n X = data_prep.scale_data(X_data_geog)\n X = data_prep.calculate_vif_(X, 10)\n\n try:\n adjusted_r2s[service] = get_all_subsets(X, y)\n except Exception as e:\n print (e)\n pass\n '''\n\n\n\nprint (vars_important)\nprint (adjusted_r2s)\n\n\n","sub_path":"analysis/regression_taskrabbit.py","file_name":"regression_taskrabbit.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"243961775","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nCLOCK_MCU = 84000000\r\nTOLERANCE = 0.01\r\nSAMPLING_RATE = 50\r\n\r\nnotes = [\r\n # (\"pp \", 1),\r\n # (\"C2 \", 65),\r\n # (\"CS2\", 69),\r\n # (\"D2 \", 73),\r\n # (\"DS2\", 78),\r\n # (\"E2 \", 82),\r\n # (\"F2 \", 87),\r\n # (\"FS2\", 92),\r\n # (\"G2 \", 98),\r\n # (\"GS2\", 104),\r\n # (\"A2 \", 110),\r\n # (\"AS2\", 117),\r\n # (\"B2 \", 123),\r\n # (\"C3 \", 131),\r\n # (\"CS3\", 139),\r\n # (\"D3 \", 147),\r\n # (\"DS3\", 156),\r\n # (\"E3 \", 165),\r\n # (\"F3 \", 175),\r\n # (\"FS3\", 185),\r\n # (\"G3 \", 196),\r\n # (\"GS3\", 208),\r\n # (\"A3 \", 220),\r\n # (\"AS3\", 
233),\r\n # (\"B3 \", 247),\r\n # (\"C4 \", 262),\r\n # (\"CS4\", 277),\r\n # (\"D4 \", 294),\r\n # (\"DS4\", 311),\r\n # (\"E4 \", 330),\r\n # (\"F4 \", 349),\r\n # (\"FS4\", 370),\r\n # (\"G4 \", 392),\r\n # (\"GS4\", 415),\r\n # (\"A4 \", 440),\r\n # (\"AS4\", 466),\r\n # (\"B4 \", 494),\r\n # (\"C5 \", 523),\r\n # (\"CS5\", 554),\r\n # (\"D5 \", 587),\r\n # (\"DS5\", 622),\r\n # (\"E5 \", 659),\r\n # (\"F5 \", 698),\r\n # (\"FS5\", 740),\r\n # (\"G5 \", 784),\r\n # (\"GS5\", 831),\r\n # (\"A5 \", 880),\r\n # (\"AS5\", 932),\r\n # (\"B5 \", 988),\r\n # (\"C6 \", 1047),\r\n # (\"CS6\", 1109),\r\n # (\"D6 \", 1175),\r\n # (\"DS6\", 1245),\r\n # (\"E6 \", 1318),\r\n # (\"F6 \", 1397),\r\n # (\"FS6\", 1480),\r\n # (\"G6 \", 1568),\r\n # (\"GS6\", 1661),\r\n # (\"A6 \", 1760),\r\n # (\"AS6\", 1865),\r\n # (\"B6 \", 1976),\r\n # (\"C7 \", 2093)\r\n (\"sequence\", 1)\r\n]\r\n\r\n# -----------------------------------------------------\r\n\r\n\r\ndef abs_error(num1, num2):\r\n return abs((num1 - num2) / num1)\r\n\r\n\r\ndef hertz(clock, prescaler, period):\r\n f = clock / (prescaler * period)\r\n return f\r\n\r\n\r\ndef perfect_divisors(trg_f):\r\n exacts = []\r\n for psc in range(1, 65536):\r\n arr = CLOCK_MCU / (trg_f * psc)\r\n if CLOCK_MCU % psc == 0:\r\n if arr <= 65536 and arr >= 2:\r\n exacts.append(psc)\r\n return exacts\r\n\r\n\r\ndef add_exact_period(prescaler, trg_f):\r\n entries = []\r\n arr = CLOCK_MCU / (trg_f * prescaler)\r\n if arr == int(arr):\r\n entry = [prescaler, arr, trg_f, 0.0]\r\n entries.append(entry)\r\n return entries\r\n\r\n\r\ndef possible_prescaler_value(trg_f):\r\n possibles = []\r\n for psc in range(1, 65536):\r\n if psc in exact_prescalers:\r\n continue\r\n h1 = hertz(CLOCK_MCU, psc, 1)\r\n h2 = hertz(CLOCK_MCU, psc, 65536)\r\n if h1 >= trg_f >= h2:\r\n possibles.append(psc)\r\n return possibles\r\n\r\n\r\ndef close_divisor(psc, tolerance, trg_f):\r\n arr = CLOCK_MCU / (trg_f * psc)\r\n error = abs_error(int(arr), arr)\r\n if error < tolerance and arr < 65536.0 and arr >= 2:\r\n h = hertz(CLOCK_MCU, psc, int(arr))\r\n return psc, int(arr), h, error\r\n else:\r\n return None\r\n\r\n\r\n# ------------------------------------------------------------------------\r\n\r\nf = open(\"sequence.c\", \"w\")\r\nf.write(\"void setNote(char* note, TIM_HandleTypeDef* htim){\\n\")\r\nnotes_sampled = [(x[0], x[1] * 100) for x in notes]\r\nfor note, freq in notes_sampled:\r\n df = pd.DataFrame(columns=['PSC', 'ARR', 'F', 'ERROR'], dtype=np.double)\r\n exact_prescalers = perfect_divisors(freq)\r\n exact_values = []\r\n for index in range(len(exact_prescalers)):\r\n rows = add_exact_period(exact_prescalers[index], freq)\r\n for rowindex in range(len(rows)):\r\n df = df.append(pd.DataFrame(\r\n np.array(rows[rowindex]).reshape(1, 4), columns=df.columns))\r\n\r\n poss_prescalers = possible_prescaler_value(freq)\r\n close_prescalers = []\r\n for index in range(len(poss_prescalers)):\r\n value = close_divisor(poss_prescalers[index], TOLERANCE, freq)\r\n if value is not None:\r\n close_prescalers.append((value[0], value[1], value[2], value[3]))\r\n df = df.append(pd.DataFrame(np.array(close_prescalers).reshape(\r\n len(close_prescalers), 4), columns=df.columns))\r\n\r\n df['PSC'] = df['PSC'] - 1\r\n df['ARR'] = df['ARR'] - 1\r\n\r\n df = df.sort_values(['ERROR', 'PSC'])\r\n\r\n df['EXACT'] = pd.Series(\"?\", index=df.index)\r\n df['EXACT'] = np.where(df['ERROR'] == 0.0, \"YES\", \"NO\")\r\n\r\n df['PSC'] = df['PSC'].map('{:.0f}'.format)\r\n df['ARR'] = df['ARR'].map('{:.0f}'.format)\r\n 
df['F'] = df['F'].map('{:.6f}'.format)\r\n df['ERROR'] = df['ERROR'].map('{:.10f}'.format)\r\n\r\n psc = df.iloc[0].loc[\"PSC\"]\r\n arr = df.iloc[0].loc[\"ARR\"]\r\n f.write(\" if(strcmp(note, \\\"\"+note+\"\\\") == 0){\\n\")\r\n f.write(\" htim->Instance->PSC = \"+psc+\";\\n\")\r\n f.write(\" htim->Instance->ARR = \"+arr+\";\\n\")\r\n f.write(\" }\\n\")\r\n print(note + \" generated.\")\r\nf.write(\"}\")\r\nexit(0)\r\n","sub_path":"Extra/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"558884518","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"Defined models for cassobjects\n\nTwo classes:\n - Model: a simple class representing a \"standard\" SQL table\n - TimestampedModel: this class represents an object that will be altered in\n time. Only one object will be stored by rowkey. Each column will be a new\n version of the object. The last column always represents the last state\n of the object.\n\nThe internal mechanism if largely inspired by the way SQLAlchemy works.\nIt defines classes (models) where properties maps columns.\nEach model can be instanciated to represent a single row (object) from\nCassandra.\n\nBy now, it only supports read, create columns families, and create new entries\nin columns families. Objects cannot be updated and saved.\n\n\"\"\"\n\nimport inspect\nfrom functools import partial\nfrom datetime import datetime\n\nfrom pycassa import ConnectionPool, ConsistencyLevel, NotFoundException\nfrom pycassa.types import CassandraType\nfrom pycassa.columnfamily import ColumnFamily\nfrom pycassa.index import create_index_expression, create_index_clause\nfrom pycassa.util import convert_time_to_uuid\n\nimport simplejson as json\n\nfrom cassobjects.utils import immutabledict\n\n__all__ = ['declare_model', 'MetaModel', 'MetaTimestampedModel', 'Column',\n 'ConsistencyLevel']\n\nclass ModelException(Exception):\n \"\"\"An exception occured during Model parsing/construction\"\"\"\n pass\n\n# Will hold ConnectionPool objects, where keys are the keyspace\nDEFAULT_KEYSPACE = 'Keyspace'\nDEFAULT_HOSTS = ['localhost:9160']\nPOOLS = {}\n\n#################\n# Column object #\n#################\n\nclass Column(object):\n \"\"\"Wraps \"metadata\" of a field into this class.\n\n This class allow to define metadata for a model field, like if it's an\n index.\n Column type must be a valid Cassandra Type, can also be a CompositeType,\n the class or directly an instanciated object.\n For a CompositeType, arguments *MUST* be instanciated objects, not the\n class (ie: CompositeType(UTF8Type(), IntegerType())).\n Column types are the pycassa types. 
They accepts the same parameters.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.index = kwargs.get('index', False)\n self.foreign_key = kwargs.get('foreign_key', None)\n self.unique = kwargs.get('unique', False)\n self.alias = None\n self.col_type = None\n args = list(args)\n if args:\n if isinstance(args[0], basestring):\n self.alias = args.pop(0)\n if args:\n self.col_type = args[0]\n if self.col_type is None:\n raise ModelException(\"Column needs to have a type\")\n if (inspect.isclass(self.col_type) and not issubclass(self.col_type, CassandraType)) \\\n or (not inspect.isclass(self.col_type) and not issubclass(self.col_type.__class__, CassandraType)):\n raise ModelException(\"Column type must be an instance or a class \"\n \"inherited from a cassandra type: %s\" % self.col_type)\n # instanciate the CassandraType if not already done in model\n if inspect.isclass(self.col_type):\n self.col_type = self.col_type()\n\n def do_init(self, local_class):\n \"\"\"No initialization is needed\"\"\"\n pass\n\n def get(self, instance):\n \"\"\"No need\"\"\"\n pass\n\n##################\n# models classes #\n##################\n\nclass ModelAttribute(object):\n \"\"\"This class wraps attributes (Column, ModelRelationship) of a Model with\n a descriptor-like object.\n As there is only one object instanciated in the Model inherited class, this\n class must store and retrieve values for all Model object instances.\n\n \"\"\"\n def __init__(self, host_class, attribute, prop):\n self.host_class = host_class\n self.attribute = attribute\n self.prop = prop\n self.values = {}\n\n def __get__(self, instance, owner):\n \"\"\"Access to the object is made.\n If `instance` is None, it means that we are called on the class object,\n so, simply returns self.\n If the returned value has already been saved, just returns it.\n Otherwise, we initialize the property, and get the value.\n\n \"\"\"\n if instance is None:\n return self\n self.prop.do_init(self.host_class)\n if not self.values.get(instance):\n self.values[instance] = self.prop.get(instance)\n return self.values[instance]\n\n def __set__(self, instance, value):\n \"\"\"Set a value for a Model instance object attribute\"\"\"\n if instance not in self.values:\n self.values[instance] = value\n else:\n #TODO: updating objects is currently not supported\n pass\n\nclass MetaModel(type):\n \"\"\"Represents a \"standard\" SQL table mapped on top of Cassandra.\n\n As there is no such concepts as \"foreign keys\" in Cassandra, the foreign\n keys created in cassobjects are only based on rowkey values. 
Column\n families can be in different keyspaces, and still have \"cassobjects foreign\n keys\" working.\n\n \"\"\"\n def __init__(cls, name, bases, dct):\n \"\"\"Verify model validity, add methods in `cls` to access indexes,\n etc...\n\n \"\"\"\n if 'registry' in cls.__dict__:\n return type.__init__(cls, name, bases, dct)\n # Indexes\n indexes = dct.get('__indexes__', [])\n columns = {}\n for attr, value in cls.__dict__.items():\n if isinstance(value, Column):\n if hasattr(value, 'index') and value.index:\n setattr(cls, 'get_by_%s' % attr, partial(cls.get_by, attr))\n setattr(cls, 'get_one_by_%s' % attr, partial(cls.get_one_by, attr))\n columns[attr] = value\n setattr(cls, attr, ModelAttribute(cls, attr, value))\n elif isinstance(value, ModelRelationship):\n setattr(cls, attr, ModelAttribute(cls, attr, value))\n if indexes:\n raise ModelException('Following indexes \"%s\" are not declared as '\n 'fields' % ','.join(indexes))\n # Column family name\n if '__column_family__' not in dct:\n cls.__column_family__ = cls.__name__.lower()\n\n # add the model in the CFRegistry object\n cls.registry.add(cls, columns)\n\n return type.__init__(cls, name, bases, dct)\n\n def get_by(cls, attribute, value):\n \"\"\"Only works for columns indexed in Cassandra.\n This means that the property must be in the __indexes__ attribute.\n\n :param attribute: The attribute to lookup.\n This argument is always provided by the partial method.\n\n :param value: The value to match.\n\n Returns a list of matched objects.\n\n \"\"\"\n col_fam = ColumnFamily(cls.pool, cls.__column_family__)\n clause = create_index_clause([create_index_expression(attribute, value)])\n idx_slices = col_fam.get_indexed_slices(clause)\n result = []\n for rowkey, columns in idx_slices:\n result.append(cls(rowkey, **columns))\n return result\n\n def get_one_by(cls, attribute, value):\n \"\"\"Same as :meth:`get_one`, except that it will raise if more than one\n value is returned, and will return directly an object instead of a\n list.\n\n :param attribute: The attribute to lookup.\n This argument is always provided by the partial method.\n\n :param value: The value to match.\n\n \"\"\"\n res = cls.get_by(attribute, value)\n if len(res) > 1 or len(res) == 0:\n raise ModelException(\"get_one_by_%s() returned more than one \"\n \"element or zero\" % attribute)\n return res[0]\n\n # Maps pycassa.ColumnFamily methods\n def get(self, *args, **kwargs):\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n return col_fam.get(*args, **kwargs)\n\n def multiget(self, *args, **kwargs):\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n return col_fam.multiget(*args, **kwargs)\n\n def get_count(self, *args, **kwargs):\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n return col_fam.get_count(*args, **kwargs)\n\n def multiget_count(self, *args, **kwargs):\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n return col_fam.multiget_count(*args, **kwargs)\n\n def get_range(self, *args, **kwargs):\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n return col_fam.get_range(*args, **kwargs)\n\n def insert(self, columns, **kwargs):\n \"\"\"Insert a new row in the column family.\n\n Several things are checked before inserting:\n\n - Verify that inputs exists in class, and resolve aliases.\n - As we are handling manually uniqueness, we must ensure that all\n unique fields are present in the `columns` parameter.\n - For all unique fields, we use :meth:`get_by` to ensure given value is\n actually.. 
unique.\n - We need to create a TimeUUID compatible object using pycassa helper.\n\n Fields that refers to relationships cannot be assigned directly at\n insert. Maybe this will be implemented later.\n\n TODO: maybe it will need to have some consistency level adjusted to\n avoid possible race conditions.\n\n \"\"\"\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n reg = self.registry[self.__column_family__]\n # verify inputs and resolve aliases\n for k, v in dict(columns).items():\n if k not in reg:\n raise ModelException('%s: no column \"%s\" found' %\n (self.__column_family__, k))\n if hasattr(reg[k], 'alias') and reg[k].alias:\n columns[reg[k].alias] = v\n del columns[k]\n # handles unique keys\n unique = [k for k, v in reg.items()\n if hasattr(v, 'unique') and v.unique]\n missing = set(unique) - set(columns.keys())\n if missing:\n raise ModelException(\"%s: cannot insert without following fields: %s\" %\n (self.__column_family__, ','.join(missing)))\n # ensure uniqueness\n verif_unique = [(k, v) for k, v in columns.items() if k in unique]\n for k, v in verif_unique:\n exists = self.get_by(k, v)\n if exists:\n # we have a hit, so this value is not unique\n break\n else:\n # generate a TimeUUID object for the rowkey\n key = convert_time_to_uuid(datetime.utcnow())\n ret = col_fam.insert(key, columns, **kwargs)\n return self(key, **columns)\n # some key in not unique\n raise ModelException(\"%s: cannot create, a value is not unique\" %\n self.__column_family__)\n\n\nclass MetaTimestampedModel(type):\n \"\"\"Represents a serialized object that will be altered in time.\n\n You can picture this model as the timeline of an object. Only one\n object is stored by row. Each column will be a new verseion of the\n object. Column keys are TimeUUID. Column values are the serialized\n object.\n This kind of model can't support Columns, and foreign keys.\n\n \"\"\"\n def __init__(cls, name, bases, dct):\n if 'registry' in cls.__dict__:\n return type.__init__(cls, name, bases, dct)\n # Column family name\n if '__column_family__' not in dct:\n cls.__column_family__ = cls.__name__.lower()\n\n # add the model in the CFRegistry object\n cls.registry.add(cls, {})\n\n return type.__init__(cls, name, bases, dct)\n\n def get_one_by_rowkey(self, rowkey, **kwargs):\n \"\"\"Get the object by the rowkey. 
Supports pycassa method `get` kwargs.\"\"\"\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n res = col_fam.get(rowkey, **kwargs)\n if len(res) > 1 or len(res) == 0:\n raise ModelException(\"get_one_by_rowkey() returned more than one \"\n \"element or zero\")\n return self(rowkey, res[rowkey])\n\n def insert(self, obj, *args, **kwargs):\n \"\"\"Insert a new object into the Column family.\n\n This method is responsible for serializing the object.\n If `args` exists, all objects in `args` will be associated with the\n newly created object.\n\n \"\"\"\n col_fam = ColumnFamily(self.pool, self.__column_family__)\n key = convert_time_to_uuid(datetime.utcnow())\n serialized = json.dumps(obj)\n ret = col_fam.insert(key, {key: serialized}, **kwargs)\n versions = ((key, obj),)\n for remote in args:\n assert(hasattr(remote, '__column_family__'))\n # as we are the timestamped object, we are the \"target\" in the many\n # to many table.\n cf = \"%s_%s\" % (remote.__column_family__, self.__column_family__)\n col_fam_mtm = ColumnFamily(self.pool, cf)\n col_fam_mtm.insert(remote.rowkey,\n {convert_time_to_uuid(datetime.utcnow()): key})\n return self(key, versions)\n\n#################################\n# Column Family Registry object #\n#################################\n\nclass CFRegistry(object):\n \"\"\"Store all created models in an immutable dict, and also store classes\"\"\"\n def __init__(self):\n self.cfs = immutabledict()\n self.classes = immutabledict()\n\n def __contains__(self, item):\n if not isinstance(item, basestring):\n item = item.__column_family__\n return item in self.cfs\n\n def __getitem__(self, item):\n return dict.__getitem__(self.cfs, item)\n\n def add(self, klass, definition):\n name = klass.__column_family__\n dict.__setitem__(self.cfs, name, definition)\n dict.__setitem__(self.classes, name, klass)\n\n def remove(self, name):\n dict.pop(self.cfs, name)\n dict.pop(self.classes, name)\n\n def clear(self):\n dict.clear(self.cfs)\n dict.clear(self.classes)\n\n def get_class(self, name):\n return dict.__getitem__(self.classes, name)\n\n #TODO do we need this here ?\n def create_column_families(self):\n pass\n\n# metaclass methods\n\ndef _model_constructor(self, rowkey, **kwargs):\n \"\"\"Constructor for instanciated model objects.\n Simply set the given attributes, and the given rowkey.\n Handles correctly Column aliases.\n\n \"\"\"\n kls = self.__class__\n setattr(self, 'rowkey', rowkey)\n for arg in kwargs:\n if not hasattr(kls, arg):\n # maybe it's an alias\n for key, value in kls.__dict__.items():\n if isinstance(value, ModelAttribute) and \\\n isinstance(value.prop, Column) and \\\n value.prop.alias == arg:\n setattr(self, key, kwargs[arg])\n break\n else:\n raise ModelException(\"%s can't be resolved in %s\" % (arg, kls))\n else:\n setattr(self, arg, kwargs[arg])\n_model_constructor.__name__ = '__init__'\n\n\ndef _timestamped_constructor(self, rowkey, versions):\n \"\"\"Constructor for instanciated MetaTimestamped models objects.\n The `version` parameter just represents differents versions of the objects.\n `version` parameter is a list of 2-tuples.\n\n \"\"\"\n kls = self.__class__\n self.rowkey = rowkey\n self.versions = versions\n_timestamped_constructor.__name__ = '__init__'\n\n\nCONSTRUCTORS = {\n MetaModel: _model_constructor,\n MetaTimestampedModel: _timestamped_constructor,\n}\n\ndef declare_model(cls=object, name='Model', metaclass=MetaModel,\n keyspace=DEFAULT_KEYSPACE, hosts=DEFAULT_HOSTS,\n reg=CFRegistry()):\n \"\"\"Constructs a base class for 
models.\n All models inheriting from this base will share the same CFRegistry object.\n\n \"\"\"\n POOLS.setdefault(keyspace, ConnectionPool(keyspace, hosts))\n return metaclass(name, (cls,), {'pool': POOLS[keyspace],\n 'registry': reg,\n '__init__': CONSTRUCTORS[metaclass]})\n\n# Relationships between models\n\nclass ModelRelationship(object):\n def __init__(self, target_kls, **kwargs):\n self.target = target_kls\n self.kwargs = kwargs\n self._initialized = False\n self.target_method = None\n\n def do_init(self, local_class):\n \"\"\"Resolve the relationship, and creates backref if necessary.\n Look in CFRegistry if the remote side (column family) is present,\n then look up for a foreign key linking to this instance.\n\n :param local_class: class on which the relationship is attached\n\n \"\"\"\n if self._initialized:\n return\n # find the remote side.\n registry = local_class.registry\n if self.target not in registry:\n raise ModelException('Model with column family name \"%s\" not found '\n 'in registry' % self.target)\n target_model = registry.get_class(self.target)\n if isinstance(target_model, MetaTimestampedModel):\n # MetaTimestampedModel relationships works with an intermediate\n # table that mimic many to many relationships.\n def _lookup_many_to_many(local_model, target_model, local_rowkey):\n \"\"\"This method will retrieve `target_model` instances\n associated with `local_rowkey` by looking up the relations in\n the intermediate table.\n\n \"\"\"\n cf = \"%s_%s\" % (local_model.__column_family__,\n target_model.__column_family__)\n col_fam = ColumnFamily(local_model.pool, cf)\n try:\n rows = col_fam.get(local_rowkey)\n except NotFoundException:\n return []\n ret = []\n for _, v in rows.items():\n ret.append(target_model.get_one_by_rowkey(v))\n return ret\n self.target_method = partial(_lookup_many_to_many, local_class, target_model)\n else:\n # find foreign key\n local_cf = local_class.__column_family__\n for col, value in registry[self.target].items():\n if value.foreign_key == local_cf:\n name = value.alias or col\n self.target_method = partial(getattr(target_model, 'get_by'), name)\n if self.target_method is None:\n raise ModelException('No foreign key found in \"%s\" for relationship '\n '\"%s\"' % (self.target, local_cf))\n self._initialized = True\n\n def get(self, instance):\n \"\"\"\n \"\"\"\n assert self._initialized\n return self.target_method(instance.rowkey)\n\n#TODO\ndef relationship(target_kls, **kwargs):\n return ModelRelationship(target_kls, **kwargs)\n","sub_path":"src/cassobjects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"516574314","text":"import sys\nimport argparse\nfrom memory import Memory\nfrom cache import CyclicCache, LRUCache\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--strategy',\n help='Expects one of Memory (default), LRU or Cyclic',\n default=\"Memory\")\n args = parser.parse_args()\n model = None\n if args.strategy == \"Memory\":\n model = Memory()\n elif args.strategy == \"Cyclic\":\n model = CyclicCache()\n elif args.strategy == \"LRU\":\n model = LRUCache()\n else:\n print(\"Unknown strategy: {}\".format(args.strategy))\n sys.exit(1)\n\n # Reads a list of integers from the command line. 
No error\n # checking, so non integers will bomb out.\n count = 0\n location = sys.stdin.readline().strip()\n while(location):\n count += 1\n location = int(location)\n print(\"{:03d},{:2d},\".format(count, location), end=\" \")\n print(model.lookup(location))\n location = sys.stdin.readline().strip()\n print()\n print(\"Model: {}\".format(model.name()))\n print(\"{:03d} Accesses\".format(count))\n print(\"{:03d} Memory Hits\".format(model.get_hit_count()))\n","sub_path":"caching/harness.py","file_name":"harness.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"500069503","text":"#This is an easy Converter which is called after a GCode-File is written\n#to use its output by splitting the lines and Convert it to a MPF\n\nimport io\n\nclass DoIt():\n\n version = 2\n gCodeFile = None\n path = \"\"\n output =\"\"\n tempWinkel = 0.0\n counter = 10\n\n #Variablen\n verrechnungswert = 0.4497\n xVar = 'XL'\n yVar = 'YL'\n zVar = 'ZL'\n g1Var = 'G1'\n g0Var = 'G0'\n #/Variablen\n\n @classmethod\n def openFile(self, pathSource, pathTarget, head, end):\n gCodeFile = open(pathSource, \"r\")\n\n\n gCodeLines = gCodeFile.readlines()\n\n #with open(path[:-5] + 'mpf', 'w+') as f:\n with io.open(pathTarget, \"w+\", encoding=\"utf8\") as f:\n\n for line in (head.readlines()):\n f.write(\"N\" + self.counter.__str__() + \" \" + line)\n self.counter += 10\n #f.writelines(head.readlines())\n f.write(\"\\n\\n\\n\")\n\n for line in gCodeLines:\n self.output += \"N\" + self.counter.__str__() + \" \"\n self.readLine(line)\n self.counter += 10\n f.write(self.output)\n self.output = \"\"\n\n f.write(\"\\n\\n\\n\")\n\n for line in (end.readlines()):\n f.write(\"N\" + self.counter.__str__() + \" \" + line)\n self.counter += 10\n #f.writelines(end.readlines())\n self.counter = 10\n self.tempWinkel = 0.0\n\n return\n\n @classmethod\n def readLine(self, line):\n\n elements = line.split(\" \")\n\n if(elements[0] != \"G0\" and elements[0] != \"G1\"):\n if \";\" not in elements[0]:\n if elements[0] == \"G92\":\n self.tempWinkel = 0.0\n self.output += \";\" + line\n else:\n self.output += line\n return\n else:\n containsXYZ = line.__contains__('X') or line.__contains__('Y') or line.__contains__('Z')\n checkEValue = elements[-1:].__str__()\n\n if containsXYZ:\n self.output += self.g1Var + \" \"\n else:\n self.output += self.g0Var + \" \" + self.calculate(checkEValue[3:-4], containsXYZ)\n return\n\n if checkEValue[2:3] is 'E':\n self.output += self.replaceVars(elements[1:-1]) + (self.calculate(checkEValue[3:-4], containsXYZ))\n return\n else:\n self.output += \" \".join(elements[1:])\n\n @classmethod\n def replaceVars(self, elements):\n moddedLine = ''\n\n for strValue in elements:\n if 'X' in strValue:\n moddedLine += self.xVar + strValue[1:] + \" \"\n elif 'Y' in strValue:\n moddedLine += self.yVar + strValue[1:] + \" \"\n elif 'Z' in strValue:\n moddedLine += self.zVar + strValue[1:] + \" \"\n else:\n moddedLine += strValue\n\n return moddedLine\n\n\n\n @classmethod\n def calculate(self, value, containsXYZ):\n # value = value.__getitem__(0)\n value = float(value)\n rEplace = value * self.verrechnungswert * 360.0 * 1.02 * 2\n\n temp = rEplace\n rEplace = rEplace - self.tempWinkel\n self.tempWinkel = temp\n\n if (containsXYZ):\n return (\"SP1=IC(\" + ('%.3f' % rEplace) + \")\\n\")\n else:\n return (\"SPOS=IC(\" + ('%.3f' % rEplace) + 
\")\\n\")\n\n\n\n\n","sub_path":"MPFWriter/Output/J_Do_It_modded.py","file_name":"J_Do_It_modded.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"73118921","text":"\"\"\"Programme pour le cours IFT2015\n Écrit par François Major le 16 février 2014.\n\"\"\"\n\nclass ArrayQueue:\n\n #implements the ADT Queue (Queue.py)\n #uses the python default List\n\n DEFAULT_CAPACITY = 1\n\n def __init__( self, capacity = DEFAULT_CAPACITY ):\n self._data = [None] * capacity\n self._capacity = capacity\n self._size = 0\n self._front = 0\n\n def __str__( self ):\n pp = str( self._data )\n pp += \"(size = \" + str( len( self ) )\n pp += \")[first = \" + str( self._front )\n pp += \"; capacity = \" + str( self._capacity ) + \"]\"\n return pp\n\n def __len__( self ):\n return self._size\n\n def is_empty( self ):\n return self._size == 0\n\n def first( self ):\n if self.is_empty():\n return False\n else:\n return self._data[self._front]\n\n def dequeue( self ):\n if self.is_empty():\n return False\n else:\n elem = self._data[self._front]\n self._data[self._front] = None\n self._front = ( self._front + 1 ) % len( self._data )\n self._size -= 1\n return elem\n\n def enqueue( self, elem ):\n if self._size == len( self._data ):\n self._resize( 2 * len( self._data ) )\n avail = ( self._front + self._size ) % len( self._data )\n self._data[avail] = elem\n self._size += 1\n\n def _resize( self, newcapacity ):\n old = self._data\n self._data = [None] * newcapacity\n walk = self._front\n for k in range( self._size ):\n self._data[k] = old[walk]\n walk = ( 1 + walk ) % len( old )\n self._front = 0\n self._capacity = newcapacity\n\n\"\"\"unit testing\n\"\"\"\nif __name__ == '__main__':\n\n data = ArrayQueue()\n print( data )\n\n data.enqueue( 5 )\n print( \"enqueue 5\" )\n print( data )\n\n data.enqueue( 3 )\n print( \"push 3\" )\n print( data )\n\n print( \"len = \", str( len( data ) ) )\n print( \"is_empty = \", data.is_empty() )\n print( data )\n\n print( \"dequeue = \", data.dequeue() )\n print( data )\n print( \"is_empty = \", data.is_empty() )\n\n print( \"dequeue = \", data.dequeue() )\n print( data )\n print( \"is_empty = \", data.is_empty() )\n\n print( \"dequeue = \", data.dequeue() )\n\n data.enqueue( 7 )\n print( \"enqueue 7\" )\n print( data )\n data.enqueue( 9 )\n print( \"enqueue 9\" )\n print( data )\n\n print( \"first = \", data.first() )\n data.enqueue( 4 )\n print( \"enqueue 4\" )\n print( data )\n\n print( \"len = \", str( len( data ) ) )\n print( \"dequeue = \", data.dequeue() )\n print( data )\n\n data.enqueue( 13 )\n print( \"enqueue 13\" )\n print( data )\n\n data.enqueue( 15 )\n print( \"enqueue 15\" )\n print( data )\n\n data.enqueue( 21 )\n print( \"enqueue 21\" )\n print( data )\n","sub_path":"code/ArrayQueue.py","file_name":"ArrayQueue.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"372155577","text":"import os\nfrom setuptools import setup\n\n# Little hack to make sure tests work\nos.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'\n\n__package_name__ = 'django-statsd'\n__version__ = '1.9.3'\n__author__ = 'Rick van Hattem'\n__author_email__ = 'Rick.van.Hattem@Fawo.nl'\n__description__ = '''django-statsd is a django app that submits query and\n view durations to Etsy's statsd.'''\n__url__ = 'https://github.com/WoLpH/django-statsd'\n\nif os.path.isfile('README.rst'):\n long_description = 
open('README.rst').read()\nelse:\n long_description = 'See http://pypi.python.org/pypi/django-statsd/'\n\nsetup(\n name=__package_name__,\n version=__version__,\n author=__author__,\n author_email=__author_email__,\n description=__description__,\n url=__url__,\n license='BSD',\n packages=['django_statsd'],\n long_description=long_description,\n test_suite='nose.collector',\n tests_requires='''\n python-statsd\n nose\n git+git://github.com/akheron/nosedjango@nose-and-django-versions#egg=nosedjango\n coverage\n django\n mock\n ''',\n setup_requires=['nose'],\n install_requires=['python-statsd>=1.6.0'],\n classifiers=[\n 'License :: OSI Approved :: BSD License',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"421182305","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n#Auth dependencies\nfrom rest_framework.decorators import api_view, permission_classes #for authenticated routes\nfrom rest_framework.permissions import IsAuthenticated #for authenticated routes\nfrom django.views.decorators.csrf import csrf_exempt #for authenticated routes\n# API dependencies\nfrom .serializers import BookSerializer, AuthorSerializer\nfrom .models import Book, Author\nfrom rest_framework import status\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\n# We must add the User object from the user app for the \"added_by\" field to work!!\nfrom django.apps import apps \nUsers = apps.get_model('users', 'CustomUser')\n\n\n# Create your views here.\n@api_view([\"GET\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef welcome(request):\n content = {\"message\": \"Welcome to the Heya Music App!\"}\n return JsonResponse(content)\n\n# Users can get ALL Books added by ALL users\n@api_view([\"GET\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef get_books(request):\n user = request.user.id # dont need a Users model since we are only searching\n # books = Book.objects.filter(added_by=user) # get all books added by this particular user\n books = Book.objects.all()\n serializer = BookSerializer(books, many=True)\n # PROCESSING DATA IN WHICH FRONTEND CAN READ\n data = serializer.data\n for item in data:\n item[\"author\"] = Author.objects.get(id=item[\"author\"]).name\n item[\"added_by\"] = Users.objects.get(id=item[\"added_by\"]).username\n return JsonResponse({'books': data }, safe=False, status=status.HTTP_200_OK)\n\n# Users can add a book\n@api_view([\"POST\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef add_book(request):\n payload = json.loads(request.body)\n user = request.user\n # user = Users.objects.get(id=request.user.id) #the user MUST come from the Object or else it will get an ERROR!!!\n # print(user.username)\n # print(payload)\n try:\n author = Author.objects.get(name=payload[\"author\"])\n book = Book.objects.create(\n title=payload[\"title\"],\n description=payload[\"description\"],\n added_by=user,\n author=author\n )\n serializer = BookSerializer(book)\n # PROCESS SERIALIZER - change to a field that humans can READ not SQL key data!\n data = serializer.data\n data[\"added_by\"] = user.username \n data[\"author\"] = payload[\"author\"] \n return JsonResponse({'books': data}, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return 
JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n# Users can update a book entry by id # USERS who created it can change the book!\n@api_view([\"PUT\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef update_book(request, book_id):\n user = request.user\n payload = json.loads(request.body)\n #PROCESS PAYLOAD TO SQL KEYS\n process_data = payload\n process_data[\"author\"] = Author.objects.get(name=payload[\"author\"]).id # Database can only read SQL keys so we need to change it to IDs\n try:\n book_item = Book.objects.filter(added_by=user, id=book_id)\n # returns 1 or 0\n book_item.update(**process_data)\n book = Book.objects.get(id=book_id)\n serializer = BookSerializer(book)\n #PROCESS DATA\n data = serializer.data\n data[\"added_by\"] = user.username \n data[\"author\"] = Author.objects.get(id=process_data[\"author\"]).name\n return JsonResponse({'book': data}, safe=False, status=status.HTTP_200_OK)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n# Users can delete a book entry by id\n@api_view([\"DELETE\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef delete_book(request, book_id):\n user = request.user.id\n try:\n book = Book.objects.get(added_by=user, id=book_id)\n book.delete()\n return JsonResponse({'Success': 'Deleted successfully'}, status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Something went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n\n@api_view([\"GET\"])\n@csrf_exempt\n@permission_classes([IsAuthenticated])\ndef author_profile(request, author_id):\n author = Author.objects.get(id=author_id)\n books = author.books.all() # get all book associated to that author\n num_of_books = author.books.count()\n serializer = BookSerializer(books, many=True)\n #PROCESSING DATA\n data = serializer.data\n for item in data:\n item[\"author\"] = Author.objects.get(id=item[\"author\"]).name\n item[\"added_by\"] = Users.objects.get(id=item[\"added_by\"]).username\n return JsonResponse({'numOfBooks': num_of_books,'books': data }, safe=False, status=status.HTTP_200_OK)\n\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"187741180","text":"from ..base import VerticalAdvectionStencil\nfrom ....tools import timing\n\nfrom dace.codegen.instrumentation.report import InstrumentationReport\nfrom gt4py import gtscript\nfrom .mixin import CPUStencilMixin, GPUStencilMixin\n\n\nclass VerticalAdvectionStencil(VerticalAdvectionStencil):\n def run_stencil(self, data):\n with self.on_device(data) as device_data:\n exec_info = {}\n origin = (self.halo,) * 3\n ustage, upos, utens, utens_stage, wcon, _, _, _ = device_data\n dtr_stage = 3.0 / 20.0\n self._gt4py_stencil_object.run(\n utens_stage=utens_stage,\n u_stage=ustage,\n wcon=wcon,\n u_pos=upos,\n utens=utens,\n dtr_stage=dtr_stage,\n exec_info=exec_info,\n _domain_=self.domain,\n _origin_=dict(\n utens_stage=origin,\n u_stage=origin,\n wcon=origin,\n u_pos=origin,\n utens=origin,\n ),\n )\n report = 
InstrumentationReport(exec_info[\"instrumentation_report\"])\n total_ms = sum(sum(v) for v in report.entries.values())\n return total_ms / 1000\n\n @property\n def constants(self):\n return {\"BET_M\": 0.5, \"BET_P\": 0.5}\n\n @property\n def definition(self):\n def vertical_advection_dycore(\n utens_stage: gtscript.Field[\"dtype\"],\n u_stage: gtscript.Field[\"dtype\"],\n wcon: gtscript.Field[\"dtype\"],\n u_pos: gtscript.Field[\"dtype\"],\n utens: gtscript.Field[\"dtype\"],\n *,\n dtr_stage: float,\n ):\n from __externals__ import BET_M, BET_P\n\n with computation(FORWARD):\n with interval(0, 1):\n gcv = 0.25 * (wcon[1, 0, 1] + wcon[0, 0, 1])\n cs = gcv * BET_M\n\n ccol = gcv * BET_P\n bcol = dtr_stage - ccol[0, 0, 0]\n\n # update the d column\n correction_term = -cs * (u_stage[0, 0, 1] - u_stage[0, 0, 0])\n dcol = (\n dtr_stage * u_pos[0, 0, 0]\n + utens[0, 0, 0]\n + utens_stage[0, 0, 0]\n + correction_term\n )\n\n # Thomas forward\n divided = 1.0 / bcol[0, 0, 0]\n ccol = ccol[0, 0, 0] * divided\n dcol = dcol[0, 0, 0] * divided\n\n with interval(1, -1):\n gav = -0.25 * (wcon[1, 0, 0] + wcon[0, 0, 0])\n gcv = 0.25 * (wcon[1, 0, 1] + wcon[0, 0, 1])\n\n as_ = gav * BET_M\n cs = gcv * BET_M\n\n acol = gav * BET_P\n ccol = gcv * BET_P\n bcol = dtr_stage - acol[0, 0, 0] - ccol[0, 0, 0]\n\n # update the d column\n correction_term = -as_ * (\n u_stage[0, 0, -1] - u_stage[0, 0, 0]\n ) - cs * (u_stage[0, 0, 1] - u_stage[0, 0, 0])\n dcol = (\n dtr_stage * u_pos[0, 0, 0]\n + utens[0, 0, 0]\n + utens_stage[0, 0, 0]\n + correction_term\n )\n\n # Thomas forward\n divided = 1.0 / (bcol[0, 0, 0] - ccol[0, 0, -1] * acol[0, 0, 0])\n ccol = ccol[0, 0, 0] * divided\n dcol = (dcol[0, 0, 0] - (dcol[0, 0, -1]) * acol[0, 0, 0]) * divided\n\n with interval(-1, None):\n gav = -0.25 * (wcon[1, 0, 0] + wcon[0, 0, 0])\n as_ = gav * BET_M\n acol = gav * BET_P\n bcol = dtr_stage - acol[0, 0, 0]\n\n # update the d column\n correction_term = -as_ * (u_stage[0, 0, -1] - u_stage[0, 0, 0])\n dcol = (\n dtr_stage * u_pos[0, 0, 0]\n + utens[0, 0, 0]\n + utens_stage[0, 0, 0]\n + correction_term\n )\n\n # Thomas forward\n divided = 1.0 / (bcol[0, 0, 0] - ccol[0, 0, -1] * acol[0, 0, 0])\n dcol = (dcol[0, 0, 0] - (dcol[0, 0, -1]) * acol[0, 0, 0]) * divided\n\n with computation(BACKWARD):\n with interval(-1, None):\n datacol = dcol[0, 0, 0]\n data_col = datacol\n utens_stage = dtr_stage * (datacol - u_pos[0, 0, 0])\n\n with interval(0, -1):\n datacol = dcol[0, 0, 0] - ccol[0, 0, 0] * data_col[0, 0, 1]\n data_col = datacol\n utens_stage = dtr_stage * (datacol - u_pos[0, 0, 0])\n\n return vertical_advection_dycore\n\n\nclass GpuVerticalAdvectionStencil(GPUStencilMixin, VerticalAdvectionStencil):\n def setup(self):\n assert self.parameters[\"u_only\"]\n return super().setup()\n\n\nclass CpuVerticalAdvectionStencil(CPUStencilMixin, VerticalAdvectionStencil):\n def setup(self):\n assert self.parameters[\"u_only\"]\n return super().setup()\n","sub_path":"stencil_benchmarks/benchmarks_collection/stencils/gt4py/vertical_advection.py","file_name":"vertical_advection.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"592583240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 16.01.2019\n\n@author: wagnerpeer\n\nModule is used to extract general information about parking ramps and details\nabout their capacity from the official OPG website:\nhttps://www.parken-osnabrueck.de/\n\"\"\"\nfrom contextlib import contextmanager\nimport 
functools\nimport html\nimport json\nimport logging\nimport logging.config\nimport re\nimport time\nfrom urllib import robotparser\n\nfrom bs4 import BeautifulSoup\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\n\n\nAGENT_NAME = 'codeforosnabrueckbot'\n\n\nLOGGING_CONFIGURATION = {\n 'version': 1,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)-8s %(name)-15s %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'default',\n 'stream': 'ext://sys.stdout',\n }\n },\n 'loggers': {\n 'opg_scraper': {\n 'level': 'INFO',\n 'propagate': True,\n 'handlers': ['console'],\n }\n }\n}\n\n\nlogging.config.dictConfig(LOGGING_CONFIGURATION)\n\n\nlogger = logging.getLogger('opg_scraper.' + __name__)\n\n\ndef eval_robots_txt(agent_name):\n @functools.wraps\n def inner(func):\n def wrapper(*args, **kwargs):\n parser = robotparser.RobotFileParser(url=kwargs['url'])\n parser.read()\n\n if parser.can_fetch(agent_name, kwargs['url']):\n return func(*args, **kwargs)\n else:\n raise PermissionError(f'The robots.txt permitts the crawling of the site {kwargs[\"url\"]}')\n return wrapper\n return inner\n\n\ndef wait_for_ajax(driver):\n wait = WebDriverWait(driver, 2)\n try:\n wait.until(lambda driver: driver.execute_script('return jQuery.active') == 0)\n wait.until(lambda driver: driver.execute_script('return document.readyState') == 'complete')\n except TimeoutException:\n logger.info('Timeout reached while waiting for website to load!')\n\n\n@eval_robots_txt(AGENT_NAME)\ndef get_details(driver, *, url=None):\n # Get handle to send shortcut which opens new tab\n driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')\n driver.get(url)\n wait_for_ajax(driver)\n\n total_capacity = driver.find_element_by_class_name('detail-total-capacity').text\n free_capacity = driver.find_element_by_class_name('detail-free-capacity').text\n\n logger.info((f'{free_capacity} von {total_capacity} frei.'))\n\n # Get handle to send shortcut which closes current tab\n driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')\n\n return int(free_capacity), int(total_capacity)\n\n\n@eval_robots_txt(AGENT_NAME)\ndef get_general_info(driver, *, url=None):\n driver.get(url)\n wait_for_ajax(driver)\n # wait_for_ajax` function might not work as expected, add extra sleep\n time.sleep(1)\n\n page_source = driver.page_source\n\n parking_ramps = re.search(pattern='var parkingRampData = (\\{.*\\});',\n string=page_source)\n\n parking_ramps = json.loads(html.unescape(parking_ramps.group(1)))\n\n for identifier, ramp_data in parking_ramps.items():\n logger.info(f'Parking Ramp Name: {ramp_data[\"name\"]}')\n\n soup = BeautifulSoup(ramp_data['gmapsMarker'], 'html.parser')\n details_url = soup.find('a', 'opg-map-infowindow-detaillink').get('href').replace('\\\\', '')\n\n free_capacity, total_capacity = get_details(driver=driver, url=details_url)\n ramp_data['free_capacity'] = free_capacity\n ramp_data['total_capacity'] = total_capacity\n\n return parking_ramps\n\n\ndef main(url):\n with get_webdriver() as driver:\n return get_general_info(driver=driver, url=url)\n\n\n@contextmanager\ndef get_webdriver():\n options = Options()\n options.headless = True\n driver = 
webdriver.Firefox(options=options)\n driver.implicitly_wait(10)\n yield driver\n driver.close()\n driver.quit()\n\n\nif __name__ == '__main__':\n url = r'https://www.parken-osnabrueck.de/'\n main(url)\n","sub_path":"src/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"591565056","text":"from flask import Flask, render_template, jsonify\nimport pandas as pd\nimport pathlib as pl\nimport numpy as np\nimport json\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('names.html')\n\n\n@app.route('/names')\ndef more_stuff():\n fn = pl.Path(__file__).parent / 'data/names.csv'\n\n dfx = pd.read_csv(fn.absolute())\n\n # Id,Name,Year,Gender,State,Count\n\n n = len(dfx)\n data = json.loads(dfx.to_json(orient=\"split\"))[\"data\"];\n info = [\n {\"title\": str(col)} for col in json.loads(dfx.to_json(orient=\"split\"))[\"columns\"]\n\n ]\n\n return jsonify(data=data)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"149484958","text":"import os\nimport sys\nfrom pprint import pprint\nfrom wes.main import load_db, clone_update_repo, find_framework_plugins, \\\n import_all_framework_plugins, extend_endpoints_with_metadata\nfrom wes.framework_plugins.common import JavaProcessor, PythonProcessor\n\nimport pytest\n\n# @pytest.mark.skip(reason=\"We need to fix a bunch of django stuff before we can get this test to not throw exceptions\")\ndef test_integration(tmpdir_factory):\n # Determine our working directory\n workingDir = str(tmpdir_factory.getbasetemp())\n\n projects = [{'baseUrl': 'http://west.example.com/', 'gitRepo': 'git@github.com:indeedsecurity/WEST.git'}]\n\n for project in projects:\n # Create commonly used variables for each Repo\n projectRepoPath = project['gitRepo'].split(':')[-1][:-4]\n projectName = project['gitRepo'].split('/')[-1][:-4]\n productGroup = projectRepoPath.split('/')[0]\n\n print(\"{}Processing the {} project{}\".format(10*\"-\", projectRepoPath, 10*\"-\"))\n\n groupFolder = os.path.join(workingDir, productGroup)\n projectFolder = os.path.join(workingDir, productGroup, projectName)\n\n # clone/update the repositories\n clone_update_repo(projectFolder, project['gitRepo'])\n\n # Find and import all framework plugins\n framework_plugins = find_framework_plugins()\n plugins = import_all_framework_plugins(framework_plugins)\n\n # Find all the endpoints\n endpoints = []\n\n # Load up the processors so they only preprocess once per project\n processors = {\n 'java': JavaProcessor(projectFolder),\n 'python': PythonProcessor(projectFolder)\n }\n print('Pre-processing the project...')\n for name, processor in processors.items():\n processor.load_project()\n\n # Loop through all the plugins\n for plugin in plugins:\n pluginObj = plugin.CustomFramework(workingDir=os.path.abspath(projectFolder), processors=processors)\n\n # If the project is identified by the plugin try to find the endpoints\n # for the project with the find_endpoints() method\n if pluginObj.identify():\n print(\"** Identified the project as a {} project.\".format(plugin.__name__[29:]))\n\n pluginEndpoints = pluginObj.find_endpoints()\n\n if pluginEndpoints:\n pluginEndpoints = extend_endpoints_with_metadata(pluginEndpoints,\n project['gitRepo'],\n productGroup,\n 
projectName,\n plugin.__name__[29:],\n project['baseUrl'])\n\n endpoints += pluginEndpoints\n\n # Load the list of all endpoints from the west project\n sys.path.append(os.path.abspath(os.path.join(projectFolder, 'scripts')))\n import west2json\n knownEndpoints = west2json.main(os.path.abspath(os.path.join(projectFolder)))\n\n failed_conditions = []\n # Verify we found all the endpoints\n for knownEp in knownEndpoints:\n # Find matches in endpoints\n possibleMatches = list(filter(lambda x: set(knownEp['endpoints']) == set(x['endpoints']), endpoints))\n if len(possibleMatches) < 1:\n failed_conditions.append(\" - Couldn't find the path: {}, desc: {}\".format(knownEp['endpoints'], knownEp['description']))\n elif len(possibleMatches) >= 1:\n if len(possibleMatches) > 1:\n possibleMatches = list(filter(lambda x: set(knownEp['methods']) == set(x['methods']), possibleMatches))\n if len(possibleMatches) != 1:\n # Okay we found a match, lets check the other parameters\n failed_conditions.append(\" - Found multiple results for the path: {}, desc: {}\".format(knownEp['endpoints'], knownEp['description']))\n continue\n match = possibleMatches[0]\n for param in knownEp['params']:\n if param not in match['params']:\n failed_conditions.append(\" - Couldn't find the param: {} for {}, desc: {}\".format(param, knownEp['endpoints'], knownEp['description']))\n for template in knownEp['templates']:\n if template not in match['templates']:\n failed_conditions.append(\" - Couldn't find the template: {} for {}, desc: {}\".format(template, knownEp['endpoints'], knownEp['description']))\n if knownEp['methods'] != match['methods']:\n failed_conditions.append(\" - Couldn't find the correct method for: {}, desc: {}\".format(knownEp['endpoints'], knownEp['description']))\n # pprint(possibleMatches)\n\n # pprint(knownEndpoints)\n # pprint(endpoints)\n\n assert len(failed_conditions) < 1, \"\\n\" + \"\\n\".join(failed_conditions)\n","sub_path":"integrationTest/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"212027848","text":"#!/usr/bin/env python3\n\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\n#from sqlalchemy import create_engine\nimport mysql.connector\nfrom mysql.connector import errorcode\n\nconfig = {\n 'user': 'query',\n 'password': 'query',\n 'host': '10.1.1.101',\n 'database': 'bfb',\n 'raise_on_warnings': True,\n}\n\ntry:\n cnx = mysql.connector.connect(**config)\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\n\ncursor = cnx.cursor()\n\npv_stat_query=(\"select stat,url,count(*) as sum \"\n\"from tbl_bfb_pv \"\n\"where stat = %s and date >=DATE_SUB(CURRENT_DATE, INTERVAL 1 DAY) and \"\n\"date < CURRENT_DATE group by url \"\n\"order by sum DESC limit %s;\")\n\nstat_list = [499,404]\ntop = 10\ndata_list = list()\n\nfor stat in stat_list:\n cursor.execute(pv_stat_query,(stat,top))\n data_list.append(cursor.fetchall())\n#print(data_list)\n\ncursor.close()\ncnx.close()\n\ndata = np.vstack(data_list)\n\nresult = [ row for row in data if int(row[-1]) >10]\n#print(result)\ndf = pd.DataFrame(result,columns=['状态','URL','数量'])\nhtml = df.style.set_properties(**{'background-color': '#D2D8F9',\n 'color': '#000000',\n 'border-color': 
'white'}).render()\n#print(s.to_html(index=False))\nprint(html)\n","sub_path":"bfb/pv_stat2html.py","file_name":"pv_stat2html.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"72448909","text":"import logging\n\nimport pajbot.models\nfrom pajbot.managers.adminlog import AdminLogManager\nfrom pajbot.managers.db import DBManager\nfrom pajbot.managers.handler import HandlerManager\nfrom pajbot.modules import BaseModule\n\nlog = logging.getLogger(__name__)\n\n\nclass BanphraseModule(BaseModule):\n\n ID = __name__.split('.')[-1]\n NAME = 'Banzin'\n DESCRIPTION = 'Kijkt naar elke message voor een ban zin.'\n ENABLED_DEFAULT = True\n CATEGORY = 'Filter'\n SETTINGS = []\n\n def is_message_bad(self, source, msg_raw, event):\n msg_lower = msg_raw.lower()\n\n res = self.bot.banphrase_manager.check_message(msg_raw, source)\n if res is not False:\n self.bot.banphrase_manager.punish(source, res)\n return True\n\n for f in self.bot.filters:\n if f.type == 'regex':\n m = f.search(source, msg_lower)\n if m:\n log.debug('Gelijke regex filter \\'{0}\\''.format(f.name))\n f.run(self.bot, source, msg_raw, event, {'match': m})\n return True\n elif f.type == 'banphrase':\n if f.filter in msg_lower:\n log.debug('Gelijke banzin filter \\'{0}\\''.format(f.name))\n f.run(self.bot, source, msg_raw, event)\n return True\n\n return False # message was ok\n\n def enable(self, bot):\n self.bot = bot\n HandlerManager.add_handler('on_message', self.on_message, priority=150)\n\n def disable(self, bot):\n HandlerManager.remove_handler('on_message', self.on_message)\n\n def on_message(self, source, message, emotes, whisper, urls, event):\n if whisper:\n return\n if source.level >= 500 or source.moderator:\n return\n\n if self.is_message_bad(source, message, event):\n # we matched a filter.\n # return False so no more code is run for this message\n return False\n\n def add_banphrase(self, **options):\n \"\"\"Method for creating and editing banphrases.\n Usage: !add banphrase BANPHRASE [options]\n Multiple options available:\n --length LENGTH\n --perma/--no-perma\n --notify/--no-notify\n \"\"\"\n\n message = options['message']\n bot = options['bot']\n source = options['source']\n\n if message:\n options, phrase = bot.banphrase_manager.parse_banphrase_arguments(message)\n\n if options is False:\n bot.whisper(source.username, 'Ongeldige banzin')\n return False\n\n options['added_by'] = source.id\n options['edited_by'] = source.id\n\n banphrase, new_banphrase = bot.banphrase_manager.create_banphrase(phrase, **options)\n\n if new_banphrase is True:\n bot.whisper(source.username, 'De volgende banzin toegevoegd (ID: {banphrase.id})'.format(banphrase=banphrase))\n AdminLogManager.post('Banzin toegevoegd', source, phrase)\n return True\n\n banphrase.set(**options)\n banphrase.data.set(edited_by=options['edited_by'])\n DBManager.session_add_expunge(banphrase)\n bot.banphrase_manager.commit()\n bot.whisper(source.username, 'De volgende banzin toegevoegd (ID: {banphrase.id}) with ({what})'.format(banphrase=banphrase, what=', '.join([key for key in options if key != 'added_by'])))\n AdminLogManager.post('Banzin gewijzigd', source, phrase)\n\n def remove_banphrase(self, **options):\n message = options['message']\n bot = options['bot']\n source = options['source']\n\n if message:\n id = None\n try:\n id = int(message)\n except ValueError:\n pass\n\n banphrase = bot.banphrase_manager.find_match(message=message, id=id)\n\n if banphrase is 
None:\n bot.whisper(source.username, 'Geen banzin gevonden met de gegeven parameters')\n return False\n\n AdminLogManager.post('Banzin verwijderd', source, banphrase.phrase)\n bot.whisper(source.username, 'Succesvol de banzin met id {0} verwijderd'.format(banphrase.id))\n bot.banphrase_manager.remove_banphrase(banphrase)\n else:\n bot.whisper(source.username, 'Gebruik: !remove banphrase (BANPHRASE_ID)')\n return False\n\n def load_commands(self, **options):\n self.commands['add'] = pajbot.models.command.Command.multiaction_command(\n level=100,\n delay_all=0,\n delay_user=0,\n default=None,\n command='add',\n commands={\n 'banphrase': pajbot.models.command.Command.raw_command(self.add_banphrase,\n level=500,\n description='Voeg een banzin toe!',\n delay_all=0,\n delay_user=0,\n examples=[\n pajbot.models.command.CommandExample(None, 'Maak een banzin',\n chat='user:!add banphrase testman123\\n'\n 'bot>user:Banzin toegevoegd (ID: 83)',\n description='Dit maakt een banzin met de standaard instellingen. Wanneer een non-moderator testman123 typt wordt deze getimedout voor 300 seconden en krijgt een whisper met dat dit is gebeurd').parse(),\n pajbot.models.command.CommandExample(None, 'Maak een banzin die iemand permabanned',\n chat='user:!add banphrase testman123 --perma\\n'\n 'bot>user:Banzin toegevoegd (ID: 83)',\n description='Dit maakt een banzin die mensen permabanned die testman123 zeggen in de chat. De gebruiker krijgt een whisper dat dit ook is gebeurd').parse(),\n pajbot.models.command.CommandExample(None, 'Maak een banzin die iemand permabanned zonder het te laten weten',\n chat='user:!add banphrase testman123 --perma --no-notify\\n'\n 'bot>user:Banzin toegevoegd (ID: 83)',\n description='Dit maakt een banzin die mensen permabanned die testman123 zeggen in de chat.').parse(),\n pajbot.models.command.CommandExample(None, 'Verander de standaard tijd voor een banzin',\n chat='user:!add banphrase testman123 --time 123\\n'\n 'bot>user:De banzin (ID: 83) ge-update met (time, extra_args)',\n description='Veranderd de standaat timeout lengte naar een lengte van 123 seconden').parse(),\n pajbot.models.command.CommandExample(None, 'Zorg ervoor dat subs niet een timeout krijgen',\n chat='user:!add banphrase testman123 --subimmunity\\n'\n 'bot>user:De banzin (ID: 83) ge-update met (sub_immunity)',\n description='Veranderd de banzin zodat subs niet een timeout krijgen').parse(),\n ]),\n }\n )\n\n self.commands['remove'] = pajbot.models.command.Command.multiaction_command(\n level=100,\n delay_all=0,\n delay_user=0,\n default=None,\n command='remove',\n commands={\n 'banphrase': pajbot.models.command.Command.raw_command(self.remove_banphrase,\n level=500,\n delay_all=0,\n delay_user=0,\n description='Verwijder een banzin!',\n examples=[\n pajbot.models.command.CommandExample(None, 'Verwijder een banzin',\n chat='user:!remove banphrase KeepoKeepo\\n'\n 'bot>user:Succesvol de banzin met id 33 verwijderd',\n description='Verwijderd de banzin met KeepoKeepo.').parse(),\n pajbot.models.command.CommandExample(None, 'Verwijder een banzin met het ID.',\n chat='user:!remove banphrase 25\\n'\n 'bot>user:Succesvol de banzin met id 25 verwijderd',\n description='Verijwder de banzin met id 25').parse(),\n ]),\n }\n )\n","sub_path":"pajbot/modules/banphrase.py","file_name":"banphrase.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"261199417","text":"\"\"\"\nModule to store the solubility laws of each species 
present in the melt,\nusing the method specified in the run.yaml file.\n\"\"\"\n\nimport numpy as np\nimport gmpy2 as gp\nfrom scipy.special import erf\nimport warnings\nfrom scipy.optimize import fsolve\nimport math\n\nimport conversions as cnvs\nimport constants as cnst\n\n# ------------------------------------------------------------------------------\n# MODEL DEFINITIONS (Melt from gas & fugacity from melt)\n# ------------------------------------------------------------------------------\n\ndef ardia2013(fCH4, P):\n \"\"\"\n The weight fraction of CH4 in the melt using method of Ardia et.al., 2013\n\n Args:\n fCH4 (float): The fugacity of CH4 (bars)\n P (float): Pressure (bars)\n \n Returns:\n The weight fraction of CH4 in the melt.\n \"\"\"\n P = P*1e-4 # Convert to GPa\n fCH4 = fCH4*1e-4\n \n k = gp.exp(4.93 - (0.000193*P))\n\n return (k*fCH4)/1e6\n\ndef ardia2013_fugacity(melt_CH4, P):\n \"\"\"\n The fugacity CH4 using method of Ardia et.al., 2013\n\n Args:\n melt_CH4 (float): The weight fraction of CH4 in the melt\n P (float): Pressure (bars)\n \n Returns:\n fCH4 Fugacity of CH4 (bar)\n \"\"\"\n P = P*1e-4 # Convert to GPa\n \n k = gp.exp(4.93 - (0.000193*P))\n\n fugacity = (melt_CH4*1e6)/k\n\n return fugacity*1e4 # convert fugacity from GPa to bar\n\ndef armstrong2015(fCO, P):\n \"\"\"\n The weight fraction of CARBON in C=O complexes (assume mean stoichiometry is\n a single CO) in a reduced melt from eq. 10 of Armstrong et al., 2015.\n (Technically this is 'non-carbonate C')\n \n Args:\n fCO (float): The fugacity of CO in the gas phase\n P (float): Pressure (bars)\n \n Returns:\n The weight fraction of CO in the melt.\n \"\"\"\n \n C = 10**(-0.738 + 0.876*gp.log10(fCO) - 5.44e-5*P)\n melt_CO = C/1e6 # ppm to weight fraction\n\n return melt_CO\n\ndef armstrong2015_fugacity(melt_CO, P):\n \"\"\"\n The fugacity of CO in the gas based on non-carbonate C in the melt.\n from eq. 10 of Armstrong et al., 2015.\n \n Args:\n melt_CO (float): weight fraction of CO in the melt\n P (float): Pressure (bars)\n \n Returns:\n fCO (float): FUgacity of CO (bar)\n \"\"\"\n \n melt_CO = melt_CO*1e6\n\n fCO = 10**((gp.log10(melt_CO) + 0.738 + 5.44e-5*P)/0.876)\n\n return fCO\n\ndef burguisser2015_co2(fCO2, CO2):\n \"\"\"\n Returns the weight fraction of CO2 in the melt using the method applied in\n D-Compress (Burguisser et. al., 2015).\n\n Args:\n fCO2 (float): The CO2 fugacity\n CO2 (class): The CO2 instance of the Molecule class\n \n Returns:\n The weight fraction of CO2 in the melt.\n \"\"\" \n return (CO2.solCon['a']*(fCO2)**CO2.solCon['b'])\n\ndef burguisser2015_co2_fugacity(melt_co2, CO2):\n \"\"\"\n Returns the fugacity of CO2 in the gas phase according to the solubility law applied in\n D-Compress (Burguisser et. al., 2015).\n\n Args:\n melt_co2 (float): The weight fraction of CO2 in the melt\n CO2 (class): The CO2 instance of the Molecule class\n \n Returns:\n The fugacity of CO2 in the gas phase\n \"\"\" \n return (melt_co2/CO2.solCon['a']) ** (1 / CO2.solCon['b'])\n\ndef burguisser2015_h2(mH2, H2, P, H2Y=None):\n \"\"\"\n Returns the weight fraction of H2 in the melt using the method applied in\n D-Compress (Burguisser et. 
al., 2015).\n\n Args:\n mH2 (float): The mol fraction of H2 in the gas phase\n H2 (class): The H2 instance of the Molecule class\n P (float): Pressure (bars)\n H2Y (float): The H2 fugacity coefficient (if different to one stored in H2)\n \n Returns:\n The weight fraction of H2 in the melt.\n \"\"\" \n if H2Y == None:\n return (H2.solCon['a']*(H2.Y*mH2*P)**H2.solCon['b'])\n else:\n return (H2.solCon['a']*(H2Y*mH2*P)**H2.solCon['b'])\n\ndef burguisser2015_h2_fugacity(melt_h2, H2):\n \"\"\"\n Returns the fugacity of H2 in the melt using the solubility law applied in\n D-Compress (Burguisser et. al., 2015).\n\n Args:\n mH2 (float): The weight fraction of H2 in the melt\n H2 (class): The H2 instance of the Molecule class\n \n Returns:\n The fugacity of H2 in the gas phase\n \"\"\" \n return (melt_h2/H2.solCon['a'])**(1/H2.solCon['b'])\n\ndef burguisser2015_h2o(mH2O, H2O, P, H2OY=None):\n \"\"\"\n Returns the weight fraction of H2O in the melt using the method applied in\n D-Compress (Burguisser et. al., 2015).\n\n Args:\n mH2O (float): The mol fraction of H2O in the gas phase\n H2O (class): The H2O instance of the Molecule class\n P (float): Pressure (bars)\n H2OY (float): The H2O fugacity coefficient (if different to version in H2O)\n \n Returns:\n The weight fraction of H2O in the melt.\n \"\"\" \n if H2OY == None:\n return (H2O.solCon['a']*(H2O.Y*mH2O*P)**H2O.solCon['b'])\n else:\n return (H2O.solCon['a']*(H2OY*mH2O*P)**H2O.solCon['b'])\n\ndef burguisser2015_h2o_fugacity(melt_h2o, H2O):\n \"\"\"\n Returns the fugacity of H2O in the gas phase using the solubility law applied in\n D-Compress (Burguisser et. al., 2015).\n\n Args:\n melt_h2o (float): The weight fraction of H2O in the melt\n H2O (class): The H2O instance of the Molecule class\n \n Returns:\n The fugacity of H2O in the gas.\n \"\"\" \n return (melt_h2o/H2O.solCon['a'])**(1/H2O.solCon['b'])\n\ndef eguchi2018(fCO2, fO2, T, P, melt):\n \"\"\"\n Returns the weight fraction of CO2 dissolved in the melt, as a converted sum of\n both molecular CO2, and carbonate (CO3^2-)\n \n Args:\n fCO2 (float): Fugacity of CO2 (bar)\n fO2 (float): Oxygen fugacity (bar)\n T (float): Temperature (K)\n P (float): Pressure (bar)\n melt (class): active instance of Melt class\n\n Returns:\n Weight fraction of CO2 dissolved in the melt, as the converted sum for\n CO2(mol) + CO3\n \"\"\"\n\n def NBO(mol_fracs):\n nsi = mol_fracs['sio2']\n nti = mol_fracs['tio2']\n nal = mol_fracs['al2o3'] * 2\n nfe2 = mol_fracs['feo']\n nfe3 = mol_fracs['fe2o3'] * 2\n nmn = mol_fracs['mno']\n nmg = mol_fracs['mgo']\n nca = mol_fracs['cao']\n nna = mol_fracs['na2o'] * 2\n nk = mol_fracs['k2o'] * 2\n np = mol_fracs['p2o5'] * 2\n o = mol_fracs['sio2'] * 2 + mol_fracs['tio2'] * 2 + mol_fracs['al2o3'] * 3 + nfe2 + nfe3 * 1.5 + mol_fracs['mno'] + mol_fracs['mgo']\\\n + mol_fracs['cao'] + mol_fracs['na2o'] + mol_fracs['k2o'] + mol_fracs['p2o5'] * 5\n\n\n NM = (nmg + nca + nfe2 + nna + nk + nmn)\n\n Al = nal - NM\n\n if Al > 0:\n al_tet = NM\n else:\n al_tet = nal\n\n Fe = nfe3 + Al\n\n if Al > 0:\n fe_tet = 0\n elif Al <= 0 and Fe > 0:\n fe_tet = Al*-1\n elif Al <= 0 and Fe <= 0:\n fe_tet = nfe3\n\n Tet = nsi + nti + np + al_tet + fe_tet\n NBO = 2*o - 4*Tet\n return NBO\n \n def xi(fCO2, T, P, nbo, oxides, name='co2'):\n \n cao = float(oxides['cao'])\n na2o = float(oxides['na2o'])\n k2o = float(oxides['k2o']) \n \n if name =='co2':\n DH = -90212\n DV = 0.000019244\n DS = -43.0815\n B = 1114.9\n yNBO = -7.0937\n A_CaO = 0\n A_Na2O = 0\n A_K2O = 0\n \n elif name == 'co3':\n DH = 
-164480\n DV = 0.00002384\n DS = -43.6385\n B = 1473.2\n yNBO = 3.291\n A_CaO = 1.68e5\n A_Na2O = 1.759e5\n A_K2O = 2.1085e5\n \n # PL: This isn't great really\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n lnXi = float(-(DV * (P*1e5) / (8.3144 * T)) + DH / (8.3144 * T) + (np.log(fCO2) * B) / T + DS / 8.3144 + (yNBO * nbo) + \\\n (A_CaO * cao + A_Na2O * na2o + A_K2O * k2o) / (8.3144 * T))\n \n return np.exp(lnXi)\n \n fCO2 = float(fCO2)\n fO2 = float(fO2)\n \n oxides = melt.iron_fraction(np.log(fO2), ppa = P*1e5)[0] # dry melt as oxide mol fractions\n\n nbo = float(NBO(oxides))\n \n Xco2 = xi(fCO2, T, P, nbo, oxides, name='co2')\n Xco3 = xi(fCO2, T, P, nbo, oxides, name='co3')\n\n FW_one = float(melt.formula_weight(fO2, P))\n\n # This assumes that the only species present in the melt is CO2/CO3. Not completely accurate here.\n CO2_CO2 = ((44.01*Xco2)/(44.01*Xco2+(1-(Xco2+Xco3))*FW_one))\n CO2_CO3 = ((44.01*Xco3)/(44.01*Xco3+(1-(Xco2+Xco3))*FW_one))\n\n return CO2_CO2 + CO2_CO3\n\ndef eguchi2018_fugacity(melt_co2, fO2, T, P, melt):\n \"\"\"\n Calculates fCO2 from the total melt CO2 content (melt fraction) using \n Eguchi & Dasgupta 2018.\n\n Uses fsolve to find the partitioning between molecular CO2 & CO3^2-,\n then the corresponding fCO2.\n\n Args:\n melt_CO2 (float): Total melt CO2 content (melt frac)\n fO2 (float): Oxygen fugacity\n T (float): Temperature (K)\n P (float): Pressure (bar)\n melt (class): active instance of Melt class\n\n Returns:\n fCO2 (float): CO2 fugacity\n \"\"\"\n\n def f(fco2, fO2, T, P, melt, melt_CO2):\n return eguchi2018(fco2, fO2, T, P, melt) - melt_CO2\n \n fCO2 = fsolve(f, 1.0, args=(float(fO2), float(T), float(P), melt, float(melt_co2)))[0]\n\n # Check for graphite saturation\n logK1 = 40.07639 - 2.53932 * 10 ** -2 * T + 5.27096 * 10 ** -6 * T ** 2 + 0.0267 * (P - 1) / T\n graph_fco2 = 10 ** logK1 * 10 ** gp.log10(fO2)\n\n if fCO2 > graph_fco2:\n melt.graphite_sat = True\n return gp.mpfr(graph_fco2)\n else:\n melt.graphite_sat = False\n return gp.mpfr(fCO2)\n\ndef graphite_fco2(T, P, fO2):\n \"\"\"\n Calculates the fCO2 in equilibrium with a graphite sturated melt.\n \"\"\"\n\n logK1 = 40.07639 - 2.53932 * 10 ** -2 * T + 5.27096 * 10 ** -6 * T ** 2 + 0.0267 * (P - 1) / T\n graph_fco2 = 10 ** logK1 * 10 ** gp.log10(fO2)\n\n return graph_fco2\n\ndef libourel2003(mN2, fO2, P):\n \"\"\"\n Returns the weight fraction of N in the melt.\n\n Args:\n mN2 (float): The mol fraction of N2 in the gas phase\n mO2 (float): The mol fraction of O2 in the gas phase\n O2 (class): The O2 instance of the Molecule class\n P (float): Pressure (bars)\n \n Returns:\n The weight fraction of N in the melt.\n \"\"\"\n return (0.0611e-6*P*0.986923*mN2 + 5.97e-16 * fO2**-0.75 * (P*0.986923*mN2)**0.5)\n\ndef libourel2003_fugacity(n_melt, nY, fO2, P):\n \"\"\"\n Returns the fugacity of N2 in the gas phase.\n\n Args:\n n_melt (float): The weight fraction of N in the melt\n nY (float): The fugacity coefficient of N2\n fO2 (float): The absolute oxygen fugacity\n P (float): Pressure (bars)\n \n Returns:\n The fugacity of N2 in the gas phase\n \"\"\"\n \n def n_quadratic(n_melt):\n \n a = 0.0611e-6\n b = 5.97e-16\n\n def f0(x):\n return a*x + b*((fO2)**(-0.75))*(x**0.5) - float(n_melt)\n \n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n PN2 = fsolve(f0, 1)[0]\n except:\n try:\n PN2 = fsolve(f0, 1e-10)[0]\n except:\n raise RuntimeError('Failed to find N2 partial pressure.')\n\n return PN2\n\n PN2 = n_quadratic(n_melt)\n mN2 = PN2/(P*0.986923) # 
pressure in atmospheres\n fn2 = nY * mN2 * P\n\n return fn2\n\ndef nash2019(fO2, P, T, melt, run):\n \"\"\"\n Calculates the ratio of sulfate:sulfide (S6+/S2-) in the melt\n after Nash et al., 2019.\n\n Args:\n fO2 (float): Absolute fO2\n P (float): Pressure (bar)\n T (float): Temperature (K)\n melt (class): active instance of Melt class\n run (class): active instance of the RunDef class\n \n Returns:\n ratio (float): S6+/S2- ratio of sulfur species in the melt\n \"\"\"\n F = 2*cnvs.fo2_2F(melt.Cm(), T, P*1e5, gp.log(fO2), run.FO2_MODEL) # F is mFe2O3/mFeO. *2 gives fe3/fe2.\n\n ratio = 10**(8*gp.log10(F) + (8.7436e6/T**2) - (27703/T) + 20.273)\n\n return ratio\n\ndef oneill2002(fO2, P, melt):\n \"\"\"\n Calculates the sulfide capacity of a melt based on the methods of\n O'Neill et al., 2002.\n\n Args:\n fO2 (float): Absolute fO2\n P (float): Pressure (bar)\n melt (class): active instance of Melt class\n \n Returns:\n capacity (float): Sulfide (S2-) capacity of melt as a weight fraction\n \"\"\"\n mol_frac = melt.iron_fraction(gp.log(fO2), ppa=P*1e5)[0]\n wts = cnvs.mol2wt(mol_frac)\n feo = wts['feo']*100 # wt% of feo in melt\n \n capacity = (0.0003*(100-feo)*gp.exp(0.21*feo))/1000000 # Convert ppm -> wt fraction\n return capacity\n\ndef oneill2020(T, melt):\n \"\"\"\n Calculates the sulfide capacity of a melt based on the methods of\n O'Neill et al., 2020.\n\n Args:\n T (float): temperature, K\n melt (class): active instance of Melt class\n \n Returns:\n capacity (float): Sulfide (S2-) capacity of melt as a weight fraction\n \"\"\"\n if not bool(melt.cm_dry):\n comp = melt.Cm()\n else:\n comp = melt.cm_dry\n\n if comp['fe2o3'] > 0:\n comp['feo'] = comp['feo'] + comp['fe2o3']*0.8998\n comp['fe2o3'] = 0.0\n \n comp = cnvs.mol2wt(comp)\n comp = cnvs.single_cat(comp) # Converts the dry mol fraction to normalized mol fraction on single cation basis.\n \n Na = comp['na2o']\n Ti = comp['tio2']\n K = comp['k2o']\n Ca = comp['cao']\n Mg = comp['mgo']\n Si = comp['sio2']\n Al = comp['al2o3']\n Fe2 = comp['feo'] # FeOt\n Mn = comp['mno']\n FeX = Fe2 + Mn\n \n capacity = (gp.exp(-23590/T + 8.77 +(1673/T)*(6.7*(Na+K) + 1.8*Al + 4.9*Mg + 8.1*Ca + 5*Ti + 8.9*FeX - 22.2*FeX*Ti + 7.2*FeX*Si) - 2.06*gp.erf(-7.2*FeX)))/1000000 # Convert ppm -> wt fraction\n\n return capacity\n\n\n# ------------------------------------------------------------------------------\n# MELT CONTENT MODEL SELECTION\n# ------------------------------------------------------------------------------\n\ndef ch4_melt(fCH4, P, name='ardia2013'):\n \"\"\"\n Returns the number of moles of CH4 in the melt.\n Applicable only for low fO2 conditions.\n\n Args:\n fCH4 (float): the fugacity of CH4 in the gas\n P (float): Pressure (bar)\n name (string): The name of the solubility law to be used, taken from 'run'\n\n Returns:\n melt_ch4 (float): the number of moles of CH4 in the melt.\n \"\"\"\n if name == 'ardia2013':\n return ardia2013(fCH4, P)/cnst.m['ch4']\n \n elif name == 'None':\n return 0\n\ndef co_melt(fCO, P, name='armstrong2015'):\n \"\"\"\n Returns the number of moles of CO in the melt.\n Applicable only for low fO2 conditions.\n\n Args:\n fCO (float): the fugacity of CO in the gas\n P (float): Pressure (bar)\n name (string): The name of the solubility law to be used, taken from 'run'\n\n Returns:\n melt_co (float): the number of moles of CO in the melt.\n \"\"\"\n if name == 'armstrong2015':\n return armstrong2015(fCO, P)/cnst.m['co']\n \n elif name == 'None':\n return 0\n\ndef co2_melt(fCO2, CO2, fO2, T, P, melt, 
name='burguisser2015'):\n \"\"\"\n Returns the number of moles of CO2 in the melt.\n\n Args:\n fCO2 (float): CO2 fugacity\n CO2 (class): CO2 instance of the Molecule class\n fO2 (float): Oxygen fugacity\n T (float): Temperature (K)\n P (float): Pressure (bars)\n melt (class): Active instance of the Melt class\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n melt_CO2 (float): the number of moles of CO2 in the melt.\n \"\"\"\n if name == 'burguisser2015':\n return burguisser2015_co2(fCO2, CO2)/cnst.m['co2']\n \n elif name == 'eguchi2018':\n return eguchi2018(fCO2, fO2, T, P, melt)/cnst.m['co2']\n\ndef h2_melt(mH2, H2, P, name='burguisser2015', Y = None):\n \"\"\"\n Returns the number of moles of H2 in the melt.\n\n Args:\n mH2 (float): The mol fraction of H2 in the gas phase\n H2 (class): The H2 instance of the Molecule class\n P (float): Pressure (bars)\n name (string): The name of the solubility law to be used, taken from 'run'\n Y (float): The H2 fugacity coefficient, if different to version stored in H2\n \n Returns:\n melt_H2 (float): the number of moles of H2 in the melt.\n \"\"\"\n \n if name == 'burguisser2015':\n return burguisser2015_h2(mH2, H2, P, H2Y=Y)/cnst.m['h2']\n\ndef h2o_melt(mH2O, H2O, P, name='burguisser2015', Y = None):\n \"\"\"\n Returns the number of moles of H2O in the melt.\n\n Args:\n mH2O (float): The mol fraction of H2O in the gas phase\n H2O (class): The H2O instance of the Molecule class\n P (float): Pressure (bars)\n name (string): The name of the solubility law to be used, taken from 'run'\n Y (float): The H2O fugacity coefficient, if different to version stored in H2O\n \n Returns:\n melt_H2O (float): the number of moles of H2O in the melt.\n \"\"\"\n \n if name == 'burguisser2015':\n return burguisser2015_h2o(mH2O, H2O, P, H2OY=Y)/cnst.m['h2o']\n\ndef n_melt(mN2, fO2, P, name='libourel2003'):\n \"\"\"\n Returns the number of moles of N in the melt.\n\n Args:\n mN2 (float): The mol fraction of N2 in the gas phase\n mO2 (float): The mol fraction of O2 in the gas phase\n O2 (class): The O2 instance of the Molecule class\n P (float): Pressure (bars)\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n The number of moles of N in the melt.\n \"\"\"\n if name == 'libourel2003':\n return libourel2003(mN2, fO2, P)/cnst.m['n']\n\ndef sulfate_melt(fS2, fO2, P, T, melt, run, name = 'nash2019'):\n \"\"\"\n Returns the number of moles of S6+ in the melt.\n\n Args:\n mS2 (float): The mol fraction of S2 in the gas phase\n S2 (class): The S2 instance of the Molecule class\n mO2 (float): The mol fraction of O2 in the gas phase\n O2 (class): The O2 instance of the Molecule class\n P (float): Pressure (bars)\n T (float): Temperature (K)\n melt (class): active instance of the Melt class\n run (class): active instance of the RunDef class\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n melt_S6+ (float): the weight fraction of sulfate (S6+) in the melt.\n \"\"\"\n\n if name == 'nash2019':\n \n ratio = nash2019(fO2, P, T, melt, run)\n \n return ratio * (sulfide_melt(fS2, fO2, P, T, melt, name = run.SULFIDE_CAPACITY))\n\ndef sulfide_melt(fS2, fO2, P, T, melt, name = 'oneill2020'):\n \"\"\"\n Returns the number of moles of S2- in the melt.\n\n Args:\n mS2 (float): The mol fraction of S2 in the gas phase\n S2Y (float): S2 fugacity coefficient\n mO2 (float): The mol fraction of O2 in the gas phase\n O2Y (float): O2 fugacity coefficient\n P (float): Pressure 
(bars)\n T (float): Temperature (K)\n melt (class): active instance of the Melt class\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n melt_S2- (float): the weight fraction of sulfide (S2-) in the melt.\n \"\"\"\n if name == 'oneill2002': \n capacity = oneill2002(fO2, P, melt)\n \n elif name == 'oneill2020':\n capacity = oneill2020(T, melt)\n \n return (capacity * (fS2/fO2) ** 0.5)/cnst.m['s']\n\n\n# ------------------------------------------------------------------------------\n# FUGACITY MODEL SELECTION\n# ------------------------------------------------------------------------------\n\ndef ch4_fugacity(melt_ch4, P, name='ardia2013'):\n \"\"\"\n Returns the fugacity of CH4.\n Applicable only for low fO2 conditions.\n\n Args:\n melt_ch4 (float): the weight fraction of CH4 in the melt\n P (float): Pressure (bar)\n name (string): The name of the solubility law to be used, taken from 'run'\n\n Returns:\n fCH4 (float): the fugacity of CH4\n \"\"\"\n if name == 'ardia2013':\n return ardia2013_fugacity(melt_ch4, P)\n \n elif name == 'None':\n return 0\n\ndef co_fugacity(melt_co, P, name='armstrong2015'):\n \"\"\"\n Returns the fugacity of CO.\n Applicable only for low fO2 conditions.\n\n Args:\n melt_co (float): the weight fraction of CO in the melt\n P (float): Pressure (bar)\n name (string): The name of the solubility law to be used, taken from 'run'\n\n Returns:\n fCO (float): the fugacity of CO\n \"\"\"\n if name == 'armstrong2015':\n return armstrong2015_fugacity(melt_co, P)\n \n elif name == 'None':\n return 0\n\ndef co2_fugacity(melt_co2, CO2, fO2, T, P, melt, name='burguisser2015'):\n \"\"\"\n Returns the fugacity of CO2 in the gas.\n\n Args:\n melt_co2 (float): The weight fraction of CO2 in the melt\n CO2 (class): The CO2 instance of the Molecule class\n fO2 (float): Oxygen fugacity NOT NAT LOGGED\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n fCO2 (float): the fugacity of CO2\n \"\"\"\n if name == 'burguisser2015':\n return burguisser2015_co2_fugacity(melt_co2, CO2)\n \n elif name == 'eguchi2018':\n return eguchi2018_fugacity(melt_co2, fO2, T, P, melt)\n\ndef h2_fugacity(melt_h2, H2, name='burguisser2015'):\n \"\"\"\n Returns the fugacity of H2 in the gas.\n\n Args:\n melt_h2 (float): The weight fraction of H2 in the melt\n H2 (class): The H2 instance of the Molecule class\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n fH2 (float): the fugacity of H2\n \"\"\"\n \n if name == 'burguisser2015':\n return burguisser2015_h2_fugacity(melt_h2, H2)\n\ndef h2o_fugacity(melt_h2o, H2O, name='burguisser2015'):\n \"\"\"\n Returns the fugacity of H2O in the gas.\n\n Args:\n melt_h2o (float): The weight fraction of H2O in the melt\n H2O (class): The H2O instance of the Molecule class\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n fH2O (float): the fugacity of H2O.\n \"\"\"\n \n if name == 'burguisser2015':\n return burguisser2015_h2o_fugacity(melt_h2o, H2O)\n\ndef n2_fugacity(melt_n, N2Y, fo2, P, name='libourel2003'):\n \"\"\"\n Returns the fugacity of N2 in the gas.\n\n Args:\n melt_n (float): The weight fraction of N in the melt\n N2Y (float): The fugacity coefficient of N2\n fo2 (float): The absolute fO2\n P (float): Pressure (bars)\n name (string): The name of the solubility law to be used, taken from 'run'\n \n Returns:\n The fugacity of N2\n \"\"\"\n if name == 'libourel2003':\n return libourel2003_fugacity(melt_n, 
N2Y, fo2, P)\n\ndef S2_fugacity(s_melt, fO2, P, T, melt, run, sulfidename = 'oneill2020', sulfatename='nash2019'):\n \"\"\"\n Returns the fugacity of S2 in the gas.\n\n Args:\n s_melt (float): The weight fraction of total sulfur (S2- + S6+) in the melt\n fO2 (float): The absolute fO2\n P (float): Pressure (bars)\n T (float): temperature (K)\n melt (class): active instance of the Melt class\n run (class): active instance of the RunDef class\n sulfidename (string): The name of the sulfide capacity law\n sulfatename (string): The name of the sulfate calculation\n \n Returns:\n fS2: the fugacity of S2 in the gas.\n \"\"\"\n\n if sulfatename == 'nash2019':\n melt.cm_dry = melt.iron_fraction(gp.log(fO2), ppa=P*1e5)[0] # Uses fO2 to set the Fe2/Fe3 ratio and dry melt chemistry prior to needing sulfide capacity\n \n # need to find the ratio of s6+/s2-, then use the amount of s2- to get mS2.\n ratio = nash2019(fO2, P, T, melt, run) # s6+/s2-\n s2_melt = s_melt * (1/(1+ratio))\n\n if sulfidename == 'oneill2002': \n capacity = oneill2002(fO2, P, melt)\n \n elif sulfidename == 'oneill2020':\n capacity = oneill2020(T, melt)\n\n fS2 = (s2_melt/capacity)**(1/0.5) * fO2\n\n return fS2\n\n","sub_path":"EVo/solubility_laws.py","file_name":"solubility_laws.py","file_ext":"py","file_size_in_byte":24468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"55738627","text":"\n\"\"\"\r\nEsta modulo es utilizado para desarrollar el jugador.\r\n\"\"\"\r\nimport pygame\r\n\r\nimport constantes\r\nfrom puntos import Punto\r\nfrom platforms import MovingPlatform\r\nfrom funciones_spritesheet import SpriteSheet\nfrom funciones_spritesheet import SpriteSheetNotas\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\r\n # -- Atributos\r\n mover_x = 0\r\n mover_y = 0\r\n vida = 60\r\n # Estas listas definen todas las imagenes de nuestro jugador.\r\n jugador_frame_izq = []\r\n jugador_frame_der = []\n jugador_frame_izq2 = []\r\n jugador_frame_der2 = []\n jugador_frame_izq3 = []\n jugador_frame_der3 = []\r\n puntaje = 0\r\n \r\n # Direccion en la que va el jugador.\r\n direccion = \"R\"\r\n salto = False\r\n\r\n # Lista de sprite con las cosas que nos podemos chocar.\r\n nivel = None\r\n\r\n colision = None\r\n \r\n # -- Methods\r\n def __init__(self, jugador):\r\n \"\"\" Constructor function \"\"\"\r\n\r\n pygame.sprite.Sprite.__init__(self)\n \n \n if jugador == 1:\n sprite_sheet = SpriteSheetNotas(\"imagenes/metalerosht.png\")\n \n image = sprite_sheet.get_image(0, 0, 39, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(39, 0, 39, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(78, 0, 39, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(118, 0, 39, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(156, 0, 39, 81)\n self.jugador_frame_der.append(image)\n \n #Rotacion \n image = sprite_sheet.get_image(0, 0, 39, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(39, 0, 39, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(78, 0, 39, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(117, 0, 39, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(156, 0, 39, 81)\n 
image = pygame.transform.flip(image, True, False)\n \n elif jugador == 2:\n sprite_sheet = SpriteSheetNotas(\"imagenes/rastasht.png\")\n \n image = sprite_sheet.get_image(0, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(46, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(92, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(138, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(183, 0, 45, 81)\n self.jugador_frame_der.append(image)\n \n #Rotacion \n image = sprite_sheet.get_image(0, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(46, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(92, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(138, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(183, 0, 45, 81)\n image = pygame.transform.flip(image, True, False)\n \n elif jugador == 3:\n sprite_sheet = SpriteSheetNotas(\"imagenes/raperosht.png\")\n\n image = sprite_sheet.get_image(0, 0, 46, 80)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(46, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(92, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(139, 0, 46, 81)\n self.jugador_frame_der.append(image)\n image = sprite_sheet.get_image(185, 0, 45, 81)\n self.jugador_frame_der.append(image)\n \n #Rotacion \n image = sprite_sheet.get_image(0, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(46, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(92, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(139, 0, 46, 81)\n image = pygame.transform.flip(image, True, False)\n self.jugador_frame_izq.append(image)\n image = sprite_sheet.get_image(185, 0, 45, 81)\n image = pygame.transform.flip(image, True, False)\n \n \n \n\n self.jugador_frame_izq.append(image)\n self.image = self.jugador_frame_der[0]\n self.rect = self.image.get_rect()\r\n self.comidas = pygame.sprite.Group()\r self.colision = pygame.mixer.Sound(\"sonido/sonidocolicion.ogg\")\r\n self.comidas_que_chocan = pygame.mixer.Sound(\"sonido/SOL.ogg\")\n \n \r\n def update(self):\r\n \"\"\" Metodo que mueve al jugador. 
\"\"\"\r\n # Gravedad\r\n self.calc_grav()\r\n\r\n # Movimientos Izquierda/Derecha\r\n self.rect.x += self.mover_x\r\n pos = self.rect.x + self.nivel.world_shift\r\n if self.direccion == \"R\":\r\n frame = (pos // 30) % len(self.jugador_frame_der)\r\n self.image = self.jugador_frame_der[frame]\r\n else:\r\n frame = (pos // 30) % len(self.jugador_frame_izq)\r\n self.image = self.jugador_frame_izq[frame]\r\n\r\n # Verficiamos si colisionamos con algo\r\n lista_de_bloques_colisionados = pygame.sprite.spritecollide(self, self.nivel.platform_list, False)\r\n for block in lista_de_bloques_colisionados:\r\n if self.mover_x > 0:\r\n self.rect.right = block.rect.left\r\n elif self.mover_x < 0:\r\n self.rect.left = block.rect.right\r\n \r\n self.colision.play()\r\n \r\n self.rect.y += self.mover_y\r\n\r\n lista_de_bloques_colisionados = pygame.sprite.spritecollide(self, self.nivel.platform_list, False)\r\n for block in lista_de_bloques_colisionados:\r\n if self.mover_y > 0:\r\n self.rect.bottom = block.rect.top\r\n elif self.mover_y < 0:\r\n self.rect.top = block.rect.bottom\r\n\r\n self.mover_y = 0\r\n\r\n if isinstance(block, MovingPlatform):\r\n self.rect.x += block.mover_x\r\n \r\n lista_de_comidas_a_comer = pygame.sprite.spritecollide(self, self.nivel.lista_de_comidas,False)\r\n for comidas_que_chocan in lista_de_comidas_a_comer:\r\n comidas_que_chocan.kill()\r\n self.puntaje += 1\r\n self.comidas_que_chocan.play()\r\n \r\n \r\n \r\n def calc_grav(self):\r\n \"\"\" Calcula el efecto de la gravedad. \"\"\"\r\n if self.mover_y == 0:\r\n if self.salto:\r\n self.colision.play()\r\n self.salto = False\r\n self.mover_y = 2\r\n else:\r\n self.mover_y += .34\r\n\r\n # Verificamos si estamos en el suelo.\r\n if self.rect.y >= constantes.LARGO_PANTALLA - self.rect.height and self.mover_y >= 1:\r\n self.mover_y = 1\r\n self.rect.y = constantes.LARGO_PANTALLA - self.rect.height\r\n \r\n\r\n def jump(self):\r\n \"\"\" Metodo que se llamam si saltamos. \"\"\"\r\n \r\n self.salto = True\r\n self.rect.y += 2\r\n platform_hit_list = pygame.sprite.spritecollide(self, self.nivel.platform_list, False)\r\n self.rect.y -= 2\r\n\r\n if len(platform_hit_list) > 0 or self.rect.bottom >= constantes.LARGO_PANTALLA:\r\n self.mover_y = -10\r\n\r\n def go_left(self):\r\n \"\"\" Se llama cuando movemos hacia la izq. \"\"\"\r\n self.mover_x = -6\r\n self.direccion = \"L\"\r\n\r\n def go_right(self):\r\n \"\"\" Se llama cuando movemos hacia la der. \"\"\"\r\n self.mover_x = 6\r\n self.direccion = \"R\"\n \r\n\r\n def stop(self):\r\n \"\"\" Se llama cuando soltamos la tecla. 
\"\"\"\r\n self.mover_x = 0\r\n","sub_path":"jugador.py","file_name":"jugador.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"271853279","text":"import datetime\nfrom python.api import eikon\nfrom python.api_requests import request_news_for_ticker_and_date\n\n\ndef graph_high_low_with_markers(name, data):\n\n results = []\n\n for point in data['R'][0]['Data']:\n values = dict([(x, point[x]) for x in ['High', 'Low', 'Close', 'Open', 'Date']])\n\n values['Date'] = values['Date'][:10]\n results.append(values)\n\n eikon.save('%s_range' % name, results)\n points = key_points(results)\n\n dates = get_date(points)\n data = request_news_for_ticker_and_date(name, dates)\n\n for i in range(len(dates)):\n points[i]['Headlines'] = [d['HeadlineText'] for d in data[i]['Headlines']]\n\n eikon.save('%s_markers' % name, points)\n return points\n\n\ndef key_points(data):\n rs = []\n\n for row in data:\n gap_alert = float(row['Close']) * 0.10\n gap = float(row['High']) - float(row['Low'])\n if gap > gap_alert:\n\n if row['Open'] >= row['Close']:\n label = \"v\"\n else:\n label = \"^\"\n\n rs.append({'Date': row['Date'], 'label': label, 'data': row})\n\n return rs\n\n\ndef get_date(data):\n return [datetime.datetime.strptime(x.get('Date'), \"%Y-%m-%d\").date() for x in data]\n\n\ndef max_spread(data):\n return max(float(row['High']) - float(row['Low']) for row in data)\n","sub_path":"python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"340984305","text":"#Definicion de variables\nprint(\"Ejercicio 18\")\n#Datos de entrada\nbono=0\nsueldo= 1800\nprint(\"\\t.:Opciones para el bono:.\")\nprint(\"1. Digite esta opcion si Ud. tiene mas de 4 años de servicio\")\nprint(\"2. Digite esta opcion si Ud. tiene menos o igual a 4 años de servicio\")\nprint(\"3. Digite esta opcion si Ud. percibe un salario menor a 2000\")\nprint(\"4. Digite esta opcion si Ud. percibe un ralario mayor a 2000\")\nopcion = int(input(\"Digite una opcion: \"))\nprint()\nif opcion==1:\n extra = float(input(\"Cuantos años de servicio tiene? → \"))\n sueldobono=1800 * 0.25\n if extra >4:\n bono = sueldobono\n print(\"Su bono sera de:\", bono)\nelif opcion==2:\n extra = float(input(\"Cuantos años de servicio tiene? → \"))\n if extra <= 4:\n sueldobono= 1800 * 0.20\n bono = sueldobono\n print(\"Su bono sera de:\", bono)\nelif opcion==3:\n extra = float(input(\"Cuanto de salario percibe? → \"))\n if extra < 2000:\n bono= extra * 0.25\n print(\"Su bono sera de:\",bono)\nelif opcion==3:\n extra = float(input(\"Cual es su salario que percibe? 
→ \"))\n if extra >=2000:\n bono= extra * 0.20\n print(\"Su bono sera de:\",bono)\n\n","sub_path":"ejercicio18.py","file_name":"ejercicio18.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"644442868","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport django.core.validators\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('full_name', models.TextField(max_length=80)),\n ('phone_number', models.CharField(validators=[django.core.validators.RegexValidator(regex='^(\\\\+\\\\d{1,2}\\\\s)?\\\\(?\\\\d{3}\\\\)?[\\\\s.-]\\\\d{3}[\\\\s.-]\\\\d{4}$', message='Invalid phone number')], max_length=100)),\n ('company_name', models.CharField(max_length=100)),\n ('department_name', models.CharField(max_length=100)),\n ('occupation_name', models.CharField(max_length=100)),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"rest_datagrid/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"649661960","text":"#Function to process input data\ndef process(a,b):\n\n #a and b must be integers\n a,b = int(a),int(b)\n \n #Loop that repeats from a to b and each time increases +1\n #This variable stores sumed numbers\n number=0\n while a<=b:\n\n #Check if the number is odd\n if a%2==1:\n number = number + a\n #Increase (a) variable +1 each time, otherwise the loop will never end!\n a=a+1\n \n return number\n\n\n#Get data from client\na=input(\"Enter a integer: \")\nb=input(\"Enter b integer: \")\n#Call the process function with given numbers and store return value in result variable\nresult=process(a,b)\n#Print result\nprint(result)\n","sub_path":"python-village/ConditionsAndLoops.py","file_name":"ConditionsAndLoops.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85945707","text":"from sqlalchemy import (create_engine, ForeignKey, Column,\n Integer, String, DateTime, Float)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.exc import IntegrityError\nfrom typing import Union\nfrom pathlib import Path\nfrom uuid import uuid4\nfrom shutil import copyfile\nfrom flightdata import Flight\nimport io\nfrom streamlit.uploaded_file_manager import UploadedFile\nimport os\nfrom geometry import GPSPosition\nfrom flightanalysis.flightline import Box\n\nBase = declarative_base()\n\nclass Log(Base):\n __tablename__ = \"log\"\n rootfolder = Path(\"data/private_logs\")\n id = Column(Integer, primary_key=True)\n added = Column(DateTime, server_default=func.now())\n filesize = Column(Integer)\n stick_name = Column(String)\n bin_file = Column(String)\n csv_file = Column(String)\n sequence_id = Column(Integer, ForeignKey('sequence.id'))\n sequence = relationship(\"Sequence\")\n boxreg_id = 
Column(Integer, ForeignKey('boxreg.id'))\n boxreg = relationship(\"BoxReg\")\n start_index = Column(Integer)\n end_index = Column(Integer)\n\n @staticmethod\n def register_bin(bin_file: Union[str, Path, UploadedFile]):\n if isinstance(bin_file, (str, Path)): # or isinstance(bin_file, Path):\n return Log._register_bin_path(bin_file)\n elif isinstance(bin_file, UploadedFile):\n return Log._register_bin_uploaded(bin_file)\n\n @staticmethod\n def _register_bin_uploaded(file: UploadedFile):\n new_name = str(uuid4())\n binpath = Log.rootfolder / '{}.BIN'.format(new_name)\n with io.open(binpath, 'wb') as f:\n f.write(file.read())\n return Log(\n filesize=file.size,\n stick_name=Path(file.name).name,\n bin_file=str(binpath),\n csv_file=str(Log.rootfolder / '{}.csv'.format(new_name))\n )\n\n @staticmethod\n def _register_bin_path(file: Union[str, Path]):\n if isinstance(file, str):\n file = Path(file)\n\n new_name = str(uuid4())\n return Log(\n filesize=file.stat().st_size,\n stick_name=file.name,\n bin_file=str(copyfile(file, Log.rootfolder /\n '{}.BIN'.format(new_name))),\n csv_file=str(Log.rootfolder / '{}.csv'.format(new_name))\n )\n\n def flight(self):\n if os.path.exists(self.csv_file):\n return Flight.from_csv(self.csv_file)\n else:\n flight = Flight.from_log(str(self.bin_file))\n flight.to_csv(self.csv_file)\n return flight\n\n\nclass Manoeuvre(Base):\n __tablename__ = \"manoeuvre\"\n id = Column(Integer, primary_key=True)\n name = Column(String)\n k_factor = Column(Integer)\n sequence_id = Column(Integer, ForeignKey('sequence.id'))\n sequence = relationship(\"Sequence\")\n\nclass Sequence(Base):\n __tablename__ = \"sequence\"\n id = Column(Integer, primary_key=True)\n name = Column(String)\n logs = relationship(\"Log\", back_populates=\"sequence\")\n manoeuvres = relationship(\"Manoeuvre\", back_populates=\"sequence\")\n\n @staticmethod\n def get_or_create(sess, name: str):\n seq = sess.query(Sequence).filter(Sequence.name == name).first()\n if seq is None:\n seq = Sequence(name=name)\n sess.add(seq)\n sess.commit()\n return seq\n\n\nclass BoxReg(Base):\n __tablename__ = \"boxreg\"\n id = Column(Integer, primary_key=True)\n name = Column(String, unique=True)\n club = Column(String)\n country = Column(String)\n pilot_lat = Column(Float)\n pilot_long = Column(Float)\n pilot_heading = Column(Float)\n logs = relationship(\"Log\", back_populates=\"boxreg\")\n\n @staticmethod\n def from_box(sess, box: Box):\n fl = sess.query(BoxReg).filter(\n BoxReg.club == box.club,\n BoxReg.country == box.country,\n BoxReg.name == box.name,\n BoxReg.pilot_lat == box.pilot_position.latitude,\n BoxReg.pilot_long == box.pilot_position.longitude,\n BoxReg.pilot_heading == box.heading\n ).first()\n \n if fl is None:\n try:\n fl = BoxReg(\n name=box.name,\n club=box.club,\n country=box.country,\n pilot_lat=box.pilot_position.latitude,\n pilot_long=box.pilot_position.longitude,\n pilot_heading=box.heading\n )\n sess.add(fl)\n sess.commit()\n except IntegrityError:\n sess.rollback()\n raise Exception(\"club name already exists\")\n return fl\n\n @property\n def box(self) -> Box:\n return Box(\n self.name,\n GPSPosition(self.pilot_lat, self.pilot_long),\n self.pilot_heading, self.club, self.country)\n\n\ndef create_db(path=\"sqlite:///data/private_logs/register.db\"):\n Session = sessionmaker()\n engine = create_engine(path)\n Base.metadata.create_all(engine)\n Session.configure(bind=engine)\n return engine, 
Session\n","sub_path":"pyflightcoach/log_register/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"639087144","text":"from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nimport pandas as pd\nimport numpy as np\nimport json\nimport ast\nfrom sklearn.model_selection import train_test_split\n\nfrom utils import Indexer\n\nfrom tqdm import tqdm\n\n\ndef args_parser():\n\tparser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n\tparser.add_argument('--links', required=False, default='data/links.csv',\n help='movieId mapping')\n\tparser.add_argument('--metadata', required=False, default='data/movies_metadata.csv',\n help='movie metadata file')\n\tparser.add_argument('--credit', required=False, default='data/credits.csv',\n help='credits file')\n\tparser.add_argument('--rating', required=False, default='data/ratings.csv',\n help='ratings file')\n\targs = parser.parse_args()\n\treturn args\n\ndef movieIdIndexing(args):\n\t\"\"\"\n\tbuild two mappings from the original tmdbId/movieId in links.csv\n\tto range(movies.size)\n\t\"\"\"\n\tlinks = pd.read_csv(args.links)\n\tmovieIds = links['movieId']\n\tmvid2mid = dict(zip(movieIds, range(movieIds.size)))\n\tlinks['newId'] = links['movieId'].map(mvid2mid)\n\ttmdbIds = links['tmdbId']\n\ttmid2mid = dict(zip(tmdbIds, links['newId']))\n\tprint(\"num_of_movies: %d\"%(movieIds.size))\n\treturn mvid2mid, tmid2mid, movieIds.size\n\ndef readMovieMetadata(args, tmid2mid):\n\tdf = pd.read_csv(args.metadata, usecols=['genres', 'id', 'overview', 'title'])\n\t# remove rows with invalid Ids\n\tdf.drop(df[df.id.apply(lambda x: not x.isnumeric())].index, inplace=True)\n\tdf = df.astype({'id': 'int32'})\n\tdf = df.rename(columns={'id': 'tmdbId'})\n\t# insert a column named 'id'\n\tdf.insert(len(df.columns), 'mId', [tmid2mid[x] for x in df['tmdbId']])\n\n\treturn df\n\ndef readCreditData(args, tmid2mid): # Redundant code w.r.t. readMovieMetaData\n\tdf = pd.read_csv(args.credit).astype({'id': 'str'})\n\tdf.drop(df[df.id.apply(lambda x: not x.isnumeric())].index, inplace=True)\n\tdf = df.astype({'id': 'int32'})\n\tdf = df.rename(columns={'id': 'tmdbId'})\n\t# insert a column named 'id'\n\tdf.insert(len(df.columns), 'mId', [tmid2mid[x] for x in df['tmdbId']])\n\n\treturn df\n\ndef readRatingData(args, mvid2mid, id_base): # Redundant code w.r.t. 
readMovieMetaData\n\tdf = pd.read_csv(args.rating)\n\tdf.drop(['timestamp'], axis=1, inplace=True)\n\t# df.drop(df[df.id.apply(lambda x: not x.isnumeric())].index, inplace=True)\n\tdf = df.astype({'movieId': 'int32', 'userId': 'int32', 'rating': 'float32'})\n\t# insert a column named 'id'\n\tdf.insert(len(df.columns), 'mId', [mvid2mid[x] for x in df['movieId']])\n\tdf.drop(['movieId'], axis=1, inplace=True)\n\n\t# re-index the users\n\tuser_values = df.userId.unique()\n\tnum_users = len(user_values)\n\tuser2uId = dict(zip(user_values, range(id_base, id_base+num_users)))\n\tdf['uId'] = df['userId'].map(user2uId)\n\tdf.drop(['userId'], axis=1, inplace=True)\n\n\t# add binary scores\n\tdf['binary'] = (df['rating'] > 3.5).astype(int)\n\n\treturn df, user2uId, num_users\n\nif __name__ == \"__main__\":\n\targs = args_parser()\n\n\t''' get movie id mappings from links.csv\n\tmvid2mid: mapping from 'movieId' to range(45843)\n\ttmid2mid: mapping from 'tmdbId' to range(45843)\n\tnum_movies = 45843 (all movies in links.csv)\n\n\tNote that in links.csv, there are missing values of tmdbId\n\t'''\n\tmvid2mid, tmid2mid, num_movies = movieIdIndexing(args) # num_movies=45843\n\tid_base = num_movies\n\n\t''' read metadata from movies_metadata.csv\n\tOnly 45463 movies have valid metadata.\n\t'''\n\tmovies = readMovieMetadata(args, tmid2mid)\n\tprint(\"movies.shape %s\"%(str(movies.shape)))\n\n\t''' create overviews.csv\n\tcontains a header line and 45463 data lines,\n\teach line includes a mId and its overview (some sentences).\n\t'''\n\tmovies.to_csv(\"processed_data/overviews.csv\", columns=['mId', 'overview'], index=False)\n\tmovies.to_csv(\"processed_data/mId2Title.csv\", columns=['mId', 'tmdbId', 'title'], index=False)\n\n\t''' create genres\n\tmId2Genre: 45463 lines, each line includes (mId, num of genres, gIds)\n\tGenre2Id: 20 lines, each line includes (gId, genre name)\n\tgId ranges from 45843 to 45862\n\t'''\n\tf = open(\"processed_data/mId2Genre.txt\", \"w\")\n\tgenreIdx = Indexer()\n\tfor idx, row in movies.iterrows():\n\t\tmId, raw_genres = row['mId'], row['genres']\n\t\traw_genres = raw_genres.replace(\"\\'\", \"\\\"\")\n\t\tgenres_l = json.loads(raw_genres)\n\t\tf.write(\"%d %d\"%(mId, len(genres_l)))\n\t\tfor g in genres_l:\n\t\t\tf.write(\" %d\"%(genreIdx.add_and_get_index(g['name']) + id_base))\n\t\tf.write(\"\\n\")\n\tf.close()\n\n\tf = open(\"processed_data/Genre2Id.txt\", \"w\")\n\tnum_genres = len(genreIdx)\n\tfor i in range(num_genres):\n\t\tf.write(\"%d %s\\n\"%(i + id_base, genreIdx.get_object(i)))\n\tf.close()\n\tid_base += num_genres\n\n\t''' create credits\n\tmId2CC.txt: 45476 lines\n\teach line includes (mId, num of crew/casts, cIds)\n\t'''\n\tcredits = readCreditData(args, tmid2mid)\n\tprint(\"credits.shape %s\"%(str(credits.shape)))\n\tcIdx = Indexer()\n\tf = open(\"processed_data/mId2CC.txt\", \"w\")\n\tfor idx, row in credits.iterrows():\n\t\tmId, raw_cast, raw_crew = row['mId'], row['cast'], row['crew']\n\t\tcast_l = ast.literal_eval(raw_cast)\n\t\tcrew_l = ast.literal_eval(raw_crew)\n\t\tattr = []\n\t\tfor c in crew_l:\n\t\t\tif c['job'].lower() == \"director\":\n\t\t\t\tattr.append(cIdx.add_and_get_index(c['name']) + id_base)\n\t\tfor c in cast_l:\n\t\t\tif int(c['order']) < min(8, len(cast_l)):\n\t\t\t\tattr.append(cIdx.add_and_get_index(c['name']) + id_base)\n\t\tf.write(\"%d %d\"%(mId, len(attr)))\n\t\tfor att in attr:\n\t\t\tf.write(\" %d\"%(att))\n\t\tf.write(\"\\n\")\n\tf.close()\n\tnum_cast = len(cIdx)\n\tprint(\"num of cast/crews: %d\"%(num_cast))\n\tid_base += 
num_cast\n\n\t''' create ratings\n\ttrain.csv: training data, each line includes \n\ttest.csv: test data, each line includes \n\t'''\n\tratings, user2uId, num_users = readRatingData(args, mvid2mid, id_base)\n\tX_train, X_test, y_train, y_test = train_test_split(ratings[['uId', 'mId']], ratings[['binary', 'rating']], train_size=0.9)\n\ttrain = pd.concat([X_train, y_train], axis=1, sort=False)\n\ttest = pd.concat([X_test, y_test], axis=1, sort=False)\n\ttrain.to_csv(\"processed_data/rating_train.csv\", columns=['uId', 'mId', 'binary', 'rating'], index=False)\n\ttest.to_csv(\"processed_data/rating_test.csv\", columns=['uId', 'mId', 'binary', 'rating'], index=False)\n\t\n\tprint(\"Finished: \\nnum_movies %d \\nnum_genres %d \\nnum_cast %d \\nnum_users %d \\n--- \\ntotal %d\"%(num_movies, num_genres, num_cast, num_users, id_base + num_users))\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"239903546","text":"import os\nimport json\nimport datetime\nimport time\n\nimport sys\nsys.path.insert(0, \"..\")\nimport logging\nimport random\nfrom opcua import ua, Server, uamethod\n\nimport EpeverChargeController as cc\nimport relayBox as rb\nimport currentMonitor\nimport sensors\n\nDUMMY_DATA = False\nOPC_ENDPOINT = \"opc.tcp://0.0.0.0:4840/freeopcua/server/\"\nOPC_NAMESPACE = \"http://examples.freeopcua.github.io\"\n\n'''\nusername = os.environ['MY_USER']\npassword = os.environ['MY_PASS']\ndocker run -e MY_USER=test -e MY_PASS=12345 ... ...\n'''\n\nif 'DELAY' in os.environ:\n DELAY = os.environ['DELAY']\nelse:\n DELAY = 1.0\n\n#RELAY_STATE = [False, False, False, False]\n\n######### da spostare in neogpio.py ####################\n\nINPUT = 'in'\nOUTPUT = 'out'\nHIGH = '1'\nLOW = '0'\nPATH_GPIO = '/sys/class/gpio/'\n\ndef exportGpio( gpio ):\n try:\n f = open(PATH_GPIO + 'export' ,'w')\n f.write(str(gpio))\n f.flush()\n f.close()\n except Exception as e:\n print(\"Error exporting gpio \" + str(gpio) )\n\n return 0\n\n\ndef setDirection( gpio, direction ):\n try:\n f = open(PATH_GPIO + 'gpio' + str(gpio) + '/direction' ,'w')\n f.write( str(direction) )\n f.flush()\n f.close()\n except Exception as e:\n print(\"Error setting gpio direction \" + str(gpio) )\n\n return 0\n\n\ndef readValue( gpio ):\n value = None\n try:\n f = open(PATH_GPIO + 'gpio' + str(gpio) + '/value' ,'r')\n value = f.read( )\n f.close()\n except Exception as e:\n print(\"Error reading gpio value \" + str(gpio) )\n\n return value\n\ndef setValue( gpio, value ):\n try:\n f = open(PATH_GPIO + 'gpio' + str(gpio) + '/value' ,'w')\n f.write( str(value) )\n f.flush()\n f.close()\n except Exception as e:\n print(\"Error setting gpio value \" + str(gpio) )\n\n return 0\n######################################################\n\n\nLED_13_GPIO_NUMBER = 102\n\ndef initializeLed13():\n exportGpio( LED_13_GPIO_NUMBER )\n setDirection( LED_13_GPIO_NUMBER , OUTPUT )\n\ndef turnOnLed():\n setValue(LED_13_GPIO_NUMBER , HIGH)\n\ndef turnOffLed():\n setValue(LED_13_GPIO_NUMBER , LOW)\n\ndef blinkLed( howLong, howMany):\n for i in range(0, howMany):\n turnOnLed()\n time.sleep(howLong)\n turnOffLed()\n time.sleep(howLong)\n\n#####################################################\n\ndef calibrateCurrentSensors():\n try:\n currentMonitor.calculateCurrentBias( currentMonitor.PLUG_1 )\n currentMonitor.calculateCurrentBias( currentMonitor.PLUG_2 )\n currentMonitor.calculateCurrentBias( 
currentMonitor.INVERTER )\n except Exception as e:\n print( e )\n\n\n\n\n\n\n\n\ndef init():\n initializeLed13()\n # blinkLed(0.05, 4)\n\n calibrateCurrentSensors()\n # blinkLed(0.05, 4)\n\n\n\n\nrelay_box = None\n\n## method to be exposed through server\ndef set_plug_state(parent, args):\n ret = False\n\n plug = args.Value[0].Value\n state = args.Value[1].Value\n\n if plug in range(0,4):\n print(\"Plug: \" + str(plug) + \" to state: \" + str(state) )\n ret = relay_box.setRelayState(plug, state)\n else:\n print(\"PlugId is wrong, has to be integer in range 0 ... 3 . not \" + str(plug) )\n\n print(\"opc method answer: \" + str(ret))\n return [ua.Variant(ret, ua.VariantType.Boolean )]\n\n\n\n\n\n\ndef main():\n print('Gionji Solar Plant')\n\n global relay_box\n relay_box = rb.RelayBox()\n relay_box.add_relay('cane', 0, 25)\n init()\n\n # setup our server\n server = Server()\n server.set_endpoint( OPC_ENDPOINT )\n\n # setup our own namespace, not really necessary but should as spec\n server_namespace = OPC_NAMESPACE\n address_space = server.register_namespace(server_namespace)\n\n # get Objects node, this is where we should put our custom stuff\n objects_node = server.get_objects_node()\n\n # populating our address space\n ChargeControllerObject = objects_node.add_object(address_space, \"ChargeController\")\n RelayBoxObject = objects_node.add_object(address_space, \"RelayBox\")\n\n opc_variables = dict()\n\n panelVoltage = ChargeControllerObject.add_variable(address_space, \"panelVoltage\", 0.0)\n panelCurrent = ChargeControllerObject.add_variable(address_space, \"panelCurrent\", 0.0)\n batteryVoltage = ChargeControllerObject.add_variable(address_space, \"batteryVoltage\", 0.0)\n batteryCurrent = ChargeControllerObject.add_variable(address_space, \"batteryCurrent\", 0.0)\n loadVoltage = ChargeControllerObject.add_variable(address_space, \"loadVoltage\", 0.0)\n loadCurrent = ChargeControllerObject.add_variable(address_space, \"loadCurrent\", 0.0)\n inPower = ChargeControllerObject.add_variable(address_space, \"inPower\", 0.0)\n outPower = ChargeControllerObject.add_variable(address_space, \"outPower\", 0.0)\n batteryStatus = ChargeControllerObject.add_variable(address_space, \"batteryStatus\", \"\")\n batteryCapacity = ChargeControllerObject.add_variable(address_space, \"batteryCapacity\", 0.0)\n batteryTemperature = ChargeControllerObject.add_variable(address_space, \"batteryTemperature\", 0.0)\n\n plug1Current = RelayBoxObject.add_variable(address_space, \"plug_1_current\", 0.0)\n plug2Current = RelayBoxObject.add_variable(address_space, \"plug_2_current\", 0.0)\n inverterCurrent = RelayBoxObject.add_variable(address_space, \"inverter_current\", 0.0)\n\n irradiation = ChargeControllerObject.add_variable(address_space, \"irradiation\", 0.0)\n\n\n\n inverter_control_node = RelayBoxObject.add_method( address_space,\n \"set_plug_state\",\n set_plug_state,\n [ ua.VariantType.Int32, ua.VariantType.Boolean ],\n [ ua.VariantType.Boolean, ua.VariantType.Boolean,ua.VariantType.Boolean,ua.VariantType.Boolean ]\n )\n\n # starting!\n server.start()\n print(\"Server starting ...\")\n\n # creating my machinery objects\n chargeController = cc.EpeverChargeController(produce_dummy_data=DUMMY_DATA)\n\n while(True):\n\n print( relay_box.get_relays() )\n\n data = dict()\n\n ## Read data from hardware machines\n try:\n data = chargeController.readAllData()\n panelVoltage.set_value(data['panelVoltage'])\n panelCurrent.set_value(data['panelCurrent'])\n batteryVoltage.set_value(data['batteryVoltage'])\n 
batteryCurrent.set_value(data['batteryCurrent'])\n loadVoltage.set_value(data['loadVoltage'])\n loadCurrent.set_value(data['loadCurrent'])\n inPower.set_value(data['inPower'])\n outPower.set_value(data['outPower'])\n # batteryStatus.set_value(data['batteryStatus'])\n # batteryCapacity.set_value(data['batteryCapacity'])\n batteryTemperature.set_value(data['batteryTemperature'])\n except Exception as e:\n print( e )\n\n ## Read Irradiation data\n try:\n data['irradiation'] = sensors.getIrradiation()\n irradiation.set_value(data['irradiation'])\n except Exception as e:\n print( e )\n\n ## Read currents\n try:\n data['plug_1_current'] = currentMonitor.getCurrentPlug1()\n data['plug_2_current'] = currentMonitor.getCurrentPlug2()\n data['inverter_current'] = currentMonitor.getCurrentInverter()\n plug1Current.set_value(data['plug_1_current'])\n plug2Current.set_value(data['plug_2_current'])\n inverterCurrent.set_value(data['inverter_current'])\n except Exception as e:\n print( e )\n\n\n print( json.dumps(data) )\n\n #blinkLed(0.05, 2)\n\n time.sleep( DELAY )\n\n\n\n\nif __name__ == \"__main__\":\n #application.listen(8888)\n #tornado.ioloop.IOLoop.instance().start()\n main()\n","sub_path":"src/main_cc.py","file_name":"main_cc.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"427710972","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2019/11/13 下午4:27\n# @Author : Aries\n# @Site :\n# @File : test.py\n# @Software: PyCharm\nimport numpy as np\n\n\ndef main():\n\tarr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n\tmean1 = arr.mean(axis=0)\n\tmean2 = arr.mean(axis=1)\n\t# print(mean1)\n\t# print(mean2, mean2.shape)\n\t\n\t# 后缘维度 mean1的后缘维度是3 所以会在缺失维度上进行广播,扩展到4 ---> (4,3)\n\t# print(arr - mean1)\n\t\n\t# 后缘纬度不相等,另一个矩阵有一个维度为1,则会在纬度为1上进行广播,扩展到3 ---> (4,3)\n\t# print(mean2.reshape(4, 1))\n\t# print(arr - mean2.reshape(4, 1))\n\t# print(arr - mean2.reshape(4, 1))\n\t\n\t# print(mean2.reshape(1, 4))\n\t# print(arr - mean2.reshape(1, 4))\n\n\n\t# Traceback (most recent call last):\n\t# File \"/Users/houruixiang/python/tensorflow_nlp_master/broadcast/test.py\", line 30, in \n\t# main()\n\t# File \"/Users/houruixiang/python/tensorflow_nlp_master/broadcast/test.py\", line 26, in main\n\t# print(arr - mean2.reshape(1, 4))\n\t# ValueError: operands could not be broadcast together with shapes (4,3) (1,4)\n\t\n\t# a = np.array([[1],[2]])\n\t# print(a.shape)\n\t# print(arr - a)\n\t\n\t# Traceback (most recent call last):\n\t# File \"/Users/houruixiang/python/tensorflow_nlp_master/broadcast/test.py\", line 41, in \n\t# File \"/Users/houruixiang/python/tensorflow_nlp_master/broadcast/test.py\", line 37, in main\n\t# print(arr - a)\n\t# ValueError: operands could not be broadcast together with shapes (4,3) (2,1)\n\t\n\tarr1 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]])\n\tarr2 = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])\n\tprint(arr1.shape)\n\tprint(arr2.shape)\n\tprint(arr1-arr2)\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"broadcast/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"514916810","text":"# Sarsa control with Q-value function approximation\n# Policy evaluation: Q(s, a) <- Q(s, a) + 𝜶 * (R + 𝜸Q(s', a') - Q(s, a))\n# Policy improvement: epsilon-greedy exploration\n# 
Q-value function approximation: Two-layer perception (input layer and output layer only)\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom logging import getLogger\nfrom collections import defaultdict\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom src.base import BaseController\nfrom src.config import Config, ControllerType\n\nlogger = getLogger(__name__)\n\n\nclass SarsaControl(BaseController):\n def __init__(self, env, config: Config):\n super().__init__()\n self.env = env\n self.epsilon = config.controller.epsilon\n self.gamma = config.controller.gamma\n self.model = self.build_model()\n self.max_workers = config.controller.max_workers\n\n def build_training_set(self, buf):\n '''Sarsa evaluation\n\n Q(s, a) <- Q(s, a) + 𝜶 * (R + 𝜸Q(s', a') - Q(s, a))\n\n Args:\n buf.states = [s1, s2, ..., sT-1]\n buf.actions = [a1, a2, ..., aT-1]\n buf.rewards = [r2, r3, ..., rT]\n\n Return:\n (inputs, targets): \n inputs is a state list; \n targets contains lists of action-values for each state in inputs\n '''\n Q_ = dict()\n history = [(s, a) for s, a in zip(buf.states, buf.actions)]\n his1 = history.copy()\n his2 = history\n del(his2[0])\n his2.append((0, 0))\n inputs = np.zeros((len(buf.rewards), ) +\n self.env.observation_space.shape)\n targets = np.zeros((len(buf.rewards), self.env.action_space.n))\n\n for i, ((s, a), r, (s_, a_)) in enumerate(zip(his1, buf.rewards, his2)):\n inputs[i] = np.array(s)\n targets[i] = self.model.predict(np.expand_dims(inputs[i], axis=0))\n if i + 1 == len(buf.rewards):\n targets[i, a] = r\n else:\n if tuple(s_) not in Q_:\n Q_[tuple(s_)] = self.model.predict(\n np.expand_dims(s_, axis=0))[0]\n targets[i, a] = r + self.gamma * Q_[tuple(s_)][a_]\n return inputs, targets\n","sub_path":"src/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"581530051","text":"##################\n## \n## this will run forever\n##\n##################\n\nimport time, os\nfrom datetime import date\n\nnow = date.today()\ndatum = now.strftime(\"%d. 
%B %Y\")\nuhr = time.strftime(\"%H:%M:%S\")\n\nstart = time.clock()\n## auszuführender programblock ab hier ##\n\na, b = 10, 100\nwhile b < 1100:\n print(b)\n a, b = b, a + b\n\n## ende progam ##\nende = time.clock()\n\nzeit_messung = (\"- Die Funktion lief \"\n \"{0:1.2f} Sekunden\".format(ende - start))\n\t\t\t\nzeit_druck = str(datum + \" \" + uhr + \" \" + zeit_messung + \"\\n\")\nzeit_ausgabe = open(\"zeitmessung_neu.txt\", \"a\")\nzeit_ausgabe.write(zeit_druck)\nzeit_ausgabe.close()\n","sub_path":"fuckyou14072013.py","file_name":"fuckyou14072013.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34706621","text":"from soa import devices, signalprocessing, analyse, distort_tf\nfrom soa.optimisation import PSO, run_test\n\nimport numpy as np\nimport multiprocessing\nimport pickle\nfrom scipy import signal\nimport os\nimport matplotlib.pyplot as plt\n\n\n# set dir to save data\ndirectory = '../../data/'\n\n# init basic params\nnum_points_list = np.arange(120, 260, 20)\ntime_start = 0\ntime_stop = 20e-9\n\n# set PSO params\nn = 3 \niter_max = 150\nrep_max = 1 \nmax_v_f = 0.05 \ninit_v_f = max_v_f \ncost_f = 'mSE' \nw_init = 0.9\nw_final = 0.5\non_suppress_f = 2.0\n\n\n# initial transfer function numerator and denominator coefficients\nnum = [2.01199757841099e85]\nden = [\n 1.64898505756825e0,\n 4.56217233166632e10,\n 3.04864287973918e21,\n 4.76302109455371e31,\n 1.70110870487715e42,\n 1.36694076792557e52,\n 2.81558045148153e62,\n 9.16930673102975e71,\n 1.68628748250276e81,\n 2.40236028415562e90,\n]\ntf = signal.TransferFunction(num, den)\n\n# run PSO tests in parallel with multiprocessing\npso_objs = multiprocessing.Manager().list()\njobs = []\nfor num_points in num_points_list:\n # make directory for this test\n direc = directory + '/num_points_{}'.format(num_points)\n if os.path.exists(direc) == False:\n os.mkdir(direc)\n\n # basic params\n t = np.linspace(time_start, time_stop, num_points)\n\n # define initial drive signal\n init_OP = np.zeros(num_points) # initial drive signal (e.g. 
a step)\n init_OP[:int(0.25*num_points)],init_OP[int(0.25*num_points):] = -1, 0.5\n\n # get initial output of initial signal and use to generate a target set point\n init_PV = distort_tf.getTransferFunctionOutput(tf,init_OP,t)\n sp = analyse.ResponseMeasurements(init_PV, t).sp.sp\n\n p = multiprocessing.Process(target=run_test, \n args=(direc, \n tf, \n t, \n init_OP, \n n, \n iter_max, \n rep_max, \n init_v_f, \n max_v_f, \n w_init, \n w_final, \n True, \n 'pisic_shape', \n on_suppress_f, \n True, \n None, \n cost_f, \n None, \n True, \n True,\n sp, \n pso_objs,))\n\n jobs.append(p)\n p.start()\nfor job in jobs:\n job.join()\n\n# pickle PSO objects so can re-load later if needed\nPIK = directory + '/pickle.dat'\ndata = pso_objs\nwith open(PIK, 'wb') as f:\n pickle.dump(data, f)\n\n","sub_path":"docs/scripts/tutorial_script_2.py","file_name":"tutorial_script_2.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"183288867","text":"from django import forms\nfrom .models import Leave\nimport datetime\n\nclass LeaveCreationForm(forms.ModelForm):\n\treason = forms.CharField(required=False)\n\tclass Meta:\n\t\tmodel = Leave\n\t\texclude = ['user','defaultdays','hrcomments','status','is_approved','updated','created']\n\n\tdef clean_enddate(self):\n\t\tenddate = self.cleaned_data['enddate']\n\t\tstartdate = self.cleaned_data['startdate']\n\t\ttoday_date = datetime.date.today()\n\n\t\tif (startdate or enddate) < today_date:\n\t\t\traise forms.ValidationError(\"Selected dates are incorrect,please select again\")\n\n\t\telif startdate >= enddate:\n\t\t\traise forms.ValidationError(\"Selected dates are wrong\")\n\n\t\treturn enddate\n\n\n\n\n\n","sub_path":"leave/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"148429891","text":"import uuid\n\nfrom django import template\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom paypal.standard.forms import PayPalPaymentsForm\n\nregister = template.Library()\n\n\ndef paypal_form_for(magazine, user):\n \"\"\"\n Handles creating a form when passed the user and\n magazine objects.\n \"\"\"\n if user.is_subscribed(magazine): # see accounts/models.py\n html = \"Subscribed!\"\n else:\n paypal_dict = {\n \"business\": settings.PAYPAL_RECEIVER_EMAIL,\n \"currency_code\": \"GBP\",\n \"cmd\": \"_xclick-subscriptions\",\n \"a3\": magazine.price,\n \"p3\": 1,\n \"t3\": \"M\",\n \"src\": 1,\n \"sra\": 1,\n \"item_name\": magazine.name,\n \"invoice\": uuid.uuid4,\n \"notify_url\": settings.PAYPAL_NOTIFY_URL,\n \"return_url\": \"%s%s\" % (settings.SITE_URL, reverse('paypal-return')),\n \"cancel_return\": \"%s%s\" % (settings.SITE_URL, reverse('paypal-cancel')),\n \"custom\": \"%s-%s\" % (magazine.pk, user.id) # magazines/signals methods use this.\n }\n\n # render the form\n if settings.DEBUG:\n html = PayPalPaymentsForm(initial=paypal_dict,button_type='subscribe').sandbox()\n else:\n html = PayPalPaymentsForm(initial=paypal_dict,button_type='subscribe').render()\n\n return html\n\n# Register this simple_tag (template tag) with the Django template system.\n# Using a simple_tag is important in this stage, as a normal filter can\n# only accept one argument, and in our case we want to pass in both the\n# user and the magazine as arguments.\n# called in the template like so:\n# E.g.\n# {% load magazine_extras %}\n# {% 
paypal_form_for magazine user %}\nregister.simple_tag(paypal_form_for)\n","sub_path":"we_are_social_inc_reusable_app/magazines/templatetags/magazine_extras.py","file_name":"magazine_extras.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"123562292","text":"from django.db import models\n\nclass Deal(models.Model):\n stock = models.CharField(max_length=50)\n price_deal = models.DecimalField(max_digits=10, decimal_places=2)\n volume_deal = models.IntegerField()\n sum_deal = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)\n date_deal = models.DateField()\n\n def summary(self):\n self.sum_deal = self.price_deal * self.volume_deal\n self.save()\n\n def __str__(self):\n return self.stock","sub_path":"deals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"395903435","text":"import os\nimport shutil\nimport logging\nimport asyncio\nfrom aiohttp import web\nfrom tempfile import mkstemp\nfrom celestial import normalize_mimetype, normalize_extension\n\nfrom convert.converter import FORMATS, PdfConverter\nfrom convert.converter import ConversionFailure\n\nMEGABYTE = 1024 * 1024\nBUFFER_SIZE = 8 * MEGABYTE\nMAX_UPLOAD = 800 * MEGABYTE\nlogging.basicConfig(level=logging.DEBUG)\nlogging.getLogger('aiohttp').setLevel(logging.WARNING)\nlog = logging.getLogger('convert')\nconverter = PdfConverter()\n\n\nasync def info(request):\n return web.Response(text=\"OK\")\n\n\nasync def convert(request):\n data = await request.post()\n upload = data['file']\n extension = normalize_extension(upload.filename)\n mime_type = normalize_mimetype(upload.content_type, default=None)\n log.info('PDF convert: %s [%s]', upload.filename, mime_type)\n fd, upload_file = mkstemp()\n os.close(fd)\n fd, out_file = mkstemp(suffix='.pdf')\n os.close(fd)\n\n try:\n with open(upload_file, 'wb') as fh:\n shutil.copyfileobj(upload.file, fh, BUFFER_SIZE)\n\n filters = list(FORMATS.get_filters(extension, mime_type))\n timeout = int(request.query.get('timeout', 300))\n\n await asyncio.sleep(0)\n converter.convert_file(upload_file, out_file, filters,\n timeout=timeout)\n out_size = os.path.getsize(out_file)\n if out_size == 0:\n raise ConversionFailure(\"Could not convert.\")\n await asyncio.sleep(0)\n\n response = web.StreamResponse()\n response.content_length = out_size\n response.content_type = 'application/pdf'\n await response.prepare(request)\n with open(out_file, 'rb') as f:\n while True:\n chunk = f.read(BUFFER_SIZE)\n if not chunk:\n break\n await response.write(chunk)\n return response\n except ConversionFailure as fail:\n log.info(\"Failed to convert: %s\", fail)\n return web.Response(text=str(fail), status=400)\n except Exception as exc:\n log.exception('System error: %s.', exc)\n converter.terminate()\n finally:\n os.remove(upload_file)\n os.remove(out_file)\n\n\napp = web.Application(client_max_size=MAX_UPLOAD)\napp.add_routes([web.get('/', info)])\napp.add_routes([web.post('/convert', convert)])\nweb.run_app(app, port=3000)\n","sub_path":"services/convert-document/convert/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"431443804","text":"'''\nWrite a Program that prints series of perfect numbers up to the\nentered limiting number.\nInput: 
100\nOutput: The Series of all perfect number from 1 to 10 is\n1, 6, 28\n\n'''\n\n\n\nn=int(input(\"Inputs:\"))\n\nprint(\"The series of perfect number from 1 to 100 is:\")\n\nfor i in range(1,n+1):\n\tsum=0\n\tfor x in range(1,i):\n\t\tif(1%x==0):\n\t\t\tsum=sum+x\n\tif(sum==i):\n\t\tprint(i)\n","sub_path":"Python/DailyFlash/31jan2020/MySolution/program5.py","file_name":"program5.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"273149854","text":"# Time Complexity : O(n), where n is the length of the list\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : N.A.\n# Any problem you faced while coding this : No\n\n# Your code here along with comments explaining your approach\nclass Node:\n # Function to initialise the node object \n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self): \n self.head = None\n\n # Adding to the head of the list.\n def push(self, new_data):\n toAdd = Node(new_data)\n toAdd.next = self.head\n self.head = toAdd\n\n # implemented using Floyd's slow and fast pointers approach, fast\n # pointer reaches the end of the list the slow is at the middle.\n def printMiddle(self):\n if self.head == None:\n print(\"The list is empty\")\n return\n\n slow = self.head\n fast = self.head\n\n while fast != None and fast.next != None:\n slow = slow.next\n fast = fast.next.next\n print(slow.data)\n\n# Driver code \nlist1 = LinkedList()\n# list1.push(6)\nlist1.push(5)\nlist1.push(4)\nlist1.push(3)\nlist1.push(2)\nlist1.push(1)\nlist1.printMiddle() \n","sub_path":"Exercise_3.py","file_name":"Exercise_3.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"50453098","text":"import timeit\n\nfrom flask import Flask\nfrom flask_socketio import SocketIO, emit\n\nfrom hand_detector import HandDetector\nfrom utils import decode_base64, substract_background, filter_small_boxes\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n@socketio.on(\"frame\")\ndef handle_frame(data):\n print(\"Got Frame\")\n start = timeit.default_timer()\n image = decode_base64(data['frame'])\n image = substract_background(img=image)\n boxes, scores = hand_detector.get_boxes(image, data[\"threshold\"])\n if len(boxes) > 0:\n boxes, scores = filter_small_boxes(boxes, scores, 0.2)\n print(f\"Found {len(boxes)} hands, with max score of {max(scores or [0])}\")\n emit(\"box\", {'boxes': boxes, 'scores': scores}) # Send the client the box to show\n\n print(f\"Finished processing frame in {timeit.default_timer() - start}sec\")\n\n\n@app.route('/', methods=['GET'])\ndef hello():\n return \"Welcome to ASLie\"\n\n\nif __name__ == '__main__':\n print(\"Starting ASLie...\")\n print(\"Loading hand detector...\")\n hand_detector = HandDetector()\n print(\"Hand detector loaded.\")\n print(\"ASLie ready :)\")\n socketio.run(app, host=\"0.0.0.0\", port=\"1607\")\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"296162844","text":"#!usr/bin/python\n# coding:utf-8\n\nfrom mp.test_model.models.my_unittest import *\nfrom mp.test_model.page_obj.menulist_page import *\nfrom mp.test_model.page_obj.source_manage_page import *\nimport sys\nfrom selenium.webdriver.common.keys import 
Keys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\nclass AddSinglePageTextTitleTest(MyTest):\n\n \"\"\"mp 素材管理之单图文中图片上传测试\"\"\"\n\n def add_single_page_text_image(self, casenumber):\n logger.info(\"开始执行测试用例:用例编号%s:mp登录后,进入素材管理的图文界面,进行单图文添加\" % casenumber)\n logger.info(\"开始登录操作...\")\n self.assertTrue(self.user_login_success())\n self.assertTrue(SourceManagePage(self.driver).get_new_single_page_text_page())\n self.assertTrue(SourceManagePage(self.driver).get_single_page_text_image_input(casenumber))\n logger.info(\" 正在获得用例期望值...\")\n expected_value = get_expected_value(casenumber)\n logger.info(\"正在获得截图标题...\")\n title = get_image_title(casenumber)\n logger.info(\"生成截图中...\")\n insert_img(self.driver, title)\n return expected_value\n\n def test_042_edit_single_title(self):\n \"\"\"用例编号042:mp登录后,进入素材管理的图文界面,添加单图文的图片(png)\"\"\"\n expected_value = self.add_single_page_text_image(\"042\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n actual_value = SourceManagePage(self.driver).get_mp_single_shown_image_src()\n logger.info(\"获得的测试用例实际值为: %s\" % actual_value)\n logger.info(\"测试判断:%s 是否不等于:%s\" % (actual_value, expected_value))\n self.assertNotEqual(actual_value, expected_value)\n\n def test_043_edit_single_title(self):\n \"\"\"用例编号043:mp登录后,进入素��管理的图文界面,添加单图文的图片(jpeg)\"\"\"\n expected_value = self.add_single_page_text_image(\"043\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n actual_value = SourceManagePage(self.driver).get_mp_single_shown_image_src()\n logger.info(\"获得的测试用例实际值为: %s\" % actual_value)\n logger.info(\"测试判断:%s 是否不等于:%s\" % (actual_value, expected_value))\n self.assertNotEqual(actual_value, expected_value)\n\n def test_044_edit_single_title(self):\n \"\"\"用例编号044:mp登录后,进入素材管理的图文界面,添加单图文的图片(jpeg)\"\"\"\n expected_value = self.add_single_page_text_image(\"044\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n actual_value = SourceManagePage(self.driver).get_mp_single_shown_image_src()\n logger.info(\"获得的测试用例实际值为: %s\" % actual_value)\n logger.info(\"测试判断:%s 是否不等于:%s\" % (actual_value, expected_value))\n self.assertNotEqual(actual_value, expected_value)\n\n def test_045_edit_single_title(self):\n \"\"\"用例编号045:mp登录后,进入素材管理的图文界面,添加单图文的图片(jpg)\"\"\"\n expected_value = self.add_single_page_text_image(\"045\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n actual_value = SourceManagePage(self.driver).get_mp_single_shown_image_src()\n logger.info(\"获得的测试用例实际值为: %s\" % actual_value)\n logger.info(\"测试判断:%s 是否不等于:%s\" % (actual_value, expected_value))\n self.assertNotEqual(actual_value, expected_value)\n\n def test_046_edit_single_title(self):\n \"\"\"用例编号046:mp登录后,进入素材管理的图文界面,添加单图文的图片(log)\"\"\"\n expected_value = self.add_single_page_text_image(\"046\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n error_hint = SourceManagePage(self.driver).get_mp_single_image_error_hint()\n logger.info((\"获得的测试用例实际值为: %s\" % error_hint))\n logger.info(\"测试判断:%s 是否等于:%s\" % (error_hint, expected_value))\n self.assertEqual(error_hint, expected_value)\n\n def test_047_edit_single_title(self):\n \"\"\"用例编号047:mp登录后,进入素材管理的图文界面,添加单图文的图片(大图片文件)\"\"\"\n expected_value = self.add_single_page_text_image(\"047\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n error_hint = SourceManagePage(self.driver).get_mp_single_image_size_error_hint()\n logger.info((\"获得的测试用例实际值为: %s\" % error_hint))\n logger.info(\"测试判断:%s 是否等于:%s\" % (error_hint, expected_value))\n self.assertEqual(error_hint, expected_value)\n\n def test_048_edit_single_title(self):\n 
\"\"\"用例编号048:mp登录后,进入素材管理的图文界面,添加单图文的图片(0kb jpg)\"\"\"\n expected_value = self.add_single_page_text_image(\"048\")\n logger.info(\"获得的测试用例期望值为: %s\" % expected_value)\n actual_value = SourceManagePage(self.driver).get_mp_single_shown_image_src()\n logger.info((\"获得的测试用例实际值为: %s\" % actual_value))\n logger.info(\"测试判断:%s 是否不等于:%s\" % (actual_value, expected_value))\n self.assertNotEqual(actual_value, expected_value)\n\n'''\n@author Mavis\n\n'''","sub_path":"mp/test_model/test_case/add_single_page_text_image_test.py","file_name":"add_single_page_text_image_test.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"121216098","text":"def func(n):\n a = -1\n for i in range(len(n)-2, -1, -1):\n if n[i] < n[i+1] :\n a = i\n break\n \n if a == -1 :\n return \"BIGGEST\"\n\n b = n.index(max(n[a+1:]))\n for i in range(len(n)-1, a+1, -1):\n if n[a] < n[i] < n[b] :\n b = i\n \n n = swap(n, a, b)\n n = n[:a+1] + ''.join(sorted(n[a+1:]))\n return n\n\ndef swap(c, i, j):\n c = list(c)\n c[i], c[j] = c[j], c[i]\n return ''.join(c)\n\ndef main():\n n = int(input())\n\n for _ in range(0, n):\n num = input()\n print(func(num))\n\nif __name__ == '__main__':\n main()","sub_path":"Algorithm_python/Baekjoon/2697.py","file_name":"2697.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"609781113","text":"import time\nimport threading\n\n\ndef loop1(in1):\n # ctime得到当前时间\n try:\n print('Start loop l at:',time.ctime())\n # 把参数打印\n print(\"我是参数\",in1)\n #睡眠时间,单位���\n time.sleep(10)\n print('End loop l at:',time.ctime())\n except:\n print(\"eeeeee\")\n\ndef loop2(in1,in2):\n # ctime得到当前时间\n try:\n print('Start loop 2 at:',time.ctime())\n # 把参数打印\n print(\"我是参数\",in1,\"和参数\",in2)\n #睡眠时间,单位秒\n time.sleep(2)\n print('End loop 2 at:',time.ctime())\n except:\n print(\"eeeeee\")\n\ndef loop3(in1,in2):\n # ctime得到当前时间\n try:\n print('Start loop 2 at:',time.ctime())\n # 把参数打印\n print(\"我是参数\",in1,\"和参数\",in2)\n #睡眠时间,单位秒\n time.sleep(10)\n print('End loop 2 at:',time.ctime())\n except:\n print(\"eeeeee\")\ndef mian():\n print(\"Start at:\",time.ctime())\n\n t1 = threading.Thread(target=loop1, args=(\"panda\",))\n t1.start()\n t1.setName(\"TH1-1\")\n\n t2 = threading.Thread(target=loop2, args=(\"盼达\",\"zeilin\"))\n t2.start()\n t2.setName(\"TH2-2\")\n\n t3 = threading.Thread(target=loop3, args=(\"盼达\",\"zeilin\"))\n t3.start()\n t3.setName(\"TH3-3\")\n\n# 预期3秒后thread2已经结束\n time.sleep(5)\n#enumerate 得到正在运行的子线程\n for thr in threading.enumerate():\n # getname能够得到线程的名字\n print(' 正在运行线程的名字是:{}'.format(thr.getName()))\n print(\"正在运行的线程数量为:{}\".format(threading.activeCount()))\n print(\"ALL DONE AT\",time.ctime())\n\n\nif __name__ == '__main__':\n mian()\n\n while True:\n time.sleep(1)","sub_path":"python/senior/py-24-duoxianc/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"369911498","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\nfrom . import test_utils\nfrom edgetpu.classification.engine import ClassificationEngine\nfrom edgetpu.learn.imprinting.engine import ImprintingEngine\nfrom PIL import Image\n\n\nclass TestImprintingEnginePythonAPI(unittest.TestCase):\n\n _EXTRACTOR_LIST = [\n 'imprinting/mobilenet_v1_1.0_224_quant_embedding_extractor.tflite',\n 'imprinting/mobilenet_v1_1.0_224_quant_embedding_extractor_edgetpu.tflite'\n ]\n\n def _ClassifyImage(self, engine, data_dir, image_name, label_id, score):\n with Image.open(os.path.join(data_dir, image_name)) as img:\n ret = engine.ClassifyWithImage(img, top_k=1)\n self.assertEqual(len(ret), 1)\n self.assertEqual(ret[0][0], label_id)\n self.assertGreater(ret[0][1], score)\n\n def testTrainingFromScratch(self):\n for extractor in self._EXTRACTOR_LIST:\n with self.subTest():\n with tempfile.NamedTemporaryFile(suffix='.tflite') as output_model_path:\n # Train.\n engine = ImprintingEngine(test_utils.TestDataPath(extractor))\n shape = (224, 224)\n train_set = {\n 'cat': ['cat_train_0.bmp'],\n 'dog': ['dog_train_0.bmp'],\n 'hot_dog': ['hotdog_train_0.bmp', 'hotdog_train_1.bmp']\n }\n data_dir = test_utils.TestDataPath('imprinting')\n self.assertEqual(engine.Train(\n test_utils.PrepareImages(train_set['cat'], data_dir, shape)), 0)\n self.assertEqual(engine.Train(\n test_utils.PrepareImages(train_set['dog'], data_dir, shape)), 1)\n self.assertEqual(engine.Train(\n test_utils.PrepareImages(train_set['hot_dog'], data_dir, shape)), 2)\n engine.SaveModel(output_model_path.name)\n\n # Test.\n engine = ClassificationEngine(output_model_path.name)\n self.assertEqual(1, engine.get_num_of_output_tensors())\n self.assertEqual(3, engine.get_output_tensor_size(0))\n\n self._ClassifyImage(engine, data_dir, 'cat_test_0.bmp', 0, 0.38)\n self._ClassifyImage(engine, data_dir, 'dog_test_0.bmp', 1, 0.38)\n self._ClassifyImage(engine, data_dir, 'hotdog_test_0.bmp', 2, 0.38)\n\n def testIncrementalTraining(self):\n for extractor in [\n 'imprinting/retrained_mobilenet_v1_cat_only.tflite',\n 'imprinting/retrained_mobilenet_v1_cat_only_edgetpu.tflite']:\n with self.subTest():\n with tempfile.NamedTemporaryFile(suffix='.tflite') as output_model_path:\n # Train.\n engine = ImprintingEngine(test_utils.TestDataPath(extractor))\n shape = (224, 224)\n train_set = {\n 'dog': ['dog_train_0.bmp'],\n 'hot_dog': ['hotdog_train_0.bmp', 'hotdog_train_1.bmp']\n }\n data_dir = test_utils.TestDataPath('imprinting')\n self.assertEqual(engine.Train(\n test_utils.PrepareImages(train_set['dog'], data_dir, shape)), 1)\n self.assertEqual(engine.Train(\n test_utils.PrepareImages(train_set['hot_dog'], data_dir, shape)), 2)\n engine.SaveModel(output_model_path.name)\n\n # Test.\n engine = ClassificationEngine(output_model_path.name)\n self.assertEqual(1, engine.get_num_of_output_tensors())\n self.assertEqual(3, engine.get_output_tensor_size(0))\n\n self._ClassifyImage(engine, data_dir, 'cat_test_0.bmp', 0, 0.38)\n self._ClassifyImage(engine, data_dir, 'dog_test_0.bmp', 1, 0.38)\n self._ClassifyImage(engine, data_dir, 'hotdog_test_0.bmp', 2, 0.38)\n\n def testTrainAll(self):\n for extractor in self._EXTRACTOR_LIST:\n with self.subTest():\n with tempfile.NamedTemporaryFile(suffix='.tflite') as output_model_path:\n data_dir = test_utils.TestDataPath('imprinting')\n engine = 
ImprintingEngine(test_utils.TestDataPath(extractor))\n\n # Train.\n shape = (224, 224)\n train_set = {\n 'cat': ['cat_train_0.bmp'],\n 'dog': ['dog_train_0.bmp'],\n 'hot_dog': ['hotdog_train_0.bmp', 'hotdog_train_1.bmp']\n }\n train_input = {}\n for category, image_list in train_set.items():\n train_input[category] = test_utils.PrepareImages(\n image_list, data_dir, shape)\n id_to_label_map = engine.TrainAll(train_input)\n label_to_id_map = {v: k for k, v in id_to_label_map.items()}\n engine.SaveModel(output_model_path.name)\n\n # Test.\n engine = ClassificationEngine(output_model_path.name)\n self.assertEqual(1, engine.get_num_of_output_tensors())\n self.assertEqual(3, engine.get_output_tensor_size(0))\n\n self._ClassifyImage(\n engine, data_dir, 'cat_test_0.bmp', label_to_id_map['cat'], 0.38)\n self._ClassifyImage(\n engine, data_dir, 'dog_test_0.bmp', label_to_id_map['dog'], 0.38)\n self._ClassifyImage(\n engine, data_dir, 'hotdog_test_0.bmp', label_to_id_map['hot_dog'],\n 0.38)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/imprinting_engine_test.py","file_name":"imprinting_engine_test.py","file_ext":"py","file_size_in_byte":5655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"33110545","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom time import time\nfrom copy import deepcopy\nfrom ams import Topic\nfrom ams.nodes import EventLoop, User, Vehicle, TrafficSignal, SimTaxi\nfrom ams.messages import user_message, vehicle_message, traffic_signal_message, fleet_manager_message\n\n\nclass FleetManager(EventLoop):\n class ACTION(object):\n PUBLISH_RELATIONS = \"pub_relations\"\n\n class TOPIC(object):\n PUBLISH = \"pub_fleet_manager\"\n SUBSCRIBE = \"sub_fleet_manager\"\n\n def __init__(self, waypoint, arrow, route):\n super().__init__()\n\n self.topicUserPublish = Topic()\n self.topicUserPublish.set_root(User.TOPIC.PUBLISH)\n self.topicUserPublish.set_message(user_message)\n\n self.topicUserSubscribe = Topic()\n self.topicUserSubscribe.set_root(User.TOPIC.SUBSCRIBE)\n self.topicUserSubscribe.set_message(user_message)\n\n self.topicVehiclePublish = Topic()\n self.topicVehiclePublish.set_root(Vehicle.TOPIC.PUBLISH)\n self.topicVehiclePublish.set_message(vehicle_message)\n\n self.topicVehicleSubscribe = Topic()\n self.topicVehicleSubscribe.set_root(Vehicle.TOPIC.SUBSCRIBE)\n self.topicVehicleSubscribe.set_message(vehicle_message)\n\n self.topicTrafficSignalPublish = Topic()\n self.topicTrafficSignalPublish.set_root(TrafficSignal.TOPIC.PUBLISH)\n self.topicTrafficSignalPublish.set_message(traffic_signal_message)\n\n self.topicFleetManagerPublish = Topic()\n self.topicFleetManagerPublish.set_id(self.event_loop_id)\n self.topicFleetManagerPublish.set_root(FleetManager.TOPIC.PUBLISH)\n self.topicFleetManagerPublish.set_message(fleet_manager_message)\n\n self.topicFleetManagerSubscribe = Topic()\n self.topicFleetManagerSubscribe.set_id(self.event_loop_id)\n self.topicFleetManagerSubscribe.set_root(FleetManager.TOPIC.SUBSCRIBE)\n self.topicFleetManagerSubscribe.set_message(fleet_manager_message)\n\n self.waypoint = waypoint\n self.arrow = arrow\n self.route = route\n\n self.users = {}\n self.vehicles = {}\n self.traffic_signals = {}\n self.relations = {} # vehicle_id -> user_id, user_id -> vehicle_id\n\n self.add_on_message_function(self.update_user_status)\n self.add_on_message_function(self.update_vehicle_status)\n self.add_on_message_function(self.update_traffic_signal_status)\n 
self.add_on_message_function(self.response_request)\n\n self.set_subscriber(self.topicUserPublish.all)\n self.set_subscriber(self.topicVehiclePublish.all)\n self.set_subscriber(self.topicTrafficSignalPublish.all)\n self.set_subscriber(self.topicFleetManagerSubscribe.all)\n\n def update_user_status(self, _client, _userdata, topic, payload):\n # print(\"update_user_status\", topic)\n if self.topicUserPublish.root in topic:\n user_id = self.topicUserPublish.get_id(topic)\n message = self.topicUserPublish.unserialize(payload)\n\n if message[\"state\"] == User.STATE.LOGIN:\n # print(\"user\", User.STATE.LOGIN)\n # todo: move to dispatcher class\n self.dispatch(user_id, message)\n elif message[\"state\"] == User.STATE.WAITING:\n pass\n # print(\"user\", User.STATE.WAITING)\n elif message[\"state\"] == User.STATE.GETTING_ON:\n pass\n # print(\"user\", User.STATE.GETTING_ON)\n elif message[\"state\"] == User.STATE.GOT_ON:\n # print(\"user\", User.STATE.GOT_ON)\n vehicle_id = self.relations[user_id]\n self.vehicles[vehicle_id][\"schedules\"][0][\"action\"] = Vehicle.ACTION.MOVE\n payload = self.topicVehicleSubscribe.serialize({\"schedules\": self.vehicles[vehicle_id][\"schedules\"]})\n self.publish(self.topicVehicleSubscribe.root + \"/\" + vehicle_id + \"/schedules\", payload)\n elif message[\"state\"] == User.STATE.MOVING:\n pass\n # print(\"user\", User.STATE.MOVING)\n elif message[\"state\"] == User.STATE.GETTING_OUT:\n pass\n # print(\"user\", User.STATE.GETTING_OUT)\n elif message[\"state\"] == User.STATE.GOT_OUT:\n pass\n # print(\"user\", User.STATE.GOT_OUT)\n else:\n print(\"user\", message[\"state\"])\n\n self.users[user_id] = message\n\n def update_vehicle_status(self, _client, _userdata, topic, payload):\n if self.topicVehiclePublish.root in topic:\n vehicle_id = self.topicVehiclePublish.get_id(topic)\n message = self.topicVehiclePublish.unserialize(payload)\n # print(\"update_vehicle_status\", topic, message[\"state\"])\n\n if vehicle_id in self.relations:\n prev_state = self.vehicles[vehicle_id][\"state\"]\n if message[\"state\"] == SimTaxi.STATE.MOVE_TO_USER:\n # print(\"vehicle\", SimTaxi.STATE.MOVE_TO_USER)\n if prev_state == SimTaxi.STATE.STANDBY:\n user_id = self.relations[vehicle_id]\n self.users[user_id][\"schedules\"][0][\"action\"] = User.ACTION.WAIT\n self.publish(\n self.topicUserSubscribe.root+\"/\"+user_id+\"/schedules\",\n self.topicUserSubscribe.serialize({\"schedules\": self.users[user_id][\"schedules\"]}))\n elif message[\"state\"] == SimTaxi.STATE.STOP_FOR_PICKING_UP:\n # print(\"vehicle\", SimTaxi.STATE.STOP_FOR_PICKING_UP)\n if prev_state == SimTaxi.STATE.MOVE_TO_USER:\n # print(\"vehicle\", SimTaxi.STATE.STOP_FOR_PICKING_UP, SimTaxi.STATE.MOVE_TO_USER)\n user_id = self.relations[vehicle_id]\n self.users[user_id][\"schedules\"][0][\"action\"] = User.ACTION.GET_ON\n self.publish(\n self.topicUserSubscribe.root+\"/\"+user_id+\"/schedules\",\n self.topicUserSubscribe.serialize({\"schedules\": self.users[user_id][\"schedules\"]}))\n elif message[\"state\"] == SimTaxi.STATE.MOVE_TO_USER_DESTINATION:\n # print(\"vehicle\", SimTaxi.STATE.MOVE_TO_USER_DESTINATION)\n if prev_state == SimTaxi.STATE.STOP_FOR_PICKING_UP:\n # print(\"vehicle\", SimTaxi.STATE.MOVE_TO_USER_DESTINATION, SimTaxi.STATE.STOP_FOR_PICKING_UP)\n user_id = self.relations[vehicle_id]\n self.users[user_id][\"schedules\"][0][\"event\"] = User.EVENT.MOVE_VEHICLE\n self.publish(\n self.topicUserSubscribe.root+\"/\"+user_id+\"/event\",\n self.topicUserSubscribe.serialize({\"event\": User.EVENT.MOVE_VEHICLE}))\n 
elif message[\"state\"] == SimTaxi.STATE.STOP_FOR_DISCHARGING:\n # print(\"vehicle\", SimTaxi.STATE.STOP_FOR_DISCHARGING)\n if prev_state == SimTaxi.STATE.MOVE_TO_USER_DESTINATION:\n user_id = self.relations[vehicle_id]\n self.users[user_id][\"schedules\"][0][\"action\"] = User.ACTION.GET_OUT\n self.publish(\n self.topicUserSubscribe.root+\"/\"+user_id+\"/schedules\",\n self.topicUserSubscribe.serialize({\"schedules\": self.users[user_id][\"schedules\"]}))\n elif message[\"state\"] == SimTaxi.STATE.MOVE_TO_STANDBY:\n pass\n # print(\"vehicle\", SimTaxi.STATE.MOVE_TO_STANDBY)\n elif message[\"state\"] == SimTaxi.STATE.STANDBY:\n pass\n # print(\"vehicle\", SimTaxi.STATE.STANDBY)\n else:\n print(\"vehicle\", message[\"state\"])\n\n # vehicleSchedule = vehicle.pop(\"schedule\")\n if vehicle_id not in self.vehicles:\n # print(\"set vehicle\", vehicle_id, message[\"name\"])\n self.vehicles[vehicle_id] = message\n else:\n # print(\"update vehicle\", vehicle_id, message[\"name\"])\n self.vehicles[vehicle_id].update(message)\n\n def update_traffic_signal_status(self, _client, _userdata, topic, payload):\n if self.topicTrafficSignalPublish.root in topic:\n message = self.topicTrafficSignalPublish.unserialize(payload)\n for route in message[\"routes\"]:\n self.traffic_signals[route[\"route_code\"]] = route\n\n def response_request(self, _client, _userdata, topic, payload):\n if self.topicFleetManagerSubscribe.root in topic:\n message = self.topicFleetManagerSubscribe.unserialize(payload)\n if message[\"action\"] == FleetManager.ACTION.PUBLISH_RELATIONS:\n self.publish_relations()\n\n\n def get_dispatchable_vehicles(self):\n return dict(filter(\n lambda x: x[1][\"state\"] in [SimTaxi.STATE.STANDBY],\n self.vehicles.items()\n ))\n\n def dispatch(self, user_id, user_status):\n start_point = {\n \"arrow_code\": self.arrow.get_arrow_codes_from_waypoint_id(\n user_status[\"schedules\"][0][\"start\"][\"waypoint_id\"])[0],\n \"waypoint_id\": user_status[\"schedules\"][0][\"start\"][\"waypoint_id\"],\n }\n vehicles = self.get_dispatchable_vehicles()\n if len(vehicles) == 0:\n print(\"no dispatchable vehicles\")\n return\n\n goal_points = []\n for vehicle_id, goal_waypoint_id in map(\n lambda x: (x[0], x[1][\"location\"][\"waypoint_id\"]), vehicles.items()):\n goal_points.append({\n \"goal_id\": vehicle_id,\n \"arrow_code\": self.arrow.get_arrow_codes_from_waypoint_id(goal_waypoint_id)[0],\n \"waypoint_id\": goal_waypoint_id,\n })\n routes = self.route.get_shortest_routes(start_point, goal_points, reverse=True)\n if len(routes) == 0:\n print(\"no pick_up_route\")\n return\n pick_up_route = min(routes.items(), key=lambda x: x[1][\"cost\"])[1]\n\n vehicle_id = pick_up_route[\"goal_id\"]\n\n start_point = {\n \"arrow_code\": self.arrow.get_arrow_codes_from_waypoint_id(\n user_status[\"schedules\"][0][\"start\"][\"waypoint_id\"])[0],\n \"waypoint_id\": user_status[\"schedules\"][0][\"start\"][\"waypoint_id\"],\n }\n goal_points = [{\n \"goal_id\": user_id,\n \"arrow_code\": self.arrow.get_arrow_codes_from_waypoint_id(\n user_status[\"schedules\"][0][\"goal\"][\"waypoint_id\"])[0],\n \"waypoint_id\": user_status[\"schedules\"][0][\"goal\"][\"waypoint_id\"],\n }]\n routes = self.route.get_shortest_routes(start_point, goal_points, reverse=False)\n if len(routes) == 0:\n print(\"cant carry_route\")\n return\n carry_route = min(routes.items(), key=lambda x: x[1][\"cost\"])[1]\n\n current_time = time()\n vehicle_schedule = deepcopy(self.topicVehicleSubscribe.get_template()[\"schedules\"][0])\n 
vehicle_schedule.update({\n \"name\": \"pickup\",\n \"start_time\": current_time,\n \"duration\": 1000,\n \"action\": Vehicle.ACTION.MOVE,\n \"route\": {\n \"start\": {\n \"waypoint_id\": pick_up_route[\"goal_waypoint_id\"],\n },\n \"goal\": {\n \"waypoint_id\": pick_up_route[\"start_waypoint_id\"],\n },\n \"arrow_codes\": pick_up_route[\"arrow_codes\"],\n }\n })\n self.vehicles[vehicle_id][\"schedules\"].append(vehicle_schedule)\n\n vehicle_schedule = deepcopy(self.topicVehicleSubscribe.get_template()[\"schedules\"][0])\n vehicle_schedule.update({\n \"name\": \"takeOn\",\n \"start_time\": current_time+1000,\n \"duration\": 10,\n \"action\": Vehicle.ACTION.STOP,\n })\n self.vehicles[vehicle_id][\"schedules\"].append(vehicle_schedule)\n\n vehicle_schedule = deepcopy(self.topicVehicleSubscribe.get_template()[\"schedules\"][0])\n vehicle_schedule.update({\n \"name\": \"carry\",\n \"start_time\": current_time+1010,\n \"duration\": 1000,\n \"action\": Vehicle.ACTION.MOVE,\n \"route\": {\n \"start\": {\n \"waypoint_id\": carry_route[\"start_waypoint_id\"],\n },\n \"goal\": {\n \"waypoint_id\": carry_route[\"goal_waypoint_id\"],\n },\n \"arrow_codes\": carry_route[\"arrow_codes\"],\n }\n })\n self.vehicles[vehicle_id][\"schedules\"].append(vehicle_schedule)\n\n vehicle_schedule = deepcopy(self.topicVehicleSubscribe.get_template()[\"schedules\"][0])\n vehicle_schedule.update({\n \"name\": \"discharge\",\n \"start_time\": current_time+2010,\n \"duration\": 10,\n \"action\": Vehicle.ACTION.STOP,\n })\n self.vehicles[vehicle_id][\"schedules\"].append(vehicle_schedule)\n\n vehicle_schedule = deepcopy(self.topicVehicleSubscribe.get_template()[\"schedules\"][0])\n vehicle_schedule.update({\n \"name\": \"standBy\",\n \"start_time\": current_time+2020,\n \"duration\": 86400,\n \"action\": SimTaxi.ACTION.STANDBY,\n })\n self.vehicles[vehicle_id][\"schedules\"].append(vehicle_schedule)\n\n payload = self.topicVehicleSubscribe.serialize({\"schedules\": self.vehicles[vehicle_id][\"schedules\"]})\n self.publish(self.topicVehicleSubscribe.root+\"/\"+vehicle_id+\"/schedules\", payload)\n\n self.relations[user_id] = vehicle_id\n self.relations[vehicle_id] = user_id\n\n self.publish_relations()\n\n def publish_relations(self):\n message = self.topicFleetManagerPublish.get_template()\n message[\"time\"] = time();\n message[\"relations\"] = self.relations\n payload = self.topicFleetManagerPublish.serialize(message)\n self.publish(self.topicFleetManagerPublish.private, payload)\n","sub_path":"ams/nodes/fleet_manager.py","file_name":"fleet_manager.py","file_ext":"py","file_size_in_byte":14166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"594220398","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int16\nfrom geometry_msgs.msg import Twist\nimport time\ndistance = 999\npub = None\ncount = 0\npos = 0\ncount_state = 0\nsmaller = 80\nprev = 300\n\ndef callback(data):\n global distance, count\n \n\n distance = data.data\n #rospy.loginfo(distance)\n\n\ndef findmin():\n global pos\n num = prev\n if distance < num:\n global prev\n prev = distance\n pos = count\n num = distance\n #rospy.loginfo(pos)\n\n \n \n \n\n return num\n\ndef turn_left():\n action = Twist()\n action.angular.z = 0.2\n return action\n \n \ndef turn_right():\n action = Twist()\n action.angular.z = -0.2\n return action\n\ndef stop():\n action = Twist()\n action.angular.z = 0\n action.linear.x = 0\n return action\n\n\n \n \n \n \ndef main():\n global pub,count,count_state, 
smaller\n\n    \n    rospy.init_node('dis_pickup', anonymous=True)\n    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\n    rospy.Subscriber(\"tofdis\", Int16, callback)\n    \n    rate = rospy.Rate(20)\n    min_left = 999\n    min_right = 999\n    direction = 0\n    \n    \n    while not rospy.is_shutdown():\n        action = Twist()\n        #rospy.loginfo(distance)\n\n        if(count_state == 0):\n            min_left = findmin()\n            rospy.loginfo(\"left \"+str(min_left))\n            action = turn_left()\n        elif(count_state==1):\n            #min_left = findmin()\n            #rospy.loginfo(min_left)\n            action = turn_right()\n        elif(count_state==2):\n            min_right = findmin()\n            rospy.loginfo(\"right \"+str(min_right))\n            action = turn_right()\n        elif(count_state==3):\n            #min_right = findmin()\n            action = turn_left()\n        elif(count_state == 4):\n            if(min_left < min_right):\n                action = turn_left()\n                rospy.loginfo(\"left\")\n                rospy.loginfo(min_left)\n            elif(min_left > min_right):\n                action = turn_right()\n                rospy.loginfo(\"right\")\n                rospy.loginfo(min_right)\n\n            else:\n                action = stop()\n        \n\n\n        \n        pub.publish(action)\n        count += 1\n        if count == 80 and count_state == 0:\n            count_state = 1\n            count = 0\n        elif count == 80 and count_state ==1:\n            count_state = 2\n            count = 0\n        elif count == 80 and count_state ==2:\n            count_state =3\n            count = 0\n        elif count == 80 and count_state ==3:\n            action = Twist()\n            action = stop()\n            pub.publish(action)\n            time.sleep(1)\n            count_state = 4\n            count = 0\n            rospy.loginfo(pos)\n        \n        elif count == pos and count_state ==4:\n            action = Twist()\n            action = stop()\n            pub.publish(action)\n            time.sleep(1)\n            count_state = 5\n\n            count = 0\n            smaller = smaller / 2\n            rospy.loginfo(smaller)\n            if smaller == 20:\n                count_state = 5\n            \n\n        rate.sleep()\n\n\n    \n    \n\n    \n    \n    rospy.spin()\n\nif __name__ == '__main__':\n    try:\n        main()\n    except rospy.ROSInterruptException:\n        \n        pass\n","sub_path":"object_tracking/tof/src/cmd_rotate.py","file_name":"cmd_rotate.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"595699810","text":"from sklearn import tree, metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n\ndef AdaBoost(dataset, label, test_set, test_label, T, cnt):\n    clf = tree.DecisionTreeClassifier(max_depth=2)\n    train_size = len(dataset)\n    test_size = len(test_set)\n    w = np.ones(train_size) / train_size\n    Result = np.zeros(test_size)\n    AUC = []\n    global best_AUC, best_T\n    for i in range(1, T + 1):\n        clf.fit(dataset, label, sample_weight=w)\n        train_pred = np.array(clf.predict(dataset))\n        test_pred = np.array(clf.predict(test_set))\n        error_list = list(map(int, train_pred != label))\n        error_two = [-1 if x == 1 else 1 for x in error_list]\n        error_rate = sum(error_list * w)\n        if error_rate > 0.5:\n            break\n        alpha = 0.5 * np.log((1 - error_rate) / error_rate)\n        Result = Result + alpha * test_pred\n        if i % cnt == 0:\n            FinalResult = np.sign(Result)\n            Tmp_AUC = metrics.roc_auc_score(test_label, FinalResult)\n            if Tmp_AUC > best_AUC:\n                best_AUC = Tmp_AUC\n                best_T = i\n            AUC.append(metrics.roc_auc_score(test_label, FinalResult))\n        Z = 2 * np.sqrt(error_rate * (1 - error_rate))\n        w = w * ([np.exp(-alpha * x) for x in error_two]) / Z\n    return AUC\n\ndef fromZerotoOne(x):\n    if x == 1:\n        return 1\n    return -1\n\nif __name__ == '__main__':\n    dataset = np.loadtxt(\".\\\\adult_train_feature.txt\")\n    label = np.loadtxt(\".\\\\adult_train_label.txt\")\n    test_set = np.loadtxt(\".\\\\adult_test_feature.txt\")\n    test_label = np.loadtxt(\".\\\\adult_test_label.txt\")\n\n    best_T = 4\n    Total_T = 100\n    print_cnt = 10\n    test_cnt = 5\n    best_AUC = 0\n\n    label = 
np.array(list(map(fromZerotoOne, label)))\n test_label = np.array(list(map(fromZerotoOne, test_label)))\n\n # AdaBoost(dataset, label, test_set, test_label, 500, 10)\n X = [x for x in range(print_cnt, Total_T + 1, print_cnt)]\n y = np.zeros(len(X))\n\n for i in range(test_cnt):\n train_X, test_X, train_y, test_y = train_test_split(dataset, label, test_size=0.2)\n tmp = AdaBoost(train_X, train_y, test_X, test_y, Total_T, print_cnt)\n y += tmp\n y = y / test_cnt\n print(best_T)\n\n plt.figure(figsize=(6, 6))\n plt.plot(X, y, color=\"red\", linewidth=1)\n plt.xlabel(\"Num\") # xlabel、ylabel:分别设置X、Y轴的标题文字。\n plt.ylabel(\"AUC\")\n plt.ylim(0.6, 0.85) # xlim、ylim:分别设置X、Y轴的显示范围。\n plt.show()\n\n AUC = AdaBoost(dataset, label, test_set, test_label, best_T, best_T)[0]\n print(AUC)\n\n\n","sub_path":"ch8/BoostMain.py","file_name":"BoostMain.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"394584211","text":"import json\nimport arrow\nfrom dateutil import tz\n\nwith open(\"data/dataset.json\") as fp:\n dataset = json.load(fp)\n\nfor item in dataset:\n if item[\"raw_version\"] != \"2.0\": continue\n try:\n date = arrow.get(item[\"raw_data\"][\"caption\"], \"[Data last updated] h:mma MMMM D, YYYY\", tzinfo=tz.gettz('US/Eastern'))\n except arrow.parser.ParserMatchError:\n try: \n date = arrow.get(item[\"raw_data\"][\"caption\"], \"[Data last updated] ha MMMM D, YYYY\", tzinfo=tz.gettz('US/Eastern'))\n except:\n continue\n\n item[\"timestr\"] = str(date)\n item[\"timestamp\"] = date.timestamp\n\n print(item)\n\nwith open(\"data/dataset.json\", \"w\") as fp:\n json.dump(dataset, fp, indent=2)","sub_path":"scripts/parse_v2.py","file_name":"parse_v2.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"235892117","text":"import torch\nimport torch.nn as nn\n\nfrom matplotlib.pylab import *\nfrom scripts.model.nl2sql.utils.utils_data import encode, encode_hpu\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass WNP(nn.Module):\n def __init__(self, iS=300, hS=100, lS=2, dr=0.3, ):\n super(WNP, self).__init__()\n self.iS = iS\n self.hS = hS\n self.lS = lS\n self.dr = dr\n\n self.mL_w = 4 # max where condition number\n\n self.enc_h = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.enc_n = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n # column-attetention?\n self.W_att_h = nn.Linear(hS, 1)\n self.W_hidden = nn.Linear(hS, lS * hS)\n self.W_cell = nn.Linear(hS, lS * hS)\n\n self.W_att_n = nn.Linear(hS, 1)\n self.wn_out = nn.Sequential(nn.Linear(hS, hS),\n nn.Tanh(),\n nn.Linear(hS, self.mL_w + 1)) # max number (4 + 1)\n\n self.softmax_dim1 = nn.Softmax(dim=1)\n self.softmax_dim2 = nn.Softmax(dim=2)\n\n def forward(self, wemb_n, l_n, wemb_hpu, l_hpu, l_hs, show_p_wn=False):\n # Encode\n\n wenc_hs = encode_hpu(self.enc_h, wemb_hpu, l_hpu, l_hs) # [b, mL_hs, dim]\n\n bS = len(l_hs)\n mL_n = max(l_n)\n mL_hs = max(l_hs)\n # mL_h = max(l_hpu)\n\n # (self-attention?) 
column Embedding?\n # [B, mL_hs, 100] -> [B, mL_hs, 1] -> [B, mL_hs]\n att_h = self.W_att_h(wenc_hs).squeeze(2)\n\n # Penalty\n for b, l_hs1 in enumerate(l_hs):\n if l_hs1 < mL_hs:\n att_h[b, l_hs1:] = -10000000000\n p_h = self.softmax_dim1(att_h)\n\n if show_p_wn:\n if p_h.shape[0] != 1:\n raise Exception(\"Batch size should be 1.\")\n fig = figure(2001)\n subplot(7, 2, 5)\n cla()\n plot(p_h[0].data.numpy(), '--rs', ms=7)\n title('wn: header_weight')\n grid(True)\n fig.canvas.draw()\n show()\n # input('Type Eenter to continue.')\n\n # [B, mL_hs, 100] * [ B, mL_hs, 1] -> [B, mL_hs, 100] -> [B, 100]\n c_hs = torch.mul(wenc_hs, p_h.unsqueeze(2)).sum(1)\n\n # [B, 100] --> [B, 2*100] Enlarge because there are two layers.\n hidden = self.W_hidden(c_hs) # [B, 4, 200/2]\n hidden = hidden.view(bS, self.lS * 2, int(\n self.hS / 2)) # [4, B, 100/2] # number_of_layer_layer * (bi-direction) # lstm input convention.\n hidden = hidden.transpose(0, 1).contiguous()\n\n cell = self.W_cell(c_hs) # [B, 4, 100/2]\n cell = cell.view(bS, self.lS * 2, int(self.hS / 2)) # [4, B, 100/2]\n cell = cell.transpose(0, 1).contiguous()\n\n # (hidden, cell) -> the initial statement of the LSTM\n wenc_n = encode(self.enc_n, wemb_n, l_n,\n return_hidden=False,\n hc0=(hidden, cell),\n last_only=False) # [b, n, dim]\n\n att_n = self.W_att_n(wenc_n).squeeze(2) # [B, max_len, 100] -> [B, max_len, 1] -> [B, max_len]\n\n # Penalty\n for b, l_n1 in enumerate(l_n):\n if l_n1 < mL_n:\n att_n[b, l_n1:] = -10000000000\n p_n = self.softmax_dim1(att_n)\n\n if show_p_wn:\n if p_n.shape[0] != 1:\n raise Exception(\"Batch size should be 1.\")\n fig = figure(2001)\n subplot(7, 2, 6)\n cla()\n plot(p_n[0].data.numpy(), '--rs', ms=7)\n title('wn: nlu_weight')\n grid(True)\n fig.canvas.draw()\n\n show()\n # input('Type Enter to continue.')\n\n # [B, mL_n, 100] *([B, mL_n] -> [B, mL_n, 1] -> [B, mL_n, 100] ) -> [B, 100]\n c_n = torch.mul(wenc_n, p_n.unsqueeze(2).expand_as(wenc_n)).sum(dim=1)\n s_wn = self.wn_out(c_n)\n\n return s_wn\n\n\nclass WCP(nn.Module):\n def __init__(self, iS=300, hS=100, lS=2, dr=0.3):\n super(WCP, self).__init__()\n self.iS = iS\n self.hS = hS\n self.lS = lS\n self.dr = dr\n\n self.enc_h = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.enc_n = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.W_att = nn.Linear(hS, hS)\n self.W_c = nn.Linear(hS, hS)\n self.W_hs = nn.Linear(hS, hS)\n self.W_out = nn.Sequential(\n nn.Tanh(), nn.Linear(2 * hS, 1)\n )\n\n self.softmax_dim1 = nn.Softmax(dim=1)\n self.softmax_dim2 = nn.Softmax(dim=2)\n\n def forward(self, wemb_n, l_n, wemb_hpu, l_hpu, l_hs, show_p_wc, penalty=True):\n # Encode\n wenc_n = encode(self.enc_n, wemb_n, l_n,\n return_hidden=False,\n hc0=None,\n last_only=False) # [b, n, dim]\n\n wenc_hs = encode_hpu(self.enc_h, wemb_hpu, l_hpu, l_hs) # [b, hs, dim]\n\n # attention\n # wenc = [bS, mL, hS]\n # att = [bS, mL_hs, mL_n]\n # att[b, i_h, j_n] = p(j_n| i_h)\n att = torch.bmm(wenc_hs, self.W_att(wenc_n).transpose(1, 2))\n\n # penalty to blank part.\n mL_n = max(l_n)\n for b_n, l_n1 in enumerate(l_n):\n if l_n1 < mL_n:\n att[b_n, :, l_n1:] = -10000000000\n\n # make p(j_n | i_h)\n p = self.softmax_dim2(att)\n\n if show_p_wc:\n # p = [b, hs, n]\n if p.shape[0] != 1:\n raise Exception(\"Batch size should be 1.\")\n fig = figure(2001)\n # subplot(6,2,7)\n subplot2grid((7,2), (3, 1), rowspan=2)\n cla()\n _color = 'rgbkcm'\n 
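# Illustrative sketch, not part of the cond_predict.py record above: the mask-then-softmax
# attention pooling used by WNP/WCP can be checked in isolation. Padded positions are
# pushed to a large negative value before softmax so they get ~zero weight in the pooled
# context vector. All shapes and numbers below are toy values chosen for the example.
import torch

scores = torch.tensor([[1.0, 2.0, 0.5, 0.3]])    # [batch=1, positions=4]
valid_len = 3                                    # positions >= valid_len are padding
scores[:, valid_len:] = -10000000000             # same penalty constant as in the record
probs = torch.softmax(scores, dim=1)             # padding receives ~0 probability
values = torch.randn(1, 4, 8)                    # [batch, positions, hidden]
context = torch.mul(values, probs.unsqueeze(2)).sum(dim=1)  # attention-pooled [1, hidden]
assert context.shape == (1, 8)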
_symbol='.......'\n for i_h in range(l_hs[0]):\n color_idx = i_h % len(_color)\n plot(p[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7)\n\n title('wc: p_n for each h')\n grid(True)\n fig.tight_layout()\n fig.canvas.draw()\n show()\n # max nlu context vectors\n # [bS, mL_hs, mL_n]*[bS, mL_hs, mL_n]\n\n # weighted vector\n wenc_n = wenc_n.unsqueeze(1) # [ b, n, dim] -> [b, 1, n, dim]\n p = p.unsqueeze(3) # [b, hs, n] -> [b, hs, n, 1]\n c_n = torch.mul(wenc_n, p).sum(2) # -> [b, hs, dim], c_n for each header.\n\n y = torch.cat([self.W_c(c_n), self.W_hs(wenc_hs)], dim=2) # [b, hs, 2*dim]\n score = self.W_out(y).squeeze(2) # [b, hs]\n\n if penalty:\n for b, l_hs1 in enumerate(l_hs):\n score[b, l_hs1:] = -1e+10\n\n return score\n\n\nclass WOP(nn.Module):\n def __init__(self, iS=300, hS=100, lS=2, dr=0.3, n_cond_ops=3):\n super(WOP, self).__init__()\n self.iS = iS\n self.hS = hS\n self.lS = lS\n self.dr = dr\n\n self.mL_w = 4 # max where condition number\n\n self.enc_h = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.enc_n = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.W_att = nn.Linear(hS, hS)\n self.W_c = nn.Linear(hS, hS)\n self.W_hs = nn.Linear(hS, hS)\n self.wo_out = nn.Sequential(\n nn.Linear(2*hS, hS),\n nn.Tanh(),\n nn.Linear(hS, n_cond_ops)\n )\n\n self.softmax_dim1 = nn.Softmax(dim=1)\n self.softmax_dim2 = nn.Softmax(dim=2)\n\n def forward(self, wemb_n, l_n, wemb_hpu, l_hpu, l_hs, wn, wc, wenc_n=None, show_p_wo=False):\n # Encode\n if not wenc_n:\n wenc_n = encode(self.enc_n, wemb_n, l_n,\n return_hidden=False,\n hc0=None,\n last_only=False) # [b, n, dim]\n\n wenc_hs = encode_hpu(self.enc_h, wemb_hpu, l_hpu, l_hs) # [b, hs, dim]\n\n bS = len(l_hs)\n # wn\n\n\n wenc_hs_ob = [] # observed hs\n for b in range(bS):\n # [[...], [...]]\n # Pad list to maximum number of selections\n real = [wenc_hs[b, col] for col in wc[b]]\n pad = (self.mL_w - wn[b]) * [wenc_hs[b, 0]] # this padding could be wrong. 
Test with zero padding later.\n wenc_hs_ob1 = torch.stack(real + pad) # It is not used in the loss function.\n wenc_hs_ob.append(wenc_hs_ob1)\n\n # list to [B, 4, dim] tensor.\n wenc_hs_ob = torch.stack(wenc_hs_ob) # list to tensor.\n wenc_hs_ob = wenc_hs_ob.to(device)\n\n # [B, 1, mL_n, dim] * [B, 4, dim, 1]\n # -> [B, 4, mL_n, 1] -> [B, 4, mL_n]\n # multiplication bewteen NLq-tokens and selected column\n att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),\n wenc_hs_ob.unsqueeze(3)\n ).squeeze(3)\n\n # Penalty for blank part.\n mL_n = max(l_n)\n for b, l_n1 in enumerate(l_n):\n if l_n1 < mL_n:\n att[b, :, l_n1:] = -10000000000\n\n p = self.softmax_dim2(att) # p( n| selected_col )\n if show_p_wo:\n # p = [b, hs, n]\n if p.shape[0] != 1:\n raise Exception(\"Batch size should be 1.\")\n fig=figure(2001)\n # subplot(6,2,7)\n subplot2grid((7,2), (5, 0), rowspan=2)\n cla()\n _color='rgbkcm'\n _symbol='.......'\n for i_wn in range(self.mL_w):\n color_idx = i_wn % len(_color)\n plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)\n\n title('wo: p_n for selected h')\n grid(True)\n fig.tight_layout()\n fig.canvas.draw()\n show()\n\n # [B, 1, mL_n, dim] * [B, 4, mL_n, 1]\n # --> [B, 4, mL_n, dim]\n # --> [B, 4, dim]\n c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)\n\n # [bS, 5-1, dim] -> [bS, 5-1, 3]\n\n vec = torch.cat([self.W_c(c_n), self.W_hs(wenc_hs_ob)], dim=2)\n s_wo = self.wo_out(vec)\n\n return s_wo\n\nclass WVP_se(nn.Module):\n \"\"\"\n Discriminative model\n Get start and end.\n Here, classifier for [ [투수], [팀1], [팀2], [연도], ...]\n Input: Encoded nlu & selected column.\n Algorithm: Encoded nlu & selected column. -> classifier -> mask scores -> ...\n \"\"\"\n def __init__(self, iS=300, hS=100, lS=2, dr=0.3, n_cond_ops=4, old=False):\n super(WVP_se, self).__init__()\n self.iS = iS\n self.hS = hS\n self.lS = lS\n self.dr = dr\n self.n_cond_ops = n_cond_ops\n\n self.mL_w = 4 # max where condition number\n\n self.enc_h = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.enc_n = nn.LSTM(input_size=iS, hidden_size=int(hS / 2),\n num_layers=lS, batch_first=True,\n dropout=dr, bidirectional=True)\n\n self.W_att = nn.Linear(hS, hS)\n self.W_c = nn.Linear(hS, hS)\n self.W_hs = nn.Linear(hS, hS)\n self.W_op = nn.Linear(n_cond_ops, hS)\n\n # self.W_n = nn.Linear(hS, hS)\n if old:\n self.wv_out = nn.Sequential(\n nn.Linear(4 * hS, 2)\n )\n else:\n self.wv_out = nn.Sequential(\n nn.Linear(4 * hS, hS),\n nn.Tanh(),\n nn.Linear(hS, 2)\n )\n # self.wv_out = nn.Sequential(\n # nn.Linear(3 * hS, hS),\n # nn.Tanh(),\n # nn.Linear(hS, self.gdkL)\n # )\n\n self.softmax_dim1 = nn.Softmax(dim=1)\n self.softmax_dim2 = nn.Softmax(dim=2)\n\n def forward(self, wemb_n, l_n, wemb_hpu, l_hpu, l_hs, wn, wc, wo, wenc_n=None, show_p_wv=False):\n\n # Encode\n if not wenc_n:\n wenc_n, hout, cout = encode(self.enc_n, wemb_n, l_n,\n return_hidden=True,\n hc0=None,\n last_only=False) # [b, n, dim]\n\n wenc_hs = encode_hpu(self.enc_h, wemb_hpu, l_hpu, l_hs) # [b, hs, dim]\n\n bS = len(l_hs)\n\n wenc_hs_ob = [] # observed hs\n\n for b in range(bS):\n # [[...], [...]]\n # Pad list to maximum number of selections\n real = [wenc_hs[b, col] for col in wc[b]]\n pad = (self.mL_w - wn[b]) * [wenc_hs[b, 0]] # this padding could be wrong. 
Test with zero padding later.\n wenc_hs_ob1 = torch.stack(real + pad) # It is not used in the loss function.\n wenc_hs_ob.append(wenc_hs_ob1)\n\n # list to [B, 4, dim] tensor.\n wenc_hs_ob = torch.stack(wenc_hs_ob) # list to tensor.\n wenc_hs_ob = wenc_hs_ob.to(device)\n\n\n # Column attention\n # [B, 1, mL_n, dim] * [B, 4, dim, 1]\n # -> [B, 4, mL_n, 1] -> [B, 4, mL_n]\n # multiplication bewteen NLq-tokens and selected column\n att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),\n wenc_hs_ob.unsqueeze(3)\n ).squeeze(3)\n # Penalty for blank part.\n mL_n = max(l_n)\n for b, l_n1 in enumerate(l_n):\n if l_n1 < mL_n:\n att[b, :, l_n1:] = -10000000000\n\n p = self.softmax_dim2(att) # p( n| selected_col )\n\n if show_p_wv:\n # p = [b, hs, n]\n if p.shape[0] != 1:\n raise Exception(\"Batch size should be 1.\")\n fig=figure(2001)\n # subplot(6,2,7)\n subplot2grid((7,2), (5, 1), rowspan=2)\n cla()\n _color='rgbkcm'\n _symbol='.......'\n for i_wn in range(self.mL_w):\n color_idx = i_wn % len(_color)\n plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)\n\n title('wv: p_n for selected h')\n grid(True)\n fig.tight_layout()\n fig.canvas.draw()\n show()\n\n\n # [B, 1, mL_n, dim] * [B, 4, mL_n, 1]\n # --> [B, 4, mL_n, dim]\n # --> [B, 4, dim]\n c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)\n\n # Select observed headers only.\n # Also generate one_hot vector encoding info of the operator\n # [B, 4, dim]\n wenc_op = []\n for b in range(bS):\n # [[...], [...]]\n # Pad list to maximum number of selections\n wenc_op1 = torch.zeros(self.mL_w, self.n_cond_ops)\n wo1 = wo[b]\n idx_scatter = []\n l_wo1 = len(wo1)\n for i_wo11 in range(self.mL_w):\n if i_wo11 < l_wo1:\n wo11 = wo1[i_wo11]\n idx_scatter.append([int(wo11)])\n else:\n idx_scatter.append([0]) # not used anyway\n\n wenc_op1 = wenc_op1.scatter(1, torch.tensor(idx_scatter), 1)\n\n wenc_op.append(wenc_op1)\n\n # list to [B, 4, dim] tensor.\n wenc_op = torch.stack(wenc_op) # list to tensor.\n wenc_op = wenc_op.to(device)\n\n # Now after concat, calculate logits for each token\n # [bS, 5-1, 3*hS] = [bS, 4, 300]\n vec = torch.cat([self.W_c(c_n), self.W_hs(wenc_hs_ob), self.W_op(wenc_op)], dim=2)\n\n # Make extended vector based on encoded nl token containing column and operator information.\n # wenc_n = [bS, mL, 100]\n # vec2 = [bS, 4, mL, 400]\n vec1e = vec.unsqueeze(2).expand(-1,-1, mL_n, -1) # [bS, 4, 1, 300] -> [bS, 4, mL, 300]\n wenc_ne = wenc_n.unsqueeze(1).expand(-1, 4, -1, -1) # [bS, 1, mL, 100] -> [bS, 4, mL, 100]\n vec2 = torch.cat([vec1e, wenc_ne], dim=3)\n\n # now make logits\n s_wv = self.wv_out(vec2) # [bS, 4, mL, 400] -> [bS, 4, mL, 2]\n\n # penalty for spurious tokens\n for b, l_n1 in enumerate(l_n):\n if l_n1 < mL_n:\n s_wv[b, :, l_n1:, :] = -10000000000\n return s_wv\n","sub_path":"scripts/model/nl2sql/models/cond_predict.py","file_name":"cond_predict.py","file_ext":"py","file_size_in_byte":16683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"456369472","text":"#!/usr/bin/python3\r\n\"\"\"\r\n Author: Ves\r\n Feature: Painting a fractal tree by recursion\r\n Version: 1.0\r\n Date: 03/12/2017\r\n\"\"\"\r\n\r\nimport turtle\r\n\r\n\r\ndef draw_branch(branch_length, floor):\r\n \"\"\"\r\n Painting a fractal tree\r\n \"\"\"\r\n step = 15\r\n\r\n if floor == 0:\r\n global n\r\n n = 1 + branch_length // step\r\n\r\n if branch_length >5:\r\n # Painting right branches\r\n if floor > n - 3:\r\n turtle.color('green')\r\n else:\r\n 
turtle.color('brown')\r\n turtle.pensize(n - floor)\r\n turtle.fd(branch_length)\r\n turtle.rt(20)\r\n floor += 1\r\n draw_branch(branch_length - step, floor)\r\n\r\n # Painting left branches\r\n turtle.lt(40)\r\n draw_branch(branch_length - step, floor)\r\n\r\n # the branch before return\r\n turtle.rt(20)\r\n floor -= 1\r\n if floor > n - 3:\r\n turtle.color('green')\r\n else:\r\n turtle.color('brown')\r\n turtle.pensize(n - floor)\r\n print(n, floor)\r\n turtle.bk(branch_length)\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n main program\r\n \"\"\"\r\n floor = 0\r\n size = 100\r\n turtle.lt(90)\r\n turtle.pu()\r\n turtle.bk(150)\r\n turtle.pd()\r\n draw_branch(size, floor)\r\n turtle.exitonclick()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PaintFractalTree.py","file_name":"PaintFractalTree.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"73523961","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport keras\n\n\nclass MyCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n\n if logs.get('acc') > 0.85:\n\n print(\"\\nReaches 85% accuracy so cancelling training!\\n\")\n self.model.stop_training = True\n\n\n\nif __name__ == '__main__':\n\n mnist = keras.datasets.fashion_mnist\n\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n\n X_train, X_test = X_train / 255.0, X_test / 255.0\n\n\n model = keras.models.Sequential([\n keras.layers.Flatten(),\n keras.layers.Dense(units=256, activation=tf.nn.relu),\n keras.layers.Dense(units=10, activation=tf.nn.softmax),\n ])\n\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'],\n )\n\n callbacks = [MyCallback()]\n model.fit(\n X_train,\n Y_train,\n epochs=2,\n callbacks=callbacks,\n )\n\n model.evaluate(\n X_test,\n Y_test,\n )\n\n\n classifications = model.predict(X_test)\n\n print(classifications[234])\n print(Y_test[234])","sub_path":"TF-in-practice-Coursera/1-Introduction/Programming/fashion_mnist/fashion_mnist.7.py","file_name":"fashion_mnist.7.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"74913374","text":"#!/bin/python\n\nimport unittest\n\nimport sap\nimport sap.adt\nfrom sap.adt.aunit import Alert, AlertSeverity\n\nfrom fixtures_adt import DummyADTObject\nfrom fixtures_adt_aunit import AUNIT_RESULTS_XML, AUNIT_NO_TEST_RESULTS_XML\n\n\nconnection = sap.adt.Connection('nohost', 'noclient', 'nouser', 'nopassword')\n\nclass TestAUnit(unittest.TestCase):\n\n def test_build_tested_object_uri(self):\n victory = DummyADTObject()\n\n victory_uri = sap.adt.AUnit.build_tested_object_uri(connection, victory)\n self.assertEquals(victory_uri, '/sap/bc/adt/awesome/success/noobject')\n\n\nclass TestAlert(unittest.TestCase):\n\n def test_error_as_severity_fatal(self):\n alert = sap.adt.aunit.Alert(severity=AlertSeverity.FATAL, kind=None, title=None, details=None, stack=None)\n self.assertTrue(alert.is_error)\n self.assertFalse(alert.is_warning)\n\n def test_error_as_severity_critical(self):\n alert = sap.adt.aunit.Alert(severity=AlertSeverity.CRITICAL, kind=None, title=None, details=None, stack=None)\n self.assertTrue(alert.is_error)\n self.assertFalse(alert.is_warning)\n\n def test_warning_as_severity_tolerable(self):\n alert = sap.adt.aunit.Alert(severity=AlertSeverity.TOLERABLE, kind=None, 
title=None, details=None, stack=None)\n self.assertTrue(alert.is_warning)\n self.assertFalse(alert.is_error)\n\n def test_ok_as_severity_random(self):\n alert = sap.adt.aunit.Alert(severity='UNKNOWN', kind=None, title=None, details=None, stack=None)\n self.assertFalse(alert.is_warning)\n self.assertFalse(alert.is_error)\n\n\nclass TestAUnitParseResults(unittest.TestCase):\n\n def test_parse_full(self):\n run_results = sap.adt.aunit.parse_run_results(AUNIT_RESULTS_XML)\n\n self.assertEqual([program.name for program in run_results.programs], ['ZCL_THEKING_MANUAL_HARDCORE', 'ZEXAMPLE_TESTS'])\n\n program_the_king = run_results.programs[0]\n self.assertEqual([test_class.name for test_class in program_the_king.test_classes], ['LTCL_TEST', 'LTCL_TEST_HARDER'])\n\n test_class = program_the_king.test_classes[0]\n self.assertEqual([test_method.name for test_method in test_class.test_methods], ['DO_THE_FAIL', 'DO_THE_TEST'])\n\n test_method = test_class.test_methods[0]\n self.assertEqual([(alert.kind, alert.severity, alert.title) for alert in test_method.alerts],\n [('failedAssertion', 'critical', 'Critical Assertion Error: \\'I am supposed to fail\\'')])\n\n program_example = run_results.programs[1]\n self.assertEqual([test_class.name for test_class in program_example.test_classes], ['LTCL_TEST'])\n\n def test_parse_no_tests(self):\n run_results = sap.adt.aunit.parse_run_results(AUNIT_NO_TEST_RESULTS_XML)\n\n self.assertEqual([(alert.kind, alert.severity, alert.title) for alert in run_results.alerts],\n [('noTestClasses', 'tolerable', 'The task definition does not refer to any test')])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/unit/test_sap_adt_aunit.py","file_name":"test_sap_adt_aunit.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"212622598","text":"\n\ndef ind_find(lst):\n vl = int(input('Введите искомое число: '))\n\n lt = 0\n ml = len(lst) // 2\n rt = len(lst) - 1\n\n while lst[ml] != vl and lt <= rt:\n if vl > lst[ml]:\n lt = ml + 1\n else:\n rt = ml - 1\n\n ml = (lt + rt) // 2\n\n if lt > rt:\n return None\n else:\n fa = 'Индекс искомого числа: ' + str(ml)\n return fa\n","sub_path":"itter.py","file_name":"itter.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"371194970","text":"'''\n31. Next Permutation\n\nImplement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.\n\nIf such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).\n\nThe replacement must be in-place and use only constant extra memory.\n\nHere are some examples. 
Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.\n\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n'''\n\n\nclass Solution:\n    def nextPermutation(self, nums) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        # step 1: find the change number whose last numbers are largest order\n        i = len(nums)-1\n        while i >= 1:\n            if nums[i-1] < nums[i]:\n                break\n            i -= 1\n        if i <= 0:\n            # whole array is non-increasing: rearrange to the lowest possible order\n            nums.sort()\n            return\n        i -= 1\n        # step 2: find the smallest number behind i which is still larger than nums[i]\n        t = i + 1\n        smallest = nums[t]\n        index = t\n        while t < len(nums):\n            if nums[t] > nums[i] and smallest >= nums[t]:\n                smallest = nums[t]\n                index = t\n            t += 1\n\n        nums[i], nums[index] = nums[index], nums[i]\n        # step 3: order the rest of the numbers(after number i) smallest(sort the numbers after i)\n        a = nums[i+1:len(nums)]\n        a.sort()\n        nums[i+1:len(nums)] = a\n\n\nsol = Solution()\na = [4, 3, 2, 1]\nfor i in range(10):\n    sol.nextPermutation(a)\n    print(a)\n","sub_path":"PS1_100/Ps31.py","file_name":"Ps31.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"12959640","text":"import datetime\n\"\"\"\nyear = int(input(\"Dime el año \"))\nmonth = int(input(\"Dime el mes \"))\nday = int(input(\"Dime el día \"))\n\nuser_date = datetime.datetime(year=year, month=month, day=day)\n\"\"\"\nfecha_actual = datetime.datetime.now()\nmanana = fecha_actual+datetime.timedelta(days=1)\nprint(\"Mañana sera {}\".format(manana.strftime(\"%d de %m del %Y\")))\n\ninicio_de_manana = datetime.datetime(year=manana.year,month=manana.month,day=manana.day)\n\nprint(\"Mañana inicia: {}\".format(inicio_de_manana))\n\nfaltante = inicio_de_manana - fecha_actual \n\nprint(\"faltan {} días y {} horas para mañana\".format(faltante.days,(faltante.total_seconds()/3600)))\n\n","sub_path":"python_nateAcademy/data_time.py","file_name":"data_time.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"345562739","text":"from ast import literal_eval\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom autogluon.timeseries.splitter import MultiWindowSplitter\n\nfrom .common import (\n    DATAFRAME_WITH_COVARIATES,\n    DATAFRAME_WITH_STATIC,\n    DUMMY_VARIABLE_LENGTH_TS_DATAFRAME,\n    get_data_frame_with_variable_lengths,\n)\n\n\ndef get_original_item_id_and_slice(tuning_item_id: str):\n    \"\"\"Extract information from tuning set item_id that has format f\"{item_id}_[{start}:{end}]\".\"\"\"\n    item_id, slice_info = tuning_item_id.rsplit(\"_\", maxsplit=1)\n    start, end = slice_info.strip(\"[]\").split(\":\")\n    return item_id, literal_eval(start), literal_eval(end)\n\n\n@pytest.mark.parametrize(\"item_id_to_length\", [{\"A\": 22, \"B\": 
50, \"C\": 10}, {\"A\": 23}])\n@pytest.mark.parametrize(\"prediction_length, num_windows\", [(5, 2), (2, 5), (8, 1), (10, 2)])\ndef test_when_multi_window_splitter_splits_then_val_item_ids_correctly_represent_length(\n item_id_to_length, prediction_length, num_windows\n):\n splitter = MultiWindowSplitter(num_windows=num_windows)\n ts_dataframe = get_data_frame_with_variable_lengths(item_id_to_length=item_id_to_length)\n\n _, val_data = splitter.split(ts_dataframe=ts_dataframe, prediction_length=prediction_length)\n for new_item_id in val_data.item_ids:\n old_item_id, start, end = get_original_item_id_and_slice(new_item_id)\n new_length = len(val_data.loc[new_item_id])\n expected_length = len(ts_dataframe.loc[old_item_id][start:end])\n assert expected_length == new_length\n\n\ndef test_when_multi_window_splitter_splits_then_cached_freq_is_preserved():\n splitter = MultiWindowSplitter()\n prediction_length = 10\n train_data, val_data = splitter.split(\n ts_dataframe=DUMMY_VARIABLE_LENGTH_TS_DATAFRAME, prediction_length=prediction_length\n )\n assert DUMMY_VARIABLE_LENGTH_TS_DATAFRAME._cached_freq == train_data._cached_freq == val_data._cached_freq\n\n\ndef test_when_all_series_too_short_then_multi_window_splitter_raises_value_error():\n splitter = MultiWindowSplitter(num_windows=5)\n prediction_length = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME.num_timesteps_per_item().max() // 2 + 1\n with pytest.raises(ValueError, match=\"all training time series are too short\"):\n splitter.split(ts_dataframe=DUMMY_VARIABLE_LENGTH_TS_DATAFRAME, prediction_length=prediction_length)\n\n\ndef test_when_splitter_adds_suffix_to_index_then_data_is_not_copied():\n ts_df = DUMMY_VARIABLE_LENGTH_TS_DATAFRAME.copy()\n splitter = MultiWindowSplitter()\n ts_df_with_suffix = splitter._append_suffix_to_item_id(ts_dataframe=ts_df, suffix=\"_[None:None]\")\n assert ts_df.values.base is ts_df_with_suffix.values.base\n\n\ndef test_when_static_features_are_present_then_static_features_index_is_aligned_with_data():\n item_id_to_length = {\"B\": 15, \"A\": 7, \"Z\": 22, \"1\": 10}\n ts_dataframe = get_data_frame_with_variable_lengths(item_id_to_length=item_id_to_length)\n ts_dataframe.static_features = pd.DataFrame(\n np.random.normal(size=len(item_id_to_length)), index=item_id_to_length.keys()\n )\n splitter = MultiWindowSplitter()\n prediction_length = 7\n train, val = splitter.split(ts_dataframe, prediction_length)\n assert (train.item_ids == train.static_features.index).all()\n assert (val.item_ids == val.static_features.index).all()\n\n\ndef test_when_static_features_are_present_then_splitter_correctly_splits_them():\n original_df = DATAFRAME_WITH_STATIC.copy()\n splitter = MultiWindowSplitter()\n prediction_length = 7\n train_data, val_data = splitter.split(ts_dataframe=original_df, prediction_length=prediction_length)\n\n for item_id in train_data.item_ids:\n assert (train_data.static_features.loc[item_id] == original_df.static_features.loc[item_id]).all()\n\n for item_id in val_data.item_ids:\n original_item_id, _, _ = get_original_item_id_and_slice(item_id)\n assert (val_data.static_features.loc[item_id] == original_df.static_features.loc[original_item_id]).all()\n\n\ndef test_when_covariates_are_present_then_splitter_correctly_splits_them():\n original_df = DATAFRAME_WITH_COVARIATES.copy()\n splitter = MultiWindowSplitter()\n prediction_length = 7\n train_data, val_data = splitter.split(ts_dataframe=original_df, prediction_length=prediction_length)\n\n for column in original_df.drop(\"target\", axis=1).columns:\n for 
item_id in train_data.item_ids:\n train_series = train_data[column].loc[item_id]\n assert (train_series == original_df[column].loc[item_id][: len(train_series)]).all()\n\n for item_id in val_data.item_ids:\n original_item_id, _, _ = get_original_item_id_and_slice(item_id)\n val_series = val_data[column].loc[item_id]\n assert (val_series == original_df[column].loc[original_item_id][: len(val_series)]).all()\n","sub_path":"timeseries/tests/unittests/test_splitter.py","file_name":"test_splitter.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"377744953","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport logging\nimport sys\nimport numbers\nimport math\n#import sklearn\nimport time\nimport datetime\nimport numpy as np\nimport cv2\nimport mxnet as mx\nfrom mxnet import ndarray as nd\nfrom mxnet import io\nfrom mxnet import recordio\nfrom mxnet import context\nfrom mxnet.ndarray._internal import _cvimresize as imresize\nimport mxnet.gluon.data.dataloader as dataloader\nsys.path.append(os.path.join(os.path.dirname(__file__), 'common'))\nimport multiprocessing\nimport threading\nimport Queue\n\nlogger = logging.getLogger()\n\ndef pick_triplets_impl(q_in, q_out):\n more = True\n while more:\n deq = q_in.get()\n if deq is None:\n more = False\n else:\n embeddings, emb_start_idx, nrof_images, alpha = deq\n print('running', emb_start_idx, nrof_images, os.getpid())\n for j in xrange(1,nrof_images):\n a_idx = emb_start_idx + j - 1\n neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)\n for pair in xrange(j, nrof_images): # For every possible positive pair.\n p_idx = emb_start_idx + pair\n pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))\n neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN\n all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr0:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n #triplets.append( (a_idx, p_idx, n_idx) )\n q_out.put( (a_idx, p_idx, n_idx) )\n #emb_start_idx += nrof_images\n print('exit',os.getpid())\n\nclass FaceImageIter(io.DataIter):\n\n def __init__(self, batch_size, data_shape,\n path_imgrec=None,\n shuffle=False,\n mean=None, \n rand_mirror=False, cutoff=0,\n split_size=1, rank=0,\n data_extra=None,\n data_name='data', label_name='softmax_label',\n **kwargs):\n super(FaceImageIter, self).__init__()\n self.split_size = split_size\n assert path_imgrec\n if path_imgrec:\n logging.info('loading recordio %s...',\n path_imgrec)\n path_imgidx = path_imgrec[0:-4]+\".idx\"\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type\n s = self.imgrec.read_idx(0)\n logging.info('unpack32 used!')\n header, _ = recordio.unpack(s)\n if header.flag>0:\n logging.info('header0 label %s'%(str(header.label)))\n self.header0 = (int(header.label[0]), int(header.label[1]))\n #assert(header.flag==1)\n self.imgidx = range(1, int(header.label[0]))\n self.id2range = {}\n self.id_num = {}\n self.seq_identity = range(int(header.label[0]), int(header.label[1]))\n for identity in self.seq_identity:\n s = self.imgrec.read_idx(identity)\n header, _ = recordio.unpack(s)\n a,b = int(header.label[0]), int(header.label[1])\n self.id2range[identity] = (a,b)\n count = b-a\n\n self.id_num[identity] = count\n logging.info('id2range %d'%(len(self.id2range)))\n else:\n 
self.imgidx = list(self.imgrec.keys)\n if shuffle or split_size > 1:\n self.seq = self.imgidx\n self.len_seq = len(self.imgidx)#for local test\n self.oseq = self.imgidx\n else:\n self.seq = None\n\n if split_size > 1:\n assert rank < split_size\n random.seed(10)\n random.shuffle(self.seq)\n logging.info('[Init]Now rank: %d, and random seq is: %s'%(rank,str(self.seq[:100])))\n epc_size = len(self.seq)\n epc_size_part = epc_size // split_size\n if rank == split_size-1:\n self.seq = self.seq[rank*epc_size_part : ]\n else:\n self.seq = self.seq[rank*epc_size_part : (rank+1)*epc_size_part]\n\n self.mean = mean\n self.nd_mean = None\n if self.mean:\n self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)\n self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size,) + data_shape)]\n #self.provide_label = [(label_name, (batch_size,))]\n self.provide_label = []\n\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.shuffle = shuffle\n self.image_size = '%d,%d'%(data_shape[1],data_shape[2])\n self.rand_mirror = rand_mirror\n self.cutoff = cutoff\n\n self.data_extra = None\n if data_extra is not None:\n self.data_extra = nd.array(data_extra)\n self.provide_data = [(data_name, (batch_size,) + data_shape), ('extra', data_extra.shape)]\n\n self.cur = 0\n self.is_init = False\n self.reset()\n\n\n def ____pick_triplets(self, embeddings, nrof_images_per_class):\n pass\n\n def _pairwise_dists(self, embeddings):\n pass\n\n def pairwise_dists(self, embeddings):\n pass\n\n def pick_triplets(self, embeddings, nrof_images_per_class):\n pass\n\n def __pick_triplets(self, embeddings, nrof_images_per_class):\n pass\n\n def triplet_oseq_reset(self):\n pass\n\n def time_reset(self):\n self.time_now = datetime.datetime.now()\n\n def time_elapsed(self):\n time_now = datetime.datetime.now()\n diff = time_now - self.time_now\n return diff.total_seconds()\n\n def select_triplets(self):\n pass\n\n def triplet_reset(self):\n self.select_triplets()\n\n def hard_mining_reset(self):\n pass\n\n def reset_c2c(self):\n pass\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n print('call reset()')\n self.cur = 0\n\n if self.shuffle:\n random.seed(10)\n random.shuffle(self.seq)\n\n if self.split_size > 1:\n self.seq = self.seq[:self.len_seq]\n\n if self.seq is None and self.imgrec is not None:\n self.imgrec.reset()\n\n @property\n def num_samples(self):\n return len(self.seq)\n\n def next_sample(self, index=None, lock=None, imgrec=None):\n \"\"\"Helper function for reading in next sample.\"\"\"\n #set total batch size, for example, 1800, and maximum size for each people, for example 45\n if self.seq is not None:\n while True:\n idx = self.seq[index]\n if imgrec is not None:\n lock.acquire()\n s = imgrec.read_idx(idx)\n lock.release()\n header, img = recordio.unpack(s)\n label = header.label\n return label, img, None, None\n elif self.imgrec is not None:\n if self.cur >= len(self.seq):\n raise StopIteration\n idx = self.seq[self.cur]\n self.cur += 1\n\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n label = header.label\n return label, img, None, None\n else:\n label, fname, bbox, landmark = self.imglist[idx]\n return label, self.read_image(fname), bbox, landmark\n else:\n s = self.imgrec.read()\n if s is None:\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img, None, None\n\n def brightness_aug(self, src, x):\n alpha = 1.0 + 
random.uniform(-x, x)\n src *= alpha\n src = nd.clip(src, 0, 255)\n return src\n\n def contrast_aug(self, src, x):\n alpha = 1.0 + random.uniform(-x, x)\n coef = np.array([[[0.299, 0.587, 0.114]]])\n gray = src * coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n src = nd.clip(src, 0, 255)\n return src\n\n def saturation_aug(self, src, x):\n alpha = 1.0 + random.uniform(-x, x)\n coef = np.array([[[0.299, 0.587, 0.114]]])\n gray = src * coef\n gray = np.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n\n def color_aug(self, img, x):\n augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]\n random.shuffle(augs)\n for aug in augs:\n #print(img.shape)\n img = aug(img, x)\n #print(img.shape)\n return img\n\n def mirror_aug(self, img):\n _rd = random.randint(0,1)\n if _rd==1:\n for c in xrange(img.shape[2]):\n img[:,:,c] = np.fliplr(img[:,:,c])\n return img\n\n def next(self, lock=None, imgrec=None, index=None):\n \"\"\"Returns the next batch of data.\"\"\"\n batch_size = self.batch_size\n c, h, w = self.data_shape\n batch_data = nd.empty((batch_size, c, h, w))\n # if self.provide_label is not None:\n if self.provide_label:\n batch_label = nd.empty(self.provide_label[0][1])\n else:\n batch_label = nd.empty((batch_size,))\n if index is None:\n index = random.sample(range(0, len(self.seq)), batch_size)\n i = 0\n try:\n while i < batch_size:\n label, s, bbox, landmark = self.next_sample(index[i], lock, imgrec)\n _data = self.imdecode(s)\n if self.rand_mirror:\n _rd = random.randint(0,1)\n if _rd==1:\n _data = mx.ndarray.flip(data=_data, axis=1)\n if self.nd_mean is not None:\n _data = _data.astype('float32')\n _data -= self.nd_mean\n _data *= 0.0078125\n if self.cutoff>0:\n centerh = random.randint(0, _data.shape[0]-1)\n centerw = random.randint(0, _data.shape[1]-1)\n half = self.cutoff//2\n starth = max(0, centerh-half)\n endh = min(_data.shape[0], centerh+half)\n startw = max(0, centerw-half)\n endw = min(_data.shape[1], centerw+half)\n _data = _data.astype('float32')\n _data[starth:endh, startw:endw, :] = 127.5\n #_data = self.augmentation_transform(_data)\n data = [_data]\n try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n for datum in data:\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n #print(datum.shape)\n if not isinstance(label, numbers.Number):\n label = label[0]\n batch_data[i][:] = self.postprocess_data(datum)\n batch_label[i][:] = label\n i += 1\n except StopIteration:\n if i < batch_size: # same as last_batch_handle='discard'\n raise StopIteration\n _label = None\n if self.provide_label is not None:\n _label = [batch_label]\n if self.data_extra is not None:\n return io.DataBatch([batch_data, self.data_extra], _label, batch_size - i)\n else:\n return io.DataBatch([batch_data], _label, batch_size - i)\n \n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError('data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError('This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See 
mx.img.imdecode for more details.\"\"\"\n img = mx.image.imdecode(s) #mx.ndarray\n return img\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = [ret for src in data for ret in aug(src)]\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n\nclass FaceImageIterList(io.DataIter):\n def __init__(self, iter_list):\n assert len(iter_list)>0\n self.provide_data = iter_list[0].provide_data\n self.provide_label = iter_list[0].provide_label\n self.iter_list = iter_list\n self.cur_iter = None\n\n def reset(self):\n self.cur_iter.reset()\n\n def next(self):\n self.cur_iter = random.choice(self.iter_list)\n while True:\n try:\n ret = self.cur_iter.next()\n except StopIteration:\n self.cur_iter.reset()\n continue\n return ret\n\nclass PrefetchingIter(io.DataIter):\n \"\"\"Performs pre-fetch for other data iterators.\n\n This iterator will create another thread to perform ``iter_next`` and then\n store the data in memory. It potentially accelerates the data read, at the\n cost of more memory usage.\n\n Parameters\n ----------\n iters : DataIter or list of DataIter\n The data iterators to be pre-fetched.\n rename_data : None or list of dict\n The *i*-th element is a renaming map for the *i*-th iter, in the form of\n {'original_name' : 'new_name'}. Should have one entry for each entry\n in iter[i].provide_data.\n rename_label : None or list of dict\n Similar to ``rename_data``.\n\n Examples\n --------\n >>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)\n >>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)\n >>> piter = mx.io.PrefetchingIter([iter1, iter2],\n ... 
rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])\n >>> print(piter.provide_data)\n [DataDesc[data_1,(25, 10L),,NCHW],\n DataDesc[data_2,(25, 10L),,NCHW]]\n \"\"\"\n def __init__(self, iters, prefetch_process=1, capacity=2, rank=0, shuffle=False):\n super(PrefetchingIter, self).__init__()\n self.n_iter = iters.num_samples #len(iters)\n assert self.n_iter > 0\n self.iters = iters\n self.batch_size = self.provide_data[0][1][0]\n self.rank = rank\n self.batch_counter = 0\n\n if hasattr(self.iters, 'epoch_size'):\n self.epoch_size = self.iters.epoch_size\n if self.iters.epoch_size is None:\n self.epoch_size = int(self.iters.num_samples/self.batch_size)\n else:\n self.epoch_size = int(self.iters.num_samples/self.batch_size)\n\n self.next_iter = 0\n self.prefetch_process = prefetch_process\n self.shuffle = shuffle\n\n self._data_queue = dataloader.Queue(maxsize=capacity)\n self._data_buffer = Queue.Queue(maxsize=capacity*2)\n self._index_queue = multiprocessing.Queue()\n\n self.prefetch_reset_event = multiprocessing.Event()\n self.epoch_end_event = multiprocessing.Event()\n self.next_reset_event = threading.Event()\n\n self.lock = multiprocessing.Lock()\n self.imgrec = self.iters.imgrec\n\n def prefetch_func(data_queue, event, end_event):\n while True:\n if event.is_set() and (not end_event.is_set()):\n #index = []\n #i = 0\n #while i < self.batch_size:\n # try:\n # index.append(self._index_queue.get())\n # i += 1\n # except:\n # end_event.set()\n #if i == self.batch_size:\n index = None\n try:\n index = self._index_queue.get()\n except:\n end_event.set()\n\n if index is not None and len(index) == self.batch_size:\n next_data = self.iters.next(self.lock, self.imgrec, index)\n data_queue.put((dataloader.default_mp_batchify_fn(next_data.data[0]),\n dataloader.default_mp_batchify_fn(next_data.label[0])))\n\n def next_func(data_queue, event):\n while True:\n if event.is_set():\n batch, label = data_queue.get(block=True)\n batch = dataloader._as_in_context(batch, context.cpu())\n label = dataloader._as_in_context(label, context.cpu())\n label = label.reshape((label.shape[0],))\n self._data_buffer.put((batch, label))\n\n # producer next\n self.produce_lst = []\n for ith in range(prefetch_process):\n p_process = multiprocessing.Process(target=prefetch_func,\n args=(self._data_queue, self.prefetch_reset_event,\n self.epoch_end_event))\n p_process.daemon = True\n p_process.start()\n self.produce_lst.append(p_process)\n\n # consumer get\n self.data_buffer = {}\n self.prefetch_thread = threading.Thread(target=next_func,\n args=(self._data_queue, self.next_reset_event))\n self.prefetch_thread.daemon = True\n self.prefetch_thread.start()\n\n # first epoch\n self.reset()\n\n def __del__(self):\n self.__clear_queue()\n\n for i_process in self.produce_lst:\n i_process.join()\n self.prefetch_thread.join()\n\n def __clear_queue(self):\n \"\"\" clear the queue\"\"\"\n while True:\n try:\n self._data_queue.get_nowait()\n except:\n break\n while True:\n try:\n self._data_buffer.get_nowait()\n except:\n break\n while True:\n try:\n self._index_queue.get_nowait()\n except:\n break\n\n @property\n def provide_data(self):\n return self.iters.provide_data\n\n @property\n def provide_label(self):\n return self.iters.provide_label\n\n def reset(self):\n self.epoch_end_event.set()\n self.next_iter = 0\n self.iters.reset()\n self.__clear_queue()\n\n assert self._index_queue.empty()\n logging.info(\"Prefetch Dataiter Inqueue\")\n seq_index = range(0, len(self.iters.seq))\n if self.shuffle:\n random.shuffle(seq_index)\n 
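# Illustrative sketch, not from the data_split_iter.py record above: PrefetchingIter's
# producer/consumer pattern, reduced to the standard library. Workers pull index chunks
# from an input queue, load them, and push batches into a bounded output queue that the
# caller drains; load_batch is a hypothetical stand-in for the record/decode step, and
# (as with the multiple worker processes above) batch order is not guaranteed.
import queue
import threading

def prefetch(indices, batch_size, load_batch, capacity=2, workers=2):
    index_q = queue.Queue()
    data_q = queue.Queue(maxsize=capacity)
    for start in range(0, len(indices), batch_size):
        index_q.put(indices[start:start + batch_size])

    def worker():
        while True:
            try:
                chunk = index_q.get_nowait()
            except queue.Empty:
                return                      # no more index chunks: worker exits
            data_q.put(load_batch(chunk))   # blocks while the bounded buffer is full

    for _ in range(workers):
        threading.Thread(target=worker, daemon=True).start()
    for _ in range(0, len(indices), batch_size):
        yield data_q.get()

batches = list(prefetch(list(range(10)), 3, load_batch=lambda chunk: [i * i for i in chunk]))
assert len(batches) == 4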
#for index in range(0, len(self.iters.seq)):\n # self._index_queue.put(seq_index[index])\n for index in range(0, len(self.iters.seq), self.batch_size):\n self._index_queue.put(seq_index[index:index+self.batch_size])\n logging.info(\"Queue Reset Done\")\n\n self.prefetch_reset_event.set()\n self.next_reset_event.set()\n self.epoch_end_event.clear()\n\n def iter_next(self):\n self.next_iter += 1\n if self.next_iter > self.epoch_size:\n self.prefetch_reset_event.clear()\n self.next_reset_event.clear()\n return False\n else:\n return True\n\n def next(self):\n if self.iter_next():\n self.batch_counter += 1\n batch, label = self._data_buffer.get(block=True)\n return io.DataBatch(data=[batch], label=[label], pad=0)\n else:\n raise StopIteration\n\ndef PrefetchFaceIter(prefetch_process=1, capacity=2, prefetch=False, **kwargs):\n if prefetch:\n iters = PrefetchingIter(\n FaceImageIter(**kwargs),\n prefetch_process, capacity,\n shuffle=kwargs['shuffle'],\n rank=kwargs['rank'],\n )\n import atexit\n atexit.register(lambda a : a.__del__(), iters)\n else:\n iters = FaceImageIter(**kwargs)\n iters.epoch_size=int(iters.num_samples/iters.provide_data[0][1][0])\n return iters\n\n\n\n","sub_path":"mxnet/insightface/insightface/src/model_parallel/data_split_iter.py","file_name":"data_split_iter.py","file_ext":"py","file_size_in_byte":21262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"615398631","text":"import discord\nimport urllib.request\nimport json\nimport re\nimport os\n\n#トークン\nTOKEN = os.environ['DISCORD_BOT_TOKEN']\n\nclient = discord.Client()\n\nModeFlag = 0\n\ncitycodes = {\n \"土浦\": '080020',\n \"水戸\": '080010',\n \"札幌\": '016010',\n \"仙台\": '040010',\n \"東京\": '130010',\n \"横浜\": '140010',\n \"名古屋\": '230010',\n \"大阪\": '270000',\n \"広島\": '340010',\n \"福岡\": '400010',\n \"鹿児島\": '460010',\n \"那覇\": '471010'\n}\n\ntaio = \"札幌、仙台、土浦、水戸、東京、横浜、名古屋、大阪、広島、福岡、鹿児島、那覇\"\n\n@client.event\nasync def on_ready():\n print(\"logged in as \" + client.user.name)\n await client.change_presence(status=discord.Status.idle,activity=discord.Game(name='創成の女神'))\n\n\n@client.event\nasync def on_message(message):\n if message.content == '対応都市':\n await message.channel.send(taio)\n\n if message.author != client.user:\n\n reg_res = re.compile(u\"ノア、(.+)の天気は?\").search(message.content)\n if reg_res:\n\n if reg_res.group(1) in citycodes.keys():\n\n citycode = citycodes[reg_res.group(1)]\n resp = urllib.request.urlopen('http://weather.livedoor.com/forecast/webservice/json/v1?city=%s'%citycode).read()\n resp = json.loads(resp.decode('utf-8'))\n\n msg = resp['location']['city']\n msg += \"の天気は、\\n\"\n for f in resp['forecasts']:\n msg += f['dateLabel'] + \"が\" + f['telop'] + \"\\n\"\n msg += \"です。\\n```\"\n msg += resp['description']['text']\n msg += \"```\"\n\n await message.channel.send(message.author.mention + msg)\n\n else:\n await message.channel.send(\"そこの天気はわかりません...\")\n\nclient.run(TOKEN)\n","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"80262227","text":"# Copyright (c) 2017 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ipaddress\nimport json\nimport six\nimport uuid\n\nfrom oslo_log import log as logging\nfrom oslo_utils import strutils\n\nimport taskflow.engines\nfrom taskflow.patterns import linear_flow\nfrom taskflow import task\nfrom taskflow.types import failure\n\nfrom cinder import exception\nfrom cinder.i18n import _\nfrom cinder.volume.drivers.huawei import constants\nfrom cinder.volume.drivers.huawei import huawei_utils\nfrom cinder.volume.drivers.huawei import hypermetro\nfrom cinder.volume.drivers.huawei import replication\nfrom cinder.volume.drivers.huawei import smartx\nfrom cinder.volume import volume_utils\n\nLOG = logging.getLogger(__name__)\n\n\nclass LunOptsCheckTask(task.Task):\n default_provides = 'opts'\n\n def __init__(self, client, feature_support, configuration, new_opts=None,\n *args, **kwargs):\n super(LunOptsCheckTask, self).__init__(*args, **kwargs)\n self.client = client\n self.feature_support = feature_support\n self.configuration = configuration\n self.new_opts = new_opts\n\n def execute(self, volume):\n if self.new_opts:\n opts = self.new_opts\n else:\n is_dorado_v6 = self.configuration.is_dorado_v6\n opts = huawei_utils.get_volume_params(volume, is_dorado_v6)\n\n huawei_utils.check_volume_type_valid(opts)\n\n feature_pairs = (\n ('qos', 'SmartQoS'),\n ('smartcache', 'SmartCache'),\n ('smartpartition', 'SmartPartition'),\n ('hypermetro', 'HyperMetro'),\n ('replication_enabled', 'HyperReplication'),\n ('policy', 'SmartTier'),\n ('dedup', 'SmartDedupe[\\s\\S]*LUN'),\n ('compression', 'SmartCompression[\\s\\S]*LUN'),\n )\n\n for feature in feature_pairs:\n if opts.get(feature[0]) and not self.feature_support[feature[1]]:\n msg = _(\"Huawei storage doesn't support %s.\") % feature[1]\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if opts.get('smartcache'):\n smartcache = smartx.SmartCache(self.client)\n smartcache.check_cache_valid(opts['cachename'])\n\n if opts.get('smartpartition'):\n smartpartition = smartx.SmartPartition(self.client)\n smartpartition.check_partition_valid(opts['partitionname'])\n\n return opts\n\n\nclass CreateLunTask(task.Task):\n default_provides = ('lun_id', 'lun_info')\n\n def __init__(self, client, configuration, feature_support,\n *args, **kwargs):\n super(CreateLunTask, self).__init__(*args, **kwargs)\n self.client = client\n self.configuration = configuration\n self.feature_support = feature_support\n\n def _get_lun_application_name(self, opts, lun_params):\n if opts.get('applicationname') is not None:\n workload_type_id = self.client.get_workload_type_id(\n opts['applicationname'])\n if workload_type_id:\n lun_params['WORKLOADTYPEID'] = workload_type_id\n else:\n msg = _(\"The workload type %s is not exist. 
Please create it \"\n \"on the array\") % opts['applicationname']\n LOG.error(msg)\n raise exception.InvalidInput(reason=msg)\n return lun_params\n\n def execute(self, volume, opts, src_size=None):\n pool_name = volume_utils.extract_host(volume.host, level='pool')\n pool_id = self.client.get_pool_id(pool_name)\n if not pool_id:\n msg = _(\"Pool %s doesn't exist in storage.\") % pool_name\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n lun_params = {\n 'NAME': huawei_utils.encode_name(volume.id),\n 'PARENTID': pool_id,\n 'DESCRIPTION': volume.name,\n 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),\n 'CAPACITY': int(int(src_size) * constants.CAPACITY_UNIT if src_size\n else int(volume.size) * constants.CAPACITY_UNIT),\n }\n\n if opts.get('controllername'):\n controller = self.client.get_controller_id(opts['controllername'])\n if controller:\n lun_params['OWNINGCONTROLLER'] = controller\n if hasattr(self.configuration, 'write_type'):\n lun_params['WRITEPOLICY'] = self.configuration.write_type\n if hasattr(self.configuration, 'prefetch_type'):\n lun_params['PREFETCHPOLICY'] = self.configuration.prefetch_type\n if hasattr(self.configuration, 'prefetch_value'):\n lun_params['PREFETCHVALUE'] = self.configuration.prefetch_value\n if opts.get('policy'):\n lun_params['DATATRANSFERPOLICY'] = opts['policy']\n\n if opts.get('dedup') is not None:\n lun_params['ENABLESMARTDEDUP'] = opts['dedup']\n elif not self.feature_support['SmartDedupe[\\s\\S]*LUN']:\n lun_params['ENABLESMARTDEDUP'] = False\n\n if opts.get('compression') is not None:\n lun_params['ENABLECOMPRESSION'] = opts['compression']\n elif not self.feature_support['SmartCompression[\\s\\S]*LUN']:\n lun_params['ENABLECOMPRESSION'] = False\n\n lun_params = self._get_lun_application_name(opts, lun_params)\n\n lun = self.client.create_lun(lun_params)\n return lun['ID'], lun\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_lun(result[0])\n\n\nclass WaitLunOnlineTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(WaitLunOnlineTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, lun_id):\n huawei_utils.wait_lun_online(self.client, lun_id)\n\n\nclass AddQoSTask(task.Task):\n default_provides = 'qos_id'\n\n def __init__(self, client, configuration, *args, **kwargs):\n super(AddQoSTask, self).__init__(*args, **kwargs)\n self.smartqos = smartx.SmartQos(client, configuration.is_dorado_v6)\n\n def execute(self, lun_id, opts):\n if opts.get('qos'):\n qos_id = self.smartqos.add(opts['qos'], lun_id)\n return qos_id\n\n def revert(self, result, lun_id, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n self.smartqos.remove(result, lun_id)\n\n\nclass AddCacheTask(task.Task):\n default_provides = 'cache_id'\n\n def __init__(self, client, *args, **kwargs):\n super(AddCacheTask, self).__init__(*args, **kwargs)\n self.smartcache = smartx.SmartCache(client)\n\n def execute(self, lun_id, opts):\n if opts.get('smartcache'):\n cache_id = self.smartcache.add(opts['cachename'], lun_id)\n return cache_id\n\n def revert(self, result, lun_id, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n self.smartcache.remove(result, lun_id)\n\n\nclass AddPartitionTask(task.Task):\n default_provides = 'partition_id'\n\n def __init__(self, client, *args, **kwargs):\n super(AddPartitionTask, self).__init__(*args, **kwargs)\n self.smartpartition = smartx.SmartPartition(client)\n\n def 
execute(self, lun_id, opts):\n if opts.get('smartpartition'):\n partition_id = self.smartpartition.add(\n opts['partitionname'], lun_id)\n return partition_id\n\n def revert(self, result, lun_id, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n self.smartpartition.remove(result, lun_id)\n\n\nclass CreateHyperMetroTask(task.Task):\n default_provides = 'hypermetro_id'\n\n def __init__(self, local_cli, remote_cli, config, is_sync=True,\n *args, **kwargs):\n super(CreateHyperMetroTask, self).__init__(*args, **kwargs)\n self.hypermetro = hypermetro.HuaweiHyperMetro(\n local_cli, remote_cli, config)\n self.loc_client = local_cli\n self.rmt_client = remote_cli\n self.sync = is_sync\n\n def execute(self, volume, lun_id, lun_info, opts):\n metadata = huawei_utils.get_volume_private_data(volume)\n hypermetro_id = None\n\n if not opts.get('hypermetro'):\n return hypermetro_id\n\n if metadata.get('hypermetro'):\n hypermetro = huawei_utils.get_hypermetro(self.loc_client, volume)\n hypermetro_id = hypermetro.get('ID') if hypermetro else None\n\n if not hypermetro_id:\n lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY',\n 'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY')\n lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}\n lun_params['NAME'] = huawei_utils.encode_name(volume.id)\n lun_params['DESCRIPTION'] = volume.name\n if (lun_info.get(\"WORKLOADTYPENAME\") and\n lun_info.get(\"WORKLOADTYPEID\")):\n workload_type_name = self.loc_client.get_workload_type_name(\n lun_info['WORKLOADTYPEID'])\n rmt_workload_type_id = self.rmt_client.get_workload_type_id(\n workload_type_name)\n if rmt_workload_type_id:\n lun_params['WORKLOADTYPEID'] = rmt_workload_type_id\n else:\n msg = _(\"The workload type %s is not exist. Please create \"\n \"it on the array\") % workload_type_name\n LOG.error(msg)\n raise exception.InvalidInput(reason=msg)\n\n hypermetro_id = self.hypermetro.create_hypermetro(\n lun_id, lun_params, self.sync)\n\n return hypermetro_id\n\n def revert(self, result, volume, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n self.hypermetro.delete_hypermetro(volume)\n\n\nclass AddHyperMetroGroupTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(AddHyperMetroGroupTask, self).__init__(*args, **kwargs)\n self.hypermetro = hypermetro.HuaweiHyperMetro(\n local_cli, remote_cli, config)\n\n def execute(self, volume, hypermetro_id):\n if volume.group_id and hypermetro_id:\n self.hypermetro.add_hypermetro_to_group(\n volume.group_id, hypermetro_id)\n\n\nclass CreateReplicationTask(task.Task):\n default_provides = 'replication_id'\n\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(CreateReplicationTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n self.loc_client = local_cli\n self.rmt_client = remote_cli\n\n def execute(self, volume, lun_id, lun_info, opts):\n data = huawei_utils.get_replication_data(volume)\n pair_id = data.get('pair_id')\n\n if opts.get('replication_enabled') and not pair_id:\n lun_keys = ('CAPACITY', 'ALLOCTYPE', 'PREFETCHPOLICY',\n 'PREFETCHVALUE', 'WRITEPOLICY', 'DATATRANSFERPOLICY')\n lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}\n lun_params['NAME'] = huawei_utils.encode_name(volume.id)\n lun_params['DESCRIPTION'] = volume.name\n if (lun_info.get(\"WORKLOADTYPENAME\") and\n lun_info.get(\"WORKLOADTYPEID\")):\n workload_type_name = 
self.loc_client.get_workload_type_name(\n lun_info['WORKLOADTYPEID'])\n rmt_workload_type_id = self.rmt_client.get_workload_type_id(\n workload_type_name)\n if rmt_workload_type_id:\n lun_params['WORKLOADTYPEID'] = rmt_workload_type_id\n else:\n msg = _(\"The workload type %s is not exist. Please create \"\n \"it on the array\") % workload_type_name\n LOG.error(msg)\n raise exception.InvalidInput(reason=msg)\n\n pair_id = self.replication.create_replica(\n lun_id, lun_params, opts['replication_type'])\n elif not opts.get('replication_enabled') and pair_id:\n pair_id = None\n\n return pair_id\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n self.replication.delete_replica(result)\n\n\nclass AddReplicationGroupTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(AddReplicationGroupTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n\n def execute(self, volume, replication_id):\n if volume.group_id and replication_id:\n self.replication.add_replication_to_group(\n volume.group_id, replication_id)\n\n\nclass CheckLunExistTask(task.Task):\n default_provides = ('lun_info', 'lun_id')\n\n def __init__(self, client, *args, **kwargs):\n super(CheckLunExistTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, volume):\n lun_info = huawei_utils.get_lun_info(self.client, volume)\n if not lun_info:\n msg = _(\"Volume %s does not exist.\") % volume.id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return lun_info, lun_info['ID']\n\n\nclass CheckLunIsInUse(task.Task):\n def __init__(self, *args, **kwargs):\n super(CheckLunIsInUse, self).__init__(*args, **kwargs)\n\n def execute(self, opts, volume, lun_info):\n \"\"\"\n opts: come from LunOptsCheckTask\n lun_info: come from CheckLunExistTask\n \"\"\"\n add_hypermetro = False\n delete_hypermetro = False\n metadata = huawei_utils.get_volume_private_data(volume)\n in_use_lun = lun_info.get('EXPOSEDTOINITIATOR') == 'true'\n if opts.get('hypermetro'):\n if not metadata.get('hypermetro'):\n add_hypermetro = True\n else:\n if metadata.get('hypermetro'):\n delete_hypermetro = True\n\n if (add_hypermetro or delete_hypermetro) and in_use_lun:\n msg = _(\"Cann't add hypermetro to the volume in use.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n\nclass GetLunIDTask(task.Task):\n default_provides = 'lun_id'\n\n def __init__(self, client, *args, **kwargs):\n super(GetLunIDTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, volume):\n lun_info = huawei_utils.get_lun_info(self.client, volume)\n if not lun_info:\n LOG.error(\"Volume %s does not exist.\", volume.id)\n return None\n\n return lun_info['ID']\n\n\nclass CheckLunMappedTask(task.Task):\n def __init__(self, client, configuration, *args, **kwargs):\n super(CheckLunMappedTask, self).__init__(*args, **kwargs)\n self.client = client\n self.configuration = configuration\n\n def execute(self, lun_info):\n if lun_info.get('EXPOSEDTOINITIATOR') == 'true':\n msg = _(\"LUN %s has been mapped to host. 
Now force to \"\n \"delete it\") % lun_info['ID']\n LOG.warning(msg)\n huawei_utils.remove_lun_from_lungroup(\n self.client, lun_info[\"ID\"],\n self.configuration.force_delete_volume)\n\n\nclass DeleteHyperMetroTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(DeleteHyperMetroTask, self).__init__(*args, **kwargs)\n self.hypermetro = hypermetro.HuaweiHyperMetro(\n local_cli, remote_cli, config)\n\n def execute(self, volume, opts=None):\n metadata = huawei_utils.get_volume_private_data(volume)\n\n if ((not opts or not opts.get('hypermetro'))\n and metadata.get('hypermetro')):\n self.hypermetro.delete_hypermetro(volume)\n\n\nclass DeleteReplicationTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(DeleteReplicationTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n\n def execute(self, volume, opts=None):\n data = huawei_utils.get_replication_data(volume)\n pair_id = data.get('pair_id')\n if (not opts or not opts.get('replication_enabled')) and pair_id:\n self.replication.delete_replica(pair_id)\n\n\nclass DeleteQoSTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeleteQoSTask, self).__init__(*args, **kwargs)\n self.smartqos = smartx.SmartQos(client)\n\n def execute(self, lun_info):\n qos_id = lun_info.get('IOCLASSID')\n if qos_id:\n self.smartqos.remove(qos_id, lun_info['ID'])\n\n\nclass DeleteCacheTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeleteCacheTask, self).__init__(*args, **kwargs)\n self.smartcache = smartx.SmartCache(client)\n\n def execute(self, lun_info):\n cache_id = lun_info.get('SMARTCACHEPARTITIONID')\n if cache_id:\n self.smartcache.remove(cache_id, lun_info['ID'])\n\n\nclass DeletePartitionTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeletePartitionTask, self).__init__(*args, **kwargs)\n self.smartpartition = smartx.SmartPartition(client)\n\n def execute(self, lun_info):\n partition_id = lun_info.get('CACHEPARTITIONID')\n if partition_id:\n self.smartpartition.remove(partition_id, lun_info['ID'])\n\n\nclass DeleteLunTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeleteLunTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, lun_id):\n self.client.delete_lun(lun_id)\n\n\nclass CreateMigratedLunTask(task.Task):\n default_provides = ('tgt_lun_id', 'tgt_lun_info')\n\n def __init__(self, client, host, feature_support, *args, **kwargs):\n super(CreateMigratedLunTask, self).__init__(*args, **kwargs)\n self.client = client\n self.host = host\n self.feature_support = feature_support\n\n def execute(self, lun_info, opts=None):\n if not self.feature_support['SmartMigration']:\n msg = _(\"Huawei storage doesn't support SmartMigration.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n target_device = self.host['capabilities']['location_info']\n if target_device != self.client.device_id:\n msg = _(\"Migrate target %(tgt)s is not the same storage as \"\n \"%(org)s.\") % {'tgt': target_device,\n 'org': self.client.device_id}\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n pool_name = self.host['capabilities']['pool_name']\n pool_id = self.client.get_pool_id(pool_name)\n if not pool_id:\n msg = _(\"Pool %s doesn't exist in storage.\") % pool_name\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if opts:\n new_lun_type = 
opts.get('LUNType')\n tier_policy = opts.get('policy')\n else:\n new_lun_type = None\n tier_policy = None\n\n lun_keys = ('DESCRIPTION', 'ALLOCTYPE', 'CAPACITY', 'WRITEPOLICY',\n 'PREFETCHPOLICY', 'PREFETCHVALUE', 'DATATRANSFERPOLICY',\n 'OWNINGCONTROLLER')\n lun_params = {k: lun_info[k] for k in lun_keys if k in lun_info}\n lun_params['NAME'] = lun_info['NAME'][:-4] + '-mig'\n lun_params['PARENTID'] = pool_id\n if new_lun_type:\n lun_params['ALLOCTYPE'] = new_lun_type\n if tier_policy:\n lun_params['DATATRANSFERPOLICY'] = tier_policy\n if lun_info.get(\"WORKLOADTYPENAME\") and lun_info.get(\n \"WORKLOADTYPEID\"):\n lun_params[\"WORKLOADTYPEID\"] = lun_info[\"WORKLOADTYPEID\"]\n\n lun = self.client.create_lun(lun_params)\n return lun['ID'], lun\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_lun(result[0])\n\n\nclass CreateMigrateTask(task.Task):\n default_provides = 'migration_id'\n\n def __init__(self, client, *args, **kwargs):\n super(CreateMigrateTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, src_lun_id, tgt_lun_id):\n migration = self.client.create_lun_migration(src_lun_id, tgt_lun_id)\n return migration['ID']\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_lun_migration(result)\n\n\nclass WaitMigrateDoneTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(WaitMigrateDoneTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, migration_id, tgt_lun_id):\n def _migrate_done():\n migration = self.client.get_lun_migration(migration_id)\n if (migration['RUNNINGSTATUS'] in\n constants.MIGRATION_STATUS_IN_PROCESS):\n return False\n elif (migration['RUNNINGSTATUS'] in\n constants.MIGRATION_STATUS_COMPLETE):\n return True\n else:\n msg = _(\"Migration %s error.\") % migration_id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n huawei_utils.wait_for_condition(_migrate_done,\n constants.DEFAULT_WAIT_INTERVAL,\n constants.DEFAULT_WAIT_TIMEOUT)\n self.client.delete_lun_migration(migration_id)\n self.client.delete_lun(tgt_lun_id)\n\n\nclass CheckSnapshotExistTask(task.Task):\n default_provides = ('snapshot_info', 'snapshot_id')\n\n def __init__(self, client, *args, **kwargs):\n super(CheckSnapshotExistTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot):\n snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)\n if not snapshot_info:\n msg = _(\"Snapshot %s does not exist.\") % snapshot.id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return snapshot_info, snapshot_info['ID']\n\n\nclass GetSnapshotIDTask(task.Task):\n default_provides = 'snapshot_id'\n\n def __init__(self, client, *args, **kwargs):\n super(GetSnapshotIDTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot):\n snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)\n if not snapshot_info:\n LOG.error(\"Snapshot %s does not exist.\", snapshot.id)\n return None\n\n return snapshot_info['ID']\n\n\nclass CreateLunCopyTask(task.Task):\n default_provides = 'luncopy_id'\n\n def __init__(self, client, feature_support, configuration,\n *args, **kwargs):\n super(CreateLunCopyTask, self).__init__(*args, **kwargs)\n self.client = client\n self.feature_support = feature_support\n self.configuration = configuration\n\n def execute(self, volume, snapshot_id, lun_id):\n if not 
self.feature_support['HyperCopy']:\n msg = _(\"Huawei storage doesn't support HyperCopy.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n copy_name = huawei_utils.encode_name(volume.id)\n metadata = huawei_utils.get_volume_private_data(volume)\n copyspeed = metadata.get('copyspeed')\n if not copyspeed:\n copyspeed = self.configuration.lun_copy_speed\n elif copyspeed not in constants.LUN_COPY_SPEED_TYPES:\n msg = (_(\"LUN copy speed is: %(speed)s. It should be between \"\n \"%(low)s and %(high)s.\")\n % {\"speed\": copyspeed,\n \"low\": constants.LUN_COPY_SPEED_LOW,\n \"high\": constants.LUN_COPY_SPEED_HIGH})\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n luncopy_id = self.client.create_luncopy(\n copy_name, snapshot_id, lun_id, copyspeed)\n return luncopy_id\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_luncopy(result)\n\n\nclass WaitLunCopyDoneTask(task.Task):\n def __init__(self, client, configuration, *args, **kwargs):\n super(WaitLunCopyDoneTask, self).__init__(*args, **kwargs)\n self.client = client\n self.configuration = configuration\n\n def execute(self, luncopy_id):\n self.client.start_luncopy(luncopy_id)\n\n def _luncopy_done():\n luncopy = self.client.get_luncopy_info(luncopy_id)\n if luncopy['HEALTHSTATUS'] != constants.STATUS_HEALTH:\n msg = _(\"Luncopy %s is abnormal.\") % luncopy_id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return (luncopy['RUNNINGSTATUS'] in\n constants.LUNCOPY_STATUS_COMPLETE)\n huawei_utils.wait_for_condition(\n _luncopy_done, self.configuration.lun_copy_wait_interval,\n self.configuration.lun_timeout)\n\n self.client.delete_luncopy(luncopy_id)\n\n\nclass CreateClonePairTask(task.Task):\n default_provides = 'clone_pair_id'\n\n def __init__(self, client, feature_support, configuration,\n *args, **kwargs):\n super(CreateClonePairTask, self).__init__(*args, **kwargs)\n self.client = client\n self.feature_support = feature_support\n self.configuration = configuration\n\n def execute(self, source_id, target_id):\n clone_speed = self.configuration.lun_copy_speed\n clone_pair_id = self.client.create_clone_pair(\n source_id, target_id, clone_speed)\n return clone_pair_id\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_clone_pair(result)\n\n\nclass WaitClonePairDoneTask(task.Task):\n def __init__(self, client, configuration, *args, **kwargs):\n super(WaitClonePairDoneTask, self).__init__(*args, **kwargs)\n self.client = client\n self.configuration = configuration\n\n def execute(self, clone_pair_id):\n def _clone_pair_done():\n clone_pair_info = self.client.get_clone_pair_info(clone_pair_id)\n if clone_pair_info['copyStatus'] != constants.CLONE_STATUS_HEALTH:\n msg = _(\"ClonePair %s is abnormal.\") % clone_pair_id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n return (clone_pair_info['syncStatus'] in\n constants.CLONE_STATUS_COMPLETE)\n\n self.client.sync_clone_pair(clone_pair_id)\n huawei_utils.wait_for_condition(\n _clone_pair_done, self.configuration.lun_copy_wait_interval,\n self.configuration.lun_timeout)\n self.client.delete_clone_pair(clone_pair_id)\n\n\nclass CreateLunCloneTask(task.Task):\n default_provides = 'lun_id', 'lun_info'\n\n def __init__(self, client, *args, **kwargs):\n super(CreateLunCloneTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, volume, src_id):\n name = 
huawei_utils.encode_name(volume.id)\n lun_info = self.client.create_lunclone(src_id, name)\n lun_id = lun_info[\"ID\"]\n expected_size = int(volume.size) * constants.CAPACITY_UNIT\n try:\n if int(lun_info['CAPACITY']) < expected_size:\n self.client.extend_lun(lun_id, expected_size)\n self.client.split_lunclone(lun_id)\n except Exception:\n LOG.exception('Split clone lun %s error.', lun_id)\n self.client.delete_lun(lun_id)\n raise\n\n lun_info = self.client.get_lun_info_by_id(lun_id)\n return lun_info['ID'], lun_info\n\n\nclass LunClonePreCheckTask(task.Task):\n def __init__(self, *args, **kwargs):\n super(LunClonePreCheckTask, self).__init__(*args, **kwargs)\n\n @staticmethod\n def execute(volume, src_volume):\n if volume.volume_type_id != src_volume.volume_type_id:\n msg = _(\"Volume type must be the same as source \"\n \"for fast clone.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n\nclass CreateSnapshotTask(task.Task):\n default_provides = 'snapshot_id'\n\n def __init__(self, client, feature_support, *args, **kwargs):\n super(CreateSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n self.feature_support = feature_support\n\n def execute(self, snapshot):\n if not self.feature_support['HyperSnap']:\n msg = _(\"Huawei storage doesn't support snapshot.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n lun_info = huawei_utils.get_lun_info(self.client, snapshot.volume)\n if not lun_info:\n msg = _(\"Source volume %s to create snapshot does not exist.\"\n ) % snapshot.volume.id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n name = huawei_utils.encode_name(snapshot.id)\n snapshot_info = self.client.create_snapshot(\n lun_info['ID'], name, snapshot.id)\n return snapshot_info['ID']\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_snapshot(result)\n\n\nclass CreateTempSnapshotTask(task.Task):\n default_provides = 'snapshot_id'\n\n def __init__(self, client, feature_support, *args, **kwargs):\n super(CreateTempSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n self.feature_support = feature_support\n\n def execute(self, src_id):\n if not self.feature_support['HyperSnap']:\n msg = _(\"Huawei storage doesn't support snapshot.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n snap_id = six.text_type(uuid.uuid4())\n name = huawei_utils.encode_name(snap_id)\n snapshot_info = self.client.create_snapshot(src_id, name, snap_id)\n return snapshot_info['ID']\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.delete_snapshot(result)\n\n\nclass ActiveSnapshotTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(ActiveSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot_id):\n self.client.activate_snapshot(snapshot_id)\n\n def revert(self, snapshot_id):\n self.client.stop_snapshot(snapshot_id)\n\n\nclass WaitSnapshotReadyTask(task.Task):\n default_provides = 'snapshot_wwn'\n\n def __init__(self, client, *args, **kwargs):\n super(WaitSnapshotReadyTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot_id):\n def _snapshot_ready():\n self.snapshot = self.client.get_snapshot_info_by_id(snapshot_id)\n if self.snapshot['HEALTHSTATUS'] != constants.STATUS_HEALTH:\n msg = _(\"Snapshot %s is fault.\") % snapshot_id\n LOG.error(msg)\n raise 
exception.VolumeBackendAPIException(data=msg)\n\n return not (self.snapshot['RUNNINGSTATUS'] ==\n constants.SNAPSHOT_INITIALIZING)\n\n huawei_utils.wait_for_condition(_snapshot_ready,\n constants.DEFAULT_WAIT_INTERVAL,\n constants.DEFAULT_WAIT_TIMEOUT)\n return self.snapshot['WWN']\n\n\nclass DeleteSnapshotTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeleteSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot_info):\n if snapshot_info['RUNNINGSTATUS'] == constants.SNAPSHOT_ACTIVATED:\n self.client.stop_snapshot(snapshot_info['ID'])\n self.client.delete_snapshot(snapshot_info['ID'])\n\n\nclass DeleteTempSnapshotTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(DeleteTempSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot_id):\n self.client.stop_snapshot(snapshot_id)\n self.client.delete_snapshot(snapshot_id)\n\n\nclass RevertToSnapshotTask(task.Task):\n def __init__(self, client, rollback_speed, *args, **kwargs):\n super(RevertToSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n self.rollback_speed = rollback_speed\n\n def execute(self, snapshot_info, snapshot_id):\n running_status = snapshot_info.get(\"RUNNINGSTATUS\")\n health_status = snapshot_info.get(\"HEALTHSTATUS\")\n\n if running_status not in (\n constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED,\n constants.SNAPSHOT_RUNNING_STATUS_ROLLINGBACK):\n err_msg = (_(\"The running status %(status)s of snapshot %(name)s.\")\n % {\"status\": running_status, \"name\": snapshot_id})\n LOG.error(err_msg)\n raise exception.InvalidSnapshot(reason=err_msg)\n\n if health_status not in (constants.SNAPSHOT_HEALTH_STATUS_NORMAL,):\n err_msg = (_(\"The health status %(status)s of snapshot %(name)s.\")\n % {\"status\": running_status, \"name\": snapshot_id})\n LOG.error(err_msg)\n raise exception.InvalidSnapshot(reason=err_msg)\n\n if constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED == snapshot_info.get(\n 'RUNNINGSTATUS'):\n self.client.rollback_snapshot(snapshot_id, self.rollback_speed)\n\n def revert(self, result, snapshot_id, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.cancel_rollback_snapshot(snapshot_id)\n\n\nclass WaitSnapshotRollbackDoneTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(WaitSnapshotRollbackDoneTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot_id):\n def _snapshot_rollback_finish():\n snapshot_info = self.client.get_snapshot_info_by_id(snapshot_id)\n\n if snapshot_info.get('HEALTHSTATUS') not in (\n constants.SNAPSHOT_HEALTH_STATUS_NORMAL,):\n msg = _(\"The snapshot %s is abnormal.\") % snapshot_id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if (snapshot_info.get('ROLLBACKRATE') ==\n constants.SNAPSHOT_ROLLBACK_PROGRESS_FINISH or\n snapshot_info.get('ROLLBACKENDTIME') != '-1'):\n LOG.info(\"Snapshot %s rollback successful.\", snapshot_id)\n return True\n return False\n\n huawei_utils.wait_for_condition(_snapshot_rollback_finish,\n constants.DEFAULT_WAIT_INTERVAL,\n constants.DEFAULT_WAIT_TIMEOUT)\n\n\nclass ExtendVolumeTask(task.Task):\n default_provides = 'lun_info'\n\n def __init__(self, client, *args, **kwargs):\n super(ExtendVolumeTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, lun_id, new_size):\n lun_info = self.client.get_lun_info_by_id(lun_id)\n if int(lun_info['CAPACITY']) < new_size:\n 
self.client.extend_lun(lun_id, new_size)\n LOG.info('Extend LUN %(id)s to size %(new_size)s.',\n {'id': lun_id,\n 'new_size': new_size})\n lun_info = self.client.get_lun_info_by_id(lun_id)\n return lun_info\n\n\nclass ExtendHyperMetroTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(ExtendHyperMetroTask, self).__init__(*args, **kwargs)\n self.hypermetro = hypermetro.HuaweiHyperMetro(\n local_cli, remote_cli, config)\n self.local_cli = local_cli\n\n def execute(self, volume, new_size):\n metadata = huawei_utils.get_volume_private_data(volume)\n if not metadata.get('hypermetro'):\n return\n\n hypermetro = huawei_utils.get_hypermetro(self.local_cli, volume)\n if not hypermetro:\n msg = _('Volume %s is not in hypermetro pair') % volume.id\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n self.hypermetro.extend_hypermetro(hypermetro['ID'], new_size)\n\n\nclass ExtendReplicationTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(ExtendReplicationTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n\n def execute(self, volume, new_size):\n data = huawei_utils.get_replication_data(volume)\n pair_id = data.get('pair_id')\n if pair_id:\n self.replication.extend_replica(pair_id, new_size)\n\n\nclass UpdateLunTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(UpdateLunTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, lun_info, opts):\n data = {}\n compression_check = lun_info.get('ENABLECOMPRESSION') == 'true'\n if not opts['compression'] and compression_check:\n data[\"ENABLECOMPRESSION\"] = 'false'\n\n dedup_check = lun_info.get('ENABLESMARTDEDUP') == 'true'\n if not opts['dedup'] and dedup_check:\n data[\"ENABLESMARTDEDUP\"] = 'false'\n\n if (opts.get('policy') and\n opts['policy'] != lun_info.get('DATATRANSFERPOLICY')):\n data[\"DATATRANSFERPOLICY\"] = opts['policy']\n\n if data:\n self.client.update_lun(lun_info['ID'], data)\n\n\nclass UpdateQoSTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(UpdateQoSTask, self).__init__(*args, **kwargs)\n self.client = client\n self.smartqos = smartx.SmartQos(client)\n\n def execute(self, lun_info, opts):\n qos_id = lun_info.get('IOCLASSID')\n if opts.get('qos'):\n if qos_id:\n self.smartqos.update(qos_id, opts['qos'], lun_info['ID'])\n else:\n self.smartqos.add(opts['qos'], lun_info['ID'])\n elif qos_id:\n self.smartqos.remove(qos_id, lun_info['ID'])\n\n\nclass UpdateCacheTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(UpdateCacheTask, self).__init__(*args, **kwargs)\n self.smartcache = smartx.SmartCache(client)\n\n def execute(self, lun_info, opts):\n cache_id = lun_info.get('SMARTCACHEPARTITIONID')\n if opts.get('smartcache'):\n if cache_id:\n self.smartcache.update(\n cache_id, opts['cachename'], lun_info['ID'])\n else:\n self.smartcache.add(opts['cachename'], lun_info['ID'])\n elif cache_id:\n self.smartcache.remove(cache_id, lun_info['ID'])\n\n\nclass UpdatePartitionTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(UpdatePartitionTask, self).__init__(*args, **kwargs)\n self.smartpartition = smartx.SmartPartition(client)\n\n def execute(self, lun_info, opts):\n partition_id = lun_info.get('CACHEPARTITIONID')\n if opts.get('smartpartition'):\n if partition_id:\n self.smartpartition.update(\n partition_id, opts['partitionname'], lun_info['ID'])\n else:\n 
self.smartpartition.add(opts['partitionname'], lun_info['ID'])\n elif partition_id:\n self.smartpartition.remove(partition_id, lun_info['ID'])\n\n\nclass ManageVolumePreCheckTask(task.Task):\n default_provides = ('lun_info', 'lun_id')\n\n def __init__(self, client, volume, existing_ref, configuration,\n *args, **kwargs):\n super(ManageVolumePreCheckTask, self).__init__(*args, **kwargs)\n self.client = client\n self.volume = volume\n self.existing_ref = existing_ref\n self.configuration = configuration\n\n def _get_external_lun(self):\n lun_info = huawei_utils.get_external_lun_info(\n self.client, self.existing_ref)\n if not lun_info:\n msg = _('External lun %s not exist.') % self.existing_ref\n LOG.error(msg)\n raise exception.ManageExistingInvalidReference(\n existing_ref=self.existing_ref, reason=msg)\n\n return lun_info\n\n def _check_lun_abnormal(self, lun_info, *args):\n return lun_info['HEALTHSTATUS'] != constants.STATUS_HEALTH\n\n def _check_pool_inconsistency(self, lun_info, *args):\n pool = volume_utils.extract_host(self.volume.host, 'pool')\n return pool != lun_info['PARENTNAME']\n\n def _check_lun_in_use(self, lun_info, *args):\n return (lun_info.get('ISADD2LUNGROUP') == 'true' or\n lun_info.get('EXPOSEDTOINITIATOR') == 'true')\n\n def _check_lun_in_hypermetro(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('HyperMetro') == 'TRUE'\n\n def _check_lun_in_replication(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('RemoteReplication') == 'TRUE'\n\n def _check_lun_in_splitmirror(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('SplitMirror') == 'TRUE'\n\n def _check_lun_in_hypermirror(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('LUNMirror') == 'TRUE'\n\n def _check_lun_in_luncopy(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('LunCopy') == 'TRUE'\n\n def _check_lun_in_migration(self, lun_info, *args):\n rss = {}\n if 'HASRSSOBJECT' in lun_info:\n rss = json.loads(lun_info['HASRSSOBJECT'])\n return rss.get('LunMigration') == 'TRUE'\n\n def _check_lun_not_common(self, lun_info, *args):\n return (lun_info.get('MIRRORTYPE') != '0' or\n lun_info.get('SUBTYPE') != '0')\n\n def _check_lun_consistency(self, lun_info, opts):\n return ('LUNType' in opts and\n opts['LUNType'] != lun_info['ALLOCTYPE'])\n\n def _check_lun_dedup_consistency(self, lun_info, opts):\n dedup_flag = False\n if opts.get('dedup') is not None:\n dedup_enabled = lun_info['ENABLESMARTDEDUP'] == 'true'\n if opts['dedup'] != dedup_enabled:\n dedup_flag = True\n return dedup_flag\n\n def _check_lun_compresison_consistency(self, lun_info, opts):\n compression_flag = False\n if opts.get('compression') is not None:\n compression_enabled = lun_info['ENABLECOMPRESSION'] == 'true'\n if opts['compression'] != compression_enabled:\n compression_flag = True\n return compression_flag\n\n def execute(self, opts):\n lun_info = self._get_external_lun()\n\n for i in dir(self):\n if callable(getattr(self, i)) and i.startswith('_check_'):\n func = getattr(self, i)\n if func(lun_info, opts):\n msg = _(\"Volume managing pre check %s failed.\"\n ) % func.__name__\n LOG.error(msg)\n raise exception.ManageExistingInvalidReference(\n 
existing_ref=self.existing_ref, reason=msg)\n\n return lun_info, lun_info['ID']\n\n\nclass ManageLunTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(ManageLunTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, volume, lun_info):\n new_name = huawei_utils.encode_name(volume.id)\n self.client.rename_lun(lun_info['ID'], new_name, volume.name)\n\n def revert(self, result, lun_info, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.rename_lun(lun_info['ID'], lun_info['NAME'],\n lun_info['DESCRIPTION'])\n\n\nclass ManageSnapshotPreCheckTask(task.Task):\n default_provides = 'snapshot_info'\n\n def __init__(self, client, snapshot, existing_ref, *args, **kwargs):\n super(ManageSnapshotPreCheckTask, self).__init__(*args, **kwargs)\n self.client = client\n self.snapshot = snapshot\n self.existing_ref = existing_ref\n\n def _get_external_snapshot(self):\n snapshot_info = huawei_utils.get_external_snapshot_info(\n self.client, self.existing_ref)\n if not snapshot_info:\n msg = _('External snapshot %s not exist.') % self.existing_ref\n LOG.error(msg)\n raise exception.ManageExistingInvalidReference(\n existing_ref=self.existing_ref, reason=msg)\n\n return snapshot_info\n\n def _check_snapshot_abnormal(self, snapshot_info):\n return snapshot_info['HEALTHSTATUS'] != constants.STATUS_HEALTH\n\n def _check_snapshot_in_use(self, snapshot_info):\n return snapshot_info.get('EXPOSEDTOINITIATOR') == 'true'\n\n def _check_parent_volume_inconsistency(self, snapshot_info):\n parent_info = huawei_utils.get_lun_info(\n self.client, self.snapshot.volume)\n return (not parent_info or\n snapshot_info.get('PARENTID') != parent_info['ID'])\n\n def execute(self):\n snapshot_info = self._get_external_snapshot()\n for i in dir(self):\n if callable(getattr(self, i)) and i.startswith('_check_'):\n func = getattr(self, i)\n if func(snapshot_info):\n msg = _(\"Snapshot managing pre check %s failed.\"\n ) % func.__name__\n LOG.error(msg)\n raise exception.ManageExistingInvalidReference(\n existing_ref=self.existing_ref, reason=msg)\n\n return snapshot_info\n\n\nclass ManageSnapshotTask(task.Task):\n def __init__(self, client, *args, **kwargs):\n super(ManageSnapshotTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, snapshot, snapshot_info):\n new_name = huawei_utils.encode_name(snapshot.id)\n data = {'NAME': new_name}\n self.client.update_snapshot(snapshot_info['ID'], data)\n\n if (snapshot_info.get('RUNNINGSTATUS') ==\n constants.SNAPSHOT_UNACTIVATED):\n self.client.activate_snapshot(snapshot_info['ID'])\n\n\nclass GroupOptsCheckTask(task.Task):\n default_provides = 'opts'\n\n def __init__(self, *args, **kwargs):\n super(GroupOptsCheckTask, self).__init__(*args, **kwargs)\n\n def execute(self, opts):\n for opt in opts:\n huawei_utils.check_volume_type_valid(opt)\n return opts\n\n\nclass CreateHyperMetroGroupTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, feature_support,\n *args, **kwargs):\n super(CreateHyperMetroGroupTask, self).__init__(*args, **kwargs)\n self.hypermetro = hypermetro.HuaweiHyperMetro(\n local_cli, remote_cli, config)\n self.feature_support = feature_support\n\n def execute(self, group, opts):\n if any(opt for opt in opts if opt['hypermetro']):\n if not self.feature_support['HyperMetro']:\n msg = _(\"Huawei storage doesn't support HyperMetro.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n self.hypermetro.create_consistencygroup(group.id)\n\n def 
revert(self, result, group, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.hypermetro.delete_consistencygroup(group.id, [])\n\n\nclass CreateReplicationGroupTask(task.Task):\n def __init__(self, local_cli, remote_cli, config, feature_support,\n *args, **kwargs):\n super(CreateReplicationGroupTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n self.feature_support = feature_support\n\n def execute(self, group, opts):\n create_group = False\n replication_type = set()\n for opt in opts:\n if opt['replication_enabled']:\n create_group = True\n replication_type.add(opt['replication_type'])\n\n if create_group:\n if not self.feature_support['HyperReplication']:\n msg = _(\"Huawei storage doesn't support HyperReplication.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if len(replication_type) != 1:\n msg = _(\"Multiple replication types exist in group.\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n self.replication.create_group(group.id, replication_type.pop())\n\n def revert(self, result, group, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.replication.delete_group(group.id, [])\n\n\nclass GetISCSIConnectionTask(task.Task):\n default_provides = ('target_ips', 'target_iqns', 'target_eths',\n 'config_info')\n\n def __init__(self, client, iscsi_info, *args, **kwargs):\n super(GetISCSIConnectionTask, self).__init__(*args, **kwargs)\n self.client = client\n self.iscsi_info = iscsi_info\n\n def _get_config_target_ips(self, ini):\n if ini and ini.get('TargetIP'):\n target_ips = [ip.strip() for ip in ini['TargetIP'].split()\n if ip.strip()]\n else:\n target_ips = self.iscsi_info['default_target_ips']\n return target_ips\n\n def _get_port_ip(self, port_id):\n iqn_info = port_id.split(',', 1)[0]\n return iqn_info.split(':', 5)[5]\n\n def _get_port_iqn(self, port_id):\n iqn_info = port_id.split(',', 1)[0]\n return iqn_info.split('+')[1]\n\n def execute(self, connector):\n ip_iqn_map = {}\n target_ports = self.client.get_iscsi_tgt_ports()\n for port in target_ports:\n ip = self._get_port_ip(port['ID'])\n normalized_ip = ipaddress.ip_address(six.text_type(ip)).exploded\n ip_iqn_map[normalized_ip] = (port['ID'], port['ETHPORTID'])\n\n config_info = huawei_utils.find_config_info(self.iscsi_info,\n connector=connector)\n\n config_ips = self._get_config_target_ips(config_info)\n LOG.info('Configured iscsi ips %s.', config_ips)\n\n target_ips = []\n target_iqns = []\n target_eths = []\n\n for ip in config_ips:\n ip_addr = ipaddress.ip_address(six.text_type(ip))\n normalized_ip = ip_addr.exploded\n if normalized_ip in ip_iqn_map:\n iqn = self._get_port_iqn(ip_iqn_map[normalized_ip][0])\n target_iqns.append(iqn)\n target_eths.append(ip_iqn_map[normalized_ip][1])\n\n for iqn in target_iqns:\n ip = iqn.split(':', 5)[5]\n if ipaddress.ip_address(six.text_type(ip)).version == 6:\n ip = '[' + ip + ']'\n target_ips.append(ip)\n\n if not target_ips or not target_iqns or not target_eths:\n msg = _('Get iSCSI target ip&iqnð error.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n LOG.info('Get iscsi target_ips: %s, target_iqns: %s, target_eths: %s.',\n target_ips, target_iqns, target_eths)\n\n return target_ips, target_iqns, target_eths, config_info\n\n\nclass CreateHostTask(task.Task):\n default_provides = 'host_id'\n\n def __init__(self, client, iscsi_info, configuration, *args, **kwargs):\n super(CreateHostTask, 
self).__init__(*args, **kwargs)\n self.client = client\n self.iscsi_info = iscsi_info\n self.configuration = configuration\n\n def _get_new_alua_info(self, config):\n info = {'accessMode': '0'}\n if config.get('ACCESSMODE') and config.get('HYPERMETROPATHOPTIMIZED'):\n info.update({\n 'accessMode': config['ACCESSMODE'],\n 'hyperMetroPathOptimized': config['HYPERMETROPATHOPTIMIZED']\n })\n\n return info\n\n def execute(self, connector):\n orig_host_name = connector['host']\n host_id = huawei_utils.get_host_id(self.client, orig_host_name)\n info = {}\n if self.configuration.is_dorado_v6:\n config_info = huawei_utils.find_config_info(\n self.iscsi_info, connector=connector)\n info = self._get_new_alua_info(config_info)\n if host_id:\n self.client.update_host(host_id, info)\n if not host_id:\n host_name = huawei_utils.encode_host_name(orig_host_name)\n host_id = self.client.create_host(host_name, orig_host_name, info)\n return host_id\n\n\nclass AddISCSIInitiatorTask(task.Task):\n default_provides = 'chap_info'\n\n def __init__(self, client, iscsi_info, configuration, *args, **kwargs):\n super(AddISCSIInitiatorTask, self).__init__(*args, **kwargs)\n self.client = client\n self.iscsi_info = iscsi_info\n self.configuration = configuration\n\n def _get_chap_info(self, config):\n chap_config = config.get('CHAPinfo')\n if not chap_config:\n return {}\n\n chap_name, chap_password = chap_config.split(';')\n return {'CHAPNAME': chap_name,\n 'CHAPPASSWORD': chap_password}\n\n def _get_alua_info(self, config):\n alua_info = {'MULTIPATHTYPE': '0'}\n if config.get('ACCESSMODE') and self.configuration.is_dorado_v6:\n return alua_info\n\n if config.get('ALUA'):\n alua_info['MULTIPATHTYPE'] = config['ALUA']\n\n if alua_info['MULTIPATHTYPE'] == '1':\n for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'):\n if config.get(k):\n alua_info[k] = config[k]\n\n return alua_info\n\n def execute(self, connector, host_id, config_info):\n initiator = connector['initiator']\n self.client.add_iscsi_initiator(initiator)\n\n alua_info = self._get_alua_info(config_info)\n self.client.associate_iscsi_initiator_to_host(\n initiator, host_id, alua_info)\n\n chap_info = self._get_chap_info(config_info)\n ini_info = self.client.get_iscsi_initiator(initiator)\n if (ini_info['USECHAP'] == 'true' and not chap_info) or (\n ini_info['USECHAP'] == 'false' and chap_info):\n self.client.update_iscsi_initiator_chap(initiator, chap_info)\n\n return chap_info\n\n\nclass CreateHostGroupTask(task.Task):\n default_provides = 'hostgroup_id'\n\n def __init__(self, client, *args, **kwargs):\n super(CreateHostGroupTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, host_id):\n hostgroup_name = constants.HOSTGROUP_PREFIX + host_id\n hostgroup_id = self.client.create_hostgroup(hostgroup_name)\n self.client.associate_host_to_hostgroup(hostgroup_id, host_id)\n return hostgroup_id\n\n\nclass CreateLunGroupTask(task.Task):\n default_provides = 'lungroup_id'\n\n def __init__(self, client, configuration, *args, **kwargs):\n super(CreateLunGroupTask, self).__init__(*args, **kwargs)\n self.client = client\n self.configuration = configuration\n\n def execute(self, host_id, lun_id, lun_type):\n lungroup_name = constants.LUNGROUP_PREFIX + host_id\n lungroup_id = self.client.create_lungroup(lungroup_name)\n mapping_view = self.client.get_mappingview_by_lungroup_id(lungroup_id)\n is_associated_host = True if mapping_view else False\n self.client.associate_lun_to_lungroup(\n lungroup_id, lun_id, lun_type,\n 
self.configuration.is_dorado_v6, is_associated_host)\n return lungroup_id\n\n def revert(self, result, lun_id, lun_type, **kwargs):\n if isinstance(result, failure.Failure):\n return\n self.client.remove_lun_from_lungroup(result, lun_id, lun_type)\n\n\nclass CreateMappingViewTask(task.Task):\n default_provides = ('mappingview_id', 'hostlun_id', 'aval_host_lun_ids')\n\n def __init__(self, client, *args, **kwargs):\n super(CreateMappingViewTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def _get_hostlun_id(self, func, host_id, lun_id):\n hostlun_id = func(host_id, lun_id)\n if hostlun_id is None:\n import time\n time.sleep(3)\n hostlun_id = func(host_id, lun_id)\n\n if hostlun_id is None:\n msg = _(\"Can not get hostlun id. Maybe the storage is busy, \"\n \"Please try it later\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n return hostlun_id\n\n def execute(self, lun_id, lun_type, host_id, hostgroup_id, lungroup_id,\n lun_info, portgroup_id=None):\n mappingview_name = constants.MAPPING_VIEW_PREFIX + host_id\n mappingview_id = self.client.create_mappingview(mappingview_name)\n self.client.associate_hostgroup_to_mappingview(\n mappingview_id, hostgroup_id)\n self.client.associate_lungroup_to_mappingview(\n mappingview_id, lungroup_id)\n if portgroup_id:\n self.client.associate_portgroup_to_mappingview(\n mappingview_id, portgroup_id)\n\n if lun_type == constants.LUN_TYPE:\n hostlun_id = self._get_hostlun_id(\n self.client.get_lun_host_lun_id, host_id, lun_info)\n else:\n hostlun_id = self._get_hostlun_id(\n self.client.get_snapshot_host_lun_id, host_id, lun_id)\n\n mappingview_info = self.client.get_mappingview_by_id(mappingview_id)\n aval_host_lun_ids = json.loads(\n mappingview_info['AVAILABLEHOSTLUNIDLIST'])\n return mappingview_id, hostlun_id, aval_host_lun_ids\n\n\nclass GetISCSIPropertiesTask(task.Task):\n default_provides = 'mapping_info'\n\n def execute(self, connector, hostlun_id, target_iqns, target_ips,\n chap_info, mappingview_id, aval_host_lun_ids, lun_id,\n lun_info):\n hostlun_id = int(hostlun_id)\n mapping_info = {\n 'target_discovered': False,\n 'hostlun_id': hostlun_id,\n 'mappingview_id': mappingview_id,\n 'aval_host_lun_ids': aval_host_lun_ids,\n 'lun_id': lun_id,\n }\n\n if connector.get('multipath'):\n mapping_info.update({\n 'target_iqns': target_iqns,\n 'target_portals': ['%s:3260' % ip for ip in target_ips],\n 'target_luns': [hostlun_id] * len(target_ips),\n })\n else:\n mapping_info.update({\n 'target_iqn': target_iqns[0],\n 'target_portal': '%s:3260' % target_ips[0],\n 'target_lun': hostlun_id,\n })\n\n if chap_info:\n mapping_info['auth_method'] = 'CHAP'\n mapping_info['auth_username'] = chap_info['CHAPNAME']\n mapping_info['auth_password'] = chap_info['CHAPPASSWORD']\n\n if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE:\n mapping_info['discard'] = True\n\n return mapping_info\n\n\nclass GetHyperMetroRemoteLunTask(task.Task):\n default_provides = ('lun_id', 'lun_info')\n\n def __init__(self, client, hypermetro_id, *args, **kwargs):\n super(GetHyperMetroRemoteLunTask, self).__init__(*args, **kwargs)\n self.client = client\n self.hypermetro_id = hypermetro_id\n\n def execute(self):\n hypermetro_info = self.client.get_hypermetro_by_id(self.hypermetro_id)\n remote_lun_id = hypermetro_info['LOCALOBJID']\n remote_lun_info = self.client.get_lun_info_by_id(remote_lun_id)\n return remote_lun_id, remote_lun_info\n\n\nclass GetLunMappingTask(task.Task):\n default_provides = ('mappingview_id', 'lungroup_id', 'hostgroup_id',\n 
'portgroup_id', 'host_id')\n\n def __init__(self, client, *args, **kwargs):\n super(GetLunMappingTask, self).__init__(*args, **kwargs)\n self.client = client\n\n def execute(self, connector, lun_id):\n if connector is None or 'host' not in connector:\n mappingview_id, lungroup_id, hostgroup_id, portgroup_id, host_id = (\n huawei_utils.get_mapping_info(self.client, lun_id))\n return (mappingview_id, lungroup_id, hostgroup_id, portgroup_id,\n host_id)\n host_name = connector['host']\n host_id = huawei_utils.get_host_id(self.client, host_name)\n if not host_id:\n LOG.warning('Host %s not exist, return success for '\n 'connection termination.', host_name)\n return None, None, None, None, None\n\n mappingview_name = constants.MAPPING_VIEW_PREFIX + host_id\n mappingview = self.client.get_mappingview_by_name(mappingview_name)\n if not mappingview:\n LOG.warning('Mappingview %s not exist, return success for '\n 'connection termination.', mappingview_name)\n return None, None, None, None, host_id\n\n lungroup_id = self.client.get_lungroup_in_mappingview(\n mappingview['ID'])\n portgroup_id = self.client.get_portgroup_in_mappingview(\n mappingview['ID'])\n hostgroup_id = self.client.get_hostgroup_in_mappingview(\n mappingview['ID'])\n\n return (mappingview['ID'], lungroup_id, hostgroup_id, portgroup_id,\n host_id)\n\n\nclass ClearLunMappingTask(task.Task):\n default_provides = 'ini_tgt_map'\n\n def __init__(self, client, configuration, fc_san=None, is_fc=False, *args, **kwargs):\n super(ClearLunMappingTask, self).__init__(*args, **kwargs)\n self.client = client\n self.fc_san = fc_san\n self.is_fc = is_fc\n self.configuration = configuration\n\n def _get_obj_count_of_lungroup(self, lungroup_id):\n lun_count = self.client.get_lun_count_of_lungroup(lungroup_id)\n snap_count = self.client.get_snapshot_count_of_lungroup(lungroup_id)\n return lun_count + snap_count\n\n def _delete_portgroup(self, mappingview_id, portgroup_id):\n self.client.remove_portgroup_from_mappingview(\n mappingview_id, portgroup_id)\n\n eth_ports = self.client.get_eth_ports_in_portgroup(portgroup_id)\n fc_ports = self.client.get_fc_ports_in_portgroup(portgroup_id)\n for p in [p['ID'] for p in eth_ports] + [p['ID'] for p in fc_ports]:\n self.client.remove_port_from_portgroup(portgroup_id, p)\n self.client.delete_portgroup(portgroup_id)\n\n def _delete_lungroup(self, mappingview_id, lungroup_id):\n self.client.remove_lungroup_from_mappingview(\n mappingview_id, lungroup_id)\n self.client.delete_lungroup(lungroup_id)\n\n def _delete_hostgroup(self, mappingview_id, hostgroup_id, host_id):\n self.client.remove_hostgroup_from_mappingview(\n mappingview_id, hostgroup_id)\n self.client.remove_host_from_hostgroup(hostgroup_id, host_id)\n self.client.delete_hostgroup(hostgroup_id)\n\n def _delete_host(self, host_id):\n iscsi_initiators = self.client.get_host_iscsi_initiators(host_id)\n for ini in iscsi_initiators:\n self.client.remove_iscsi_initiator_from_host(ini)\n\n fc_initiators = self.client.get_host_fc_initiators(host_id)\n for ini in fc_initiators:\n self.client.remove_fc_initiator_from_host(ini)\n\n self.client.delete_host(host_id)\n\n def _get_ini_tgt_map(self, connector, host_id):\n ini_tgt_map = {}\n portgroup = self.client.get_portgroup_by_name(\n constants.PORTGROUP_PREFIX + host_id)\n if portgroup:\n ports = self.client.get_fc_ports_in_portgroup(portgroup['ID'])\n port_wwns = [p['WWN'] for p in ports]\n wwns = map(lambda x: x.lower(), connector['wwpns'])\n for wwn in wwns:\n ini_tgt_map[wwn] = port_wwns\n\n return 
ini_tgt_map\n\n def execute(self, connector, lun_id, lun_type, host_id, mappingview_id,\n lungroup_id, hostgroup_id, portgroup_id):\n obj_count = 0\n if lun_id and lungroup_id:\n self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type)\n obj_count = self._get_obj_count_of_lungroup(lungroup_id)\n\n # If lungroup still has member objects, don't clear mapping relation.\n if obj_count > 0:\n LOG.info('Lungroup %(lg)s still has %(count)s members.',\n {'lg': lungroup_id, 'count': obj_count})\n return {}\n if self.configuration.retain_storage_mapping:\n return {}\n\n ini_tgt_map = {}\n if self.fc_san and host_id:\n ini_tgt_map = self._get_ini_tgt_map(connector, host_id)\n\n if mappingview_id and portgroup_id:\n self._delete_portgroup(mappingview_id, portgroup_id)\n if mappingview_id and not self.is_fc:\n self.client.update_iscsi_initiator_chap(\n connector.get('initiator'), chap_info=None)\n if mappingview_id and lungroup_id:\n self._delete_lungroup(mappingview_id, lungroup_id)\n if mappingview_id and hostgroup_id:\n self._delete_hostgroup(mappingview_id, hostgroup_id, host_id)\n if mappingview_id:\n self.client.delete_mapping_view(mappingview_id)\n if host_id and not self.client.is_host_associate_inband_lun(host_id):\n self._delete_host(host_id)\n\n return ini_tgt_map\n\n\nclass GetFCConnectionTask(task.Task):\n default_provides = ('ini_tgt_map', 'tgt_port_wwns')\n\n def __init__(self, client, fc_san, configuration, *args, **kwargs):\n super(GetFCConnectionTask, self).__init__(*args, **kwargs)\n self.client = client\n self.fc_san = fc_san\n self.configuration = configuration\n\n def _get_fc_ports(self, wwns):\n contr_map = {}\n slot_map = {}\n port_map = {}\n\n fc_ports = self.client.get_fc_ports()\n for port in fc_ports:\n if port['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED:\n contr = port['PARENTID'].split('.')[0]\n slot = port['PARENTID']\n port_wwn = port['WWN']\n\n if contr not in contr_map:\n contr_map[contr] = [slot]\n elif slot not in contr_map[contr]:\n contr_map[contr].append(slot)\n\n if slot not in slot_map:\n slot_map[slot] = [port_wwn]\n elif port_wwn not in slot_map[slot]:\n slot_map[slot].append(port_wwn)\n\n port_map[port_wwn] = {\n 'id': port['ID'],\n 'runspeed': int(port['RUNSPEED']),\n 'slot': slot,\n }\n\n fabrics = self._get_fabric(wwns, list(port_map.keys()))\n if not fabrics:\n msg = _(\"No valid fabric connection..\")\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return contr_map, slot_map, port_map, fabrics\n\n def _get_fabric(self, ini_port_wwns, tgt_port_wwns):\n ini_tgt_map = self.fc_san.get_device_mapping_from_network(\n ini_port_wwns, tgt_port_wwns)\n\n def _filter_not_connected_fabric(fabric_name, fabric):\n ini_port_wwn_list = fabric.get('initiator_port_wwn_list')\n tgt_port_wwn_list = fabric.get('target_port_wwn_list')\n\n if not ini_port_wwn_list or not tgt_port_wwn_list:\n LOG.warning(\"Fabric %(fabric_name)s doesn't really \"\n \"connect host and array: %(fabric)s.\",\n {'fabric_name': fabric_name,\n 'fabric': fabric})\n return None\n\n return set(ini_port_wwn_list), set(tgt_port_wwn_list)\n\n valid_fabrics = []\n for fabric in ini_tgt_map:\n pair = _filter_not_connected_fabric(fabric, ini_tgt_map[fabric])\n if pair:\n valid_fabrics.append(pair)\n\n LOG.info(\"Got fabric: %s.\", valid_fabrics)\n return valid_fabrics\n\n def _count_port_weight(self, port):\n port_bandwidth = port['runspeed']\n portgroup_ids = self.client.get_portgroup_by_port_id(port['id'], 212)\n weight = 1.0 / port_bandwidth if port_bandwidth > 0 
else 1.0\n\n return len(portgroup_ids), weight\n\n def _select_port_per_fabric(self, port_map, candid_ports, used_slots):\n used_slot_pairs = []\n other_slot_pairs = []\n for p in candid_ports:\n weight = self._count_port_weight(port_map[p])\n\n if port_map[p]['slot'] in used_slots:\n used_slot_pairs.append((weight, p))\n else:\n other_slot_pairs.append((weight, p))\n\n new_port = None\n if other_slot_pairs:\n sorted_pairs = sorted(other_slot_pairs, key=lambda a: a[0])\n new_port = sorted_pairs[0][1]\n if not new_port and used_slot_pairs:\n sorted_pairs = sorted(used_slot_pairs, key=lambda a: a[0])\n new_port = sorted_pairs[0][1]\n\n return new_port\n\n def _select_ports_per_contr(self, fabrics, slots, slot_map, port_map):\n contr_ports = set()\n for slot in slots:\n contr_ports.update(slot_map[slot])\n\n if len(fabrics) == 1:\n select_fabrics = fabrics * 2\n else:\n select_fabrics = fabrics\n\n used_slots = set()\n selected_ports = set()\n for fabric in select_fabrics:\n new_port = self._select_port_per_fabric(\n port_map, fabric[1] & contr_ports, used_slots)\n if new_port:\n selected_ports.add(new_port)\n used_slots.add(port_map[new_port]['slot'])\n\n return selected_ports\n\n def _get_ports_in_use(self, host_id):\n portgroup = self.client.get_portgroup_by_name(\n constants.PORTGROUP_PREFIX + host_id)\n if not portgroup:\n return []\n ports = self.client.get_fc_ports_in_portgroup(portgroup['ID'])\n return [p['WWN'] for p in ports]\n\n def _get_fc_zone(self, wwns, host_id):\n selected_ports = set()\n ini_tgt_map = {}\n\n used_ports = self._get_ports_in_use(host_id)\n if not used_ports:\n contr_map, slot_map, port_map, fabrics = self._get_fc_ports(wwns)\n for contr in contr_map:\n ports = self._select_ports_per_contr(\n fabrics, contr_map[contr], slot_map, port_map)\n selected_ports.update(ports)\n\n for fabric in fabrics:\n for ini in fabric[0]:\n ini_tgt_map[ini] = list(selected_ports & fabric[1])\n\n return ini_tgt_map, list(selected_ports) + used_ports\n\n def _get_divided_wwns(self, wwns, host_id):\n invalid_wwns, effective_wwns = [], []\n for wwn in wwns:\n wwn_info = self.client.get_fc_init_info(wwn)\n if not wwn_info:\n LOG.info(\"%s is not found in device, ignore it.\", wwn)\n continue\n\n if wwn_info.get('RUNNINGSTATUS') == constants.FC_INIT_ONLINE:\n if wwn_info.get('ISFREE') == 'true':\n effective_wwns.append(wwn)\n continue\n\n if wwn_info.get('PARENTTYPE') == constants.PARENT_TYPE_HOST \\\n and wwn_info.get('PARENTID') == host_id:\n effective_wwns.append(wwn)\n continue\n\n invalid_wwns.append(wwn)\n\n return invalid_wwns, effective_wwns\n\n def _get_fc_link(self, wwns, host_id):\n invalid_wwns, effective_wwns = self._get_divided_wwns(wwns, host_id)\n\n if invalid_wwns:\n if (self.configuration.min_fc_ini_online ==\n constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE):\n msg = _(\"There are invalid initiators %s. 
If you want to \"\n \"continue to attach volume to host, configure \"\n \"MinFCIniOnline in the XML file.\") % invalid_wwns\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if len(effective_wwns) < self.configuration.min_fc_ini_online:\n msg = ((\"The number of online fc initiator %(wwns)s less than\"\n \" the set number: %(set)s.\")\n % {\"wwns\": effective_wwns,\n \"set\": self.configuration.min_fc_ini_online})\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n ini_tgt_map = {}\n tgt_port_wwns = set()\n\n for ini in effective_wwns:\n tgts = self.client.get_fc_target_wwpns(ini)\n ini_tgt_map[ini] = tgts\n tgt_port_wwns.update(tgts)\n\n return ini_tgt_map, list(tgt_port_wwns)\n\n def execute(self, connector, host_id):\n wwns = map(lambda x: x.lower(), connector['wwpns'])\n\n if self.fc_san:\n ini_tgt_map, tgt_port_wwns = self._get_fc_zone(wwns, host_id)\n else:\n ini_tgt_map, tgt_port_wwns = self._get_fc_link(wwns, host_id)\n\n if not tgt_port_wwns:\n msg = _('No fc connection for wwns %s.') % wwns\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n return ini_tgt_map, tgt_port_wwns\n\n\nclass AddFCInitiatorTask(task.Task):\n def __init__(self, client, fc_info, configuration, *args, **kwargs):\n super(AddFCInitiatorTask, self).__init__(*args, **kwargs)\n self.client = client\n self.fc_info = fc_info\n self.configuration = configuration\n\n def _get_alua_info(self, config):\n alua_info = {'MULTIPATHTYPE': '0'}\n if config.get('ACCESSMODE') and self.configuration.is_dorado_v6:\n return alua_info\n\n if config.get('ALUA'):\n alua_info['MULTIPATHTYPE'] = config['ALUA']\n\n if alua_info['MULTIPATHTYPE'] == '1':\n for k in ('FAILOVERMODE', 'SPECIALMODETYPE', 'PATHTYPE'):\n if config.get(k):\n alua_info[k] = config[k]\n\n return alua_info\n\n def execute(self, host_id, ini_tgt_map, connector):\n for ini in ini_tgt_map:\n self.client.add_fc_initiator(ini)\n\n config_info = huawei_utils.find_config_info(self.fc_info, connector,\n initiator=ini)\n alua_info = self._get_alua_info(config_info)\n self.client.associate_fc_initiator_to_host(host_id, ini, alua_info)\n\n\nclass CreateFCPortGroupTask(task.Task):\n default_provides = 'portgroup_id'\n\n def __init__(self, client, fc_san, *args, **kwargs):\n super(CreateFCPortGroupTask, self).__init__(*args, **kwargs)\n self.client = client\n self.fc_san = fc_san\n\n def _get_fc_ports(self):\n port_map = {}\n fc_ports = self.client.get_fc_ports()\n for port in fc_ports:\n port_map[port['WWN']] = port['ID']\n return port_map\n\n def _get_ports_to_add(self, ini_tgt_map):\n ports = set()\n for tgts in six.itervalues(ini_tgt_map):\n ports |= set(tgts)\n return ports\n\n def execute(self, host_id, ini_tgt_map):\n if not self.fc_san:\n return None\n\n portgroup_name = constants.PORTGROUP_PREFIX + host_id\n portgroup_id = self.client.create_portgroup(portgroup_name)\n port_map = self._get_fc_ports()\n ports = self._get_ports_to_add(ini_tgt_map)\n for port in ports:\n self.client.add_port_to_portgroup(portgroup_id, port_map[port])\n return portgroup_id\n\n def revert(self, result, ini_tgt_map, **kwargs):\n if isinstance(result, failure.Failure):\n return\n if result:\n port_map = self._get_fc_ports()\n ports = self._get_ports_to_add(ini_tgt_map)\n for port in ports:\n self.client.remove_port_from_portgroup(result, port_map[port])\n\n\nclass GetFCPropertiesTask(task.Task):\n default_provides = 'mapping_info'\n\n def execute(self, ini_tgt_map, tgt_port_wwns, hostlun_id, mappingview_id,\n 
aval_host_lun_ids, lun_id, lun_info):\n hostlun_id = int(hostlun_id)\n mapping_info = {\n 'hostlun_id': hostlun_id,\n 'mappingview_id': mappingview_id,\n 'aval_host_lun_ids': aval_host_lun_ids,\n 'target_discovered': True,\n 'target_wwn': tgt_port_wwns,\n 'target_lun': hostlun_id,\n 'initiator_target_map': ini_tgt_map,\n 'lun_id': lun_id,\n }\n\n if lun_info.get('ALLOCTYPE') == constants.THIN_LUNTYPE:\n mapping_info['discard'] = True\n\n return mapping_info\n\n\nclass ClassifyVolumeTask(task.Task):\n default_provides = ('normal_volumes', 'replication_volumes')\n\n def execute(self, volumes):\n normal_volumes = []\n replication_volumes = []\n\n for v in volumes:\n data = huawei_utils.to_dict(v.replication_driver_data)\n if 'pair_id' in data:\n replication_volumes.append(v)\n else:\n normal_volumes.append(v)\n\n return normal_volumes, replication_volumes\n\n\nclass FailoverVolumeTask(task.Task):\n default_provides = 'volumes_update'\n\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(FailoverVolumeTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n\n def _failover_normal_volumes(self, volumes):\n volumes_update = []\n for v in volumes:\n volume_update = {'volume_id': v.id,\n 'updates': {'status': 'error'}}\n volumes_update.append(volume_update)\n\n return volumes_update\n\n def execute(self, replication_volumes, normal_volumes):\n volumes_update = self.replication.failover(replication_volumes)\n volumes_update += self._failover_normal_volumes(normal_volumes)\n return volumes_update\n\n\nclass FailbackVolumeTask(task.Task):\n default_provides = 'volumes_update'\n\n def __init__(self, local_cli, remote_cli, config, *args, **kwargs):\n super(FailbackVolumeTask, self).__init__(*args, **kwargs)\n self.replication = replication.ReplicationManager(\n local_cli, remote_cli, config)\n\n def _failback_normal_volumes(self, volumes):\n volumes_update = []\n for v in volumes:\n volume_update = {'volume_id': v.id,\n 'updates': {'status': 'available'}}\n volumes_update.append(volume_update)\n\n return volumes_update\n\n def execute(self, replication_volumes, normal_volumes):\n volumes_update = self.replication.failback(replication_volumes)\n volumes_update += self._failback_normal_volumes(normal_volumes)\n return volumes_update\n\n\ndef create_volume(volume, local_cli, hypermetro_rmt_cli, replication_rmt_cli,\n configuration, feature_support):\n store_spec = {'volume': volume}\n\n work_flow = linear_flow.Flow('create_volume')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration),\n CreateLunTask(local_cli, configuration, feature_support),\n WaitLunOnlineTask(local_cli),\n AddQoSTask(local_cli, configuration),\n AddCacheTask(local_cli),\n AddPartitionTask(local_cli),\n CreateHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration,\n is_sync=False),\n AddHyperMetroGroupTask(\n local_cli, hypermetro_rmt_cli, configuration),\n CreateReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n AddReplicationGroupTask(\n local_cli, replication_rmt_cli, configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n lun_id = engine.storage.fetch('lun_id')\n lun_info = engine.storage.fetch('lun_info')\n hypermetro_id = engine.storage.fetch('hypermetro_id')\n replication_id = engine.storage.fetch('replication_id')\n return lun_id, lun_info['WWN'], hypermetro_id, replication_id\n\n\ndef delete_volume(volume, local_cli, hypermetro_rmt_cli, 
replication_rmt_cli,\n configuration):\n store_spec = {'volume': volume}\n work_flow = linear_flow.Flow('delete_volume')\n work_flow.add(\n CheckLunExistTask(local_cli),\n CheckLunMappedTask(local_cli,\n configuration),\n DeleteReplicationTask(local_cli, replication_rmt_cli,\n configuration),\n DeleteHyperMetroTask(local_cli, hypermetro_rmt_cli,\n configuration),\n DeletePartitionTask(local_cli),\n DeleteCacheTask(local_cli),\n DeleteQoSTask(local_cli),\n DeleteLunTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef migrate_volume(volume, host, local_cli, feature_support, configuration):\n store_spec = {'volume': volume}\n\n work_flow = linear_flow.Flow('migrate_volume')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration),\n CheckLunExistTask(local_cli),\n CreateMigratedLunTask(local_cli, host, feature_support),\n WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}),\n CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}),\n WaitMigrateDoneTask(local_cli),\n AddCacheTask(local_cli),\n AddPartitionTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef create_volume_from_snapshot(\n volume, src_obj, local_cli, hypermetro_rmt_cli, replication_rmt_cli,\n configuration, feature_support):\n store_spec = {'volume': volume}\n metadata = huawei_utils.get_volume_metadata(volume)\n work_flow = linear_flow.Flow('create_volume_from_snapshot')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration),\n CheckSnapshotExistTask(local_cli, inject={'snapshot': src_obj}))\n\n if (strutils.bool_from_string(metadata.get('fastclone', False)) or\n (metadata.get('fastclone') is None and\n configuration.clone_mode == \"fastclone\")):\n work_flow.add(\n LunClonePreCheckTask(inject={'src_volume': src_obj}),\n CreateLunCloneTask(local_cli,\n rebind={'src_id': 'snapshot_id'})\n )\n elif configuration.is_dorado_v6:\n work_flow.add(\n CreateLunTask(local_cli, configuration, feature_support,\n inject={\"src_size\": src_obj.volume_size}),\n WaitLunOnlineTask(local_cli),\n CreateClonePairTask(local_cli, feature_support, configuration,\n rebind={'source_id': 'snapshot_id',\n 'target_id': 'lun_id'}),\n WaitClonePairDoneTask(local_cli, configuration),)\n else:\n work_flow.add(\n CreateLunTask(local_cli, configuration, feature_support),\n WaitLunOnlineTask(local_cli),\n CreateLunCopyTask(local_cli, feature_support, configuration),\n WaitLunCopyDoneTask(local_cli, configuration),)\n\n general_params = {'local_cli': local_cli,\n 'hypermetro_rmt_cli': hypermetro_rmt_cli,\n 'replication_rmt_cli': replication_rmt_cli,\n 'configuration': configuration}\n return _create_volume_from_src(\n work_flow, volume, store_spec, general_params)\n\n\ndef create_volume_from_volume(\n volume, src_obj, local_cli, hypermetro_rmt_cli, replication_rmt_cli,\n configuration, feature_support):\n store_spec = {'volume': volume}\n metadata = huawei_utils.get_volume_metadata(volume)\n work_flow = linear_flow.Flow('create_volume_from_volume')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration),\n CheckLunExistTask(local_cli, provides=('src_lun_info', 'src_id'),\n inject={'volume': src_obj}),\n )\n\n if (strutils.bool_from_string(metadata.get('fastclone', False)) or\n (metadata.get('fastclone') is None and\n configuration.clone_mode == \"fastclone\")):\n work_flow.add(\n LunClonePreCheckTask(inject={'src_volume': src_obj}),\n CreateLunCloneTask(local_cli)\n 
)\n elif configuration.is_dorado_v6:\n work_flow.add(\n CreateLunTask(local_cli, configuration, feature_support,\n inject={\"src_size\": src_obj.size}),\n WaitLunOnlineTask(local_cli),\n CreateClonePairTask(local_cli, feature_support, configuration,\n rebind={'source_id': 'src_id',\n 'target_id': 'lun_id'}),\n WaitClonePairDoneTask(local_cli, configuration),)\n else:\n work_flow.add(\n CreateTempSnapshotTask(local_cli, feature_support),\n WaitSnapshotReadyTask(local_cli),\n ActiveSnapshotTask(local_cli),\n CreateLunTask(local_cli, configuration, feature_support),\n WaitLunOnlineTask(local_cli),\n CreateLunCopyTask(local_cli, feature_support, configuration),\n WaitLunCopyDoneTask(local_cli, configuration),\n DeleteTempSnapshotTask(local_cli),\n )\n\n general_params = {'local_cli': local_cli,\n 'hypermetro_rmt_cli': hypermetro_rmt_cli,\n 'replication_rmt_cli': replication_rmt_cli,\n 'configuration': configuration}\n return _create_volume_from_src(\n work_flow, volume, store_spec, general_params)\n\n\ndef _create_volume_from_src(\n work_flow, volume, store_spec, general_params):\n \"\"\"\n Extracting Common Methods for create_volume_from_volume\n and create_volume_from_snapshot\n \"\"\"\n local_cli = general_params.get('local_cli')\n configuration = general_params.get('configuration')\n replication_rmt_cli = general_params.get('replication_rmt_cli')\n hypermetro_rmt_cli = general_params.get('hypermetro_rmt_cli')\n work_flow.add(\n ExtendVolumeTask(local_cli, inject={\n \"new_size\": int(volume.size) * constants.CAPACITY_UNIT}),\n AddQoSTask(local_cli, configuration),\n AddCacheTask(local_cli),\n AddPartitionTask(local_cli),\n CreateHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration),\n AddHyperMetroGroupTask(\n local_cli, hypermetro_rmt_cli, configuration),\n CreateReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n AddReplicationGroupTask(\n local_cli, replication_rmt_cli, configuration),)\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n lun_id = engine.storage.fetch('lun_id')\n lun_info = engine.storage.fetch('lun_info')\n hypermetro_id = engine.storage.fetch('hypermetro_id')\n replication_id = engine.storage.fetch('replication_id')\n return lun_id, lun_info['WWN'], hypermetro_id, replication_id\n\n\ndef create_snapshot(snapshot, local_cli, feature_support):\n store_spec = {'snapshot': snapshot}\n\n work_flow = linear_flow.Flow('create_snapshot')\n work_flow.add(\n CreateSnapshotTask(local_cli, feature_support),\n WaitSnapshotReadyTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n snapshot_id = engine.storage.fetch('snapshot_id')\n snapshot_wwn = engine.storage.fetch('snapshot_wwn')\n\n return snapshot_id, snapshot_wwn\n\n\ndef delete_snapshot(snapshot, local_cli):\n store_spec = {'snapshot': snapshot}\n work_flow = linear_flow.Flow('delete_snapshot')\n work_flow.add(\n CheckSnapshotExistTask(local_cli),\n DeleteSnapshotTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef extend_volume(volume, new_size, local_cli, hypermetro_rmt_cli,\n replication_rmt_cli, configuration):\n store_spec = {'volume': volume,\n 'new_size': int(new_size) * constants.CAPACITY_UNIT}\n work_flow = linear_flow.Flow('extend_volume')\n work_flow.add(\n CheckLunExistTask(local_cli),\n ExtendHyperMetroTask(local_cli, hypermetro_rmt_cli, configuration),\n ExtendReplicationTask(local_cli, replication_rmt_cli, configuration),\n 
ExtendVolumeTask(local_cli)\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef retype(volume, new_opts, local_cli, hypermetro_rmt_cli,\n replication_rmt_cli, configuration, feature_support):\n store_spec = {'volume': volume}\n\n work_flow = linear_flow.Flow('retype_volume')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration, new_opts),\n CheckLunExistTask(local_cli),\n CheckLunIsInUse(),\n UpdateLunTask(local_cli),\n UpdateQoSTask(local_cli),\n UpdateCacheTask(local_cli),\n UpdatePartitionTask(local_cli),\n DeleteHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration),\n DeleteReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n CreateHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration),\n CreateReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n hypermetro_id = engine.storage.fetch('hypermetro_id')\n replication_id = engine.storage.fetch('replication_id')\n return hypermetro_id, replication_id\n\n\ndef retype_by_migrate(volume, new_opts, host, local_cli, hypermetro_rmt_cli,\n replication_rmt_cli, configuration, feature_support):\n store_spec = {'volume': volume}\n\n work_flow = linear_flow.Flow('retype_volume_by_migrate')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration, new_opts),\n CheckLunExistTask(local_cli),\n CheckLunIsInUse(),\n CreateMigratedLunTask(local_cli, host, feature_support),\n WaitLunOnlineTask(local_cli, rebind={'lun_id': 'tgt_lun_id'}),\n CreateMigrateTask(local_cli, rebind={'src_lun_id': 'lun_id'}),\n WaitMigrateDoneTask(local_cli),\n UpdateQoSTask(local_cli),\n AddCacheTask(local_cli),\n AddPartitionTask(local_cli),\n CreateHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration,\n rebind={'lun_info': 'tgt_lun_info'}),\n CreateReplicationTask(\n local_cli, replication_rmt_cli, configuration,\n rebind={'lun_info': 'tgt_lun_info'}),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n hypermetro_id = engine.storage.fetch('hypermetro_id')\n replication_id = engine.storage.fetch('replication_id')\n return hypermetro_id, replication_id\n\n\ndef manage_existing(volume, existing_ref, local_cli, hypermetro_rmt_cli,\n replication_rmt_cli, configuration, feature_support):\n store_spec = {'volume': volume}\n\n work_flow = linear_flow.Flow('manage_volume')\n work_flow.add(\n LunOptsCheckTask(local_cli, feature_support, configuration),\n ManageVolumePreCheckTask(\n local_cli, volume, existing_ref, configuration),\n ManageLunTask(local_cli),\n UpdateQoSTask(local_cli),\n UpdateLunTask(local_cli),\n UpdateCacheTask(local_cli),\n UpdatePartitionTask(local_cli),\n DeleteHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration),\n DeleteReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n CreateHyperMetroTask(\n local_cli, hypermetro_rmt_cli, configuration),\n CreateReplicationTask(\n local_cli, replication_rmt_cli, configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n lun_info = engine.storage.fetch('lun_info')\n hypermetro_id = engine.storage.fetch('hypermetro_id')\n replication_id = engine.storage.fetch('replication_id')\n return lun_info['ID'], lun_info['WWN'], hypermetro_id, replication_id\n\n\ndef manage_existing_snapshot(snapshot, existing_ref, local_cli):\n store_spec = {'snapshot': snapshot}\n\n work_flow = 
linear_flow.Flow('manage_snapshot')\n work_flow.add(\n ManageSnapshotPreCheckTask(local_cli, snapshot, existing_ref),\n ManageSnapshotTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n snapshot_info = engine.storage.fetch('snapshot_info')\n return snapshot_info['ID'], snapshot_info['WWN']\n\n\ndef create_group(group, local_cli, hypermetro_rmt_cli, replication_rmt_cli,\n configuration, feature_support):\n opts = huawei_utils.get_group_type_params(group, configuration.is_dorado_v6)\n store_spec = {'group': group,\n 'opts': opts}\n\n work_flow = linear_flow.Flow('create_group')\n work_flow.add(\n GroupOptsCheckTask(),\n CreateHyperMetroGroupTask(\n local_cli, hypermetro_rmt_cli, configuration,\n feature_support),\n CreateReplicationGroupTask(\n local_cli, replication_rmt_cli, configuration,\n feature_support),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef initialize_iscsi_connection(lun, lun_type, connector, client,\n configuration):\n store_spec = {'connector': connector,\n 'lun': lun,\n 'lun_type': lun_type}\n work_flow = linear_flow.Flow('initialize_iscsi_connection')\n\n if lun_type == constants.LUN_TYPE:\n work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'}))\n else:\n work_flow.add(\n CheckSnapshotExistTask(\n client, provides=('lun_info', 'lun_id'),\n rebind={'snapshot': 'lun'}))\n\n work_flow.add(\n CreateHostTask(client, configuration.iscsi_info, configuration),\n GetISCSIConnectionTask(client, configuration.iscsi_info),\n AddISCSIInitiatorTask(client, configuration.iscsi_info, configuration),\n CreateHostGroupTask(client),\n CreateLunGroupTask(client, configuration),\n CreateMappingViewTask(client),\n GetISCSIPropertiesTask(),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n return engine.storage.fetch('mapping_info')\n\n\ndef initialize_remote_iscsi_connection(hypermetro_id, connector,\n client, configuration):\n store_spec = {'connector': connector,\n 'lun_type': constants.LUN_TYPE}\n work_flow = linear_flow.Flow('initialize_remote_iscsi_connection')\n\n work_flow.add(\n GetHyperMetroRemoteLunTask(client, hypermetro_id),\n CreateHostTask(client, configuration.hypermetro['iscsi_info'],\n configuration),\n GetISCSIConnectionTask(client, configuration.hypermetro['iscsi_info']),\n AddISCSIInitiatorTask(client, configuration.hypermetro['iscsi_info'],\n configuration),\n CreateHostGroupTask(client),\n CreateLunGroupTask(client, configuration),\n CreateMappingViewTask(client),\n GetISCSIPropertiesTask(client),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n return engine.storage.fetch('mapping_info')\n\n\ndef terminate_iscsi_connection(lun, lun_type, connector, client,\n configuration):\n store_spec = {'connector': connector,\n 'lun': lun,\n 'lun_type': lun_type}\n work_flow = linear_flow.Flow('terminate_iscsi_connection')\n\n if lun_type == constants.LUN_TYPE:\n work_flow.add(\n GetLunIDTask(client, rebind={'volume': 'lun'}),\n )\n else:\n work_flow.add(\n GetSnapshotIDTask(\n client, provides='lun_id', rebind={'snapshot': 'lun'}),\n )\n\n work_flow.add(\n GetLunMappingTask(client),\n ClearLunMappingTask(client, configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef terminate_remote_iscsi_connection(hypermetro_id, connector, client,\n configuration):\n store_spec = {'connector': connector}\n work_flow = 
linear_flow.Flow('terminate_remote_iscsi_connection')\n\n work_flow.add(\n GetHyperMetroRemoteLunTask(client, hypermetro_id),\n GetLunMappingTask(client),\n ClearLunMappingTask(client, configuration,\n inject={'lun_type': constants.LUN_TYPE}),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n\ndef initialize_fc_connection(lun, lun_type, connector, fc_san, client,\n configuration):\n store_spec = {'connector': connector,\n 'lun': lun,\n 'lun_type': lun_type}\n work_flow = linear_flow.Flow('initialize_fc_connection')\n\n if lun_type == constants.LUN_TYPE:\n work_flow.add(CheckLunExistTask(client, rebind={'volume': 'lun'}))\n else:\n work_flow.add(\n CheckSnapshotExistTask(\n client, provides=('lun_info', 'lun_id'),\n rebind={'snapshot': 'lun'}))\n\n work_flow.add(\n CreateHostTask(client, configuration.fc_info, configuration),\n GetFCConnectionTask(client, fc_san, configuration),\n AddFCInitiatorTask(client, configuration.fc_info, configuration),\n CreateHostGroupTask(client),\n CreateLunGroupTask(client, configuration),\n CreateFCPortGroupTask(client, fc_san),\n CreateMappingViewTask(client),\n GetFCPropertiesTask(),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n return engine.storage.fetch('mapping_info')\n\n\ndef initialize_remote_fc_connection(hypermetro_id, connector, fc_san, client,\n configuration):\n store_spec = {'connector': connector,\n 'lun_type': constants.LUN_TYPE}\n work_flow = linear_flow.Flow('initialize_remote_fc_connection')\n\n work_flow.add(\n GetHyperMetroRemoteLunTask(client, hypermetro_id),\n CreateHostTask(client, configuration.hypermetro['fc_info'],\n configuration),\n GetFCConnectionTask(client, fc_san, configuration),\n AddFCInitiatorTask(client, configuration.hypermetro['fc_info'],\n configuration),\n CreateHostGroupTask(client),\n CreateLunGroupTask(client, configuration),\n CreateFCPortGroupTask(client, fc_san),\n CreateMappingViewTask(client),\n GetFCPropertiesTask(),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n return engine.storage.fetch('mapping_info')\n\n\ndef terminate_fc_connection(lun, lun_type, connector, fc_san, client,\n configuration):\n store_spec = {'connector': connector,\n 'lun': lun,\n 'lun_type': lun_type}\n work_flow = linear_flow.Flow('terminate_fc_connection')\n\n if lun_type == constants.LUN_TYPE:\n work_flow.add(\n GetLunIDTask(client, rebind={'volume': 'lun'}),\n )\n else:\n work_flow.add(\n GetSnapshotIDTask(\n client, provides='lun_id', rebind={'snapshot': 'lun'}),\n )\n\n work_flow.add(\n GetLunMappingTask(client),\n ClearLunMappingTask(client, configuration, fc_san, is_fc=True),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n return engine.storage.fetch('ini_tgt_map')\n\n\ndef terminate_remote_fc_connection(hypermetro_id, connector, fc_san, client,\n configuration):\n store_spec = {'connector': connector}\n work_flow = linear_flow.Flow('terminate_remote_fc_connection')\n\n work_flow.add(\n GetHyperMetroRemoteLunTask(client, hypermetro_id),\n GetLunMappingTask(client),\n ClearLunMappingTask(client, configuration, fc_san, is_fc=True,\n inject={'lun_type': constants.LUN_TYPE}),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n return engine.storage.fetch('ini_tgt_map')\n\n\ndef failover(volumes, local_cli, replication_rmt_cli, configuration):\n store_spec = {'volumes': volumes}\n work_flow = linear_flow.Flow('failover')\n work_flow.add(\n 
ClassifyVolumeTask(),\n FailoverVolumeTask(local_cli, replication_rmt_cli,\n configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n volumes_update = engine.storage.fetch('volumes_update')\n return volumes_update\n\n\ndef failback(volumes, local_cli, replication_rmt_cli, configuration):\n store_spec = {'volumes': volumes}\n work_flow = linear_flow.Flow('failback')\n work_flow.add(\n ClassifyVolumeTask(),\n FailbackVolumeTask(local_cli, replication_rmt_cli,\n configuration),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n\n volumes_update = engine.storage.fetch('volumes_update')\n return volumes_update\n\n\ndef revert_to_snapshot(snapshot, local_cli, rollback_speed):\n store_spec = {'snapshot': snapshot}\n work_flow = linear_flow.Flow('revert_to_snapshot')\n work_flow.add(\n CheckSnapshotExistTask(local_cli),\n RevertToSnapshotTask(local_cli, rollback_speed),\n WaitSnapshotRollbackDoneTask(local_cli),\n )\n\n engine = taskflow.engines.load(work_flow, store=store_spec)\n engine.run()\n","sub_path":"Cinder/Zed/huawei_flow.py","file_name":"huawei_flow.py","file_ext":"py","file_size_in_byte":103768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"163671922","text":"#!/usr/bin/python\n\nimport nmap3\nimport json\n\npynmap_version = nmap3.Nmap()\nversion_scan = pynmap_version.nmap_version_detection('11.11.11.171')\ntype(version_scan)\nversion_json = json.dumps(version_scan, indent = 5)\nprint(version_json)\n\n","sub_path":"Nmap/exemplos/exemplos/nmap_version/versionscan_json.py","file_name":"versionscan_json.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"576817340","text":"from dictogram import Dictogram\nfrom random import choice\nfrom analyze import read_file\n\nsample_text = read_file('opticks.txt')\nmarkov = {}\n\n\ndef first_order():\n\tfor word in sample_text:\n\t\tmarkov[word] = Dictogram()\n\t\n\tfor index in range(len(sample_text) - 1):\n\t\tmarkov[sample_text[index]].add_count(sample_text[index + 1])\n\t\n\t# print(markov['fish'].sample())\n\tsentence = \" \"\n\tword = choice(list(markov.keys()))\n\tsentence += word\n\tfor i in range(4):\n\t\tword = markov[word].sample()\n\t\tsentence += \" \" + word\n\treturn print(sentence)\n\n\ndef second_order():\n\tfor index in range(len(sample_text) - 2):\n\t\t\n\t\tfirst_word = sample_text[index]\n\t\tmiddle_word = sample_text[index + 1]\n\t\tlast_word = sample_text[index + 2]\n\t\t\n\t\tif (first_word, middle_word) not in markov:\n\t\t\tmarkov[(first_word, middle_word)] = Dictogram([last_word])\n\t\telse:\n\t\t\tmarkov[(first_word, middle_word)].add_count(last_word)\n\treturn markov\n\n\ndef get_tuples(word):\n\tmarkov = second_order()\n\tall_tuples = [each_tuple for each_tuple in list(markov) if word == each_tuple[0]]\n\treturn all_tuples\n\n\ndef sentence_generator():\n\tmarkov = second_order()\n\tword = choice(choice(list(markov)))\n\tsentence = \" \" + word\n\tfor i in range(7):\n\t\tpair_choice = choice(get_tuples(word))\n\t\tsentence += ' ' + pair_choice[1]\n\t\tword = pair_choice[1]\n\treturn sentence\n\n\nprint(sentence_generator())","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"317798103","text":"import pytest\n\nfrom mtxapi.api.Mtx_Login import 
Mtx_Login\nfrom mtxapi.api.Mtx_colletion import Mtx_Colletion\nfrom mtxapi.log.Logger import Logger\n\n\nclass Test_Collection():\n\n def setup_class(self):\n self.colltionobj = Mtx_Colletion()\n self.loggerobj = Logger().get_logger()\n self.loginobj = Mtx_Login()\n\n @pytest.mark.parametrize('account,pwd',[('li40','123456')])\n def test_case_login(self,account,pwd):\n res = self.loginobj.login(account,pwd)\n pytest.assume(res['msg']=='登录成功')\n self.loggerobj.info('登录操作')\n\n def test_case_collection(self):\n res = self.colltionobj.colletion()\n # print(res)\n # assert \"我的收藏 - 码同学实战系统\" in res.text\n pytest.assume('我的收藏 - 码同学实战系统' in res.text)\n self.loggerobj.info('收藏列表页')\n\nif __name__ == '__main__':\n pytest.main(['-s'])","sub_path":"mtxapi/testcase/test_colletion.py","file_name":"test_colletion.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386966559","text":"from flask import Flask\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import flash\nfrom flask_login import current_user\nfrom flask_login import login_required\nfrom flask import request\nfrom pprint import pprint\nfrom datetime import date\nimport csv\nimport requests\n\nfundamentals = Blueprint('fundamentals',__name__)\n\n\n@fundamentals.route('/fundamentals/stock-symbols/')\n@login_required\ndef stock_symbols():\n return render_template('fundamentals/stock_symbol.html',user=current_user)\n\n@fundamentals.route('/fundamentals/all-stocks/')\n@login_required\ndef all_stocks():\n return render_template('fundamentals/allstocks.html',user=current_user)\n\n@fundamentals.route('/fundamentals/company-profile/',methods=['GET','POST'])\n@login_required\ndef profile():\n\n if request.method == 'POST':\n comp_name = request.form['company']\n url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol='+comp_name+'&apikey=WH75LQJ4BD7S15TO'\n r = requests.get(url)\n data = r.json()\n if data:\n\n name=data['Name']\n symbol=data['Symbol']\n sector=data['Sector']\n industry=data['Industry']\n country=data['Country']\n asset=data['AssetType']\n address=data['Address']\n comp=data['Description']\n exchange=data['Exchange']\n return render_template('fundamentals/company_profile.html',\n user=current_user,comp=comp,name=name,symbol=symbol,sector=sector,\n industry=industry,country=country,asset=asset,address=address,exchange=exchange,data=data)\n else:\n message='No data exists with our data providers for such given code input'\n return render_template('fundamentals/company_profile.html',\n user=current_user,message=message)\n # pprint(data)\n \n \n return render_template('fundamentals/company_profile.html',user=current_user)\n\n\n@fundamentals.route('/fundamentals/all-exchanges/',methods=['GET','POST'])\n@login_required\ndef exchanges():\n # url='https://api.twelvedata.com/exchanges?source=docs'\n # re=requests.get(url)\n # req=re.json()\n \n # dets=req['data']\n # pprint(dets['code'])\n return render_template('fundamentals/exchange.html',user=current_user)\n\n@fundamentals.route('/fundamentals/all-tickers/',methods=['GET','POST'])\n@login_required\ndef tickers():\n data=[]\n rich=True\n url='https://api.polygon.io/v3/reference/tickers?active=true&sort=ticker&order=asc&limit=1000&apiKey=Uzuvj8JwkDonC3dGzEcxu42LcwwyBHUk'\n\n while(rich):\n re=requests.get(url)\n req=re.json()\n # pprint(req['results'])\n # print('mid')\n \n \n \n data.append(req['results'])\n key_to_lookup = 
'next_url'\n if req.__contains__(key_to_lookup):\n url=req['next_url']+'&active=true&sort=ticker&order=asc&limit=1000&apiKey=Uzuvj8JwkDonC3dGzEcxu42LcwwyBHUk'\n else:\n rich=False\n \n\n \n print(len(data))\n return render_template('fundamentals/alltickers.html',user=current_user,datas=data)\n\n\n\n\ndef intro(data):\n act=date.today()\n url='https://finnhub.io/api/v1/company-news?symbol='+data+'&from=2021-01-10&to='+f'{act}'+'&token=c2vgio2ad3i9mrpv9i2g'\n print(data)\n req=requests.get(url)\n datas=req.json()\n # dope.append(datas)\n message='Error in your input'\n pprint(datas)\n \n\n return datas\n\n@fundamentals.route('/fundamentals/get-news/',methods=['GET','POST'])\n@login_required\ndef getnews():\n \n if request.method == 'POST':\n name= request.form['companyName']\n datas = intro(name)\n return render_template('fundamentals/news.html',user=current_user,datas=datas)\n else:\n return render_template('fundamentals/getnews.html',user=current_user)\n\n@fundamentals.route('/fundamentals/finance-insider/',methods=['GET','POST'])\n@login_required\ndef insider():\n return render_template('fundamentals/insider.html',user=current_user)\n \n\ndef getEarnings(read):\n req=requests.get(read)\n datas=req.json()\n \n if datas: \n return datas\n else:\n return 0\n\n\n\n@fundamentals.route('/fundamentals/earnings/',methods=['GET','POST'])\n@login_required\ndef earnings():\n message='No data found'\n xaxes=[];\n yaxes=[];\n rdate=[];\n reportedEPS=[];\n estimatedEPS=[];\n surprise=[];\n surprisePercentage=[];\n\n if request.method == 'POST':\n code = request.form['earning']\n # print(code)\n url='https://www.alphavantage.co/query?function=EARNINGS&symbol='+code+'&apikey=WH75LQJ4BD7S15TO'\n notes=getEarnings(url)\n if notes:\n cotes=notes['annualEarnings']\n gotes=notes['quarterlyEarnings']\n \n for i in cotes:\n xaxes.append(i['fiscalDateEnding'])\n yaxes.append(i['reportedEPS'])\n # print(i['fiscalDateEnding'])\n # print(i['reportedEPS'])\n\n for j in gotes:\n rdate.append(j['reportedDate'])\n reportedEPS.append(j['reportedEPS'])\n estimatedEPS.append(j['estimatedEPS'])\n surprise.append(j['surprise'])\n surprisePercentage.append(j['surprisePercentage'])\n print(j['surprisePercentage'])\n\n leny=len(yaxes)\n lenx=len(xaxes)\n return render_template('fundamentals/earnings.html',\n user=current_user,xaxes=xaxes,yaxes=yaxes,lenx=lenx,\n leny=leny,rdate=rdate,report=reportedEPS,estimate=estimatedEPS,sur=surprise,surprise=surprisePercentage)\n else:\n return render_template('fundamentals/earnings.html',\n user=current_user,message=message)\n\n return render_template('fundamentals/earnings.html',user=current_user)\n \n\n\n@fundamentals.route('/fundamentals/ipo/',methods=['GET','POST'])\n@login_required\ndef ipo():\n act=date.today()\n url='https://finnhub.io/api/v1/calendar/ipo?from=2020-01-01&to='+f'{act}'+'&token=c2vgio2ad3i9mrpv9i2g'\n req=requests.get(url)\n datas=req.json()\n # pprint(datas)\n results=datas['ipoCalendar']\n return render_template('fundamentals/ipo.html',user=current_user,datas=results)\n\n\n\n\n@fundamentals.route('/fundamentals/market-news/',methods=['GET','POST'])\n@login_required\ndef marketNews():\n \n url='https://finnhub.io/api/v1/news?category=general&token=c2vgio2ad3i9mrpv9i2g'\n req=requests.get(url)\n datas=req.json()\n pprint(datas)\n \n return render_template('fundamentals/generalNews.html',user=current_user,datas=datas)\n\n\n\n@fundamentals.route('/fundamentals/filings/',methods=['GET','POST'])\n@login_required\ndef filings():\n \n \n return 
render_template('fundamentals/sec.html',user=current_user)\n\n\n@fundamentals.route('/fundamentals/reported-financials/',methods=['GET','POST'])\n@login_required\ndef reports():\n # swData=[];\n # bs=[];\n # cf=[];\n # ic=[];\n # if request.method=='POST':\n\n # st=request.form['repfin']\n # url='https://finnhub.io/api/v1/stock/financials-reported?symbol='+st+'&token=c2vgio2ad3i9mrpv9i2g'\n # response=requests.get(url)\n # responseData=response.json()\n # reqData=responseData['data']\n # # pprint(responseData)\n # if(len(reqData) == 0):\n # message='error in fetching data.Check your code input again'\n # return render_template('fundamentals/repFin.html',user=current_user,st=message)\n # else:\n # for i in reqData:\n # sal=i['report']\n \n # bas=sal['bs'];\n # # caf=j['cf'];\n # # ica=j['ic'];\n # for k in bas:\n # for gh in k:\n\n # bs.append(k[gh])\n # # for l in caf:\n # # cf.append(l['value'])\n # # for m in ica:\n # # ic.append(m['value'])\n \n\n\n\n # pprint(bs)\n # return render_template('fundamentals/repFin.html',user=current_user,st=st)\n \n return render_template('fundamentals/repFin.html',user=current_user)\n\n@fundamentals.route('/fundamentals/basic-financials/')\n@login_required\ndef basics():\n\n \n return render_template('fundamentals/basics.html',user=current_user)","sub_path":"project/fundamentals.py","file_name":"fundamentals.py","file_ext":"py","file_size_in_byte":8383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"93767569","text":"from typing import TypeVar, Generic, Callable\nfrom stack import ArrayStack\n\nK = TypeVar('Key')\nI = TypeVar('Item')\nT = TypeVar('T') # for the iterator\n\nclass BinaryTreeNode(Generic[K,I]):\n def __init__(self, key:K, item:I) -> None:\n self.key = key\n self.item = item\n self.left = None\n self.right = None\n \n def __str__(self):\n return \"(\"+str(self.key)+\", \"+str(self.item)+\")\"\n\nclass BinaryTree(Generic[K,I]):\n def __init__(self) -> None:\n self.root = None\n \n def is_empty(self) -> bool:\n return self.root is None\n\n def __len__(self) -> int:\n return self.len_aux(self.root)\n \n def len_aux(self, current:BinaryTreeNode[K,I]) -> int:\n if current is None:\n return 0\n else:\n return 1 + self.len_aux(current.left) + self.len_aux(current.right)\n\n def __contains__(self, key:K) -> bool:\n return self.contains_aux(self.root, key)\n \n def contains_aux(self, current:BinaryTreeNode[K,I], key:K) -> bool:\n if current is None:\n return False\n elif key < current.key:\n return self.contains_aux(current.left, key)\n elif key > current.key:\n return self.contains_aux(current.right, key)\n else:\n return True # key found\n \n def __getitem__(self, key:K) -> BinaryTreeNode:\n return self.getitem_aux(self.root, key)\n\n def getitem_aux(self, current:BinaryTreeNode[K,I], key:K) -> I:\n if current is None:\n raise KeyError(key)\n elif key < current.key:\n return self.contains_aux(current.left, key)\n elif key > current.key:\n return self.contains_aux(current.right, key)\n else:\n return current.item\n \n def __setitem__(self, key:K, item:I) -> None:\n self.root = self.insert_aux(self.root, key, item)\n \n def setitem_aux(self, current:BinaryTreeNode[K,I], key:K, item:I) -> BinaryTreeNode:\n if current is None:\n current = BinarySearchNode(key, item) # creates the new node\n elif key < current.key:\n current.left = self.insert_aux(current.left, key, item)\n elif key > current.key:\n current.right = self.insert_aux(current.right, key, item)\n else: # key == current.key\n 
current.item = item\n return current\n \n def insert(self, key:K, item:I=None) -> None:\n self.root = self.insert_aux(self.root, key, item)\n\n def insert_aux(self, current:BinaryTreeNode[K,I], key:K, item:I) -> BinaryTreeNode:\n if current is None:\n current = BinaryTreeNode(key, item) # creates the new node\n elif key < current.key:\n current.left = self.insert_aux(current.left, key, item)\n elif key > current.key:\n current.right = self.insert_aux(current.right, key, item)\n else: # key == current.key\n raise ValueError(\"Inserting duplicate item\")\n return current\n\n def delete(self, key:K) -> None:\n self.root = self.delete_aux(self, key)\n \n def delete_aux(self, current:BinaryTreeNode[K,I], key:K, item:I) -> BinaryTreeNode:\n if current is None:\n raise ValueError(\"Key doesn't exist\")\n elif key < current.key:\n current.left = self.delete_aux(current.left, key, item)\n elif key > current.key:\n current.right = self.delete_aux(current.right, key, item)\n else: # key == current.key\n return None # delete node\n return current\n \n def __iter__(self):\n return PreorderIteratorStack(self.root)\n\nclass PreorderIteratorStack(Generic[T]):\n def __init__(self, root:BinaryTreeNode[K,I]) -> None:\n self.stack = ArrayStack()\n self.stack.push(root)\n \n def __iter__(self):\n return self\n \n def __next__(self) -> T:\n if self.stack.is_empty():\n raise StopIteration\n current = self.stack.pop()\n if current.right is not None:\n self.stack.push(current.right)\n if current.left is not None:\n self.stack.push(current.left)\n return current.item\n \nmytree = BinaryTree()\nmytree.insert(4, \"+\")\nmytree.insert(2, \"/\")\nmytree.insert(1, \"1\")\nmytree.insert(3, \"3\")\nmytree.insert(8, \"/\")\nmytree.insert(6, \"*\")\nmytree.insert(5, \"6\")\nmytree.insert(7, \"7\")\nmytree.insert(9, \"4\")\n\nfor item in mytree:\n print(item)","sub_path":"Abstract Data Types/adt_binary_tree.py","file_name":"adt_binary_tree.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"203939711","text":"import pygame\nimport time\nfrom TileManager import TileManager\nfrom Apple import Apple\nfrom Snake import Snake\nfrom CounterTime import CounterTime\nfrom ClientManager import ClientManager\nfrom HUD import HUD\n\nSNAKE = \"s\"\nAPPLE = \"a\"\n\nclass BoardManager():\n \n def __init__(self, nw_tile, nh_tile):\n self.nw_tile = nw_tile # Set number of tile in x\n self.nh_tile = nh_tile # Set number of tile in y\n \n self.number_body = 3 # Set number of snake's body\n self.score = 0 # Set score of sanke\n \n self.db_save_size = 6 # Column size of data base\n self.id = 0 # index of database's column\n self.type = 1 \n self.x = 2\n self.y = 3\n self.s = 4\n self.t = 5\n \n self.enemy_dict = {} # Enemy snake dictionary\n self.client_state_dict = {'OK':0, 'notNStart':1, 'notNExit':2} # KList state of client\n self.client_state = self.client_state_dict['notNStart'] # State of cleint \n self.play_state = False # Playing state\n self.game_time = 0 # time of game\n \n self.tile_mng = TileManager(width, height, self.nw_tile, self.nh_tile) # Set TileManager\n self.snake = Snake(1, 1, self.tile_mng, green, SNAKE, raw_input(\"Enter : \"), self.number_body) # Set my sanke\n self.apple = Apple(0, 0, self.tile_mng, red, 0) # Set apple\n self.hud = HUD() # Set HUD\n \n # Update board\n def update(self):\n for event in pygame.event.get(): # Check all event\n if event.type == pygame.QUIT: # Click Quit to quit program\n pygame.quit() # quit 
programe\n quit()\n if event.type == pygame.KEYDOWN : # If pressed keyboard\n if event.key == pygame.K_LEFT and not self.snake.get_move_list('right'): # Pressed left and not go right\n self.snake.set_left() # Go left\n elif event.key == pygame.K_RIGHT and not self.snake.get_move_list('left'): # Pressed right and not go left\n self.snake.set_right() # Go right\n elif event.key == pygame.K_UP and not self.snake.get_move_list('down'): # Pressed up and not go down\n self.snake.set_up() # Go up\n elif event.key == pygame.K_DOWN and not self.snake.get_move_list('up'): # Pressed down and not go up\n self.snake.set_down() # Go down\n elif event.key == pygame.K_x: # Press x\n self.snake.snake_slide(self.snake.get_move(), self.apple) # Use slide snake skill\n elif event.key == pygame.K_c: # Press c\n self.snake.snake_running() # Use running snake skill\n \n x_pos_ls = [int(self.snake.get_x_pos()[i] / self.tile_mng.get_sizew_tile()) for i in range(self.snake.get_number_body())] # set x's list\n y_pos_ls = [int(self.snake.get_y_pos()[i] / self.tile_mng.get_sizeh_tile ())for i in range(self.snake.get_number_body())] # set y's list\n x_pos_n = self.client.pos_pattern(x_pos_ls) # save in string in postion pattern (x, y)\n y_pos_n = self.client.pos_pattern(y_pos_ls)\n pattern = self.client.set_pattern(self.snake.get_id(), [self.snake.get_type(), x_pos_n, y_pos_n, self.snake.get_score(), 0]) # set pattern of all data\n # id,type,xs,ys,score,time\n \n if self.client_state == self.client_state_dict['OK']: # Client state is \"OK\"\n self.snake.snake_eat_apple(self.apple) # Check does snake eat apple ?\n self.snake.update() # Update snake\n if self.apple.get_eaten(): # Apple is eaten by snake\n pattern = \"eaten\" # pattern is \"eaten\"\n self.apple.set_eaten(False) # Apple is not eaten\n self.client.send(pattern) # Send pattern to client\n \n data, addr = self.client.recv() # Receive data from server\n if data == None: # Data is none => exit method\n return\n if data == \"notNStart\" : # data is notNStart => set client state\n self.client_state = self.client_state_dict['notNStart']\n elif data == \"notNExit\" : # data is notNExit => set client state\n self.client_state = self.client_state_dict['notNExit']\n else : # otherwise\n self.client_state = self.client_state_dict['OK'] # Set client state to \"OK\"\n split_list = self.client.split(data) # Split data\n for ls in split_list :\n self.game_time = int(ls[self.t]) # set time\n if ls[self.id] == \"*\" : # See id of apple\n self.apple = Apple(int(ls[self.x]), int(ls[self.y]), self.tile_mng, red, str(ls[self.type]))\n elif ls[self.id] != self.snake.get_id() and ls[self.id] != \"*\": # See enemy snake's id\n self.enemy_dict[ls[self.id]] = Snake(0, 0, self.tile_mng, blue, ls[self.type], ls[self.id], 0) # Save in dictionary of enemy snake\n x_pos = self.client.pos_split(ls[self.x]) \n y_pos = self.client.pos_split(ls[self.y])\n x_list = [int(x_pos[i] * self.tile_mng.get_sizew_tile()) for i in range(len(x_pos))]\n y_list = [int(y_pos[i] * self.tile_mng.get_sizeh_tile()) for i in range(len(y_pos))]\n self.enemy_dict[ls[self.id]].set_pos(x_list, y_list) # Set postion enemy snake\n \n # Render board\n def render(self): # Render board\n game_display.fill(white) # Background\n if self.client_state == self.client_state_dict['OK']: # Client state is OK\n if self.snake.state == 'dead' : # If dead\n # Draw dead HUD\n self.hud.dead_hud(game_display, self.snake.get_dead_ellapse(), self.snake.get_dead_delay(), width / 2, height / 2, width / 3, height / 3) \n else :\n 
self.apple.render(game_display) # Render Apple\n self.snake.render(game_display, self.snake.get_number_body()-1) # render snake\n for key in self.enemy_dict: # Render enemy snake\n self.enemy_dict[key].render(game_display, self.enemy_dict[key].get_number_body()-1)\n # Draw time's hud, score' hud, gauge's hud\n self.hud.time_hud(game_display, self.game_time, width / 2, height * 0.05)\n self.hud.score_hud(game_display, self.snake.get_score(), width * 0.8, height * 0.05)\n self.hud.gauge_hud(game_display, min(self.snake.get_slide_ellapse() / self.snake.get_slide_delay(), 1), 0.6 * width, 0.95 * height, 0.4 * width, 0.02 * height)\n self.hud.gauge_hud(game_display, min(self.snake.get_run_use_ellapse() / self.snake.get_run_use_delay(), 1), 0.6 * width, 0.97 * height, 0.4 * width, 0.02 * height)\n pygame.display.update() # Update display\n \n def loop(self): # Loop to play always\n self.ip = raw_input(\"IP : \") # Get IP\n self.client = ClientManager(self.ip, 8000) # Set client \n packet = str(\"tile:\" + str(width) + \",\" + str(height) + \",\" + str(self.nw_tile) + \",\" + str(self.nh_tile)) # Set pattern : width, height program and number of tile program\n self.client.send(packet) # Send data to server\n self.client.close() # Disconnnet socket\n while True :\n try:\n self.client = ClientManager(self.ip, 8000) # Set client\n self.update() # Update program\n self.render() # Render program\n self.client.close() # Disconnect from socket\n except KeyboardInterrupt :\n pygame.quit() # Quit program\n quit()\n pygame.quit() # Quit program\n quit()\n \nwidth = 600 # width of program\nheight = 600 # height of program\n\nwhite = (255 ,255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\npygame.init() # Start pygame\ngame_display = pygame.display.set_mode([width, height]) # Start Frame\npygame.display.set_caption(\"SNAKEGAME_2D\") # Set caption\nclock = pygame.time.Clock() # Set counter\n\nboard_mng = BoardManager(8, 8) # Setup BoardManager\nboard_mng.loop() # Loop BoardManager","sub_path":"BoardManager.py","file_name":"BoardManager.py","file_ext":"py","file_size_in_byte":8338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"137222513","text":"import matplotlib\nimport numpy\nimport os\n\nfrom matplotlib import pyplot\nfrom scipy import stats\n\nfrom general_utils import prepare_folder\nfrom plotting.plot_violins import plot_violins\n\ndef plot_decoding_results_breakdown(args):\n\n matplotlib.rc('image', cmap='Dark2')\n\n subjects = 33\n labels = list()\n accs = dict()\n\n folder = prepare_folder(args)\n for f_selec in os.listdir(folder):\n \n file_name = '{}_{}_results.txt'.format(args.word_vectors, \\\n args.evaluation_method)\n file_path = os.path.join(folder, f_selec, file_name)\n\n if not os.path.exists(file_path):\n print('missing: {}'.format(file_path))\n else:\n with open(file_path) as i:\n lines = [l.strip().split('\\t') for l in i.readlines()]\n #if args.experiment_id == 'two':\n # lines = lines[1:]\n header_indices = [l for l in enumerate(lines[0]) if 'Accuracy' in l[1] or \\\n 'mixed_place_place' in l[1] or\\\n 'mixed_person_place' in l[1] or\\\n 'entity_persona' in l[1] or \\\n 'entity_person_person' in l[1] or\\\n 'entity_luogo' in l[1] or \\\n 'entity_place_place' in l[1] or\\\n 'entity_person_place' in l[1] or\\\n 'category_persona' in l[1] or \\\n 'category_person_person' in l[1] or\\\n 'category_luogo' in l[1] or \\\n 'category_place_place' in l[1] or\\\n 'category_person_place' in l[1] or\\\n 'mixed_persona' in 
l[1] or \\\n 'mixed_person_person' in l[1] or \\\n 'person_place' in l[1] or \\\n 'famous_famous' in l[1] or \\\n 'person_place_famous_famous' in l[1] or \\\n 'place_place' in l[1] or \\\n 'place_place_famous_famous' in l[1] or \\\n 'person_person' in l[1] or \\\n 'person_person_famous_famous' in l[1] or \\\n\n 'mixed_luogo' in l[1]]\n\n\n ### Final subjects are from 1 to 33; since the first line in txt is just the \n ### line corresponding to the header we can take the lines using final_subjects directly\n assert len(lines) == subjects+1\n lines = [lines[i] for i in range(1, subjects+1)]\n for h_i, h in header_indices:\n labels.append(f_selec)\n if h not in accs.keys():\n accs[h] = list()\n for l in lines:\n accs[h].append(float(l[h_i]))\n\n\n ### Printing out what's missing\n if len(accs.keys()) == 0:\n print('missing {}'.format(file_path))\n \n ### If there's at least one result, proceed\n else:\n\n random_baseline = 0.5\n assert len(list(set([len(v) for k, v in accs.items()]))) == 1\n #accs = numpy.array(accs, dtype=numpy.double) \n\n comp_model = args.word_vectors.replace('_en_mentions', '')\n #plot_path = os.path.join('plots', args.experiment_id, \\\n # '{}_results_breakdown'.format(args.analysis), \\\n # args.subsample, args.entities, \\\n # args.semantic_category)\n plot_path = prepare_folder(args).replace(args.analysis, \\\n '{}/results_breakdown'.format(args.analysis))\\\n .replace('results', 'plots')\n plot_path = os.path.join(plot_path, args.evaluation_method)\n os.makedirs(plot_path, exist_ok=True)\n ### txt file to compute correlations\n text_path = os.path.join(plot_path, \\\n '{}_{}_{}_{}_breakdown.txt'.format(\\\n comp_model, args.entities, args.semantic_category, \\\n args.analysis))\n with open(text_path,'w', encoding='utf-8') as o:\n for k in accs.keys():\n o.write('{}\\t'.format(k))\n o.write('\\n')\n for k_i in range(len(accs['Accuracy'])):\n for k, v in accs.items():\n o.write('{}\\t'.format(v[k_i]))\n o.write('\\n')\n\n plot_path = os.path.join(plot_path, \\\n #'{}_{}_{}_decoding_breakdown.pdf'.format(\\\n '{}_{}_{}_{}_breakdown_average{}.jpg'.format(\\\n comp_model, args.entities, args.semantic_category, \\\n args.analysis, args.average))\n\n ### Main plot properties\n\n plot_title = 'Comparing {} scores for various \\n'\\\n 'data splits for {} \\n'\\\n '- {} - {} - N={}'.format(\\\n args.analysis, \\\n comp_model, \\\n args.entities, \\\n args.semantic_category, \\\n len(accs['Accuracy'])).replace('_', ' ')\n plot_title = plot_title.replace('_', ' ')\n plot_violins(accs, labels, plot_path, plot_title, random_baseline)\n","sub_path":"minimal_exp/plotting/plot_decoding_results_breakdown.py","file_name":"plot_decoding_results_breakdown.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"555409046","text":"from __future__ import print_function\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.classification import GBTClassifier\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.sql import SparkSession\n\n# Read data\nspark = SparkSession.builder.appName(\"pyspark data source\").getOrCreate()\ndf = spark.read.load(\"../data/training.csv\", format=\"csv\", header=\"true\")\ntest_df = spark.read.load(\"../data/testing.csv\", format=\"csv\", header=\"true\")\n\n# Convert DataType and 
DataFrame\nfeatures_name = [f for f in df.columns if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']]\nfor name in features_name:\n df = df.withColumn(name, df[name].cast(\"double\"))\n df_test = df_test.withColumn(name, df[name].cast(\"double\"))\ndf = df.withColumn(\"indexedLabel\", df[\"SK_ID_CURR\"].cast(\"double\"))\ndf_test = df_test.withColumn(\"indexedLabel\", df[\"SK_ID_CURR\"].cast(\"double\"))\n\n# Assemble features as sparse Vector\nassembler = VectorAssembler(inputCols=features_name, outputCol= \"features\")\nfeatures = assembler.transform(df)\ndf = df.withColumn(\"indexedFeatures\", features)\n\ntrainingData = df.select([\"indexedLabel\", \"indexedFeatures\"])\ntestData = df_test.select(\"indexedFeatures\")\ndel df, df_test\n\n# Train a GBT model.\ngbt = GBTClassifier(labelCol=\"indexedLabel\", featuresCol=\"indexedFeatures\", maxIter=10)\n\n# Train model.\nmodel = gbt.fit(trainingData)\n\n# Make predictions.\npredictions = model.transform(testData)\n\n# Select example rows to display.\npredictions.select(\"prediction\", \"indexedLabel\", \"features\").show(5)\n\n# Select (prediction, true label) and compute test error\nevaluator = MulticlassClassificationEvaluator(\n labelCol=\"indexedLabel\", predictionCol=\"prediction\", metricName=\"accuracy\")\naccuracy = evaluator.evaluate(predictions)\nprint(\"Test Error = %g\" % (1.0 - accuracy))\n\ngbtModel = model.stages[2]\nprint(gbtModel) # summary only\n\nspark.stop()","sub_path":"code/Spark_GBTC.py","file_name":"Spark_GBTC.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"234941548","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\n\ndef outputDEM(data, nameList, boundaryID, timeStep, xdataName, ydataName):\n bcData = data[(data['Boundary condition'] == boundaryID) & \n (data['Time step'] == timeStep)].sort_values(xdataName)\n \n DEMData = bcData[[xdataName, ydataName]]\n return DEMData\n\ndef outputDatafile(data, fileName):\n Nx = len(data)\n Ny = 1\n text = data.to_csv( sep=' ', float_format='%.4f', index=False, header=False)\n with open(fileName, 'w') as f:\n f.write(text)\n f.close()\n\n\n# In[3]:\n\n\nfilename = 'reference.dat'\n\n# Read in name list\nnameList = pd.read_csv( filename + '.names', \n delimiter=r': ',\n index_col=0, \n skiprows=range(8), \n names = ['ID','name'] )\n\n# Read in data\ndata = pd.read_csv( filename, \n delim_whitespace=True, \n names=nameList['name'])\n\n\n# In[4]:\n\n\ntopId = 3\nbedId = 1\nrightId = 2\ntimeStep = 10\n\ncoordXName = 'coordinate 1'\ncoordYName = 'coordinate 2'\nvelName= 'velocity 1'\n#betaName = 'stress vector 2'\n\n\nsurf = outputDEM(data, nameList, topId, timeStep, coordXName, coordYName)\nH = outputDEM(data, nameList, topId, timeStep, coordXName, 'height')\nbed = outputDEM(data, nameList, bedId, timeStep, coordXName, coordYName)\nUsurf = outputDEM(data, nameList, topId, timeStep, coordXName, velName)\nUright = outputDEM(data, nameList, rightId, timeStep, coordYName, velName)\n\n# In[5]:\n\n\n# Create UDEM\noutputDatafile(surf, 'zsDEM.dat') \noutputDatafile(bed, 'zbDEM.dat') \noutputDatafile(H, 'H.dat') \noutputDatafile(Usurf, 'UDEM.dat') \noutputDatafile(Uright, 'Calving.dat') \n\n\n","sub_path":"step0/extractSurf.py","file_name":"extractSurf.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"306768241","text":"import tensorflow as tf\nimport os\nfrom text_classification.settings import *\nfrom classification.methods.rnn.data_processing_rnn import batch_iter, sequence, process, process_text\nimport pandas as pd\n\n\nclass TextRnn(object):\n\n def __init__(self, pm):\n self.pm = pm\n self.input_x = tf.placeholder(tf.int32, shape=[None, pm.seq_length], name='input_x')\n self.input_y = tf.placeholder(tf.float32, shape=[None, pm.num_classes], name='input_y')\n self.seq_length = tf.placeholder(tf.int32, shape=[None], name='sequen_length')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.rnn()\n\n def rnn(self):\n with tf.device('/cpu:0'), tf.name_scope('embedding'):\n embedding = tf.get_variable('embedding', shape=[self.pm.vocab_size, self.pm.embedding_dim],\n initializer=tf.constant_initializer(self.pm.pre_trianing))\n self.embedding_input = tf.nn.embedding_lookup(embedding, self.input_x)\n\n with tf.name_scope('cell'):\n cell = tf.nn.rnn_cell.LSTMCell(self.pm.hidden_dim)\n cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)\n\n cells = [cell for _ in range(self.pm.num_layers)]\n Cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)\n\n with tf.name_scope('rnn'):\n # hidden一层 输入是[batch_size, seq_length, hidden_dim]\n # hidden二层 输入是[batch_size, seq_length, 2*hidden_dim]\n # 2*hidden_dim = embendding_dim + hidden_dim\n output, _ = tf.nn.dynamic_rnn(cell=Cell, inputs=self.embedding_input, sequence_length=self.seq_length,\n dtype=tf.float32)\n output = tf.reduce_sum(output, axis=1)\n # output:[batch_size, seq_length, hidden_dim]\n\n with tf.name_scope('dropout'):\n self.out_drop = tf.nn.dropout(output, keep_prob=self.keep_prob)\n\n with tf.name_scope('output'):\n w = tf.Variable(tf.truncated_normal([self.pm.hidden_dim, self.pm.num_classes], stddev=0.1), name='w')\n b = tf.Variable(tf.constant(0.1, shape=[self.pm.num_classes]), name='b')\n self.logits = tf.matmul(self.out_drop, w) + b\n self.predict = tf.argmax(tf.nn.softmax(self.logits), 1, name='predict')\n\n with tf.name_scope('loss'):\n losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)\n self.loss = tf.reduce_mean(losses)\n\n with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(self.pm.learning_rate)\n gradients, variables = zip(*optimizer.compute_gradients(self.loss)) # 计算变量梯度,得到梯度值,变量\n gradients, _ = tf.clip_by_global_norm(gradients, self.pm.clip)\n # 对g进行l2正则化计算,比较其与clip的值,如果l2后的值更大,让梯度*(clip/l2_g),得到新梯度\n self.optimizer = optimizer.apply_gradients(zip(gradients, variables), global_step=self.global_step)\n # global_step 自动+1\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(self.predict, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n\n def feed_data(self, x_batch, y_batch, seq_len, keep_prob):\n feed_dict = {self.input_x: x_batch,\n self.input_y: y_batch,\n self.seq_length: seq_len,\n self.keep_prob: keep_prob}\n\n return feed_dict\n\n def evaluate(self, sess, x, y):\n batch_test = batch_iter(x, y, self.pm.batch_size)\n for x_batch, y_batch in batch_test:\n seq_len = sequence(x_batch)\n feet_dict = self.feed_data(x_batch, y_batch, seq_len, 1.0)\n loss, accuracy = sess.run([self.loss, self.accuracy], feed_dict=feet_dict)\n\n return loss, accuracy\n\n\ndef getImageUrl(dataid, pm):\n return \"/static/csv2jpg/text_rnn/\" + make_dir_string(dataid, 
pm) + \"/train_info.png\"\n\n\ndef make_dir_string(dataid, pm):\n '''创建文件夹的名称:数据集+参数的组合形式'''\n return ('dataid_' + str(dataid) +\n '_num_layers_' + str(pm.num_layers) + \"_hidden_dim_\" + str(pm.hidden_dim) +\n \"_keep_prob_\" + str(pm.keep_prob) + '_learning_rate_' + str(pm.learning_rate) + \"_lr_decay_\" + str(\n pm.lr_decay) + \"_clip_\" + str(pm.clip) + \"_num_epochs_\" + str(pm.num_epochs) +\n \"_batch_size_\" + str(pm.batch_size))\n\n\ndef save_info(file_path, train_info):\n dataFrame = pd.DataFrame(train_info)\n if not os.path.exists(file_path):\n with open(file_path, \"a\") as f:\n dataFrame.to_csv(f)\n else:\n with open(file_path, \"a\") as f:\n dataFrame.to_csv(f, header=False)\n\n\ndef train(model, pm, wordid, cat_to_id, dataid):\n tensorboard_dir = os.path.join(TENSORBOARD_DIR, 'text_rnn', make_dir_string(dataid, pm))\n save_dir = os.path.join(CHECKPOINTS, 'text_rnn', make_dir_string(dataid, pm))\n\n if not os.path.exists(tensorboard_dir):\n os.makedirs(tensorboard_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_path = os.path.join(save_dir, 'best_validation')\n\n tf.summary.scalar('loss', model.loss)\n tf.summary.scalar('accuracy', model.accuracy)\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tensorboard_dir)\n saver = tf.train.Saver()\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n writer.add_graph(session.graph)\n\n x_train, y_train = process(pm.train_filename, wordid, cat_to_id, max_length=250)\n x_test, y_test = process(pm.test_filename, wordid, cat_to_id, max_length=250)\n for epoch in range(pm.num_epochs):\n print('Epoch:', epoch + 1)\n num_batchs = int((len(x_train) - 1) / pm.batch_size) + 1\n batch_train = batch_iter(x_train, y_train, batch_size=pm.batch_size)\n\n # 保存信息为pandas\n train_info = {\"global_step\": [], \"loss\": [], \"accuracy\": []} # 训练信息\n for x_batch, y_batch in batch_train:\n seq_len = sequence(x_batch)\n feed_dict = model.feed_data(x_batch, y_batch, seq_len, pm.keep_prob)\n _, global_step, _summary, train_loss, train_accuracy = session.run(\n [model.optimizer, model.global_step, merged_summary,\n model.loss, model.accuracy], feed_dict=feed_dict)\n train_info[\"global_step\"].append(global_step)\n train_info[\"loss\"].append(train_loss)\n train_info[\"accuracy\"].append(train_accuracy)\n if global_step % 100 == 0:\n test_loss, test_accuracy = model.evaluate(session, x_test, y_test)\n print('global_step:', global_step, 'train_loss:', train_loss, 'train_accuracy:', train_accuracy,\n 'test_loss:', test_loss, 'test_accuracy:', test_accuracy)\n\n if global_step % num_batchs == 0:\n print('Saving Model...')\n save_info(os.path.join(tensorboard_dir, \"train_info.csv\"), train_info)\n del train_info\n train_info = {\"global_step\": [], \"loss\": [], \"accuracy\": []}\n saver.save(session, save_path, global_step=global_step)\n\n pm.learning_rate *= pm.lr_decay\n\n\ndef val(model, pm, wordid, cat_to_id, data_id):\n pre_label = []\n label = []\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n save_path = tf.train.latest_checkpoint(os.path.join(CHECKPOINTS, 'text_rnn', make_dir_string(data_id,\n pm))) # os.path.join(MEDIA_ROOT,'checkpoints','text_cnn',make_dir_string(data_id, pm))\n saver = tf.train.Saver()\n saver.restore(sess=session, save_path=save_path)\n\n val_x, val_y = process(pm.val_filename, wordid, cat_to_id, max_length=250)\n batch_val = batch_iter(val_x, val_y, batch_size=64)\n for x_batch, y_batch in batch_val:\n seq_len = sequence(x_batch)\n 
pre_lab = session.run(model.predict,\n                              feed_dict={model.input_x: x_batch, model.seq_length: seq_len, model.keep_prob: 1.0})\n        pre_label.extend(pre_lab)\n        label.extend(y_batch)\n    return pre_label, label\n\n\ndef val_text(model, text_data, pm, wordid, cat_to_id, data_id):\n    pre_label = []  # 预测值\n\n    session = tf.Session()\n    session.run(tf.global_variables_initializer())\n    save_path = tf.train.latest_checkpoint(os.path.join(CHECKPOINTS, 'text_rnn', make_dir_string(data_id, pm))) # os.path.join(MEDIA_ROOT,'checkpoints','text_cnn',make_dir_string(data_id, pm))\n    saver = tf.train.Saver()\n    flag = os.path.exists(save_path)\n    saver.restore(sess=session, save_path=save_path)\n\n    val_x = process_text(text_data, wordid, cat_to_id, max_length=250)\n    seq_len = sequence(val_x)\n    pre_lab = session.run(model.predict, feed_dict={model.input_x: val_x,model.seq_length: seq_len, model.keep_prob: 1.0})\n\n    # 将预测结果展示\n    return pre_lab[0]\n","sub_path":"classification/methods/rnn/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"638324657","text":"import pandas as pd\nimport time\nimport re\ndata = pd.read_excel('/Users/Tyrone/Documents/坚果云/一艳/关闭率/关闭率.xlsx')\ndata = data[data.SERVICE_STATUS!=3]\ndef f1(x):\n    test = u'测'\n    if re.search(test,unicode(x)):\n        return False\n    else:\n        return True\ndata['X']= data['SERVICE_DESC'].apply(f1)\ndata = data[data.X==True]#SERVICE_DESC不含'测'\ndata = data[data['close_time'].notnull()]\ndata = data[(data.CATEGORY_NAME==u'公共维修')|(data.CATEGORY_NAME==u'投诉')]\n\ncolumns_name = []\nfor i in ['history','year','month','week','day']:\n    for j in ['all','intime','ratio']:\n        name = i+'_'+j\n        columns_name.append(name)\nindex_name=data.drop_duplicates(['APARTMENT_NAME'])['APARTMENT_NAME']\ndff = pd.DataFrame(0,index=index_name,columns=columns_name)\n\nt = '2017-03-10'\nt_now = time.mktime(time.strptime(t,'%Y-%m-%d'))#设置当前时间\nt_dict = {}\nt_dict['history'] = 0\nt_dict['year'] = time.mktime(time.strptime(t[:4],'%Y'))\nt_dict['month']= time.mktime(time.strptime(t[:7],'%Y-%m'))\nt_dict['week'] = t_now-604800\nt_dict['day'] = t_now-86400\n\ndef check_intime(x):\n    a = 0\n    one = u'安保投诉|服务态度投诉|绿化投诉|清洁卫生投诉|'\n    seven = u'停车投诉|报警设备|道闸故障|电梯故障|健身设施故障|其他设施设备故障|弱电系统|消防大类|照明故障'\n    twentyfive = u'装修投诉'\n    if re.search(one,unicode(x['TYPE_NAME'])):\n        if time.mktime(time.strptime(str(x['close_time'])[:19],'%Y-%m-%d %H:%M:%S'))-time.mktime(time.strptime(str(x['CREATED_ON'])[:19],'%Y-%m-%d %H:%M:%S'))<86400:\n            a = 1\n    if re.search(seven,unicode(x['TYPE_NAME'])):\n        if time.mktime(time.strptime(str(x['close_time'])[:19],'%Y-%m-%d %H:%M:%S'))-time.mktime(time.strptime(str(x['CREATED_ON'])[:19],'%Y-%m-%d %H:%M:%S'))<604800:\n            a = 1\n    if re.search(twentyfive,unicode(x['TYPE_NAME'])):\n        if time.mktime(time.strptime(str(x['close_time'])[:19],'%Y-%m-%d %H:%M:%S'))-time.mktime(time.strptime(str(x['CREATED_ON'])[:19],'%Y-%m-%d %H:%M:%S'))<2160000:\n            a = 1\n    return a\n\nfor i in ['history','year','month','week','day']:\n    data1 = data[(data.CREATED_ON_UNIX<t_now)&(data.CREATED_ON_UNIX>t_dict[i])]\n    dff[i+'_all'] = data1.drop_duplicates(['SERVICE_SID']).groupby(data1.drop_duplicates(['SERVICE_SID'])['APARTMENT_NAME']).size()\n    if len(data1)==0:\n        data1['react_status'] = 0\n    else:\n        data1['react_status'] = data1.apply(lambda x:check_intime(x),1)\n    data2 = data1[data1.react_status==1]\n    dff[i+'_intime'] = data2.drop_duplicates(['SERVICE_SID']).groupby(data2.drop_duplicates('SERVICE_SID')['APARTMENT_NAME']).size()\n    dff[i+'_ratio'] = 
dff[i+'_intime']*1.0/dff[i+'_all']\n","sub_path":"Extra/关闭率.py","file_name":"关闭率.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"255528539","text":"import pygame\nimport operations\n\npygame.init()\npygame.font.init()\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\nred = (255,0,0)\nblue = (0,0,255)\n\ndef hand_ui(button, screen, parameter):\n\n # parameter = thumb_fsr, index_fsr, middle_fsr, index_bend, middle_bend\n #line thickness => min=0 max=30\n #circle radius => min=0 max=30\n\n global black,white,red,blue\n\n text_pos = (35, 545)\n\n thumb_fsr_pos = (90,300)\n index_fsr_pos = (210,80)\n middle_fsr_pos = (310,30)\n\n index_bend_start_pos = (210,80)\n index_bend_end_pos = (260,280)\n middle_bend_start_pos = (310,30)\n middle_bend_end_pos = (330,260)\n\n hand = pygame.image.load(\"hand.jpg\")\n\n font = pygame.font.SysFont(\"Arial\", 23)\n text = font.render(\"switch screen\", True, black)\n\n screen.fill(white)\n screen.blit(hand,(0,0))\n\n pygame.draw.line(screen, blue, index_bend_start_pos, index_bend_end_pos, parameter[3])\n pygame.draw.line(screen, blue, middle_bend_start_pos, middle_bend_end_pos, parameter[4])\n\n pygame.draw.rect(screen, black, button, 2) # text box\n pygame.draw.circle(screen, red, thumb_fsr_pos, parameter[0])\n pygame.draw.circle(screen, red, index_fsr_pos, parameter[1])\n pygame.draw.circle(screen, red, middle_fsr_pos, parameter[2])\n\n screen.blit(text, text_pos)\n\n pygame.display.flip()","sub_path":"hand_ui.py","file_name":"hand_ui.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160265221","text":"import socket\r\nimport select\r\nimport sys\r\nimport math\r\nimport fractions\r\nimport random\r\n\r\nserverPort = 12348\r\nserverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\nserverSocket.bind(('',serverPort))\r\n\r\ndef main():\r\n\tcount = 0 #to keep count of number of times data has been sent so far\r\n\t# anomalous_count = 0 #to keep count of anomalous data sent so far\r\n\tcharacteristic_ratios = [[0.6,0.2,0.4], [0.1,0.2,0.3], [0.2,0.6,0.2]] #0-> file, 1-> jpg, 2-> vid\r\n\trng = 5.0 #max range percentage of data around the characteristic ratios\r\n\terr = 25.0 #error percentage of anomalous data\r\n\r\n\tserverSocket.listen(1)\r\n\tprint ('The server is ready')\r\n\tratio = [0,0,0]\r\n\r\n\twhile 1:\r\n\t\tconnectionSocket, addr = serverSocket.accept()\r\n\r\n\t\tsentence = connectionSocket.recv(1024)\r\n\r\n\t\tif sentence == 'Send ftp data':\r\n\t\t\tprint(\"Sending data to client \" + str(addr))\r\n\t\t\tcount += 1\r\n\r\n\t\t\tnum = random.randint(0,2)\r\n\r\n\t\t\tsend_anomalous = random.randint(0,100)\r\n\r\n\t\t\tif (count>1000) and (send_anomalous>70): #sending anomalous data with a probability of 30%\r\n\t\t\t\tdata_ftp = \"1\"\r\n\t\t\t\tdata_jpg = \"1\"\r\n\t\t\t\tdata_vid = \"1\"\r\n\r\n\t\t\t\t\r\n\r\n\t\t\t\tfor i in range(3):\r\n\t\t\t\t\terror = random.uniform(rng*1.05,err)\r\n\t\t\t\t\texp = random.randint(0,1)\r\n\t\t\t\t#\tratio[i] = (characteristic_ratios[num][i]+((pow(-1,exp)*(error/100))*characteristic_ratios[num][i])) * \r\n\t\t\t\t\tratio[i] = characteristic_ratios[num][i]+((pow(-1,exp)*(error/100))*characteristic_ratios[num][i])\r\n\t\t\t\r\n\t\t\telif (count>1000) and (send_anomalous<=70):\r\n\t\t\t\tdata_ftp = \"0\"\r\n\t\t\t\tdata_jpg = \"0\"\r\n\t\t\t\tdata_vid = \"0\"\r\n\t\t\t\t\r\n\t\t\t\t\r\n\r\n\t\t\t\tfor 
i in range(3):\r\n\t\t\t\t\tr = random.uniform(0,rng*1.05)\r\n\t\t\t\t\texp = random.randint(0,1)\r\n\t\t\t\t\tratio[i] = characteristic_ratios[num][i]+((pow(-1,exp)*(r/100))*characteristic_ratios[num][i])\r\n\r\n\r\n\t\t\telse: #randomly choosing one of the three characteristic ratios\r\n\t\t\t\tdata_ftp = \"0\"\r\n\t\t\t\tdata_jpg = \"0\"\r\n\t\t\t\tdata_vid = \"0\"\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\tfor i in range(3):\r\n\t\t\t\t\tr = random.uniform(0,rng)\r\n\t\t\t\t\texp = random.randint(0,1)\r\n\t\t\t\t\tratio[i] = (characteristic_ratios[num][i]+((pow(-1,exp)*(r/100))*characteristic_ratios[num][i]))\t\r\n\r\n\t\t\t\r\n\t\t\t# print(\"ratio = \")\r\n\t\t\t# print(ratio)\r\n\r\n\t\t\tdata_vid0 = get_vid()\r\n\t\t\tdata_ftp0 = get_ftp()\r\n\t\t\tdata_jpg0 = get_jpg()\r\n\r\n\t\r\n\r\n\t\t\t#appending bytes of each of the files to get the file size in the appropriate ratio\r\n\t\t\tfor i in range(int(ratio[0]*1000)):\r\n\t\t\t\tdata_ftp += data_ftp0[i]\r\n\r\n\t\t\tfor i in range(int(ratio[1]*1000)):\r\n\t\t\t\tdata_jpg += data_jpg0[i]\r\n\r\n\t\t\tfor i in range(int(ratio[2]*1000)):\r\n\t\t\t\tdata_vid += data_vid0[i]\r\n\r\n\t\t\tconnectionSocket.send(data_ftp)\r\n\t\t\tconnectionSocket.close()\t\t\t\r\n\r\n\t\tif sentence == 'Send jpg data':\r\n\t\t\tconnectionSocket.send(data_jpg)\r\n\t\t\tconnectionSocket.close()\r\n\r\n\t\tif sentence == 'Send video data':\r\n\t\t\tconnectionSocket.send(data_vid)\r\n\t\t\tconnectionSocket.close()\r\n\r\ndef get_ftp(): #reading and storing bytes of the text file\r\n\tftp_file = open('README','rb')\r\n\tdata_ftp = ftp_file.read()\r\n\tftp_file.close()\r\n\r\n\treturn data_ftp\r\n\r\ndef get_jpg(): #reading and storing bytes of the image file\r\n\timage = open(\"image3.png\",\"rb\")\r\n\tdata_jpg = image.read()\r\n\r\n\treturn data_jpg\r\n\r\n\r\ndef get_vid(): #reading and storing bytes of the byte file\r\n\tvideo = open(\"video2.mp4\",\"rb\")\r\n\tdata_vid = video.read()\r\n\r\n\treturn data_vid\r\n\r\n\r\nif __name__ == '__main__':\r\n\t\tmain()\r\n","sub_path":"gen2.py","file_name":"gen2.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"341066619","text":"#pylint: disable=C0111\nfrom setuptools import setup\n\nDESCRIPITON = (\n 'Game of TicTacToe with a Neural network'\n ' that auto-leanrs how to play from zero')\n\n\ndef readme():\n with open('README.md') as _file:\n return _file.read()\n\n\nsetup(\n name=\"alvieja\",\n version=\"0.2\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Artificial Inteligence :: Neural Network',\n 'Programming Language :: Python :: 3',\n ],\n description=DESCRIPITON,\n long_description=readme(),\n author='Wolfang Torres',\n author_email='wolfang.torres@gmail.com',\n license='MIT',\n packages=['alvieja'],\n install_requires=['numpy', 'click'],\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'alvieja = alvieja.main:cli',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"194650248","text":"class Solution:\n def is_num(self, a):\n return '0' <= a <='9'\n\n def reformat(self, s: str) -> str:\n num_list = []\n char_list = []\n\n for i in s:\n if self.is_num(i):\n num_list.append(i)\n else:\n char_list.append(i)\n if abs(len(num_list)-len(char_list))>1:\n return \"\"\n elif len(num_list) 
0:\n signe = '+'\n else:\n signe = '-'\n print('z = ', self.r, signe, 'i * ',abs(self.i))\n\n\nz = Complex(3.0, -4.5)\nz.afficherComplex()\nz = Complex(2.0, 4)\nz.afficherComplex()","sub_path":"tp_class.py","file_name":"tp_class.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"376047482","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='DynScrapper',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('name', models.CharField(unique=True, max_length=20)),\n ('scrawler_type', models.CharField(default='default', max_length=20)),\n ('url_pattern', models.CharField(null=True, blank=True, max_length=120)),\n ('counter', models.IntegerField(default=0)),\n ],\n ),\n ]\n","sub_path":"apps/dyn_scrapper/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"101820909","text":"import unittest\r\n# We're transforming the document using the server, not the API, so we're just\r\n# going to convert it here.\r\nfrom datetime import datetime \r\n\r\nimport sys, os # Needed to import using relative path.\r\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\r\n# Not super happy about using this, but it works.\r\n\r\nfrom sections import manipulation as m # Just want to make this easier to write.\r\n\r\ndata = m.load_json(m.read_file(\"sections/data.json\"))\r\n\r\nfor doc in data:\r\n m.transform_document(doc)\r\n\r\nclass TestInsert(unittest.TestCase):\r\n def test_insert_documents(self):\r\n \"\"\"\r\n This test will insert every single document in our data list into the\r\n db. 
\r\n \"\"\"\r\n result = m.insert_documents(data)\r\n # If this is true, then that means that we have at least one inserted\r\n # _id in our returned list.\r\n \r\n self.assertTrue(\"ids\" in result, \"ids does not exist in result\")\r\n self.assertTrue(len(result[\"ids\"]) > 0, \"length is not greater than 0\")\r\n \r\n def test_insert_single_document(self):\r\n \"\"\"\r\n We should also make sure that exactly one document can be inserted into\r\n our db.\r\n \"\"\"\r\n # Will insert first document in data array.\r\n result = m.insert_documents(data[0])\r\n expected = { \"Error\": \"batch op errors occurred\" }\r\n self.assertFalse(\"ids\" in result)\r\n self.assertEqual(result, expected)\r\n \r\n def test_insert_documents_no_list(self):\r\n \"\"\"\r\n The exception should catch if we try to insert an empty list into our\r\n db.\r\n \"\"\"\r\n result = m.insert_documents([])\r\n expected = { \"Error\": \"documents must be a non-empty list\" }\r\n self.assertEqual(result, expected)\r\n\r\nclass TestUpdate(unittest.TestCase):\r\n def test_update_will_fail_first_param(self):\r\n result = m.update_document(1, { \"k\": \"v\" })\r\n expected = { \"Error\": \"First parameter must be a dictionary.\" }\r\n self.assertEqual(result, expected)\r\n\r\n def test_update_will_fail_second_param(self):\r\n result = m.update_document({ \"k\": \"v\" }, 1)\r\n expected = { \"Error\": \"Second parameter must be a dictionary.\" }\r\n self.assertEqual(result, expected)\r\n\r\n def test_update_document(self):\r\n \"\"\"\r\n Test will update one document, and should return the raw result.\r\n \"\"\"\r\n u_data = { \"Volume\": 1000 }\r\n result = m.update_document({ \"status\": \"test\" }, u_data)\r\n updated = m.find_document({ \"_id\": result[\"_id\"] })\r\n\r\n self.assertNotEqual(updated[\"Volume\"], result[\"Volume\"])\r\n updated.pop(\"Volume\")\r\n result.pop(\"Volume\")\r\n\r\n self.assertDictEqual(updated, result)\r\n\r\nclass TestUpdateVolume(unittest.TestCase):\r\n def test_update_volume(self):\r\n \"\"\"\r\n Will update *only* the TEST ticker.\r\n \"\"\"\r\n original = m.update_volume(\"TEST\", 10)\r\n updated = m.find_document({ \"Ticker\": \"TEST\" })\r\n\r\n self.assertNotEqual(original[\"Volume\"], updated[\"Volume\"])\r\n original.pop(\"Volume\")\r\n self.assertEqual(updated[\"Volume\"], 10)\r\n updated.pop(\"Volume\")\r\n\r\n # Finally, ensure that both documents are the same without the change.\r\n self.assertEqual(original, updated)\r\n\r\nclass TestDelByTicker(unittest.TestCase):\r\n def test_delete_by_ticker(self):\r\n original = m.find_document({ \"Ticker\": \"TEST\" })\r\n result = m.delete_by_ticker(\"TEST\")\r\n expected = { \"ok\": 1, \"n\": 1 }\r\n\r\n self.assertDictEqual(result, expected)\r\n\r\n # Also, there shouldn't be another ticker named TEST in the db:\r\n deleted = m.delete_by_ticker(original)\r\n expected = { \"ok\": 1, \"n\": 0 }\r\n \r\n self.assertDictEqual(deleted, expected)\r\n \r\n\r\nclass TestDelete(unittest.TestCase):\r\n def test_will_fail_parameter(self):\r\n \"\"\"\r\n Function will fail after reading the sent variable.\r\n \"\"\"\r\n result = m.delete_documents(1)\r\n expected = { \"Error\": \"Search critera must be a dictionary.\" }\r\n\r\n self.assertEqual(result, expected)\r\n \r\n def test_will_delete_some_test_documents(self):\r\n search = { \"Ticker\": \"AA\", \"status\": \"test\" }\r\n result = m.delete_documents(search)\r\n expected = {'ok': 1, 'n': 1}\r\n \r\n self.assertDictEqual(result, expected)\r\n\r\n def 
test_will_delete_all_test_documents(self):\r\n search = { \"status\": \"test\" }\r\n result = m.delete_documents(search)\r\n expected = expected = {'ok': 1, 'n': 9}\r\n \r\n self.assertDictEqual(result, expected)\r\n \r\n \r\n\r\nclass Takedown(unittest.TestCase):\r\n def reset_db(self):\r\n \"\"\"\r\n This should be taken care of in delete testing, but I'll just be\r\n explicit about clearing the db of test documents.\r\n \"\"\"\r\n m.delete_documents({ \"status\": \"test\" })\r\n\r\n result = m.find_document({ \"status\": \"test\" })\r\n self.assertTrue(result is None)\r\n\r\n# if __name__ == '__main__':\r\n# unittest.main()","sub_path":"backend/stocks/tests/manipulation_test.py","file_name":"manipulation_test.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34207698","text":"# coding: utf-8\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.files.move import file_move_safe\nfrom django.conf import settings\nfrom django.contrib.staticfiles.storage import StaticFilesStorage as BaseStorage\nfrom . import ProtectionMixin\n\n\nclass MediaStorage(FileSystemStorage, ProtectionMixin):\n\n def url(self, name):\n return self.protected_url(name) or super(MediaStorage, self).url(name)\n\n def get_available_name(self, name, max_length=None):\n ow = getattr(settings, 'MYMEDIA_OVERWRITE', '')\n\n if self.exists(name):\n if ow == 'delete':\n self.delete(name)\n elif ow == 'backup':\n self.move(name, name + \".bak\")\n\n return super(MediaStorage, self).get_available_name(\n name, max_length=max_length)\n\n def move(self, src_name, to_name):\n file_move_safe(self.path(src_name), self.path(to_name))\n\n\nclass StaticStorage(BaseStorage):\n\n def get_available_name(self, name, max_length=None):\n ow = getattr(settings, 'MYMEDIA_OVERWRITE', 'delete')\n\n if self.exists(name):\n if ow == 'delete':\n self.delete(name)\n\n return super(StaticStorage, self).get_available_name(\n name, max_length=max_length)\n\n def move(self, src_name, to_name):\n file_move_safe(self.path(src_name), self.path(to_name))\n","sub_path":"mymedia/storages/locals.py","file_name":"locals.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"136228624","text":"\nimport requests\nfrom flask import Flask,request,make_response,redirect,url_for\nimport hashlib\nimport xml.etree.ElementTree as ET\nfrom joker import Joker\nfrom MyQR import myqr\nfrom os import path\nurl='https://www.qiushibaike.com/text/'\ntemplate='''\n\n \n \n %s\n \n \n\n'''\n\ntemplateImage='''\n\n \n \n %s\n \n \n \n \n\n'''\napp=Flask(__name__)\n@app.route('/')\ndef hello(username):\n return 'hello,{}'.format(username)\n@app.route('/',methods=['GET','POST'])\ndef wechat_auth():\n if request.method=='GET':\n print('get comming')\n data=request.args\n token='smile1225'\n signature=data.get('signature','')\n timestamp=data.get('timestamp','')\n nonce=data.get('nonce','')\n echostr=data.get('echostr','')\n s=[timestamp,nonce,token]\n s.sort()\n s=''.join(s)\n if(hashlib.sha1(s.encode('utf8')).hexdigest()==signature):\n return make_response(echostr) \n if request.method=='POST':\n xml_str=request.stream.read()\n xml=ET.fromstring(xml_str)\n toUserName=xml.find('ToUserName').text\n fromUserName=xml.find('FromUserName').text\n createTime=xml.find('CreateTime').text\n msgType=xml.find('MsgType').text\n content=xml.find('Content').text\n 
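# Fields parsed from the incoming XML request; they are echoed back through the reply templates defined above.\n        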
msgId=xml.find('MsgId').text\n if u'笑话' in content:\n url='http://xiaohua.zol.com.cn/'\n joker=Joker(url)\n \n if joker.is_empty():\n sourceLink=joker.get_list('//ul[contains(@class,\"recommend\")]/li/a/@href')\n jokerLink=list(map(lambda x:path.join(url,x[1:]) ,sourceLink))\n jokerXpath='//div[@class=\"article-text\"]/text()'\n joker.get_jokers(jokerXpath,jokerLink)\n content=joker.get_one_joker()\n elif u'二维码' in content:\n flag=False\n if ':' not in content and ':' not in content:\n content='格式错误,正确格式为:二维码:要存储的信息'\n else:\n target=content.split(':',1)[1] if ':' in content else content.split(':',1)[1]\n # im=myqr.run(target,save_dir='dist')\n try:\n im=myqr.run(target)\n msgType='image'\n MediaId=get_media_ID(im[2])\n flag=True\n except:\n content='暂不支持中文字符'\n if flag:\n reply=templateImage % (fromUserName,toUserName,createTime,msgType,MediaId)\n return reply\n else:\n if type(content).__name__=='unicode':\n content=content[::-1].encode('utf-8')\n elif type(content).__name__=='str':\n content=content[::-1]\n reply=template % (fromUserName,toUserName,createTime,msgType,content)\n return reply\n \ndef get_media_ID(path):\n img_url = 'https://api.weixin.qq.com/cgi-bin/material/add_material'\n payload_img = {\n 'access_token':get_token(),\n 'type' :'image'\n }\n data={'media':open(path,'rb')}\n r=requests.post(url=img_url,params=payload_img,files=data)\n resultDict=r.json()\n return resultDict['media_id']\n\ndef get_token():\n payload_access_token={\n 'grant_type':'client_credential',\n 'appid':'wxe4f11ccfba92405e',\n 'secret':'d1279eb12b5bcf9b431d8fbb33ab56c3'\n }\n token_url='https://api.weixin.qq.com/cgi-bin/token'\n r=requests.get(token_url,params=payload_access_token)\n dict_result= (r.json())\n return dict_result['access_token']\n\nif __name__ == \"__main__\":\n app.debug=True\n app.env='devlopment'\n app.run(port=8000)","sub_path":"17.WeChat public address/wechat2.py","file_name":"wechat2.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"368613430","text":"#!/usr/bin/env python3\n\n\nfrom ev3dev.ev3 import Motor, OUTPUT_A, OUTPUT_B, OUTPUT_C\n\nfrom track3r_rc_tank_ev3dev1 import Track3r\n\n\nclass Track3rWithClaw(Track3r):\n def __init__(\n self,\n left_motor_port: str = OUTPUT_B, right_motor_port: str = OUTPUT_C,\n medium_motor_port: str = OUTPUT_A):\n super().__init__(\n left_motor_port=left_motor_port, right_motor_port=right_motor_port,\n medium_motor_port=medium_motor_port)\n\n self.remote.on_beacon = self.move_claw\n\n\n def move_claw(self, state):\n if state:\n self.medium_motor.run_to_rel_pos(\n speed_sp=200,\n position_sp=-75,\n stop_action=Motor.STOP_ACTION_COAST)\n\n else:\n self.medium_motor.run_to_rel_pos(\n speed_sp=200,\n position_sp=75,\n stop_action=Motor.STOP_ACTION_COAST)\n\n\nif __name__ == '__main__':\n TRACK3R_WITH_CLAW = Track3rWithClaw()\n\n TRACK3R_WITH_CLAW.main()\n","sub_path":"Computing-Platforms/EV3/Home-Edition/Core-Robots/Track3r/Track3r-3-RCTank-with-Claw.EV3Dev1.py","file_name":"Track3r-3-RCTank-with-Claw.EV3Dev1.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"168746290","text":"from core.agility.common.AgilityModelBase import AgilityModelBase\n\n\nclass VariableValueSetBase(AgilityModelBase):\n '''\n classdocs\n '''\n def __init__(self, value=[], valuesource=None):\n AgilityModelBase.__init__(self)\n self._attrSpecs = getattr(self, '_attrSpecs', {})\n 
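# Start from any _attrSpecs already present on the instance, then merge in this class's value/valueSource field specs.\n        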
self._attrSpecs.update({'value': {'maxOccurs': 'unbounded', 'type': 'AssetProperty', 'name': 'value', 'minOccurs': '0', 'native': False}, 'valueSource': {'type': 'Asset', 'name': 'valuesource', 'minOccurs': '0', 'native': False}})\n        self.value = value\n        self.valuesource = valuesource \n","sub_path":"core/agility/v3_0/agilitymodel/base/VariableValueSet.py","file_name":"VariableValueSet.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"43103471","text":"from flask_restx import Namespace, Resource, Api\nfrom flask import request\n\nfrom models import Genre, Movie, db\n\napi_genre = Namespace(\"genres\")\napi_movie = Namespace(\"movies\")\n\napi = Api()\napi.add_namespace(api_genre)\napi.add_namespace(api_movie)\n\n\n# /genres\n@api_genre.route(\"\")\nclass GenreList(Resource):\n    def get(self):\n        \"\"\"Return all genres\"\"\"\n        genres = Genre.query.all()\n        return {\"genres\": [genre.to_json() for genre in genres]}\n\n    def post(self):\n        \"\"\"Create new genre\"\"\"\n        # 1. get request body\n        data = request.get_json()\n        # 2. create genre instance/object\n        genre = Genre(name=data['name'])\n        # 3. add genre instance/object to db transaction\n        db.session.add(genre)\n        # 4. commit transaction\n        db.session.commit()\n        # 5. return genre as response with 201 response code\n        return {\"genre\": genre.to_json()}, 201\n\n\n# /genres/\n@api_genre.route(\"/<int:genre_id>\")\nclass GenreById(Resource):\n    def get(self, genre_id):\n        \"\"\"Return a genre\"\"\"\n        genre = Genre.query.get(genre_id)\n        if genre is None:\n            # return 404\n            return {\"message\": \"The given genre ID does not exist.\"}, 404\n        return {\"genre\": genre.to_json()}\n\n    def put(self, genre_id):\n        \"\"\"Update a genre\"\"\"\n        # get request body\n        data = request.get_json()\n        # 1. Find genre by id, return 404 if not found\n        genre = Genre.query.get(genre_id)\n        if genre is None:\n            return {\"message\": \"The given genre ID does not exist.\"}, 404\n        # 2. Update genre\n        genre.name = data[\"name\"]\n        # add genre to session/transaction & commit\n        db.session.add(genre)\n        db.session.commit()\n        # 3. return genre\n        return {\"genre\": genre.to_json()}, 200\n\n\n# /movies\n@api_movie.route(\"\")\nclass MovieList(Resource):\n    def get(self):\n        \"\"\"Return all movies\"\"\"\n        movies = Movie.query.all()\n        return {\"movies\": [movie.to_json() for movie in movies]}\n\n    def post(self):\n        \"\"\"Create new movie\"\"\"\n        data = request.get_json()\n        # 1. validate genre, return 404 if invalid\n        genre = Genre.query.get(data[\"genre\"])\n        if genre is None:\n            # return 404\n            return {\"message\": \"The given genre ID does not exist.\"}, 404\n        # 2. create new movie instance\n        movie = Movie()\n        movie.title = data[\"title\"]\n        movie.year = data[\"year\"]\n        # movie.genre_id = data[\"genre\"]\n        movie.genre = genre\n        # 3. add instance to session, and commit\n        db.session.add(movie)\n        db.session.commit()\n        # 4. return new created movie\n        return {\"movie\": movie.to_json()}, 201\n\n\n# /movies/id\n@api_movie.route(\"/<int:movie_id>\")\nclass MovieById(Resource):\n    def get(self, movie_id):\n        \"\"\"Return a movie\"\"\"\n        # ----- YOUR CODE HERE ----\n        return {}\n\n    def put(self, movie_id):\n        \"\"\"Update a movie\"\"\"\n        # ----- YOUR CODE HERE ------\n        # 1. find movie, return 404 if not found\n        # 2. Validate genre, return 404 if invalid\n        # 3. Update movie (title, year, genre)\n        # add movie to session, and commit\n        # 4. 
return updated movie\n return {}\n","sub_path":"resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"540510654","text":"\nfrom threading import Thread\n\nfrom fsp_worker import Task\nfrom fsp_logger import logger\n\n\ndef start(urls):\n logger.debug('.....')\n logger.debug(urls)\n for url in urls:\n logger.debug('url looping...')\n Task(url, None)\n Thread(target=Task(url, None).run())\n","sub_path":"fps_main.py","file_name":"fps_main.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"62013751","text":"import csv\nimport itertools\n\n# Load data from\ndef load_data_from_files(from_year, to_year):\n # Recupere les matches des dernières saisons\n matches = []\n for season in range(from_year, to_year):\n\n if __name__ == \"__main__\":\n file = 'F' + str(season) + str(season + 1) + '.csv'\n else:\n file = 'history_analysis/F' + str(season) + str(season + 1) + '.csv'\n with open(file, 'r', newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for r in reader:\n if r['Div'] == '':\n continue\n row = { 'Season': season }\n for f in ['Date', 'Div', 'HomeTeam', 'AwayTeam', 'FTR']:\n row[f] = r[f]\n for f in ['FTHG', 'FTAG']:\n row[f] = int(r[f])\n matches.append(row)\n\n # Trouve toutes les équipes\n teams = set(r['HomeTeam'] for r in matches) | set(r['AwayTeam'] for r in matches)\n # Liste complète des matches, chaque match apparaisant deux fois, une par équipe,\n # Determine le nombre de matches joués par équipe\n match_list = [r['HomeTeam'] for r in matches] + [r['AwayTeam'] for r in matches]\n teams_count = { t: match_list.count(t) for t in teams}\n total_match = len(match_list)\n # Plus petit et plus grand\n max_score = max(max(r['FTHG'], r['FTAG']) for r in matches)\n min_score = min(min(r['FTHG'], r['FTAG']) for r in matches)\n return matches, total_match, teams, teams_count, min_score, max_score\n\n# Compute global sastistiques for the scores (% of 1/0, 3/2, etc)\ndef compute_global_scores(matches):\n # Premières statistiques des scores pour se faire une idée\n scores = {} # différentie 0:1 et 1:0\n scores_ordered = {} # cumule 0:1 et 1:0\n for r in matches:\n sc = (r['FTHG'], r['FTAG'])\n if r['FTHG'] <= r['FTAG']:\n sco = sc\n else:\n sco = (r['FTAG'], r['FTHG'])\n if sc in scores:\n scores[sc] += 1\n else:\n scores[sc] = 1\n if sco in scores_ordered:\n scores_ordered[sco] += 1\n else:\n scores_ordered[sco] = 1\n return scores, scores_ordered\n\n\n# Affichage desc scores (matriciel)\ndef print_scores(scores, min_score, max_score):\n goals = sum(v for v in scores.values())\n print(\" |\", end='')\n [print(\"{:^7}|\".format(i), end='') for i in range(min_score, max_score + 1)]\n print()\n for i in range(min_score, max_score + 1):\n print(\"{:^3}|\".format(i), end='')\n for j in range(min_score, max_score + 1):\n if (i, j) in scores:\n print(\"{:^7.3f}|\".format(100*scores[(i, j)] / goals), end='')\n else:\n print(\" |\", end='')\n print()\n\n# Affichage desc scores (liste)\ndef print_scores_list(scores, min_score, max_score):\n goals = sum(v for v in scores.values())\n for i in range(min_score, max_score + 1):\n for j in range(min_score, max_score + 1):\n if (i, j) in scores:\n print(\"{:^3}|{:^3}|{:^5.1f}|\".format(i, j, 100*scores[(i, j)] / goals))\n\n\n## Va chercher à séparer les équipes en groupes de meilleurs attaquants et 
défenseur\ndef split_teams_into_groups(matches, teams, NCAT):\n\n # On crée le meilleur groupe à la main. De niveau NCAT-1\n best_group = set([\"Lyon\", \"Monaco\", \"Paris SG\"])\n best_group = set()\n adjusted_teams = teams - best_group\n\n\n # Les matchs joués par équipe\n played = {t : [] for t in adjusted_teams}\n for r in matches:\n if r['HomeTeam'] in adjusted_teams:\n played[r['HomeTeam']].append(r)\n if r['AwayTeam'] in adjusted_teams:\n played[r['AwayTeam']].append(r)\n # Goals matked and received per team\n goal_marked = {t:0 for t in adjusted_teams}\n goal_received = {t:0 for t in adjusted_teams}\n for r in matches:\n if r['HomeTeam'] in adjusted_teams:\n goal_marked[r['HomeTeam']] += r['FTHG']\n goal_received[r['HomeTeam']] += r['FTAG']\n if r['AwayTeam'] in adjusted_teams:\n goal_marked[r['AwayTeam']] += r['FTAG']\n goal_received[r['AwayTeam']] += r['FTHG']\n # Average using the number of matches playes by a team\n # Nombre de buts moyens données ou recus. Les équipes sont classées par ordre de force croissante\n for t in adjusted_teams:\n goal_marked[t] /= len(played[t])\n goal_received[t] /= len(played[t])\n\n goal_marked_ordered = sorted(goal_marked.items(), key=lambda x:x[1], reverse=False)\n goal_received_ordered = sorted(goal_received.items(), key=lambda x:x[1], reverse=True)\n #print(goal_marked_ordered)\n #print(goal_received_ordered)\n\n # La répartition par groupe se fait de telle sorte que le nombre de matchs joués par groupe soit équivalent\n # sinon, à cause du bas de tableau changeant, on a une mauvaise répartition\n\n # OLD : grpoupes de même taille\n #group_size_old = len(goal_marked_ordered) / NCAT\n #attack_group_old = {t: int(i / group_size_old) for i,(t,_) in enumerate(goal_marked_ordered)}\n #defense_group_old = {t: int(i / group_size_old) for i,(t,_) in enumerate(goal_received_ordered)}\n\n total_match = sum(len(played[t]) for t in adjusted_teams)\n group_size = total_match / (NCAT - (1 if len(best_group) > 0 else 0)) # car j'ai séparé le groupe de tête à la main\n goal_marked_match = [len(played[t]) for t,_ in goal_marked_ordered]\n goal_marked_match = list(itertools.accumulate(goal_marked_match))\n goal_marked_match = [int(g / group_size) for g in goal_marked_match]\n attack_group = {t: min(g, NCAT - 1 - (1 if len(best_group)> 0 else 0)) for (t,_), g in zip(goal_marked_ordered, goal_marked_match)}\n #print(attack_group)\n\n goal_received_match = [len(played[t]) for t,_ in goal_received_ordered]\n goal_received_match = list(itertools.accumulate(goal_received_match))\n goal_received_match = [ int(g / group_size) for g in goal_received_match]\n defense_group = {t: min(g, NCAT - 1 - (1 if len(best_group) > 0 else 0)) for (t,_), g in zip(goal_received_ordered, goal_received_match)}\n #print(defense_group)\n\n # ajout du groupe de tête\n for t in best_group:\n defense_group[t] = NCAT - 1\n attack_group[t] = NCAT - 1\n return attack_group, defense_group\n\ndef compute_base_statistics(matches, attack_group, defense_group, min_score, max_score, NCAT):\n # Regroupement des scores par classes d'attaque\n base_statistics = {} # dictionnaire indexé par (Aa, Ad, Ba, Bd, s1, s2)\n base_2 = {} # dictionnaire indexé par (Aa, Ad, Ba, Bd} = { 's': {(s1,s2):nbre de buts, 'p':(s1,s2):proba, 'l':# échantillons)\n\n # initialise base_2\n ra = range(NCAT)\n sca = range(min_score, max_score + 1)\n p_array = [[0 for _ in list(sca)] for _ in list(sca)]\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n base_2[(Aa, Ad, Ba, Bd)] = {'s': [[0 for _ in list(sca)] for _ in 
list(sca)], 'l': 0}\n\n for r in matches:\n Aa = attack_group[r['HomeTeam']]\n Ad = defense_group[r['HomeTeam']]\n Ba = attack_group[r['AwayTeam']]\n Bd = defense_group[r['AwayTeam']]\n s1 = r['FTHG']\n s2 = r['FTAG']\n k = (Aa, Ad, Ba, Bd, s1, s2)\n if k not in base_statistics:\n base_statistics[k] = 0\n base_statistics[k] += 1\n base_2[(Aa, Ad, Ba, Bd)]['l'] += 1\n base_2[(Aa, Ad, Ba, Bd)]['s'][s1][s2] += 1\n\n base_statistics = {k: v / len(matches) for k, v in base_statistics.items()}\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n n = base_2[(Aa, Ad, Ba, Bd)]['l']\n if n > 0:\n base_2[(Aa, Ad, Ba, Bd)]['p'] = [[base_2[(Aa, Ad, Ba, Bd)]['s'][i][j] / n for j in list(sca)] for i in list(sca)]\n\n return base_statistics, base_2\n\n\ndef print_base_statistics(stats):\n print()\n for (Aa, Ad, Ba, Bd, s1, s2), p in stats.items():\n print(\"{},{},{},{},{},{},{}\".format(Aa, Ad, Ba, Bd, s1, s2, p))\n\n# Affichage des base_statistics avec les deux vecteurs cumulés, en version matricielle\ndef print_base_statistics_array(stats, min_score, max_score, NCAT):\n ra = range(NCAT)\n sca = range(min_score, max_score + 1)\n data_length = []\n print(\"Stats\")\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n print(\"Aa:{}, Ad:{}, Ba:{}, Bd:{}\".format(Aa, Ad, Ba, Bd))\n extract = { (s1, s2): v for (Aa_, Ad_, Ba_, Bd_, s1, s2), v in stats.items() if Aa == Aa_ and Ad == Ad_ and Ba == Ba_ and Bd == Bd_ }\n if len(extract) > 10:\n print_scores(extract, min_score, max_score)\n data_length.append((Aa, Ad, Ba, Bd, len(extract)))\n return data_length\n\n# Affichage des base_statistics avec les deux vecteurs cumulés\ndef print_base_statistics_vector(stats, min_score, max_score, NCAT):\n ra = range(NCAT)\n sca = range(min_score, max_score + 1)\n print(\"Stats vector\")\n print(\" Aa , Ad , Ba , Bd ,\", end='')\n [print(\"A{} ,\".format(i), end='') for i in range(min_score, max_score + 1)]\n [print(\"D{} ,\".format(i), end='') for i in range(min_score, max_score + 1)]\n print(\"length\")\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n print(\"{:^5},{:^5},{:^5},{:^5},\".format(Aa, Ad, Ba, Bd), end='')\n extract = { (s1, s2): v for (Aa_, Ad_, Ba_, Bd_, s1, s2), v in stats.items() if Aa == Aa_ and Ad == Ad_ and Ba == Ba_ and Bd == Bd_ }\n # Total sur les buts marqués\n total = sum(v for v in extract.values())\n for i in list(sca):\n if total == 0:\n print(\"{:^5},\".format(''), end='')\n else:\n print(\"{:^5.2f},\".format(100 * sum(v for (s1, _),v in extract.items() if s1 == i) / total) , end='')\n for i in list(sca):\n if total == 0:\n print(\"{:^5},\".format(''), end='')\n else:\n print(\"{:^5.2f},\".format(100 * sum(v for (_, s2),v in extract.items() if s2 == i) / total), end='')\n print(len(extract))\n\n# Construire des vecteurs (de probabilité de mettre des buts/ probabilités d'en encaisser\ndef build_vectors(base_statistics, base_2, min_score, max_score, NCAT):\n vectors = {}\n ra = range(NCAT)\n sca = range(min_score, max_score + 1)\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n extract = {(s1, s2): v for (Aa_, Ad_, Ba_, Bd_, s1, s2), v in base_statistics.items() if Aa == Aa_ and Ad == Ad_ and Ba == Ba_ and Bd == Bd_}\n total = sum(v for v in extract.values())\n bm = [100 * sum(v for (s1, _), v in extract.items() if s1 == i) / total if total > 0 else 0 for i in list(sca)]\n br = [100 * sum(v for (_, s2), v in extract.items() if s2 == i) / total if total > 0 else 0 for i in list(sca)]\n vectors[(Aa, Ad, Ba, Bd)] = {\n 'bm':bm, # vecteur de probabilité de buts 
marqués\n 'br':br, # vecteur de probabilité de buts reçus\n # 'l': len(extract) # nombre d'échantillons (de valeurs non nulles dans la matrice de scores ??)\n 'l': base_2[(Aa, Ad, Ba, Bd)]['l'] # nombre d'échantillons (de valeurs non nulles dans la matrice de scores ??)\n }\n return vectors\n\n# distance entre deux Aa, Ad, Ba, Bd\ndef dist_v(a, b):\n Aa, Ad, Ba, Bd = a\n Aa_, Ad_, Ba_, Bd_ = b\n return abs(Aa - Aa_) + abs(Ad - Ad_) + abs(Ba - Ba_) + abs(Bd - Bd_)\n\n# Construit les vecteurs manquants\ndef build_vectors_rebuilt(vectors, threshold_1, threshold_2):\n vectors_rebuilt = {}\n sub_vector = {k:r for k,r in vectors.items() if r['l'] > threshold_1}\n zeros = False\n for k, r in list(vectors.items()):\n if not zeros:\n zeros = [0.0 for _ in r['bm']]\n if r['l'] >= threshold_1:\n vectors_rebuilt[k] = r\n continue\n # ordonne les vecteurs proches par distance croissante\n closest = sorted([(kc, rc['l'], dist_v(k, kc)) for kc, rc in vectors.items() if rc['l'] > 0], key=lambda x:x[2])\n closest_l = list(itertools.takewhile(lambda x: x < threshold_2, itertools.accumulate(l for _, l, _ in closest)))\n closest = closest[:len(closest_l) + 1]\n bm, br = zeros.copy(), zeros.copy()\n for kc, l, _ in closest:\n bm = [ x + l * y for x,y in zip(bm, vectors[kc]['bm'])]\n br = [ x + l * y for x,y in zip(br, vectors[kc]['br'])]\n t = sum(l for _, l ,_ in closest)\n bm = [ x / t for x in bm]\n br = [ x / t for x in br]\n vectors_rebuilt[k] = {\n 'bm': bm,\n 'br': br,\n 'l': t\n }\n return vectors_rebuilt\n\ndef print_vectors(vectors, min_score, max_score, NCAT):\n ra = range(NCAT)\n sca = range(min_score, max_score + 1)\n print(\"Stats vector\")\n print(\"Aa,Ad,Ba,Bd,\", end='')\n [print(\"A{},\".format(i), end='') for i in range(min_score, max_score + 1)]\n [print(\"D{},\".format(i), end='') for i in range(min_score, max_score + 1)]\n print(\"length\")\n for Aa, Ad, Ba, Bd in itertools.product(ra, ra, ra, ra):\n print(\"{},{},{},{},\".format(Aa, Ad, Ba, Bd), end='')\n r = vectors[(Aa, Ad, Ba, Bd)]\n for i in list(sca):\n print(\"{:^.4f},\".format(r['bm'][i]) , end='')\n for i in list(sca):\n print(\"{:^.4f},\".format(r['br'][i]) , end='')\n print(r['l'])\n\ndef significant_matrices(stats, min_score, max_score, threshold):\n flat = [(k, r['p'], r['l']) for k, r in stats.items() if r['l'] > 0]\n data_full = sorted([(k, p, l) for k, p, l in flat if l > 0], key=lambda x: x[2], reverse=True)\n data_full = list(itertools.takewhile(lambda x: x[2] > threshold, (x for x in data_full)))\n return data_full\n\ndef print_matrices(matrices):\n print(\"Aa,Ad,Ba,Bd,l,s1,s2,p\")\n for (Aa, Ad, Ba, Bd), p, l in matrices:\n for s1, row in enumerate(p):\n for s2, probability in enumerate(row):\n print(\"{},{},{},{},{},\".format(Aa, Ad, Ba, Bd, l), end='')\n print(\"{},{},{}\".format(s1, s2, probability))\n\n# Construit les vecteurs manquants\ndef build_matrices_rebuilt(stats, threshold_1, threshold_2):\n stats_rebuilt = {}\n flat = [(k, r['p'], r['l']) for k, r in stats.items() if r['l'] > 0]\n data_full = sorted([(k, p, l) for k, p, l in flat if l > 0], key=lambda x: x[2], reverse=True)\n for k, r in list(stats.items()):\n if r['l'] >= threshold_1:\n stats_rebuilt[k] = r\n continue\n # ordonne les vecteurs proches par distance croissante\n closest = sorted([(kc, rc['l'], dist_v(k, kc)) for kc, rc in stats.items() if rc['l'] > 0], key=lambda x:x[2])\n closest_l = list(itertools.takewhile(lambda x: x < threshold_2, itertools.accumulate(l for _, l, _ in closest)))\n closest = closest[:len(closest_l) + 1]\n new_matrix = 
[[0 for _ in row] for row in r['s']]\n t = 0\n for kc, l, d in closest:\n f = l / (1+d)\n t += f\n new_matrix = [\n [x1 + f * y1 for x1, y1 in zip(x,y)] for x,y in zip(new_matrix, stats[kc]['p'])\n ]\n #t = sum(l for _, l ,_ in closest)\n new_matrix = [ [x1 / t for x1 in x] for x in new_matrix]\n stats_rebuilt[k] = {\n 'p': new_matrix,\n 'l': sum(l for _, l ,_ in closest)\n }\n return stats_rebuilt\n\ndef print_rebuilt_matrices(matrices):\n print(\"Aa,Ad,Ba,Bd,l,s1,s2,p\")\n for (Aa, Ad, Ba, Bd), r in matrices.items():\n p = r['p']\n l = r['l']\n for s1, row in enumerate(p):\n for s2, proba in enumerate(row):\n print(\"{},{},{},{},{},\".format(Aa, Ad, Ba, Bd, l), end='')\n print(\"{},{},{}\".format(s1, s2, proba))\n\n# =================================================================================================\n## Load data from files, identify teams and number of matches per team\ndef compute_rebuilt_matrices(from_year, to_year, proba_table_file, threshold_1, threshold_2, NCAT, printing=True):\n matches, total_match, teams, teams_count, min_score, max_score = load_data_from_files(from_year, to_year)\n\n '''\n ## Statistiques globales des scores\n scores, scores_ordered = compute_global_scores(matches)\n if printing:\n print_scores(scores, min_score, max_score)\n print()\n print_scores(scores_ordered, min_score, max_score)\n print()\n '''\n ## Séparer les équipes en groupes de meilleurs attaquants et défenseur\n attack_group, defense_group = split_teams_into_groups(matches, teams, NCAT)\n\n # Première approche : cumuler par Aa,Ad, Ba,Bd,s1,s2\n base_statistics, base_2 = compute_base_statistics(matches, attack_group, defense_group, min_score, max_score, NCAT)\n #print_base_statistics_vector(base_statistics, min_score, max_score, NCAT)\n\n # Construire des vecteurs (de probabilité de mettre des buts/ probabilités d'en encaisser\n #vectors = build_vectors(base_statistics, base_2, min_score, max_score, NCAT)\n #vectors_rebuilt = build_vectors_rebuilt(vectors, threshold_1, threshold_2, NCAT)\n #print_vectors(vectors_rebuilt, min_score, max_score, NCAT)\n\n ordered_matrices = significant_matrices(base_2, min_score, max_score, 20)\n rebuilt_matrices = build_matrices_rebuilt(base_2, threshold_1, threshold_2)\n if printing:\n print_rebuilt_matrices(rebuilt_matrices)\n if proba_table_file is not '':\n with open(proba_table_file, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=['Aa', 'Ad', 'Ba', 'Bd', 'l', 's1', 's2', 'p'])\n writer.writeheader()\n for (Aa, Ad, Ba, Bd), r in rebuilt_matrices.items():\n p = r['p']\n l = r['l']\n for s1, row in enumerate(p):\n for s2, probability in enumerate(row):\n w_r = {'Aa': Aa, 'Ad':Ad, 'Ba':Ba, 'Bd':Bd, 'l':l, 's1':s1, 's2':s2, 'p':probability}\n writer.writerow(w_r)\n return rebuilt_matrices\n\n\nif __name__ == \"__main__\":\n compute_rebuilt_matrices(2010, 2018, 'data_built_m3_cat8_long.csv', threshold_1=1, threshold_2=1,NCAT=8, printing=False )","sub_path":"history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":18153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237582034","text":"class Solution:\n def reverseOnlyLetters(self, S: str) ->str:\n stack_1=[c for c in S if c.isalpha()]\n S_rev=[]\n for c in S:\n if c.isalpha()==1:\n S_rev.append(stack_1.pop())\n else:\n S_rev.append(c)\n return 
\"\".join(S_rev)","sub_path":"Week_01/d6_917_reverseOnlyLetters.py","file_name":"d6_917_reverseOnlyLetters.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"304020961","text":"import time\nimport statistics as stats\n\nfrom collections import defaultdict\n\nfrom defaultcontext import with_default_context\n\n\n@with_default_context\nclass Profiler(object):\n def __init__(self, prefix=''):\n self.data = defaultdict(list)\n self._prefix = prefix\n\n def compute_stats(self):\n result = {}\n for func_name, data_points in self.data.items():\n result[self._prefix + func_name] = {\n 'avg': stats.mean(data_points),\n 'min': min(data_points),\n 'max': max(data_points),\n 'num': len(data_points)\n }\n if len(data_points) >= 2:\n result[func_name]['std'] = stats.stdev(data_points)\n\n return result\n\n def __repr__(self):\n return 'Profiler(%s)' % repr(self.compute_stats)\n\n\ndef profiled(func):\n def wrapped(*args, **kwargs):\n profiler = Profiler.get_default()\n if profiler is None:\n return func(*args, **kwargs)\n\n t0 = time.time()\n result = func(*args, **kwargs)\n t1 = time.time()\n profiler.data[func.__name__].append(t1 - t0)\n return result\n\n return wrapped\n\n","sub_path":"claimchain/utils/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"374645447","text":"\nimport unittest\nfrom log import log_factory\n\nclass TestLogFactory(unittest.TestCase):\n\n def test_log_Factory_createLog(self):\n thrown = False\n try:\n lf = log_factory.LogFactory()\n lf.create_log(\"syslog\", \"text\")\n except ImportError as e:\n thrown = True\n self.assertFalse(thrown)\n\n def test_log_Factory_parse_class_name(self):\n\n lf = log_factory.LogFactory()\n exp = \"Syslog\"\n result = lf._parse_class_name(\"syslog\")\n self.assertEquals(exp, result)\n\n def test_log_factory_instantiate_class(self):\n lf = log_factory.LogFactory()\n _class = lf.instantiate_class(\"syslog\")\n self.assertIsNotNone(_class)","sub_path":"docker/log/test/test_log_factory.py","file_name":"test_log_factory.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"43416083","text":"# r2c1 核心逻辑功能库\n# 2015.6.16 create by David.Yi\n# 增加 def do_show, 提供show 命令的解析,\n# 解析使用配置文件r2c1.conf 动态生成r2c1对象\n# 2015.6.20 edit log属性相关移到r2对象中, r2自己开始采用r2_log独立log文件\n# 2015.7.30 独立 解析命令行的参数 函数 def phrase_opts()\n# 2015.9.2 格式整理\n# 2015.9.6 r2_main.py --> r2c1_main.py\n# 2015.9.13 extra rules 命令参数的扩��规则解析\n# 2015.9.25 开始引入 r2_const.py 常量统一\n# 2015.10.17 引入 tokenize.py 加入参数是否符合数字表达式判断\n# 2015.11.22 12.1 #12046 #12047 #12048 #12049 优化增强 check_value(),支持money类数值参数\n# 2015.12.18 #12085 将整个文件修改为类模式,供其他调用\n# 2015.12.19 #12088 R2中的方法除了phrase_cmd() 都修改为保护方法\n# 2015.12.19 #12089 使用公共函数fpytools 中的 FpyCache class方式进行cf 读取缓存\n# 2015.12.27 edit by david.yi #12091 替换字符串+ 为 join 方法\n# 2016.2.27 edit 修改 import 方式, v1.0.2 #12100\n# 2016.3.4 edit #12102 所有opt相关变量增加opt 标示\n# 2016.3.31 edit #12098 lib fish_base 修改为标准 python 包 import 方式\n# 2016.4.1 move the project to github\n# 2016.4.3 edit python package fish_base usage, FpyCache to FishCache, get_cache to get_cf_cachet\n# 2016.4.3 ready for upload to Pypi\n\n\nimport configparser # 配置文件读写\nimport getopt # 命令行参数处理\nimport json\nimport logging\nimport logging.config\nimport os\nimport re\n\nimport 
fish_base\nfrom fish_base import FishCache\nfrom .r2c1_const import * # r2c1 私有常量\n\nfrom r2c1 import r2_tokenize\n\n\n# r2 对象,保存一条r2命令需要的相关的信息\n# 2015.7.25 add rf\n# 2015.7.26 add pattern\n# 2015.12.9 add token 相关\n# 2015.12.17 #12085 class化\nclass R2(object):\n # edit 2015.9.5.\n # r2c1 对象,和r2命令的参数对应的对象,和json对象对应\n class R2C1(object):\n pass\n\n r2c1_class = R2C1() # r2c1 class\n\n r2_cache = FishCache() # 申明conf 文件使用的cache\n\n # 定义类内部变量\n\n json_path = ''\n\n r2c1_conf_long_filename = ''\n\n setattr_list = []\n\n cf_args = ''\n cf_short_opt = ''\n cf_long_opt = []\n\n cf = configparser.ConfigParser() # 配置文件\n cf_opt_section = [] # 配置文件section列表\n\n r2c1_info_list = [] # 记录处理过程中的信息列表\n\n pattern = False\n pattern_name = ''\n\n cmd0 = '' # 用户输入的命令,eg. get\n cmd1 = '' # 用户输入的命令后面的参数\n\n args0_list = '' # 用户输入的参数列表\n opt0_list = [] # 用户输入的选项列表\n\n opt = '' # 选项的key,eg. business\n opt0 = '' # 选项的key原来值,eg. -b\n cur_opt_value = ''\n\n token_mark = False\n\n glist_count = 0 # group 处理用\n glist = []\n\n support_cmd = [] # 支持命令\n\n # r2_cache = {} # 定义缓存\n\n # 版本和日期修改为从 consts 文件中读取\n def __init__(self):\n\n self.ver = r2c1_version\n self.date = r2c1_date\n self.tag = r2c1_tag\n\n # my_init()\n # r2 初始化,定义所有属性\n # 2015.7.7 7.11 7.25 edit by david.yi\n # 2015.7.27 增加json path, conf path\n # 2015.8.30 调整 log 文件的路径统一到 conf 目录 edit by david.yi\n # 2015.9.25 删除 init_r2c1_str() 函数,常量统一引用 r2_const.py\n # 2015.12.15 #12082 conf文件使用参数方式调入\n # 2015.12.17 #12085 class化\n # 2016.2.28 #13001 获得路径方法修改\n @staticmethod\n def my_init(conf_name):\n\n # 设置 conf 文件名\n R2.r2c1_conf_long_filename = conf_name\n\n # 2016.2.28\n cur_dir = os.path.split(os.path.realpath(__file__))[0]\n\n # json 文件存放路径\n R2.json_path = os.path.join(cur_dir, 'json')\n\n # 拼接获得完整r2c1 conf 文件名,包含路径\n # #12082 conf文件使用参数传入\n # #12083 conf文件如果不存在,返回错误信息\n # #1 修改 conf 文件传入方式, 直接判断 conf 文件名是否存在\n if not os.path.isfile(R2.r2c1_conf_long_filename):\n s = (r2_info['R2C1_CONFIG_FILE_NOT_EXISTS'])\n R2.r2c1_info_list.append(s)\n return False, R2.r2c1_info_list, None\n\n # log 记录 r2_main开始\n logging.info('import r2_main.py ok')\n\n logging.info(' '.join(['json path:', 'R2.json_path', 'conf path:', 'R2.conf_path']))\n\n # 检查value标志是否合法属性,初始化为False\n R2.check_value_mark = False\n\n # cf: 准备读取r2c1配置文件,其中包括 -b=business 这样的转换预定义\n # 读入配置文件\n R2.cf.read(R2.r2c1_conf_long_filename)\n\n # #12073 从配置文件中 读取 group 内容\n # 有多少group rule 记录到glist_count\n R2.glist_count = int(R2.cf['re_group']['group_rule_count'])\n\n # 根据group rule数量循环判断\n for i in range(R2.glist_count):\n # 从 conf 文件中读取真实的规则\n R2.glist.append(R2.cf['re_group'][''.join(['re_', str(i)])])\n\n # #12084 读入r2c1 支持的命令\n R2.support_cmd = R2.cf['r2c1_cmd']['support_cmd'].split(',')\n\n return True\n\n # phrase_cmd()\n # 执行r2命令的c1过程,解析传入的命令参数,返回结果,json解析结果或者错误信息\n # 输入: cmd0: 命令 cmd1: 选项参数值,\n # 输出: 返回值1:参数检验是否正确,true/false\n # 返回值2:如果为true,返回参数解析的json串;如果为false,返回相信错误信息\n #\n # 2015.6.16 create and edit by david.yi\n # 2015.7.4 7.6 优化\n # 2015.7.10 7.11 7.13 7.15 参数匹配规则\n # 2015.7.28 show --> phrase 命令标准化,准备增加更多命令\n # 2015.7.30 修改细节,加入错误判断,增加判断必须输入选项\n # 2015.9.4 修改返回参数,修改为返回成功标志、结果和错误信息三个参数\n # 2015.9.14 增加判断扩展规则功能,调用 check_extra_rule()\n # 2015.9.20 写对象属性时候,考虑日期范围类型,比如2014-2015\n # 2015.12.6 代码优化,常用变量写入到对象,便于全局调用\n # 2015.12.8 #12070 如果标准检查已经有错误,则扩展规则检查不需要,优化性能\n # 2015.12.13 #12080 增加判断cmd不在预设命令的判断\n # 2016.2.13 #12092 日志记录cmd0和cmd1参数,修正原来日志错误\n # 2016.3.5 v1.0.2 开始处理 args 设计 #12103 #12104 #12105\n def phrase_cmd(self, cmd0, cmd1):\n\n # 设置记录检查value错误列表为空\n R2.r2c1_info_list = []\n\n # 
用户输入的命令和后面的参数值等\n R2.cmd0 = cmd0\n R2.cmd1 = cmd1\n logging.info(' '.join(['cmd0: ', R2.cmd0, 'cmd1: ', R2.cmd1]))\n\n # #12080 检查命令是否在支持命令中\n if not (self._check_cmd(R2.cmd0)):\n # 不支持的命令,返回错误\n s = (r2_info['COMMAND_NOT_EXISTS']).format(info_cmd=R2.cmd0)\n R2.r2c1_info_list.append(s)\n return False, R2.r2c1_info_list, None\n\n # >>> 处理 conf args 开始\n # 生成解析命令参数需要的conf 文件中的 section 名称 #12103\n temp_args = ''.join([R2.cmd0, '_args'])\n\n # 从 conf 获得参数 args 的设置\n temp_s = self.r2_cache.get_cf_cache(R2.cf, temp_args, 'args')\n R2.cf_args = temp_s.split(',')\n logging.info(''.join(['cf args: ', temp_s]))\n\n # <<< 处理 conf args 结束\n\n # >>> 处理 conf opt 开始\n # 生成解析命令需要的conf文件中的section 名称\n temp_opt = ''.join([R2.cmd0, '_opt'])\n temp_opt_common = ''.join([R2.cmd0, '_opt_common'])\n temp_opt_common2 = ''.join([R2.cmd0, '_opt_common2'])\n\n # 从 conf 获得短选项,长选项的设置\n R2.cf_short_opt = self.r2_cache.get_cf_cache(R2.cf, temp_opt, 'short_opt')\n # 长参数转换为list\n R2.cf_long_opt = self.r2_cache.get_cf_cache(R2.cf, temp_opt, 'long_opt').split(',')\n logging.info(''.join(['cf_short_opt:', R2.cf_short_opt]))\n logging.info(''.join(['cf_long_opt:', self.r2_cache.get_cf_cache(R2.cf, temp_opt, 'long_opt')]))\n\n # <<< 处理 conf opt 结束\n\n # 解析命令行的参数,获得短选项、长选项、开关参数等,调用内部 _phrase_option() 静态方法\n # 解析后的结果会放在 R2.opt0_list 和 R2.args0_list\n if not (self._phrase_option()):\n return False, R2.r2c1_info_list, None\n\n # >>> 处理 r2 args 开始 #12104\n\n # 按照用户输入的args 来循环\n args_in_conf = True\n # print(R2.args0_list)\n # print(R2.cf_args)\n for temp_a0 in R2.args0_list:\n # print(temp_a0)\n # 检查是否在 conf 规定的 args 中\n if not(temp_a0 in R2.cf_args):\n args_in_conf = False\n\n # print(args_in_conf)\n\n # 有不存在的 args , 返回错误\n if not args_in_conf:\n # 拼接提示错误信息\n R2.r2c1_info_list.append(r2_info['ARGS_ILLEGAL'].format(info_args=''.join(R2.args0_list)))\n return False, R2.r2c1_info_list, None\n\n # <<< 处理 r2 args 结束\n\n # 将 opt list转换为 dict类型,便于检索\n R2.opt_dict = dict(R2.opt0_list)\n\n # 读入配置文件中 common section设置,其中包含所有选项对应,比如 -b=business\n R2.cf_opt_section = R2.cf.options(temp_opt_common)\n\n # 检查必须输入的选项是否都输入\n need_opt = self.r2_cache.get_cf_cache(R2.cf, temp_opt_common2, 'need')\n need_opt_list = need_opt.split(',')\n\n need_opt_in_mark = False\n\n for key in R2.opt_dict:\n if key in need_opt_list:\n need_opt_in_mark = True\n\n # 如果有需要输入的参数没有输入,返回false,记录错误信息\n if not need_opt_in_mark:\n R2.r2c1_info_list.append(r2_info['OPTION_KEY_VALUE_EXPRESSION_ERROR'])\n return False, R2.r2c1_info_list, None\n\n # 如果 设置过r2c1 class属性,则需要先清空这些属性\n # r2c1 class实例中的属性是根据用户参数自动设置便于转换为json,所以要判断\n if len(R2.setattr_list) > 0:\n for i in range(len(R2.setattr_list)):\n delattr(self.r2c1_class, R2.setattr_list[i])\n # date 属性是硬加入的,也需要删除\n if hasattr(self.r2c1_class, 'date'):\n delattr(self.r2c1_class, 'date')\n\n # 设置属性列表为空\n R2.setattr_list = []\n\n # 写入r2c1 版本、命令等,供后续解析使用\n self.r2c1_class._version = self.ver\n self.r2c1_class._command = R2.cmd0\n self.r2c1_class._tag = self.tag\n\n # 生成需要检查key的列表,check_opt_list\n check_opt = self.r2_cache.get_cf_cache(R2.cf, temp_opt_common2, 'check')\n check_opt_list = check_opt.split(',')\n\n # 日志记录\n # r2.log.info('check key list: ' + check_opt)\n\n # args 内容写入 r2c1 class #12105\n for item in R2.args0_list:\n setattr(self.r2c1_class, item, True)\n R2.setattr_list.append(item)\n\n # 根据配置文件中的设定,匹配opt_dict中的key-value值,并动态设置为r2c1的属性名称和值\n # eg. 
[get_common] -b=business -f=fields\n for item in R2.cf_opt_section:\n # 如果用户输入的参数key中存在规则中的key\n if item in R2.opt_dict.keys():\n # 日志记录\n # r2.log.info('input key: ' + item)\n # 读取key, value, r2.cf_opt_section[i] 表示参数,比如 -b\n # r2.key = r2.cf.get(temp_opt_common, item)\n R2.opt = self.r2_cache.get_cf_cache(R2.cf, temp_opt_common, item)\n\n # value_str 记录value,以字符串形式\n value_str = R2.opt_dict[item]\n # value 记录为列表形式\n value = value_str.split(',')\n\n # r2.log.info('key: ' + r2.key)\n # r2.log.info('value: ' + value_str)\n\n R2.opt0 = item\n\n # 参数在需要检查的列表范围内,则进行参数检查\n if R2.opt0 in check_opt_list:\n\n # 检查输入的参数值是否正确\n for j in range(len(value)):\n R2.cur_opt_value = value[j]\n\n # 检查命令参数,调用 check_value()\n self._check_value()\n\n # 将正确的 r2 命令写入 r2c1 class, 这样可以转换为 json 格式\n # 动态设置r2c1 class 实例的属性和值\n setattr(self.r2c1_class, R2.opt, value)\n R2.setattr_list.append(R2.opt)\n\n # 根据记录 check value的错误列表内容,来判断是否所有参数检查通过,设置标志\n if len(R2.r2c1_info_list) == 0:\n R2.check_value_mark = True\n else:\n R2.check_value_mark = False\n\n # #12070 对于扩展规则性能优化\n # 检查参数错误\n if not R2.check_value_mark:\n\n logging.error('command check value error')\n return False, R2.r2c1_info_list, None\n\n # 检查参数标志为True,通过所有语法检查\n else:\n\n # 判断是否存在当前命令的扩展规则,类似 [get_extra_rules]\n if R2.cf.has_section(''.join([R2.cmd0, '_extra_rules'])):\n\n # 日志记录\n logging.info(''.join(['found extra rules:', R2.cmd0]))\n\n # 如果检查扩展规则返回为 False,说明没有通过检查,调用 check_extra_rule()\n if not (self._check_extra_rule()):\n # r2c1 直接返回 False 等错误信息\n return False, R2.r2c1_info_list, None\n\n # 生成 r2c1 的json string,回写到 r2 对象的json_str属性\n R2.json_str = json.dumps(fish_base.serialize_instance(self.r2c1_class),\n sort_keys=True, indent=4)\n\n logging.info(''.join(['r2c1 json: ', R2.json_str]))\n\n return True, R2.r2c1_info_list, R2.json_str\n\n # 测试将R2C1生成的json读入到python\n # j1 = json.loads(r2.json_str)\n # print('json to dict:', j1)\n # print(\"time\", j1[\"time\"])\n\n # phrase_option()\n # 解析命令行的参数\n # 2015.7.30 create\n @staticmethod\n def _phrase_option():\n try:\n # 解析命令行参数, opt0_list 为各类选项 a参数\n R2.opt0_list, R2.args0_list = getopt.getopt(R2.cmd1.split(), R2.cf_short_opt, R2.cf_long_opt)\n # 打日志,r2 命令选项和参数解析内容\n logging.info(' '.join(['opt list: ', str(R2.opt0_list)]))\n logging.info(' '.join(['args list: ', str(R2.args0_list)]))\n return True\n\n # 错误处理\n except getopt.GetoptError:\n\n # 记录错误信息\n s = r2_info['COMMAND_OPTIONS_ERROR']\n R2.r2c1_info_list.append(s)\n # 记录到日志\n logging.warning(s)\n\n return False\n\n # check_value() 检查r2c1中输入的命令行参数是否合法\n # 输入:无\n # eg. command='get' key='business' key0='-b' value='sky'\n # 输出:True or False\n # create 2015.7.5 7.11 by david.yi\n # 2015.7.13 测试正则表达式的检测 by david.yi\n # 2015.7.25 日期正则表达式抽象到 r2c1_rule.conf,减少重复设置\n # 2015.7.28 7.30 准备支持多命令,优化\n # 2015.9.3 进行多命令支持同样功能参数设计修改\n # 2015.9.5 去除r2c1_rule.conf调用,日期正则也写到 r2c1.conf 中\n # 2015.9.8 修改完善正则代理功能 #11106\n # 2015.9.20 修改正则规则解析,支持类似 yyyy-yyyy 这样情况 #12006\n # 2015.9.26 修改代码中的逻辑错误\n # 2015.10.17 增加判断value为数字表达式的情况\n # 2015.11.27 #12046 修改正则判断的各类扩展情况\n def _check_value(self):\n # 拼接配置文件中保存参数规则的section项名称\n # eg. 
'get_rule_business'\n command_rule_key = ''.join([R2.cmd0, '_rule_', R2.opt])\n\n # 检查是否存在直接拼接的section名称\n if R2.cf.has_section(command_rule_key):\n\n # 存在直接命令参数规则的section, 设置到标准rule_key,作为后续操作\n rule_key = command_rule_key\n else:\n\n # 不存在直接命令rule key的section,则拼接 base rule key作为参考的section\n rule_key = ''.join(['base', '_rule_', R2.opt])\n\n # 日志记录,rule_key, 可以知道从command rule读取还是 base rule 读取\n # r2.log.info('config find section: ' + rule_key)\n\n # 读入配置文件中相应section的rule规则,section 的名称如 rule_business\n rule_type = self.r2_cache.get_cf_cache(R2.cf, rule_key, 'rule_type')\n\n # 如果规则是 list\n if rule_type == 'list':\n # 读取临时 list 内容\n l = self.r2_cache.get_cf_cache(R2.cf, rule_key, 'list')\n # 如果输入的 value 在 list 中\n if R2.cur_opt_value in l:\n return True\n else:\n # 记录错误信息\n s = (r2_info['OPTION_KEY_VALUE_NOT_IN_LIST']).format(info_key=R2.opt0, info_value=R2.cur_opt_value)\n R2.r2c1_info_list.append(s)\n logging.error(s)\n return False\n\n # 如果规则是 表达式 money expression\n # 如果规则是日期正则re,进行处理\n # 2015.10.17 11.28 12.6\n if rule_type == 'expression' or rule_type == 're_datetime':\n\n # 设置正则类检查的参数字典\n arg_dict_re = {'rule_type': rule_type, 'rule_key': rule_key}\n\n # print(arg_dict)\n\n # 表达式和正则检查,调用 check_value_re()\n if self._check_value_re(**arg_dict_re):\n return True\n else:\n return False\n\n # check_value_re()\n # 对于正则类规则进行检查,配合 check_value()\n # 2015.11.22 create by david.yi 移植自 check_value() 中原来相关代码\n # 2015.11.27 #12046 正则规则判断扩展\n # 2015.12.6 #12061 支持 expression 的更多判断\n def _check_value_re(self, **arg_dict_re):\n rule_key = arg_dict_re['rule_key']\n rule_type = arg_dict_re['rule_type']\n\n # print(arg_dict)\n\n value_separate = ''\n\n if rule_type == 're_datetime':\n value_separate = '-|\\||'\n elif rule_type == 'expression':\n value_separate = '\\||'\n\n # 读取配置文件是否使用正则代理标志,记录到 re_agent\n re_agent = R2.cf.getboolean(rule_key, 're_agent')\n\n # print(rule_key, re_agent)\n logging.info(' '.join(['re rule and agent:', rule_key, ',', str(re_agent)]))\n\n # 读取正则规则数量,记录到 rule_count\n rule_count = int(self.r2_cache.get_cf_cache(R2.cf, rule_key, 'rule_count'))\n\n # 分解 value 到 list 类型,记录到 value_list\n # 对应 2014-2015 这样时间范围,以及 2014|2016 这样\n value_list = re.split(value_separate, R2.cur_opt_value)\n\n # print('value_list:', value_list)\n\n # 建立判断扩展规则结果的初始值,之后通过正确次数和要判断的value个数比较\n value_check_count = 0\n\n # 根据value list进行循环,逐个value进行正则校验\n for value_temp in value_list:\n\n # print('value:' + value_temp)\n\n # 循环读取参数规则,准备逐一尝试匹配\n for i in range(rule_count):\n\n # 读取参数规则, 记录到 value_rule_temp\n # eg. 
yyyyhx, number\n value_rule_temp = self.r2_cache.get_cf_cache(R2.cf, rule_key, ''.join(['re_', str(i)]))\n\n # print(value_rule_temp)\n\n # 如果是使用正则规则代理的话,需要另外读取真正对应的正则内容\n # 也就是把 yyyyhx 这样的表达式翻译成整正的f正则表达式\n if re_agent:\n # 读取真正的正则表达式, 记录到 rule_real\n rule_real = self.r2_cache.get_cf_cache(R2.cf, rule_type, value_rule_temp)\n # print('re_agent true do this ', rule_type)\n\n # 如果是 money 参数,则不需要翻译正则,需要自己实现\n else:\n rule_real = value_rule_temp\n # print('re_agent false do this ', rule_type)\n\n # 记录日志,记录 value 和 rule\n # s = 're rule: {re_rule} , value: {check_value} '.format(re_rule=rule_real, check_value=value_temp)\n # r2.log.info(s)\n\n # rule_result_temp = False\n\n # 检查value是否符合正则, 调用 check_rule()\n rule_result_temp = self._check_rule(rule_type, rule_real, value_temp)\n\n # 参数符合正则\n if rule_result_temp:\n # value 检查计数器 +1\n value_check_count += 1\n\n # print('re rule match')\n break\n\n # 记录正确的正则匹配数量=元素个数,说明每个分解的value都检查正确,返回True,\n if value_check_count == len(value_list):\n return True\n\n # 有参数不匹配规则处理\n else:\n # 记录错误信息\n # print('key and value error:', key0, value)\n s = (r2_info['OPTION_KEY_VALUE_EXPRESSION_ERROR']).format(info_key=R2.opt0, info_value=R2.cur_opt_value)\n # print(s)\n R2.r2c1_info_list.append(s)\n logging.info(s)\n return False\n\n # check_rule()\n # 真正检查参数中的输入value是否符合正则或者表达式等\n # 输入参数: rule_type: 规则类型,rule_real: 真实需要匹配的规则,value: 需要检查的值, key: 参数\n # 返回参数: 符合规则的话,返回 True\n # 2015.11.22 create by david.yi\n # 2015.11.28 edit #12047 修改返回值,不管正则或者以后,统一返回True or False\n # 2015.11.29 edit #12047 针对money value 加入最基本token判断\n # 2015.11.30 edit #12048 针对money value 多个情况进行规则判断\n # 2015.12.1 12.5 edit 加入更多token 规则\n # 2015.12.6 #13003 (n1,n2) n1= 1:\n return True\n else:\n return False\n\n token_count = len(token_list)\n\n # token list 长度 2\n if token_count == 2:\n # token是否一个数字或者单词合法性\n if not r2_tokenize.exp_rule_09(token_list):\n return False\n elif token_count == 3 or token_count == 4 or token_count == 5:\n return False\n # token list 长度 6 (a-b) 样式\n elif token_count == 6:\n # 检查括号匹配\n if not r2_tokenize.exp_rule_07(token_list):\n return False\n # 检查 NINF 和 PINF 位置和拼写\n if not r2_tokenize.exp_rule_08(token_list):\n return False\n # #13003\n # 检查 (n1,n2) n1< n2\n if not r2_tokenize.exp_rule_10(token_list):\n return False\n # token list 长度大于 6\n else:\n\n # 检查是否有字母数字混合,如 (1-1s)\n if not r2_tokenize.exp_rule_04(token_list):\n return False\n\n # 检查是否有- 大于 1个情况\n if r2_tokenize.exp_rule_06(token_list):\n return False\n\n return True\n\n # check_extra_rule()\n # 检查命令扩展规则 by david.yi\n # 2015.9.13 create\n # 2015.9.15 edit #12003, #12004 增加opt_rule_2,目前支持判断两条规则,\n # 2015.9.24 代码优化\n # 2015.10.29 #12053 修改 result 逻辑\n # 2015.12.6 代码格式优化\n # 2015.12.7 #12063 增加规则4,将两个属性zip成一个属性\n # 2015.12.12 #12076 conf文件读采用缓冲方式\n def _check_extra_rule(self):\n # 建立判断扩展规则结果的初始值,表示没有错误\n err_count = 0\n\n erule_section = ''.join([R2.cmd0, '_extra_rules'])\n\n # 获得扩展规则的数量\n erule_count = int(self.r2_cache.get_cf_cache(R2.cf, erule_section, 'erule_count'))\n\n # 如果扩展规则数量小于1,说明没有规则需要判断,返回 True\n if erule_count < 1:\n return True\n\n # 根据扩展规则数量进行循环\n for i in range(erule_count):\n\n # 拼接 erule 的名称\n erule_pre = ''.join(['erule_', str(i), '_'])\n\n # 初始化 result 结果为 false\n result = False\n\n # 获得 扩展规则类型\n erule_type = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'type']))\n\n # 参数规则1\n if erule_type == 'extra_rule_1':\n\n # 获得需要的参数值\n erule_key = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key']))\n erule_value = self.r2_cache.get_cf_cache(R2.cf, 
erule_section, ''.join([erule_pre, 'value']))\n erule_key1 = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key1']))\n\n # print(erule_key)\n # print(erule_value)\n # print(erule_key1)\n\n # 检查 r2c1 对象中是否存在 extra_key\n if hasattr(self.r2c1_class, erule_key):\n temp_list = getattr(self.r2c1_class, erule_key)\n # 判断 value 是否在指定 key 中\n if erule_value in temp_list:\n # 判断是否存在指定 key\n if hasattr(self.r2c1_class, erule_key1):\n # 存在,返回 True\n result = True\n else:\n result = False\n err_count += 1\n else:\n result = True\n else:\n result = True\n\n # 参数规则2\n elif erule_type == 'extra_rule_2':\n\n # 获得需要的参数值\n erule_key = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key']))\n erule_value = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'value']))\n\n # 检查 r2c1 对象中是否存在 extra_key\n if hasattr(self.r2c1_class, erule_key):\n temp_list = getattr(self.r2c1_class, erule_key)\n # 判断 value 是否在指定 key 中\n if erule_value in temp_list:\n # 判断value的list元素是否等于1,是的话,说明指定value\n if len(temp_list) == 1:\n # 存在,返回 True\n result = True\n else:\n # 不存在,返回 False\n result = False\n err_count += 1\n else:\n result = True\n else:\n result = True\n\n # 参数规则3\n elif erule_type == 'extra_rule_3':\n\n # 获得需要的参数值\n erule_key = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key']))\n erule_key1 = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key1']))\n\n # 检查 r2c1 对象中是否存在 extra_key\n if hasattr(self.r2c1_class, erule_key):\n\n # print('has opt key ', erule_key)\n\n if hasattr(self.r2c1_class, erule_key1):\n # 存在,返回 True\n result = True\n else:\n # 不存在,返回 False\n result = False\n err_count += 1\n else:\n result = True\n\n # 参数规则4\n # 2015.12.7 #12063\n elif erule_type == 'extra_rule_4':\n\n # 获得需要的参数值\n erule_key = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre, 'key']))\n erule_key1 = self.r2_cache.get_cf_cache(R2.cf, erule_section, ''.join([erule_pre + 'key1']))\n\n # 合并属性\n # eg. get -buser --user=p2p --date_kind=open,acct --date=2014,2015\n # get -buser --user=p2p --money_kind=save --money=2000\n # 2015.12.7 edit\n if hasattr(self.r2c1_class, erule_key) and hasattr(self.r2c1_class, erule_key1):\n temp_dict = dict(zip(getattr(self.r2c1_class, erule_key), getattr(self.r2c1_class, erule_key1)))\n setattr(self.r2c1_class, erule_key1, temp_dict)\n\n result = True\n\n # 扩展规则正确处理\n if result:\n pass\n # 日志记录\n # s = 'extra rule:' + erule_type + ';' + r2_info['EXTRA_RULE_OK']\n # logging.info(s)\n else:\n # 记录错误信息\n s = r2_info['EXTRA_RULE_ERROR'].format(info_erule=erule_type)\n # 记录到错误信息列表\n R2.r2c1_info_list.append(s)\n # 日志记录错误信息\n logging.info(s)\n\n # 根据错误数量,返回结果\n if err_count == 0:\n return True\n else:\n return False\n\n # get_cf_cache()\n # conf 读取缓存机制\n # 输入\n # 2015.12.12 create #12076\n # @staticmethod\n # def get_cf_cache(cf, section, key):\n # \n # # 生成 key,用于 dict\n # temp_opt = section + '_' + key\n # \n # if not (temp_opt in R2.cache):\n # R2.cache[temp_opt] = cf[section][key]\n # \n # return R2.cache[temp_opt]\n\n # check_cmd()\n # 检查命令是否在r2支持命令列表中\n # 2015.9.28. create and edit by david.yi\n # 2015.12.19 #12084 修改判断cmd的列表,从conf文件读出\n @staticmethod\n def _check_cmd(cmd):\n if cmd in R2.support_cmd:\n return True\n else:\n return False\n\n\n# 生成 r2 命令的 md5 by david.yi\n# 输入: cmd: r2 命令\n# 输出: md5 e.g. 
7c54c88c6e2f1cf32fb2bbd4a8e4bd9f\n# 2016.3.9 create #12107\ndef cal_r2cmd_md5(cmd):\n\n temp_list = cmd.split(' ')\n temp_list.sort()\n temp_s = ''.join(temp_list)\n temp_md5 = fish_base.get_md5(temp_s)\n\n return temp_md5\n\n# test case\n# 基本测试get命令: get -buser --user=p2p\n# get -buser --user=p2p --money=1000\n# get -buser --user=p2p --money=(0-10000)\n","sub_path":"r2c1/r2c1_main.py","file_name":"r2c1_main.py","file_ext":"py","file_size_in_byte":33463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479397233","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport jinja2\nimport json\nimport os\nimport sys\nimport uuid\nimport textwrap\n\nimport logging\n\nfrom converter import oozie_parser\nfrom definitions import TPL_PATH\nfrom utils import el_utils\n\nINDENT = 4\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n\n in_file_name = args.input\n out_file_name = args.output\n\n params = {'user.name': args.user or os.environ['USER']}\n params = el_utils.parse_els(args.properties, params)\n\n # Each OozieParser class corresponds to one workflow, where one can get\n # the workflow's required dependencies (imports), operator relations,\n # and operator execution sequence.\n parser = oozie_parser.OozieParser(oozie_wflow=in_file_name, params=params)\n parser.parse_workflow()\n\n relations = parser.get_relations()\n depens = parser.get_dependencies()\n ops = parser.get_operators()\n parser.update_trigger_rules()\n\n create_dag_file(ops, depens, relations, params, out_file_name)\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Convert Apache Oozie workflows to Apache Airflow workflows.\")\n parser.add_argument('-i', '--input', help='Path to input file name',\n required=True)\n parser.add_argument('-o', '--output', help='Desired output name')\n parser.add_argument('-d', '--dag', help='Desired DAG name')\n parser.add_argument('-p', '--properties',\n help='Path to the properties file')\n parser.add_argument('-u', '--user',\n help='The user to be used in place of all ${user.name},'\n ' if empty, then user who ran the conversion is used')\n return parser.parse_args(args)\n\n\ndef create_dag_file(operators, depends, relations, params, fn=None,\n dag_name=None):\n \"\"\"\n Writes to a file the Apache Oozie parsed workflow in Airflow's DAG format.\n\n :param operators: A dictionary of {'task_id': ParsedNode object}\n :param depends: A list of strings that will be interpreted as import\n statements\n :param relations: A list of strings corresponding to operator relations,\n such as task_1.set_downstream(task_2)\n :param params: A dictionary of params, in general this will be the parsed\n contents of job.properties\n :param fn: Desired output file name.\n :param dag_name: Desired output DAG name.\n \"\"\"\n if not fn:\n fn = '/tmp/' + str(uuid.uuid4())\n if not dag_name:\n dag_name = str(uuid.uuid4())\n\n with open(fn, 'w') as f:\n logging.info(\"Saving to file: 
{}\".format(fn))\n\n write_dependencies(f, depends)\n f.write('PARAMS = ' + json.dumps(params, indent=INDENT) + '\\n\\n')\n write_dag_header(f, dag_name)\n\n write_operators(f, operators)\n f.write('\\n\\n')\n write_relations(f, relations)\n\n\ndef write_operators(fp, operators, indent=INDENT):\n \"\"\"\n Writes the Airflow operators to the given opened file object.\n\n :param fp: The file pointer to write to.\n :param operators: Dictionary of {'task_id', ParsedNode}\n :param indent: integer of how many spaces to indent entire operator\n \"\"\"\n for op in operators.values():\n fp.write(textwrap.indent(op.operator.convert_to_text(), indent * ' '))\n logging.info(\n \"Wrote Airflow Task ID: {}\".format(op.operator.get_task_id()))\n\n\ndef write_relations(fp, relations, indent=INDENT):\n \"\"\"\n Each relation is in the form of: task_1.setdownstream(task_2)\n\n These are each written on a new line.\n \"\"\"\n logging.info(\"Writing control flow dependencies to file.\")\n for relation in relations:\n fp.write(textwrap.indent(relation, indent * ' '))\n fp.write('\\n')\n\n\ndef write_dependencies(fp, depends):\n \"\"\"\n Writes each dependency on a new line of the given file pointer.\n\n Of the form: from time import time, etc.\n \"\"\"\n logging.info(\"Writing imports to file\")\n fp.write('\\n'.join(depends))\n fp.write('\\n\\n')\n\n\ndef write_dag_header(fp, dag_name, template='dag.tpl'):\n \"\"\"\n Write the DAG header to the open file specified in the file pointer\n :param fp: Opened file to write to.\n :param dag_name: Desired name of DAG\n :param template: Desired template to use when creating the DAG header.\n \"\"\"\n template_loader = jinja2.FileSystemLoader(searchpath=TPL_PATH)\n template_env = jinja2.Environment(loader=template_loader)\n\n template = template_env.get_template(template)\n fp.write(template.render(dag_name=dag_name))\n logging.info(\"Wrote DAG header.\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"oozie-to-airflow/oozie_converter.py","file_name":"oozie_converter.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"422785145","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\r\n\r\nfrom config import opt\r\n\r\nuser_num = 1000\r\nitem_num = 1720\r\n\r\ndir = '../data/train'\r\nbigtag = np.loadtxt(dir + '/bigtag.txt', dtype=int)\r\nchoicetag = np.loadtxt(dir + '/choicetag.txt', dtype=int)\r\nmovie_data = np.loadtxt(dir + '/movie.txt', dtype=int)\r\nrating = np.loadtxt(dir + '/rating.txt', dtype=int)\r\nvalid_data = np.loadtxt('../data/valid/validation.txt', dtype=int)\r\nvalid_P1_data = np.loadtxt('../data/valid/validation_P1.txt', dtype=int)\r\nvalid_P2_data = np.loadtxt('../data/valid/validation_P2.txt', dtype=int)\r\n# test_data = np.loadtxt('../data/test/test_P1.txt', dtype=int)\r\ntest_data2 = np.loadtxt('../data/test/test_P2.txt', dtype=int)\r\nextract_alldata = np.loadtxt(dir + '/extract_alldata.txt', dtype=int)\r\nextract_bigtag = np.loadtxt(dir + '/extract_bigtag.txt', dtype=int)\r\nextract_choicetag = np.loadtxt(dir + '/extract_choicetag.txt', dtype=int)\r\nrating_rate = np.loadtxt(dir + '/rating_rate.txt')\r\n\r\n\r\n#========================================================\r\n# 根据 rating 数据集求每个 item ���访问的频率\r\nmovie = []\r\nfor i in range(movie_data.shape[0]):\r\n tmp = movie_data[i, 1:]\r\n 
movie.append(tmp)\r\n\r\nobs = np.zeros((user_num, item_num))\r\n# print(movie[int(rating_rate[0, 1])])\r\nfor i in range(rating_rate.shape[0]):\r\n row = int(rating_rate[i, 0])\r\n for tag in movie[int(rating_rate[i, 1])]:\r\n obs[row, tag] = 1\r\nfor i in range(extract_alldata.shape[0]): #\r\n row = int(extract_alldata[i, 0])\r\n col = int(extract_alldata[i, 1])\r\n obs[row, col] = 1\r\nobs = np.array(obs, dtype=int)\r\n\r\npop_item = np.zeros((item_num, 1))\r\nfor j in range(item_num):\r\n n = np.bincount(obs[:, j])\r\n if n[0] == 1000:\r\n pop_item[j] = 1e-6\r\n else:\r\n pop_item[j] = n[1] / np.sum(n)\r\n # n1 = obs[:, j].sum()\r\n # pop_item[j] = n1 / np.sum(obs)\r\n# print(obs)\r\n# Normalization\r\npop_item = (pop_item - np.min(pop_item)) / (np.max(pop_item) - np.min(pop_item))\r\nnp.savetxt(\"../data/train/pop_item.txt\", pop_item, fmt=\"%f\")\r\n\r\n\r\n#========================================================\r\n# item 在验证集上的受欢迎程度\r\nobs_val = np.zeros((user_num, item_num))\r\nfor i in range(valid_data.shape[0]): #\r\n row = int(valid_data[i, 0])\r\n col = int(valid_data[i, 1])\r\n obs_val[row, col] = 1\r\nobs_val = np.array(obs_val, dtype=int)\r\n\r\npop_item_val = np.zeros((item_num, 1))\r\nfor j in range(item_num):\r\n n = np.bincount(obs_val[:, j])\r\n if n[0] == 1000:\r\n pop_item_val[j] = 1e-6\r\n else:\r\n pop_item_val[j] = n[1] / np.sum(n)\r\n # n1 = obs_val[:, j].sum() #\r\n # pop_item_val[j] = n1 / np.sum(obs_val)\r\n# print(obs)\r\npop_item_val = (pop_item_val - np.min(pop_item_val)) / (np.max(pop_item_val) - np.min(pop_item_val))\r\nnp.savetxt(\"../data/valid/pop_item_val.txt\", pop_item_val, fmt=\"%f\")\r\n\r\n\r\n#=============================================================\r\n# item 在测试集上的受欢迎程度\r\n# obs_tst = np.zeros((user_num, item_num))\r\n# for i in range(test_data.shape[0]):\r\n# row = int(test_data[i, 0])\r\n# col = int(test_data[i, 1])\r\n# obs_tst[row, col] += 1\r\n# obs_tst = np.array(obs_tst, dtype=int)\r\n#\r\n# pop_item_tst = np.zeros((item_num, 1))\r\n# for j in range(item_num):\r\n# n = np.bincount(obs_tst[:, j])\r\n# if n[0] == 1000:\r\n# pop_item_tst[j] = 1e-6\r\n# else:\r\n# n1 = obs_tst[:, j].sum() #\r\n# pop_item_tst[j] = n1 / np.sum(obs_tst)\r\n# # print(obs)\r\n# pop_item_tst = (pop_item_tst - np.min(pop_item_tst)) / (np.max(pop_item_tst) - np.min(pop_item_tst))\r\n# np.savetxt(\"../data/test/pop_item_tst.txt\", pop_item_tst, fmt=\"%f\")\r\n\r\n\r\n# obs_tst = np.zeros((user_num, item_num))\r\n# for i in range(test_data2.shape[0]):\r\n# row = int(test_data2[i, 0])\r\n# col = int(test_data2[i, 1])\r\n# obs_tst[row, col] += 1\r\n# obs_tst = np.array(obs_tst, dtype=int)\r\n# \r\n# pop_item_tst = np.zeros((item_num, 1))\r\n# for j in range(item_num):\r\n# n = np.bincount(obs_tst[:, j])\r\n# if n[0] == 1000:\r\n# pop_item_tst[j] = 1e-6\r\n# else:\r\n# n1 = obs_tst[:, j].sum() #\r\n# pop_item_tst[j] = n1 / np.sum(obs_tst)\r\n# # print(obs)pop_item_tst2.txt\r\n# pop_item_tst = (pop_item_tst - np.min(pop_item_tst)) / (np.max(pop_item_tst) - np.min(pop_item_tst))\r\n# np.savetxt(\"../data/test/\", pop_item_tst, fmt=\"%f\")","sub_path":"PCIC-2021-CausE-3.0.0/create_data/cre_pop_item.py","file_name":"cre_pop_item.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"270697161","text":"import torch\nimport torchvision\nfrom torchvision.models import resnet18, resnet34, resnet50, squeezenet1_1, vgg19_bn\n\nclass TissueTileNet(torch.nn.Module):\n def 
__init__(self, model, n_classes, activation=None):\n super(TissueTileNet, self).__init__()\n if type(model) in [torchvision.models.resnet.ResNet]:\n model.fc = torch.nn.Linear(512, n_classes)\n elif type(model) == torchvision.models.squeezenet.SqueezeNet:\n list(model.children())[1][1] = torch.nn.Conv2d(512, n_classes, kernel_size=1, stride=1)\n else:\n raise NotImplementedError\n self.model = model\n self.activation = activation\n\n def forward(self, x):\n y = self.model(x)\n\n if self.activation:\n y = self.activation(y)\n\n return y\n\ndef get_transform ():\n \"\"\" Transformer which generates a torch tensor compatible with the model \"\"\"\n return torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(), \n torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n\ndef get_classifier (checkpoint_path='/gpfs/mskmind_ess/boehmk/histocox/checkpoints/2021-01-19_21.05.24_fold-2_epoch017.torch', activation=None, n_classes=4):\n \"\"\" Return model given checkpoint_path \"\"\"\n model = TissueTileNet(resnet18(), n_classes, activation=activation)\n model.load_state_dict(torch.load(\n checkpoint_path,\n map_location='cpu')\n )\n return model\n","sub_path":"luna_pathology/models/ov_tissuenet.py","file_name":"ov_tissuenet.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"268041036","text":"#coding=utf-8\n#author='Shichao-Dong'\n\nfrom PIL import Image\nimport math\nimport operator\nfrom functools import reduce\n\ndef compare(pic1,pic2):\n '''\n :param pic1: 图片1路径\n :param pic2: 图片2路径\n :return: 返回对比的结果\n '''\n image1 = Image.open(pic1)\n image2 = Image.open(pic2)\n\n histogram1 = image1.histogram()\n histogram2 = image2.histogram()\n\n differ = math.sqrt(reduce(operator.add, list(map(lambda a,b: (a-b)**2,histogram1, histogram2)))/len(histogram1))\n\n # print(differ)\n return differ\n\ncount = 1\nitemnumber = 3\nwhile (count < (itemnumber+1) ):\n print(\"Number \"+str(count)+\". item test\")\n if(compare(\"./capture/partial_count\"+str(count*2-1)+\".png\",\"./capture/partial_count\"+str(count*2)+\".png\") < 20 ):\n print(\"Move to action doesn't work\")\n else:\n print(\"Move to action does work\")\n count = count + 1\n\n\n","sub_path":"VScode/2019.01.21/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"153868522","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom debtcollector import removals\n\nfrom oslo_utils import versionutils\nfrom oslo_versionedobjects import base\nfrom oslo_versionedobjects import fields\n\nfrom os_vif.objects import base as osv_base\nfrom os_vif.objects import fields as osv_fields\n\n\n@base.VersionedObjectRegistry.register\nclass VIFBase(osv_base.VersionedObject, base.ComparableVersionedObject):\n \"\"\"Represents a virtual network interface.\"\"\"\n # Version 1.0: Initial version\n VERSION = '1.0'\n\n fields = {\n # Unique identifier of the VIF port\n 'id': fields.UUIDField(),\n\n # The guest MAC address\n 'address': fields.MACAddressField(nullable=True),\n\n # The network to which the VIF is connected\n 'network': fields.ObjectField('Network', nullable=True),\n\n # Name of the registered os_vif plugin\n 'plugin': fields.StringField(),\n\n # Whether the VIF is initially online\n 'active': fields.BooleanField(default=True),\n\n # Whether the host VIF should be preserved on unplug\n 'preserve_on_delete': fields.BooleanField(default=False),\n\n # Whether the network service has provided traffic filtering\n 'has_traffic_filtering': fields.BooleanField(default=False),\n\n # The virtual port profile metadata\n 'port_profile': fields.ObjectField('VIFPortProfileBase',\n subclasses=True)\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFGeneric(VIFBase):\n # For libvirt drivers, this maps to type=\"ethernet\" which\n # just implies a bare TAP device, all setup delegated to\n # the plugin\n\n VERSION = '1.0'\n\n fields = {\n # Name of the device to create\n 'vif_name': fields.StringField()\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFBridge(VIFBase):\n # For libvirt drivers, this maps to type='bridge'\n\n VERSION = '1.0'\n\n fields = {\n # Name of the virtual device to create\n 'vif_name': fields.StringField(),\n\n # Name of the physical device to connect to (eg br0)\n 'bridge_name': fields.StringField(),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFOpenVSwitch(VIFBase):\n # For libvirt drivers, this also maps to type='bridge'\n\n VERSION = '1.0'\n\n fields = {\n # Name of the virtual device to create\n 'vif_name': fields.StringField(),\n\n # Name of the physical device to connect to (eg br0)\n 'bridge_name': fields.StringField(),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFDirect(VIFBase):\n # For libvirt drivers, this maps to type='direct'\n\n VERSION = '1.0'\n\n fields = {\n # Name of the device to create\n 'vif_name': fields.StringField(),\n\n # The PCI address of the host device\n 'dev_address': fields.PCIAddressField(),\n\n # Port connection mode\n 'mode': osv_fields.VIFDirectModeField(),\n\n # The VLAN device name to use\n 'vlan_name': fields.StringField(),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFVHostUser(VIFBase):\n # For libvirt drivers, this maps to type='vhostuser'\n\n VERSION = '1.1'\n\n fields = {\n # Name of the vhostuser port to create\n 'vif_name': fields.StringField(),\n\n # UNIX socket path\n 'path': fields.StringField(),\n\n # UNIX socket access permissions\n 'mode': osv_fields.VIFVHostUserModeField(),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n super(VIFVHostUser, self).obj_make_compatible(primitive,\n target_version)\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1) and 'vif_name' in primitive:\n del 
primitive['vif_name']\n\n\n@base.VersionedObjectRegistry.register\nclass VIFHostDevice(VIFBase):\n # For libvirt drivers, this maps to type='hostdev'\n\n VERSION = '1.0'\n\n fields = {\n\n # The type of the host device.\n # Valid values are ethernet and generic.\n # Ethernet is \n # Generic is \n 'dev_type': osv_fields.VIFHostDeviceDevTypeField(),\n\n # The PCI address of the host device\n 'dev_address': fields.PCIAddressField(),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFNestedDPDK(VIFBase):\n # For kuryr-kubernetes nested DPDK interfaces\n\n VERSION = '1.0'\n\n fields = {\n # PCI address of the device.\n 'pci_address': fields.StringField(),\n\n # Name of the driver the device was previously bound to; it makes\n # the controller driver agnostic (virtio, sr-iov, etc.)\n 'dev_driver': fields.StringField(),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass DatapathOffloadBase(osv_base.VersionedObject,\n base.ComparableVersionedObject):\n # Base class for all types of datapath offload\n VERSION = '1.0'\n\n\n@base.VersionedObjectRegistry.register\nclass DatapathOffloadRepresentor(DatapathOffloadBase):\n # Offload type for VF Representors conforming to the switchdev model\n VERSION = '1.0'\n\n fields = {\n # Name to set on the representor (if set)\n 'representor_name': fields.StringField(nullable=True),\n\n # The PCI address of the Virtual Function\n 'representor_address': fields.StringField(nullable=True),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileBase(osv_base.VersionedObject,\n base.ComparableVersionedObject):\n # Base class for all types of port profile\n # Version 1.0: Initial release\n # Version 1.1: Added 'datapath_offload'\n VERSION = '1.1'\n\n fields = {\n # Datapath offload type of the port\n 'datapath_offload': fields.ObjectField('DatapathOffloadBase',\n nullable=True,\n subclasses=True),\n }\n\n obj_relationships = {\n 'datapath_offload': (('1.1', '1.0'),),\n }\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileOpenVSwitch(VIFPortProfileBase):\n # Port profile info for OpenVSwitch networks\n # Version 1.0: Initial release\n # Version 1.1: Added 'datapath_type'\n # Version 1.2: VIFPortProfileBase updated to 1.1\n VERSION = '1.2'\n\n fields = {\n 'interface_id': fields.UUIDField(),\n 'profile_id': fields.StringField(),\n\n # Datapath type of the bridge\n 'datapath_type': fields.StringField(nullable=True),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n super(VIFPortProfileOpenVSwitch, self).obj_make_compatible(\n primitive, target_version)\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1) and 'datapath_type' in primitive:\n del primitive['datapath_type']\n if target_version < (1, 2):\n super(VIFPortProfileOpenVSwitch, self).obj_make_compatible(\n primitive, \"1.0\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileFPOpenVSwitch(VIFPortProfileOpenVSwitch):\n # Port profile info for OpenVSwitch networks using fastpath\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileOpenVSwitch updated to 1.1\n # Version 1.2: VIFPortProfileOpenVSwitch updated to 1.2\n VERSION = '1.2'\n\n fields = {\n # Name of the bridge (managed by fast path) to connect to\n 'bridge_name': fields.StringField(),\n\n # Whether the OpenVSwitch network is using hybrid plug\n 'hybrid_plug': fields.BooleanField(default=False),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = 
versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfileFPOpenVSwitch, self).obj_make_compatible(\n primitive, \"1.0\")\n if target_version < (1, 2):\n super(VIFPortProfileFPOpenVSwitch, self).obj_make_compatible(\n primitive, \"1.1\")\n\n\n@removals.removed_class(\"VIFPortProfileOVSRepresentor\",\n category=PendingDeprecationWarning)\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileOVSRepresentor(VIFPortProfileOpenVSwitch):\n # Port profile info for OpenVSwitch networks using a representor\n # This class is now frozen and retained for backwards compatibility. The\n # 'datapath_offload' field in port profiles should be used instead.\n #\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileOpenVSwitch updated to 1.1\n # Version 1.2: VIFPortProfileOpenVSwitch updated to 1.2\n VERSION = '1.2'\n\n fields = {\n # Name to set on the representor (if set)\n 'representor_name': fields.StringField(nullable=True),\n\n # The PCI address of the Virtual Function\n 'representor_address': fields.PCIAddressField(nullable=True),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfileOVSRepresentor, self).obj_make_compatible(\n primitive, \"1.0\")\n if target_version < (1, 2):\n super(VIFPortProfileOVSRepresentor, self).obj_make_compatible(\n primitive, \"1.1\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileFPBridge(VIFPortProfileBase):\n # Port profile info for LinuxBridge networks using fastpath\n #\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileBase updated to 1.1\n VERSION = '1.1'\n\n fields = {\n # Name of the bridge (managed by fast path) to connect to\n 'bridge_name': fields.StringField(),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfileFPBridge, self).obj_make_compatible(\n primitive, \"1.0\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileFPTap(VIFPortProfileBase):\n # Port profile info for Calico networks using fastpath\n #\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileBase updated to 1.1\n VERSION = '1.1'\n\n fields = {\n # The mac address of the host vhostuser port\n 'mac_address': fields.MACAddressField(nullable=True),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfileFPTap, self).obj_make_compatible(\n primitive, \"1.0\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfile8021Qbg(VIFPortProfileBase):\n # Port profile info for VEPA 802.1qbg networks\n #\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileBase updated to 1.1\n VERSION = '1.1'\n\n fields = {\n 'manager_id': fields.IntegerField(),\n 'type_id': fields.IntegerField(),\n 'type_id_version': fields.IntegerField(),\n 'instance_id': fields.UUIDField(),\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfile8021Qbg, self).obj_make_compatible(\n primitive, \"1.0\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfile8021Qbh(VIFPortProfileBase):\n # Port profile info for VEPA 802.1qbh networks\n #\n # Version 1.0: 
Initial release\n # Version 1.1: VIFPortProfileBase updated to 1.1\n VERSION = '1.1'\n\n fields = {\n 'profile_id': fields.StringField()\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfile8021Qbh, self).obj_make_compatible(\n primitive, \"1.0\")\n\n\n@base.VersionedObjectRegistry.register\nclass VIFPortProfileK8sDPDK(VIFPortProfileBase):\n # Port profile info for Kuryr-Kubernetes DPDK ports\n #\n # Version 1.0: Initial release\n # Version 1.1: VIFPortProfileBase updated to 1.1\n VERSION = '1.1'\n\n fields = {\n # Specify whether this vif requires L3 setup.\n 'l3_setup': fields.BooleanField(),\n\n # String containing URL representing object in Kubernetes API.\n 'selflink': fields.StringField(),\n\n # String used in Kubernetes v1 API to identifies\n # the server's internal version of this object.\n 'resourceversion': fields.StringField()\n }\n\n def obj_make_compatible(self, primitive, target_version):\n target_version = versionutils.convert_version_to_tuple(target_version)\n if target_version < (1, 1):\n super(VIFPortProfileK8sDPDK, self).obj_make_compatible(\n primitive, \"1.0\")\n","sub_path":"os_vif/objects/vif.py","file_name":"vif.py","file_ext":"py","file_size_in_byte":13562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"47385078","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def sortList(self, head: ListNode) -> ListNode:\n # mergesort\n if not head or not head.next:\n return head\n slow_point = head\n fast_point = head\n prev = None\n\n # find the mid of the linked list\n while fast_point and fast_point.next:\n prev = slow_point\n slow_point = slow_point.next\n fast_point = fast_point.next.next\n\n # delink the first half and second half of the linked list\n prev.next = None\n left_head = head\n right_head = slow_point\n\n l = self.sortList(left_head)\n r = self.sortList(right_head)\n\n # merge two linked list\n d = ListNode(-1)\n count = 0\n while l and r:\n if l.val <= r.val:\n d.next = ListNode(l.val)\n l = l.next\n else:\n d.next = ListNode(r.val)\n r = r.next\n d = d.next\n if count == 0:\n merged_head = d\n count += 1\n\n if l:\n d.next = l\n if r:\n d.next = r\n return merged_head\n\n\n\n\n","sub_path":"LeetCode31DaysChallenge-202010/Sort List.py","file_name":"Sort List.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"289099833","text":"from django.conf.urls import url, patterns\nfrom django.shortcuts import redirect\nfrom . 
import views\nfrom django.contrib.auth.decorators import login_required\n\nurlpatterns = [\n\turl(r'^$', views.Main.as_view()),\n\turl(r'^bookrraven/$', views.Main.as_view(), name='brr-landing'),\n\turl(r'^login/$', views.Login.as_view(), name='brr-login'),\n\turl(r'^register/$', views.Register.as_view(), name='brr-register'),\n\turl(r'^dashboard/$', login_required(views.Dashboard.as_view()), name='brr-dashboard'),\n\turl(r'^venue/(?P\\d+)', views.Venue.as_view(), name='brr-venue'),\n\turl(r'^artist/(?P\\d+)', views.Artist.as_view(), name='bbr-artist'),\n\turl(r'^logout/', views.Logout.as_view(), name='bbr-logout'),\n]\n","sub_path":"bookr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"59503413","text":"import webapp2, logging, sys\r\nfrom constants import \\\r\n MAXIMAL_ENTRY_COUNT, MINIMAL_ENTRY_COUNT, ONE_DAY, \\\r\n MAXIMAL_ENTRY_COUNT_DECREMENT\r\nfrom database import get_unfetched_feeds, store_feed, store_backup_feed\r\nfrom utils import get_feed_dom\r\n\r\ndef fetch(url):\r\n from google.appengine.api import urlfetch\r\n\r\n try:\r\n result = urlfetch.fetch(url)\r\n except:\r\n logging.error(\"Fetch failed miserably.\")\r\n logging.error(sys.exc_info()[0])\r\n logging.error(sys.exc_info()[1])\r\n return None\r\n\r\n status_code = result.status_code\r\n content = result.content or ''\r\n if status_code > 199 and status_code < 300 and content:\r\n return content.decode('utf-8')\r\n \r\n logging.error( \\\r\n \"Fetch failed. Status code - %s. Content[:100] - %s.\" % \\\r\n (str(status_code), content[:100]))\r\n return None\r\n\r\ndef clean_up_deprecated_state_if_appropriate(source):\r\n from datetime import datetime\r\n\r\n if not source.backup_feed:\r\n return\r\n backup_feed = source.backup_feed.get()\r\n if not backup_feed:\r\n return\r\n one_day_ago = datetime.now() - constants.one_day\r\n if backup_feed.deprecation_date > one_day_ago:\r\n return\r\n backup_feed.delete()\r\n logging.info(\"Deleted the backup.\")\r\n\r\ndef cap_feed(full_feed_dom, maximal_entry_count):\r\n full_feed_root = full_feed_dom.getElementsByTagName(\"feed\")[0]\r\n # Capping the entries in order not to exhaust the Google App Engine quota.\r\n full_feed_entries = full_feed_dom.getElementsByTagName(\"entry\")\r\n if len(full_feed_entries) > maximal_entry_count:\r\n for entry in full_feed_entries[maximal_entry_count:]:\r\n full_feed_root.removeChild(entry)\r\n\r\ndef update_full_feed(full_feed_dom, current_feed_dom, previous_urls):\r\n i = 0\r\n full_feed_root = full_feed_dom.getElementsByTagName(\"feed\")[0]\r\n current_feed_root = current_feed_dom.getElementsByTagName(\"feed\")[0]\r\n current_feed_entries = current_feed_dom.getElementsByTagName(\"entry\")\r\n current_feed_entries.reverse()\r\n\r\n for entry in current_feed_entries:\r\n url = entry.getElementsByTagName(\"link\")[0].getAttributeNode(\"href\").value\r\n if url in previous_urls:\r\n current_feed_root.removeChild(entry)\r\n else:\r\n previous_urls[url] = True\r\n if not current_feed_dom == full_feed_dom:\r\n full_feed_root.insertBefore(entry, full_feed_root.firstChild)\r\n i = i + 1\r\n\r\n if i > 19:\r\n logging.info(\r\n \"More than 19 new entries - \" + str(i) +\r\n \", something might have been missed.\") \r\n\r\ndef store(source, feed, full_feed_dom, timestamp, error_comment = ''):\r\n feed.xml = full_feed_dom.toxml()\r\n has_succeeded = store_feed(source, feed, timestamp, error_comment)\r\n return 
has_succeeded\r\n\r\ndef store_full_feed(source, feed, full_feed_dom, previous_urls, timestamp):\r\n import json\r\n\r\n # Storing in the database for persistent/backup storage.\r\n feed.urls = json.dumps(previous_urls)\r\n maximal_entry_count = MAXIMAL_ENTRY_COUNT\r\n has_stored_feed = store(source, feed, full_feed_dom, timestamp)\r\n\r\n while not has_stored_feed and \\\r\n maximal_entry_count > MINIMAL_ENTRY_COUNT:\r\n cap_feed(full_feed_dom, maximal_entry_count)\r\n maximal_entry_count = maximal_entry_count - \\\r\n MAXIMAL_ENTRY_COUNT_DECREMENT\r\n has_stored_feed = \\\r\n store(source, feed, full_feed_dom, timestamp, str(maximal_entry_count))\r\n \r\n return has_stored_feed\r\n\r\ndef get_full_feed(source, current_feed_dom, current_feed):\r\n full_feed = source.feed.get()\r\n has_full_feed = False\r\n full_feed_dom = None\r\n if full_feed:\r\n import json\r\n has_full_feed = True\r\n full_feed_dom = get_feed_dom(full_feed.xml)\r\n previous_urls = json.loads(full_feed.urls)\r\n else:\r\n from database import Feed\r\n has_full_feed = False\r\n full_feed = Feed()\r\n full_feed.source = source\r\n full_feed_dom = current_feed_dom\r\n previous_urls = {}\r\n \r\n return (full_feed, full_feed_dom, has_full_feed, previous_urls);\r\n \r\ndef store_feed_in_memory(source, current_feed):\r\n from google.appengine.api import memcache\r\n try:\r\n # Storing in the memory for quick access.\r\n memcache.set('last-feed-' + source.url, current_feed)\r\n except:\r\n pass\r\n\r\n\r\ndef scrape(source, manual):\r\n from google.appengine.api import memcache\r\n from datetime import datetime\r\n\r\n url = source.url\r\n timestamp = datetime.now()\r\n current_feed = fetch(url)\r\n \r\n if not current_feed:\r\n return False\r\n\r\n if not manual:\r\n memcache.set('last-' + url, datetime.utcnow())\r\n\r\n if memcache.get('last-feed-' + url) == current_feed:\r\n # No updates.\r\n return True\r\n\r\n clean_up_deprecated_state_if_appropriate(source)\r\n\r\n current_feed_dom = get_feed_dom(current_feed)\r\n \r\n (full_feed, full_feed_dom, has_full_feed, previous_urls) = \\\r\n get_full_feed(source, current_feed_dom, current_feed)\r\n \r\n update_full_feed(full_feed_dom, current_feed_dom, previous_urls)\r\n if store_full_feed(source, full_feed, full_feed_dom, previous_urls, timestamp):\r\n store_feed_in_memory(source, current_feed);\r\n return True\r\n\r\n logging.info(\r\n \"The full feed is too large for the datastore. \" +\r\n \"Backing the existing one up and storing the current feed instead.\")\r\n \r\n if not source.backup_feed:\r\n return False\r\n backup_feed = source.backup_feed.get()\r\n if not backup_feed:\r\n logging.info('A backup feed already exists. Bailing. Better luck next time.')\r\n return False\r\n\r\n if store_backup_feed(source, datetime.now()):\r\n logging.info('Stored a backup feed.')\r\n if not has_full_feed or \\\r\n store_full_feed(source, full_feed, current_feed_dom, previous_urls, \\\r\n timestamp):\r\n store_feed_in_memory(source, current_feed)\r\n logging.info('Oops. Even the current feed is too large. 
Giving up.')\r\n return False\r\n return True\r\n\r\nclass ScrapeHandler(webapp2.RequestHandler):\r\n def get(self):\r\n from datetime import datetime\r\n \r\n is_manual = self.request.get(\"manual\") == \"1\"\r\n \r\n sources = get_unfetched_feeds(datetime.now())\r\n for source in sources:\r\n logging.info('Scraping ' + source.url)\r\n scrape(source, is_manual)\r\n","sub_path":"scrape_handler.py","file_name":"scrape_handler.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"249966737","text":"from datetime import datetime, time\n\nfrom flask import render_template, url_for, redirect, request, abort, current_app\nfrom flask_login import login_required, current_user\nfrom sqlalchemy.exc import IntegrityError\nfrom wtforms.validators import NoneOf, DataRequired\n\nfrom app.extensions import db\nfrom app.admin import bp\nfrom app.admin.forms import ChangePasswordForm, ActivityCodeForm, RegisterForm, MachineForm, SettingsForm, ScheduleForm, \\\n MachineGroupForm\nfrom app.admin.helpers import admin_required\nfrom app.default.models import Machine, MachineGroup, Activity, ActivityCode, Job, Settings, Schedule, WorkflowType\nfrom app.default.models import SHIFT_STRFTIME_FORMAT\nfrom app.login.models import User\nfrom config import Config\n\n\n@bp.route('/adminhome', methods=['GET'])\n@login_required\n@admin_required\ndef admin_home():\n \"\"\" The default page for a logged-in user\"\"\"\n # Create default database entries\n return render_template('admin/adminhome.html',\n users=User.query.all(),\n schedules=Schedule.query.all(),\n machines=Machine.query.all(),\n machine_groups=MachineGroup.query.all(),\n activity_codes=ActivityCode.query.all(),\n jobs=Job.query.all())\n\n\n@bp.route('/settings', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef settings():\n # Get the current settings. There can only be one row in the settings table.\n current_settings = Settings.query.get_or_404(1)\n form = SettingsForm()\n if form.validate_on_submit():\n # Save the new settings\n current_settings.dashboard_update_interval_s = form.dashboard_update_interval.data\n current_settings.threshold = form.explanation_threshold.data\n db.session.add(current_settings)\n db.session.commit()\n current_app.logger.info(f\"Changed settings: {current_settings}\")\n return redirect(url_for('admin.admin_home'))\n\n # Set the form data to show the existing settings\n form.dashboard_update_interval.data = current_settings.dashboard_update_interval_s\n form.explanation_threshold.data = current_settings.threshold\n return render_template('admin/settings.html',\n form=form)\n\n\n@bp.route('/newuser', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef new_user():\n \"\"\" The screen to register a new user.\"\"\"\n\n form = RegisterForm()\n if form.validate_on_submit():\n # noinspection PyArgumentList\n u = User(username=form.username.data)\n u.set_password(form.password.data)\n db.session.add(u)\n db.session.commit()\n current_app.logger.info(f\"Created new user: {u}\")\n return redirect(url_for('admin.admin_home'))\n nav_bar_title = \"New User\"\n return render_template(\"admin/newuser.html\", title=\"Register\",\n nav_bar_title=nav_bar_title,\n form=form)\n\n\n@bp.route('/changepassword', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef change_password():\n \"\"\" The page to change a user's password. 
The user_id is passed to this page.\"\"\"\n if current_user.admin is not True:\n abort(403)\n user = User.query.get_or_404(request.args.get('user_id'))\n form = ChangePasswordForm()\n if form.validate_on_submit():\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n current_app.logger.info(f\"Changed password for {user}\")\n return redirect(url_for('admin.admin_home'))\n nav_bar_title = \"Change password for \" + str(user.username)\n return render_template(\"admin/changepassword.html\",\n nav_bar_title=nav_bar_title,\n user=user,\n form=form)\n\n\n@bp.route('/schedule', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef machine_schedule():\n form = ScheduleForm()\n\n schedule_id = request.args.get(\"schedule_id\", None)\n\n if schedule_id is None or 'new' in request.args and request.args['new'] == \"True\":\n # Create a new schedule\n schedule = Schedule(name=\"\")\n db.session.add(schedule)\n else:\n schedule = Schedule.query.get_or_404(schedule_id)\n\n if form.validate_on_submit():\n # Save the data from the form to the database\n schedule.name = form.name.data\n schedule.mon_start = form.mon_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.mon_end = form.mon_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.tue_start = form.tue_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.tue_end = form.tue_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.wed_start = form.wed_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.wed_end = form.wed_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.thu_start = form.thu_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.thu_end = form.thu_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.fri_start = form.fri_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.fri_end = form.fri_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.sat_start = form.sat_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.sat_end = form.sat_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.sun_start = form.sun_start.data.strftime(SHIFT_STRFTIME_FORMAT)\n schedule.sun_end = form.sun_end.data.strftime(SHIFT_STRFTIME_FORMAT)\n # Check that end is always after the start\n if schedule.mon_end < schedule.mon_start or \\\n schedule.tue_end < schedule.tue_start or \\\n schedule.wed_end < schedule.wed_start or \\\n schedule.thu_end < schedule.thu_start or \\\n schedule.fri_end < schedule.fri_start or \\\n schedule.sat_end < schedule.sat_start or \\\n schedule.sun_end < schedule.sun_start:\n db.session.rollback()\n return abort(400)\n db.session.commit()\n return redirect(url_for('admin.admin_home'))\n\n # Set the form data to show data from the database\n form.name.data = schedule.name\n blank_time = time().strftime(SHIFT_STRFTIME_FORMAT) # Replace empty times with 00:00\n form.mon_start.data = datetime.strptime(schedule.mon_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.mon_end.data = datetime.strptime(schedule.mon_end or blank_time, SHIFT_STRFTIME_FORMAT)\n form.tue_start.data = datetime.strptime(schedule.tue_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.tue_end.data = datetime.strptime(schedule.tue_end or blank_time, SHIFT_STRFTIME_FORMAT)\n form.wed_start.data = datetime.strptime(schedule.wed_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.wed_end.data = datetime.strptime(schedule.wed_end or blank_time, SHIFT_STRFTIME_FORMAT)\n form.thu_start.data = datetime.strptime(schedule.thu_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.thu_end.data = datetime.strptime(schedule.thu_end or blank_time, 
SHIFT_STRFTIME_FORMAT)\n form.fri_start.data = datetime.strptime(schedule.fri_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.fri_end.data = datetime.strptime(schedule.fri_end or blank_time, SHIFT_STRFTIME_FORMAT)\n form.sat_start.data = datetime.strptime(schedule.sat_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.sat_end.data = datetime.strptime(schedule.sat_end or blank_time, SHIFT_STRFTIME_FORMAT)\n form.sun_start.data = datetime.strptime(schedule.sun_start or blank_time, SHIFT_STRFTIME_FORMAT)\n form.sun_end.data = datetime.strptime(schedule.sun_end or blank_time, SHIFT_STRFTIME_FORMAT)\n\n return render_template(\"admin/schedule.html\",\n form=form)\n\n\n@bp.route('/editmachine', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_machine():\n \"\"\"The page to edit a machine (also used for creating a new machine)\n This page will allow an ID to be specified for new machines being created, but will not allow the ID\n to be changed for existing machines.\"\"\"\n form = MachineForm()\n ids = []\n\n # Get a list of schedules to create form dropdown\n schedules = []\n for s in Schedule.query.all():\n schedules.append((str(s.id), str(s.name))) # The ID has to be a string to match the data returned from client\n # Add the list of schedules to the form\n form.schedule.choices = schedules\n\n # Create machine group dropdown\n groups = []\n groups.append((\"0\", str(\"No group\")))\n for g in MachineGroup.query.all():\n groups.append((str(g.id), str(g.name)))\n form.group.choices = groups\n\n # Get a list of workflow types to create dropdown\n workflow_types = []\n for t in WorkflowType.query.all():\n workflow_types.append((str(t.id), str(t.name)))\n # Add the list of workflow types to the form\n form.workflow_type.choices = workflow_types\n\n # If new=true then the request is for a new machine to be created\n if 'new' in request.args and request.args['new'] == \"True\":\n creating_new_machine = True\n else:\n creating_new_machine = False\n\n if creating_new_machine:\n # Create a new machine\n machine = Machine(name=\"\", active=True)\n # Add and flush now to retrieve an id for the new entry\n db.session.add(machine)\n db.session.flush()\n message = \"Create new machine\"\n\n # Create a list of existing machine IDs to pass to data validation\n for m in Machine.query.all():\n ids.append(m.id)\n # Allow it to accept the id that has just been assigned for the new entry\n if machine.id in ids:\n ids.remove(machine.id)\n\n # Otherwise get the machine to be edited\n elif 'machine_id' in request.args:\n # Prevent the ID from being edited for existing machines\n form.id.render_kw = {'readonly': True}\n try:\n machine_id = int(request.args['machine_id'])\n machine = Machine.query.get_or_404(machine_id)\n except ValueError:\n current_app.logger.warn(f\"Error parsing machine_id in URL. \"\n f\"machine_id provided : {request.args['machine_id']}\")\n error_message = f\"Error parsing machine_id : {request.args['machine_id']}\"\n return abort(400, error_message)\n # Show a warning to the user\n message = f\"This machine ID ({machine_id}) can only be set when a new machine is being created. 
\" \\\n \"If the machine is no longer needed, deselect \\\"Active\\\" for this machine to hide it from the users.\"\n\n else:\n error_message = \"No machine_id specified\"\n current_app.logger.warn(\"No machine_id specified in URL\")\n return abort(400, error_message)\n\n # Create validators for the form\n # Create a list of existing names and IPs to prevent duplicates being entered\n names = []\n ips = []\n for m in Machine.query.all():\n names.append(str(m.name))\n ips.append(str(m.device_ip))\n # Don't prevent saving with its own current name/IP\n if machine.name in names:\n names.remove(machine.name)\n if machine.device_ip in ips:\n ips.remove(machine.device_ip)\n # Don't allow duplicate machine names\n form.name.validators = [NoneOf(names, message=\"Name already exists\"), DataRequired()]\n # Don't allow duplicate device IPs\n form.device_ip.validators = [NoneOf(ips, message=\"This device is already assigned to a machine\"), DataRequired()]\n # Don't allow duplicate IDs\n form.id.validators = [NoneOf(ids, message=\"A machine with that ID already exists\"), DataRequired()]\n\n if form.validate_on_submit():\n current_app.logger.info(f\"{machine} edited by {current_user}\")\n # Save the new values on submit\n machine.name = form.name.data\n machine.active = form.active.data\n machine.workflow_type_id = form.workflow_type.data\n machine.schedule_id = form.schedule.data\n # If no machine group is selected, null the column instead of 0\n if form.group.data == '0':\n machine.group_id = None\n else:\n machine.group_id = form.group.data\n # Save empty ip values as null to avoid unique constraint errors in the database\n if form.device_ip.data == \"\":\n machine.device_ip = None\n else:\n machine.device_ip = form.device_ip.data\n\n # If creating a new machine, save the ID and start an activity on the machine\n if creating_new_machine:\n machine.id = form.id.data\n current_app.logger.info(f\"{machine} created by {current_user}\")\n first_act = Activity(machine_id=machine.id,\n timestamp_start=datetime.now().timestamp(),\n machine_state=Config.MACHINE_STATE_OFF,\n activity_code_id=Config.NO_USER_CODE_ID)\n db.session.add(first_act)\n current_app.logger.debug(f\"{first_act} started on machine creation\")\n\n try:\n db.session.add(machine)\n db.session.commit()\n\n except IntegrityError as e:\n return str(e)\n\n return redirect(url_for('admin.admin_home'))\n\n # Fill out the form with existing values to display on the page\n form.id.data = machine.id\n form.active.data = machine.active\n form.schedule.data = str(machine.schedule_id)\n form.group.data = str(machine.group_id)\n form.workflow_type.data = str(machine.workflow_type_id)\n\n if not creating_new_machine:\n form.name.data = machine.name\n form.device_ip.data = machine.device_ip\n\n return render_template(\"admin/edit_machine.html\",\n form=form,\n message=message)\n\n\n@bp.route('/editmachinegroup', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_machine_group():\n \"\"\"The page to edit a machine (also used for creating a new machine)\n This page will allow an ID to be specified for new machines being created, but will not allow the ID\n to be changed for existing machines.\"\"\"\n form = MachineGroupForm()\n all_machine_groups = MachineGroup.query.all()\n ids = []\n\n # If new=true then the request is for a new machine group to be created\n if 'new' in request.args and request.args['new'] == \"True\":\n creating_new_group = True\n else:\n creating_new_group = False\n\n if creating_new_group:\n # Create a new machine\n 
machine_group = MachineGroup(name=\"\")\n db.session.add(machine_group)\n group_machines = [] # A blank list because the group has no machines yet\n\n # Otherwise get the machine group to be edited\n elif 'machine_group_id' in request.args:\n try:\n machine_group_id = int(request.args['machine_group_id'])\n machine_group = MachineGroup.query.get_or_404(machine_group_id)\n except ValueError:\n current_app.logger.warn(f\"Error parsing machine_group_id in URL. \"\n f\"machine_group_id provided : {request.args['machine_group_id']}\")\n error_message = f\"Error parsing machine_group_id : {request.args['machine_group_id']}\"\n return abort(400, error_message)\n\n else:\n error_message = \"No machine_group_id specified\"\n current_app.logger.warn(\"No machine_group_id specified in URL\")\n return abort(400, error_message)\n\n # Create validators for the form\n # Create a list of existing names to prevent duplicates being entered\n names = []\n for mg in all_machine_groups:\n names.append(str(mg.name))\n # Don't prevent saving with its own current name\n if machine_group.name in names:\n names.remove(machine_group.name)\n\n # Don't allow duplicate machine names\n form.name.validators = [NoneOf(names, message=\"Name already exists\"), DataRequired()]\n\n if form.validate_on_submit():\n current_app.logger.info(f\"{machine_group} edited by {current_user}\")\n # Save the new values on submit\n machine_group.name = form.name.data\n db.session.commit()\n\n return redirect(url_for('admin.admin_home'))\n\n # Fill out the form with existing values to display on the page\n if not creating_new_group:\n form.name.data = machine_group.name\n\n return render_template(\"admin/edit_machine_group.html\",\n form=form,\n group_machines=machine_group.machines)\n\n\n\n@bp.route('/editactivitycode', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_activity_code():\n \"\"\"The page to edit an activity code\"\"\"\n\n # If new=true then the request is for a new activity code to be created\n if 'new' in request.args and request.args['new'] == \"True\":\n # Create a new activity code\n activity_code = ActivityCode(active=True)\n message = \"Create new activity code\"\n\n # Otherwise get the activity code to be edited\n elif 'ac_id' in request.args:\n try:\n activity_code_id = int(request.args['ac_id'])\n activity_code = ActivityCode.query.get_or_404(activity_code_id)\n except ValueError:\n error_message = f\"Error parsing ac_id : {request.args['ac_id']} from URL\"\n current_app.logger.warn(error_message)\n return abort(400, error_message)\n # Show a warning to the user depending on the code being edited.\n if activity_code_id == Config.UPTIME_CODE_ID:\n message = f\"Warning: This entry (ID {activity_code_id}) should always represent uptime\"\n elif activity_code_id == Config.UNEXPLAINED_DOWNTIME_CODE_ID:\n message = f\"Warning: This entry (ID {activity_code_id}) should always represent unexplained downtime\"\n elif activity_code_id == Config.SETTING_CODE_ID:\n message = f\"Warning: This entry (ID {activity_code_id}) should always represent setting\"\n else:\n message = \"Warning: Changes to these values will be reflected in \" \\\n \"past readings with this activity code.
\\\n If this code is no longer needed, deselect \\\"Active\\\" for this code \" \\\n \"and create another activity code instead.\"\n else:\n error_message = \"No activity code specified in URL\"\n current_app.logger.warn(error_message)\n return abort(400, error_message)\n\n form = ActivityCodeForm()\n # Get a list of existing activity codes to use for form validation to prevent repeat codes\n all_activity_codes = []\n for ac in ActivityCode.query.all():\n all_activity_codes.append(str(ac.code))\n if activity_code.code in all_activity_codes:\n all_activity_codes.remove(activity_code.code) # Don't prevent the form from entering the current code\n form.code.validators = [NoneOf(all_activity_codes, message=\"Code already exists\"), DataRequired()]\n\n if form.validate_on_submit():\n activity_code.code = form.code.data\n activity_code.active = form.active.data\n activity_code.short_description = form.short_description.data\n activity_code.long_description = form.long_description.data\n activity_code.graph_colour = '#' + form.graph_colour.data\n db.session.add(activity_code)\n db.session.commit()\n return redirect(url_for('admin.admin_home'))\n\n # Fill out the form with existing values\n form.active.data = activity_code.active\n form.code.data = activity_code.code\n form.short_description.data = activity_code.short_description\n form.long_description.data = activity_code.long_description\n form.graph_colour.data = activity_code.graph_colour\n\n # Prevent duplicate codes from being created\n codes = []\n for ac in ActivityCode.query.all():\n codes.append(str(ac.code))\n if activity_code.code in codes:\n codes.remove(activity_code.code)\n form.code.validators = [NoneOf(codes), DataRequired()]\n\n return render_template(\"admin/edit_activity_code.html\",\n form=form,\n message=message)\n","sub_path":"app/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":20109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"142947482","text":"# Copyright (c) 2010 Alex Barrett \n#\n# Everyone is permitted to copy and distribute verbatim or modified\n# copies of this license document, and changing it is allowed as long\n# as the name is changed.\n#\n# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\n# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n#\n# 0. 
You just DO WHAT THE FUCK YOU WANT TO.\n# 2013-11-26, Seganku \n# v0.2.7: add -c switch for the option to pass output to a command\n# 2013-07-19, Sebastien Helleu \n# v0.2.6: use buffer received in command callback instead of current buffer\n# 2013-05-04, Rylai\n# v0.2.5: add -e switch for the option to destroy the eyes of all\n# who have the misfortune of seeing your text\n# 2013-04-26, Biohazard\n# v0.2.4: add support for using the command through keybindings\n# 2013-03-12, R1cochet\n# v0.2.3: add -b switch for backwards/reverse text\n# 2013-01-29, SuperT1R:\n# v0.2.2: add -m switch to append /me to the beginning of the output\n\n\nimport weechat as w\nimport random\nimport re\n\n\nSCRIPT_NAME = \"prism\"\nSCRIPT_AUTHOR = \"Alex Barrett \"\nSCRIPT_VERSION = \"0.2.7\"\nSCRIPT_LICENSE = \"WTFPL\"\nSCRIPT_DESC = \"Taste the rainbow.\"\n\n\n# red, lightred, brown, yellow, green, lightgreen, cyan,\n# lightcyan, blue, lightblue, magenta, lightmagenta\ncolors = [5, 4, 7, 8, 3, 9, 10, 11, 2, 12, 6, 13]\ncolor_count = len(colors)\n\n# keeping a global index means the coloring will pick up where it left off\ncolor_index = 0\n\n# spaces don't need to be colored and commas cannot be because mIRC is dumb\nchars_neutral = \" ,\"\nchars_control = \"\\x01-\\x1f\\x7f-\\x9f\"\n\nregex_chars = \"[^%(n)s%(s)s][%(n)s%(s)s]*\" % { 'n': chars_neutral, 's': chars_control }\nregex_words = \"[^%(n)s]+[%(n)s%(s)s]*\" % { 'n': chars_neutral, 's': chars_control }\n\n\nif w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n SCRIPT_LICENSE, SCRIPT_DESC, \"\", \"\"):\n w.hook_command(\"prism\",\n SCRIPT_DESC,\n \"[-rwmbe] text|-c[wbe] text\",\n \" -r: randomizes the order of the color sequence\\n\"\n \" -w: color entire words instead of individual characters\\n\"\n \" -m: append /me to beginning of output\\n\"\n \" -b: backwards text (entire string is reversed)\\n\"\n \" -e: eye-destroying colors (randomized background colors)\\n\"\n \" -c: specify a separator to turn on colorization\\n\"\n \" eg. 
-c : /topic :howdy howdy howdy\\n\"\n \" text: text to be colored\",\n \"-r|-w|-m|-b|-e|-c\", \"prism_cmd_cb\", \"\")\ndef find_another_color(colorCode):\n otherColor = (unicode(colors[random.randint(1, color_count - 1) % color_count]).rjust(2, \"0\"))\n while (otherColor == colorCode):\n otherColor = (unicode(colors[random.randint(1, color_count - 1) % color_count]).rjust(2, \"0\"))\n return otherColor\n\ndef prism_cmd_cb(data, buffer, args):\n global color_index\n\n input = args.decode(\"UTF-8\")\n input_method = \"command\"\n\n if not input:\n input = w.buffer_get_string(buffer, \"input\")\n input = input.decode(\"UTF-8\")\n input_method = \"keybinding\"\n\n # select a tokenizer and increment mode\n regex = regex_chars\n inc = 1\n bs = 0\n cmd = \"\"\n m = re.match(r'(-[rwmbec]+)\\s+(?:([^ ]+)\\s+(.+?)\\s*\\2)?(.*)', input)\n if m and input_method == \"command\":\n opts = m.group(1)\n input = m.group(4)\n if 'c' in opts:\n cmd = m.group(3)\n if 'w' in opts:\n regex = regex_words\n if 'r' in opts:\n inc = 0\n if 'm' in opts:\n cmd = \"/me\"\n if 'b' in opts:\n input = input[::-1]\n if 'e' in opts:\n bs = 1\n\n output = u\"\"\n tokens = re.findall(regex, input)\n for token in tokens:\n # prefix each token with a color code\n color_code = unicode(colors[color_index % color_count]).rjust(2, \"0\")\n if bs == 1:\n output += u'\\x03' + color_code + ',' + find_another_color(color_code) + token\n else:\n output += u\"\\x03\" + color_code + token\n\n # select the next color or another color at\n # random depending on the options specified\n if inc == 0:\n color_index += random.randint(1, color_count - 1)\n else:\n color_index += inc\n\n # output starting with a / will be executed as a\n # command unless we escape it with a preceding /\n # Commands should use the -c flag\n if len(output) > 0 and output[0] == \"/\":\n output = \"/\" + output\n if len(cmd) > 0:\n output = cmd + ' ' + output\n if input_method == \"keybinding\":\n w.buffer_set(buffer, \"input\", output.encode(\"UTF-8\"))\n else:\n w.command(buffer, output.encode(\"UTF-8\"))\n return w.WEECHAT_RC_OK\n","sub_path":"python/prism.py","file_name":"prism.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"585868939","text":"import pandas as pd\nfrom pprint import pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndays = [\n \"03-09-2020\",\n \"03-10-2020\",\n \"03-11-2020\",\n \"03-12-2020\",\n \"03-13-2020\",\n \"03-14-2020\",\n \"03-15-2020\",\n \"03-16-2020\",\n \"03-17-2020\",\n \"03-18-2020\",\n \"03-19-2020\",\n \"03-20-2020\",\n \"03-21-2020\",\n \"03-22-2020\",\n \"03-23-2020\",\n \"03-24-2020\",\n \"03-25-2020\",\n \"03-26-2020\",\n \"03-27-2020\",\n \"03-28-2020\",\n \"03-29-2020\",\n \"03-30-2020\",\n \"03-31-2020\",\n \"04-01-2020\",\n \"04-02-2020\",\n \"04-03-2020\",\n \"04-04-2020\",\n \"04-05-2020\",\n \"04-06-2020\",\n \"04-07-2020\",\n \"04-08-2020\",\n \"04-09-2020\",\n]\n\ndata_url_template = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/\" \\\n \"csse_covid_19_data/\" \\\n \"csse_covid_19_daily_reports/\"\n\nnames_dict = {\n 'Mainland China': 'China',\n 'China': 'China',\n 'Macau': 'China',\n 'South Korea': 'South Korea',\n 'Korea, South': 'South Korea',\n 'Republic of Korea': 'South Korea',\n 'Italy': 'Italy',\n 'Iran': 'Iran',\n 'Iran (Islamic Republic of)': 'Iran',\n 'France': 'France',\n 'Germany': 'Germany',\n 'Spain': 'Spain',\n 'Japan': 'Japan',\n 'Switzerland': 'Switzerland',\n 
'UK': 'United Kingdom',\n 'United Kingdom': 'United Kingdom',\n 'Netherlands': 'Netherlands',\n 'Belgium': 'Belgium',\n 'Sweden': 'Sweden',\n 'Norway': 'Norway',\n 'Singapore': 'Singapore',\n 'Hong Kong': 'Hong Kong',\n 'Hong Kong SAR': 'Hong Kong',\n 'Malaysia': 'Malaysia',\n 'Bahrain': 'Bahrain',\n 'Austria': 'Austria',\n 'US': 'US',\n 'Kuwait': 'Kuwait',\n 'Iraq': 'Iraq',\n 'Iceland': 'Iceland',\n 'Thailand': 'Thailand',\n 'Greece': 'Greece',\n 'Taiwan': 'Taiwan',\n 'Taiwan*': 'Taiwan',\n 'Taipei and environs': 'Taiwan',\n 'United Arab Emirates': 'United Arab Emirates',\n 'India': 'India',\n 'Australia': 'Australia',\n 'Canada': 'Canada',\n 'Denmark': 'Denmark',\n 'San Marino': 'San Marino',\n 'Lebanon': 'Lebanon',\n 'Palestine': 'Palestine',\n 'occupied Palestinian territory': 'Palestine',\n 'West Bank and Gaza': 'Palestine',\n 'Israel': 'Israel',\n 'Portugal': 'Portugal',\n 'Czech Republic': 'Czech Republic',\n 'Ireland': 'Ireland',\n 'Republic of Ireland': 'Ireland',\n 'Vietnam': 'Vietnam',\n 'Viet Nam': 'Vietnam',\n 'Algeria': 'Algeria',\n 'Oman': 'Oman',\n 'Egypt': 'Egypt',\n 'Finland': 'Finland',\n 'Brazil': 'Brazil',\n 'Ecuador': 'Ecuador',\n 'Russia': 'Russia',\n 'Russian Federation': 'Russia',\n 'Croatia': 'Croatia',\n 'Estonia': 'Estonia',\n 'Azerbaijan': 'Azerbaijan',\n 'Romania': 'Romania',\n 'Argentina': 'Argentina',\n 'Qatar': 'Qatar',\n 'Slovenia': 'Slovenia',\n 'Belarus': 'Belarus',\n 'Mexico': 'Mexico',\n 'Pakistan': 'Pakistan',\n 'Philippines': 'Philippines',\n 'French Guiana': 'French Guiana',\n 'New Zealand': 'New Zealand',\n 'Poland': 'Poland',\n 'Saudi Arabia': 'Saudi Arabia',\n 'Chile': 'Chile',\n 'Georgia': 'Georgia',\n 'Hungary': 'Hungary',\n 'Indonesia': 'Indonesia',\n 'Senegal': 'Senegal',\n 'Bosnia and Herzegovina': 'Bosnia and Herzegovina',\n 'Malta': 'Malta',\n 'North Macedonia': 'Macedonia',\n 'Saint Barthelemy': 'Saint Barthelemy',\n 'Dominican Republic': 'Dominican Republic',\n 'Luxembourg': 'Luxembourg',\n 'Martinique': 'Martinique',\n 'Morocco': 'Morocco',\n 'Afghanistan': 'Afghanistan',\n 'Andorra': 'Andorra',\n 'Armenia': 'Armenia',\n 'Bhutan': 'Bhutan',\n 'Cambodia': 'Cambodia',\n 'Cameroon': 'Cameroon',\n 'Colombia': 'Colombia',\n 'Costa Rica': 'Costa Rica',\n 'Faroe Islands': 'Faroe Islands',\n 'Gibraltar': 'Gibraltar',\n 'Jordan': 'Jordan',\n 'Latvia': 'Latvia',\n 'Liechtenstein': 'Liechtenstein',\n 'Lithuania': 'Lithuania',\n 'Monaco': 'Monaco',\n 'Nepal': 'Nepal',\n 'Nigeria': 'Nigeria',\n 'Peru': 'Peru',\n 'Serbia': 'Serbia',\n 'Slovakia': 'Slovakia',\n 'South Africa': 'South Africa',\n 'Sri Lanka': 'Sri Lanka',\n 'Togo': 'Togo',\n 'Tunisia': 'Tunisia',\n 'Ukraine': 'Ukraine',\n 'Vatican City': 'Vatican City',\n 'Holy See': 'Vatican City',\n 'Bulgaria': 'Bulgaria',\n 'Maldives': 'Maldives',\n 'Macao SAR': 'Macao',\n 'Moldova': 'Moldova',\n 'Republic of Moldova': 'Moldova',\n 'St. 
Martin': 'Saint Martin',\n 'Saint Martin': 'Saint Martin',\n 'Bangladesh': 'Bangladesh',\n 'Paraguay': 'Paraguay',\n 'Albania': 'Albania',\n 'Cyprus': 'Cyprus',\n 'Brunei': 'Brunei',\n 'Burkina Faso': 'Burkina Faso',\n 'Mongolia': 'Mongolia',\n 'Panama': 'Panama',\n 'Czechia': 'Czech Republic',\n 'Bolivia': 'Bolivia',\n 'Honduras': 'Honduras',\n 'Congo (Kinshasa)': 'Congo',\n 'Republic of the Congo': 'Congo',\n 'Congo (Brazzaville)': 'Congo',\n \"Cote d'Ivoire\": \"Cote d'Ivoire\",\n 'Jamaica': 'Jamaica',\n 'Reunion': 'Reunion',\n 'Turkey': 'Turkey',\n 'Cuba': 'Cuba',\n 'Guyana': 'Guyana',\n 'Kazakhstan': 'Kazakhstan',\n 'Cayman Islands': 'Cayman Islands',\n 'Guadeloupe': 'Guadeloupe',\n 'Ethiopia': 'Ethiopia',\n 'Sudan': 'Sudan',\n 'Guinea': 'Guinea',\n 'Antigua and Barbuda': 'Antigua and Barbuda',\n 'Aruba': 'Aruba',\n 'Kenya': 'Kenya',\n 'Uruguay': 'Uruguay',\n 'Ghana': 'Ghana',\n 'Jersey': 'Jersey',\n 'Namibia': 'Namibia',\n 'Seychelles': 'Seychelles',\n 'Trinidad and Tobago': 'Trinidad and Tobago',\n 'Venezuela': 'Venezuela',\n 'Curacao': 'Curacao',\n 'Eswatini': 'Eswatini',\n 'Gabon': 'Gabon',\n 'Guatemala': 'Guatemala',\n 'Guernsey': 'Guernsey',\n 'Mauritania': 'Mauritania',\n 'Rwanda': 'Rwanda',\n 'Saint Lucia': 'Saint Lucia',\n 'Saint Vincent and the Grenadines': 'Saint Vincent and the Grenadines',\n 'Suriname': 'Suriname',\n 'Kosovo': 'Kosovo',\n 'Central African Republic': 'Central African Republic',\n 'Equatorial Guinea': 'Equatorial Guinea',\n 'Uzbekistan': 'Uzbekistan',\n 'Guam': 'Guam',\n 'Puerto Rico': 'Puerto Rico',\n 'Benin': 'Benin',\n 'Greenland': 'Greenland',\n 'Liberia': 'Liberia',\n 'Mayotte': 'Mayotte',\n 'Somalia': 'Somalia',\n 'Tanzania': 'Tanzania',\n 'The Bahamas': 'Bahamas',\n 'Bahamas, The': 'Bahamas',\n 'Bahamas': 'Bahamas',\n 'The Gambia': 'Gambia',\n 'Gambia, The': 'Gambia',\n 'Gambia': 'Gambia',\n 'Western Sahara': 'Western Sahara',\n 'Barbados': 'Barbados',\n 'Montenegro': 'Montenegro',\n 'Kyrgyzstan': 'Kyrgyzstan',\n 'Mauritius': 'Mauritius',\n 'Zambia': 'Zambia',\n 'Djibouti': 'Djibouti',\n 'Chad': 'Chad',\n 'El Salvador': 'El Salvador',\n 'Fiji': 'Fiji',\n 'Nicaragua': 'Nicaragua',\n 'Madagascar': 'Madagascar',\n 'Haiti': 'Haiti',\n 'Angola': 'Angola',\n 'Cabo Verde': 'Cape Verde',\n 'Cape Verde': 'Cape Verde',\n 'Niger': 'Niger',\n 'Papua New Guinea': 'Papua New Guinea',\n 'Zimbabwe': 'Zimbabwe',\n 'East Timor': 'East Timor',\n 'Eritrea': 'Eritrea',\n 'Uganda': 'Uganda',\n 'Dominica': 'Dominica',\n 'Grenada': 'Grenada',\n 'Mozambique': 'Mozambique',\n 'Syria': 'Syria',\n 'Belize': 'Belize',\n 'Laos': 'Laos',\n 'Libya': 'Libya',\n 'Guinea-Bissau': 'Guinea-Bissau',\n 'Mali': 'Mali',\n 'Saint Kitts and Nevis': 'Saint Kitts and Nevis',\n 'Burma': 'Burma',\n 'Botswana': 'Botswana',\n 'Burundi': 'Burundi',\n 'Sierra Leone': 'Sierra Leone',\n 'Malawi': 'Malawi',\n 'South Sudan': 'South Sudan',\n 'Sao Tome and Principe': 'Sao Tome and Principe'\n}\n\ndata_dict = {}\n\nfor count, day in enumerate(days):\n data = pd.read_csv(data_url_template + day + \".csv\", error_bad_lines=False)\n print(\"Day:\", day.split(\".\")[0])\n for index, item in data.iterrows():\n if data[\"Confirmed\"][index] == 0 and data[\"Deaths\"][index] == 0 and data[\"Recovered\"][index] == 0:\n continue\n if \"Country/Region\" in data.columns:\n if data[\"Country/Region\"][index] in names_dict.keys():\n if names_dict[data[\"Country/Region\"][index]] in data_dict.keys():\n if len(data_dict[names_dict[data[\"Country/Region\"][index]]]) == count + 1:\n 
data_dict[names_dict[data[\"Country/Region\"][index]]][count][\"Confirmed\"] += data[\"Confirmed\"][index]\n data_dict[names_dict[data[\"Country/Region\"][index]]][count][\"Deaths\"] += data[\"Deaths\"][index]\n data_dict[names_dict[data[\"Country/Region\"][index]]][count][\"Recovered\"] += data[\"Recovered\"][index]\n else:\n data_dict[names_dict[data[\"Country/Region\"][index]]].append({\n \"Confirmed\": data[\"Confirmed\"][index],\n \"Deaths\": data[\"Deaths\"][index],\n \"Recovered\": data[\"Recovered\"][index],\n \"Day\": day\n })\n else:\n data_dict.setdefault(names_dict[data[\"Country/Region\"][index]], [{\n \"Confirmed\": data[\"Confirmed\"][index],\n \"Deaths\": data[\"Deaths\"][index],\n \"Recovered\": data[\"Recovered\"][index],\n \"Day\": day\n }])\n else:\n if data[\"Country_Region\"][index] in names_dict.keys():\n if names_dict[data[\"Country_Region\"][index]] in data_dict.keys():\n if len(data_dict[names_dict[data[\"Country_Region\"][index]]]) == count + 1:\n data_dict[names_dict[data[\"Country_Region\"][index]]][count][\"Confirmed\"] += data[\"Confirmed\"][index]\n data_dict[names_dict[data[\"Country_Region\"][index]]][count][\"Deaths\"] += data[\"Deaths\"][index]\n data_dict[names_dict[data[\"Country_Region\"][index]]][count][\"Recovered\"] += data[\"Recovered\"][index]\n else:\n data_dict[names_dict[data[\"Country_Region\"][index]]].append({\n \"Confirmed\": data[\"Confirmed\"][index],\n \"Deaths\": data[\"Deaths\"][index],\n \"Recovered\": data[\"Recovered\"][index],\n \"Day\": day\n })\n else:\n data_dict.setdefault(names_dict[data[\"Country_Region\"][index]], [{\n \"Confirmed\": data[\"Confirmed\"][index],\n \"Deaths\": data[\"Deaths\"][index],\n \"Recovered\": data[\"Recovered\"][index],\n \"Day\": day\n }])\n\nfor country in data_dict:\n print(\"Country:\", country)\n ax = plt.subplot(projection='polar')\n plt.axis('off')\n\n theta = ((np.pi * 2) / len(data_dict[country]))\n width = ((np.pi * 2) / len(data_dict[country]))\n for index, day in enumerate(data_dict[country]):\n # Colors from\n # https://www.colourlovers.com/palette/56122/Sweet_Lolly\n\n # Confirmed\n ax.bar((np.pi / 2) + (theta * index), day[\"Confirmed\"] - day[\"Deaths\"] - day[\"Recovered\"], width=width,\n color='#FABE28', bottom=0.0, alpha=1)\n # Deaths\n ax.bar((np.pi / 2) + (theta * index), day[\"Deaths\"], width=width, color='#FF003C',\n bottom=day[\"Confirmed\"] - day[\"Deaths\"] - day[\"Recovered\"], alpha=1)\n # Recovered\n ax.bar((np.pi / 2) + (theta * index), day[\"Recovered\"], width=width, color='#88C100',\n bottom=day[\"Confirmed\"] - day[\"Recovered\"], alpha=1)\n\n plt.savefig(\n r\"C:\\Users\\killa\\Documents\\GitHub\\killascheuring.github.io\\images\\polar_graphs\\%s_plot.svg\" % country.lower().replace(\n \" \", \"_\"), transparent=True)\n ax.remove()\n","sub_path":"visualization_polar_bar_graph.py","file_name":"visualization_polar_bar_graph.py","file_ext":"py","file_size_in_byte":11707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"179608713","text":"from bs4 import BeautifulSoup\r\nfrom dotenv import load_dotenv\r\nimport re\r\nimport time\r\nimport os\r\nimport requests\r\nload_dotenv()\r\n\r\nHEADER = {'User-Agent': 'Mozilla / 5.0 (iPhone; CPU iPhone OS 10_3 come Mac OS X) AppleWebKit / 602.1.50 (KHTML, come Gecko) CriOS / 56.0.2924.75 Mobile / 14E5239e Safari / 602.1 RuxitSynthetic / 1.0 v8266421968 t1099441676816697146 ath9b965f92 altp ='}\r\nTOKEN = os.getenv('TOKEN')\r\nCHAT_ID = os.getenv('CHAT_ID')\r\nBASE_URL_1 = 
'https://www.idealo.it/confronta-prezzi/'\r\nBASE_URL_2 = 'https://www.trovaprezzi.it/'\r\nBASE_URL_3 = 'https://www.amazon.it/'\r\n\r\nprint('Inviando')\r\nprint('.')\r\ntesto_inizio = \"⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️\"\r\nprint('.')\r\ni = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_inizio}')\r\nprint('.')\r\nprint('Inviato!')\r\n\r\nprint('Inviando')\r\nora = time.strftime(\"%H:%M:%S\")\r\nprint('.')\r\ndata = time.strftime(\"%d/%m/%Y\")\r\nprint('.')\r\ntesto_tempo = \"Sono le \" + ora + \" del \" + data + \", quindi:\"\r\nprint('.')\r\nt = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_tempo}')\r\nprint('Inviato!')\r\n\r\ndef main(event={}, context={}):\r\n name = 'Intel Core i5-10600K'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '200332057/intel-core-i5-10600k-box-socket-1200-14nm-bx8070110600k.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result1_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_1_1 = result1_1[0] + '.' + result1_1[1]\r\n prezzo_virgola_1_1 =result1_1[0] + ',' + result1_1[1]\r\n print('.')\r\n print (prezzo_virgola_1_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_processori_i_5_10600k.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result1_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_1_2 = result1_2[0] + '.' + result1_2[1]\r\n prezzo_virgola_1_2 =result1_2[0] + ',' + result1_2[1]\r\n print('.')\r\n print (prezzo_virgola_1_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Intel-Core-i5-10600K-attacco-LGA1200/dp/B0883NTLXM/ref=pd_sbs_5?pd_rd_w=WFz8m&pf_rd_p=467d4f56-31c8-431b-b939-b3298e95b84d&pf_rd_r=8A05BVXPZGZ7QC6A13K2&pd_rd_r=0cacb9a2-1ebf-4ef8-954d-f9a4f6860b82&pd_rd_wg=XM3rv&pd_rd_i=B0883NTLXM&psc=1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result1_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_1_3 = result1_3[0] + '.' + result1_3[1]\r\n prezzo_virgola_1_3 =result1_3[0] + ',' + result1_3[1]\r\n print('.')\r\n print (prezzo_virgola_1_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo1 = min (prezzo_finale_1_1, prezzo_finale_1_2, prezzo_finale_1_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo1 + ' €')\r\n print('Inviato!')\r\n \r\n database1 = open (\"Minimi1.txt\", \"r\").read()\r\n lista1 = re.findall(r'\\b\\d+\\b', database1)\r\n risultato1 = lista1[-2] + '.' 
+ lista1[-1]\r\n risultato_virgola1 = lista1[-2] + ',' + lista1[-1]\r\n \r\n if minimo1 == risultato1:\r\n andamento1 = '😴 (fermo)'\r\n elif minimo1 < risultato1:\r\n andamento1 = '📉 (sta calando da ' + risultato_virgola1 + ' €)'\r\n else:\r\n andamento1 = '📈 (sta salendo da ' + risultato_virgola1 + ' €)'\r\n \r\n testo_messaggio1_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_1_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento1\r\n testo_messaggio1_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_1_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento1\r\n testo_messaggio1_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_1_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento1\r\n \r\n if prezzo_finale_1_3 == minimo1:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio1_3}')\r\n elif prezzo_finale_1_1 == minimo1:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio1_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio1_2}')\r\n \r\n database1 = open (\"Minimi1.txt\", \"a\")\r\n database1.write (\"\\n\" + minimo1)\r\n database1.close ()\r\n \r\n time.sleep(3)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'Intel Core i7-10700K'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '200331878/intel-core-i7-10700k.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result2_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_2_1 = result2_1[0] + '.' + result2_1[1]\r\n prezzo_virgola_2_1 =result2_1[0] + ',' + result2_1[1]\r\n print('.')\r\n print (prezzo_virgola_2_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_processori_i_7_10700k.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result2_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_2_2 = result2_2[0] + '.' + result2_2[1]\r\n prezzo_virgola_2_2 =result2_2[0] + ',' + result2_2[1]\r\n print('.')\r\n print (prezzo_virgola_2_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Intel-Core-i7-10700K-attacco-LGA1200/dp/B0883P8CNM/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&crid=19V4V2PJ3FTQ&dchild=1&keywords=i7+10700k&qid=1610974971&sprefix=i7+%2Caps%2C389&sr=8-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result2_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_2_3 = result2_3[0] + '.' 
+ result2_3[1]\r\n prezzo_virgola_2_3 =result2_3[0] + ',' + result2_3[1]\r\n print('.')\r\n print (prezzo_virgola_2_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo2 = min (prezzo_finale_2_1, prezzo_finale_2_2, prezzo_finale_2_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo2 + ' €')\r\n print('Inviato!')\r\n \r\n database2 = open (\"Minimi2.txt\", \"r\").read()\r\n lista2 = re.findall(r'\\b\\d+\\b', database2)\r\n risultato2 = lista2[-2] + '.' + lista2[-1]\r\n risultato_virgola2 = lista2[-2] + ',' + lista2[-1]\r\n \r\n if minimo2 == risultato2:\r\n andamento2 = '😴 (fermo)'\r\n elif minimo2 < risultato2:\r\n andamento2 = '📉 (sta calando da ' + risultato_virgola2 + ' €)'\r\n else:\r\n andamento2 = '📈 (sta salendo da ' + risultato_virgola2 + ' €)'\r\n \r\n testo_messaggio2_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_2_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento2\r\n testo_messaggio2_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_2_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento2\r\n testo_messaggio2_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_2_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento2\r\n \r\n if prezzo_finale_2_3 == minimo2:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio2_3}')\r\n elif prezzo_finale_2_1 == minimo2:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio2_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio2_2}')\r\n \r\n database2 = open (\"Minimi2.txt\", \"a\")\r\n database2.write (\"\\n\" + minimo2)\r\n database2.close ()\r\n \r\n time.sleep(3)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'Intel Core i9-10900K'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '200329896/intel-core-i9-10900k.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result3_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_3_1 = result3_1[0] + '.' + result3_1[1]\r\n prezzo_virgola_3_1 =result3_1[0] + ',' + result3_1[1]\r\n print('.')\r\n print (prezzo_virgola_3_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_processori_i_9_10900k.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result3_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_3_2 = result3_2[0] + '.' 
+ result3_2[1]\r\n prezzo_virgola_3_2 =result3_2[0] + ',' + result3_2[1]\r\n print('.')\r\n print (prezzo_virgola_3_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Intel-BX8070110900K-Core-i9-10900K-fase/dp/B0883NZC43'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result3_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_3_3 = result3_3[0] + '.' + result3_3[1]\r\n prezzo_virgola_3_3 =result3_3[0] + ',' + result3_3[1]\r\n print('.')\r\n print (prezzo_virgola_3_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo3 = min (prezzo_finale_3_1, prezzo_finale_3_2, prezzo_finale_3_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo3 + ' €')\r\n print('Inviato!')\r\n \r\n database3 = open (\"Minimi3.txt\", \"r\").read()\r\n lista3 = re.findall(r'\\b\\d+\\b', database3)\r\n risultato3 = lista3[-2] + '.' + lista3[-1]\r\n risultato_virgola3 = lista3[-2] + ',' + lista3[-1]\r\n \r\n if minimo3 == risultato3:\r\n andamento3 = '😴 (fermo)'\r\n elif minimo3 < risultato3:\r\n andamento3 = '📉 (sta calando da ' + risultato_virgola3 + ' €)'\r\n else:\r\n andamento3 = '📈 (sta salendo da ' + risultato_virgola3 + ' €)'\r\n \r\n testo_messaggio3_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_3_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento3\r\n testo_messaggio3_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_3_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento3\r\n testo_messaggio3_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_3_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento3\r\n \r\n if prezzo_finale_3_3 == minimo3:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio3_3}')\r\n elif prezzo_finale_3_1 == minimo3:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio3_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio3_2}')\r\n \r\n database3 = open (\"Minimi3.txt\", \"a\")\r\n database3.write (\"\\n\" + minimo3)\r\n database3.close () \r\n \r\n time.sleep(3)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'MSI MPG Z490 Gaming Plus'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '200295282/msi-mpg-z490-gaming-plus.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result4_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_4_1 = result4_1[0] + '.' 
+ result4_1[1]\r\n prezzo_virgola_4_1 =result4_1[0] + ',' + result4_1[1]\r\n print('.')\r\n print (prezzo_virgola_4_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'schede-madri/prezzi-scheda-prodotto/msi_mpg_z490_gaming_plus-v'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"price_range\").text\r\n print('.')\r\n result4_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_4_2 = result4_2[0] + '.' + result4_2[1]\r\n prezzo_virgola_4_2 =result4_2[0] + ',' + result4_2[1]\r\n print('.')\r\n print (prezzo_virgola_4_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'MSI-Z490-GAMING-PLUS-S-1200/dp/B0886QSX7N/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=MSI+MPG+Z490+Gaming+Plus&qid=1611044080&sr=8-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result4_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_4_3 = result4_3[0] + '.' + result4_3[1]\r\n prezzo_virgola_4_3 =result4_3[0] + ',' + result4_3[1]\r\n print('.')\r\n print (prezzo_virgola_4_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo4 = min (prezzo_finale_4_1, prezzo_finale_4_2, prezzo_finale_4_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo4 + ' €')\r\n print('Inviato!')\r\n \r\n database4 = open (\"Minimi4.txt\", \"r\").read()\r\n lista4 = re.findall(r'\\b\\d+\\b', database4)\r\n risultato4 = lista4[-2] + '.' 
+ lista4[-1]\r\n risultato_virgola4 = lista4[-2] + ',' + lista4[-1]\r\n \r\n if minimo4 == risultato4:\r\n andamento4 = '😴 (fermo)'\r\n elif minimo4 < risultato4:\r\n andamento4 = '📉 (sta calando da ' + risultato_virgola4 + ' €)'\r\n else:\r\n andamento4 = '📈 (sta salendo da ' + risultato_virgola4 + ' €)'\r\n \r\n testo_messaggio4_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_4_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento4\r\n testo_messaggio4_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_4_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento4\r\n testo_messaggio4_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_4_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento4\r\n \r\n if prezzo_finale_4_3 == minimo4:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio4_3}')\r\n elif prezzo_finale_4_1 == minimo4:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio4_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio4_2}')\r\n \r\n database4 = open (\"Minimi4.txt\", \"a\")\r\n database4.write (\"\\n\" + minimo4)\r\n database4.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'Corsair Vengeance RGB Pro 16 GB (2 x 8 GB) 3600 Mhz'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '6844731/corsair-vengeance-rgb-pro-16gb-kit-ddr4-3600-cl18-cmw16gx4m2d3600c18.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result5_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_5_1 = result5_1[0] + '.' + result5_1[1]\r\n prezzo_virgola_5_1 =result5_1[0] + ',' + result5_1[1]\r\n print('.')\r\n print (prezzo_virgola_5_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_ram_ddr4_16gb_kit_2x8gb_pc_3600_corsair_vengeance_rgb_pro_cmw16gx4m2d3600c18.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result5_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_5_2 = result5_2[0] + '.' + result5_2[1]\r\n prezzo_virgola_5_2 =result5_2[0] + ',' + result5_2[1]\r\n print('.')\r\n print (prezzo_virgola_5_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-Vengeance-Black-DDR4-RAM-memoria/dp/B07TB3R9JB/ref=sr_1_3?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=Corsair+Vengeance+RGB+Pro+16GB+Kit+DDR4-3600&qid=1611045205&sr=8-3'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result5_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_5_3 = result5_3[0] + '.' 
+ result5_3[1]\r\n prezzo_virgola_5_3 =result5_3[0] + ',' + result5_3[1]\r\n print('.')\r\n print (prezzo_virgola_5_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo5 = min (prezzo_finale_5_1, prezzo_finale_5_2, prezzo_finale_5_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo5 + ' €')\r\n print('Inviato!')\r\n \r\n database5 = open (\"Minimi5.txt\", \"r\").read()\r\n lista5 = re.findall(r'\\b\\d+\\b', database5)\r\n risultato5 = lista5[-2] + '.' + lista5[-1]\r\n risultato_virgola5 = lista5[-2] + ',' + lista5[-1]\r\n \r\n if minimo5 == risultato5:\r\n andamento5 = '😴 (fermo)'\r\n elif minimo5 < risultato5:\r\n andamento5 = '📉 (sta calando da ' + risultato_virgola5 + ' €)'\r\n else:\r\n andamento5 = '📈 (sta salendo da ' + risultato_virgola5 + ' €)'\r\n \r\n testo_messaggio5_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_5_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento5\r\n testo_messaggio5_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_5_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento5\r\n testo_messaggio5_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_5_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento5\r\n \r\n if prezzo_finale_5_3 == minimo5:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio5_3}')\r\n elif prezzo_finale_5_1 == minimo5:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio5_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio5_2}')\r\n \r\n database5 = open (\"Minimi5.txt\", \"a\")\r\n database5.write (\"\\n\" + minimo5)\r\n database5.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'CoolerMaster MasterBox MB611 ARGB'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '7074513/coolermaster-masterbox-mb511-argb.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result6_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_6_1 = result6_1[0] + '.' + result6_1[1]\r\n prezzo_virgola_6_1 =result6_1[0] + ',' + result6_1[1]\r\n print('.')\r\n print (prezzo_virgola_6_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Cooler-Master-MasterBox-MB511-MCB-B511D-KGNN-RGA/dp/B0839XNV6H/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=CoolerMaster+MasterBox+MB511+ARGB&qid=1611046309&sr=8-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result6_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_6_3 = result6_3[0] + '.' 
+ result6_3[1]\r\n prezzo_virgola_6_3 =result6_3[0] + ',' + result6_3[1]\r\n print('.')\r\n print (prezzo_virgola_6_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo6 = min (prezzo_finale_6_1, prezzo_finale_6_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo6 + ' €')\r\n print('Inviato!')\r\n \r\n database6 = open (\"Minimi6.txt\", \"r\").read()\r\n lista6 = re.findall(r'\\b\\d+\\b', database6)\r\n risultato6 = lista6[-2] + '.' + lista6[-1]\r\n risultato_virgola6 = lista6[-2] + ',' + lista6[-1]\r\n \r\n if minimo6 == risultato6:\r\n andamento6 = '😴 (fermo)'\r\n elif minimo6 < risultato6:\r\n andamento6 = '📉 (sta calando da ' + risultato_virgola6 + ' €)'\r\n else:\r\n andamento6 = '📈 (sta salendo da ' + risultato_virgola6 + ' €)'\r\n \r\n testo_messaggio6_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_6_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento6\r\n testo_messaggio6_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_6_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento6\r\n \r\n if prezzo_finale_6_3 == minimo6:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio6_3}')\r\n elif prezzo_finale_6_1 == minimo6:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio6_1}')\r\n \r\n database6 = open (\"Minimi6.txt\", \"a\")\r\n database6.write (\"\\n\" + minimo6)\r\n database6.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Corsair RM750X'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '6052663/corsair-rm750x-2018-750w-black.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result7_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_7_1 = result7_1[0] + '.' + result7_1[1]\r\n prezzo_virgola_7_1 =result7_1[0] + ',' + result7_1[1]\r\n print('.')\r\n print (prezzo_virgola_7_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'Fprezzo_case-alimentatori_corsair_rm750x.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"alert_price_tab\").text\r\n print('.')\r\n result7_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_7_2 = result7_2[0] + '.' + result7_2[1]\r\n prezzo_virgola_7_2 =result7_2[0] + ',' + result7_2[1]\r\n print('.')\r\n print (prezzo_virgola_7_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-RM750x-Alimentatore-Completamente-Modulare/dp/B07GY3VFW8/ref=sr_1_fkmr0_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=Corsair+RM750X&qid=1611050396&sr=8-1-fkmr0'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result7_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_7_3 = result7_3[0] + '.' 
+ result7_3[1]\r\n prezzo_virgola_7_3 =result7_3[0] + ',' + result7_3[1]\r\n print('.')\r\n print (prezzo_virgola_7_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo7 = min (prezzo_finale_7_1, prezzo_finale_7_2, prezzo_finale_7_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo7 + ' €')\r\n print('Inviato!')\r\n \r\n database7 = open (\"Minimi7.txt\", \"r\").read()\r\n lista7 = re.findall(r'\\b\\d+\\b', database7)\r\n risultato7 = lista7[-2] + '.' + lista7[-1]\r\n risultato_virgola7 = lista7[-2] + ',' + lista7[-1]\r\n \r\n if minimo7 == risultato7:\r\n andamento7 = '😴 (fermo)'\r\n elif minimo7 < risultato7:\r\n andamento7 = '📉 (sta calando da ' + risultato_virgola7 + ' €)'\r\n else:\r\n andamento7 = '📈 (sta salendo da ' + risultato_virgola7 + ' €)'\r\n \r\n testo_messaggio7_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_7_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento7\r\n testo_messaggio7_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_7_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento7\r\n testo_messaggio7_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_7_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento7\r\n \r\n if prezzo_finale_7_3 == minimo7:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio7_3}')\r\n elif prezzo_finale_7_1 == minimo7:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio7_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio7_2}')\r\n \r\n database7 = open (\"Minimi7.txt\", \"a\")\r\n database7.write (\"\\n\" + minimo7)\r\n database7.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'Corsair RM850X'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '6317984/corsair-rm850x-2018-850w.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result8_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_8_1 = result8_1[0] + '.' + result8_1[1]\r\n prezzo_virgola_8_1 =result8_1[0] + ',' + result8_1[1]\r\n print('.')\r\n print (prezzo_virgola_8_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-RM850x-Alimentatore-Completamente-Modulare/dp/B07GY4ZCQT/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=RM850x&qid=1611053453&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result8_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_8_3 = result8_3[0] + '.' 
+ result8_3[1]\r\n prezzo_virgola_8_3 =result8_3[0] + ',' + result8_3[1]\r\n print('.')\r\n print (prezzo_virgola_8_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo8 = min (prezzo_finale_8_1, prezzo_finale_8_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo8 + ' €')\r\n print('Inviato!')\r\n \r\n database8 = open (\"Minimi8.txt\", \"r\").read()\r\n lista8 = re.findall(r'\\b\\d+\\b', database8)\r\n risultato8 = lista8[-2] + '.' + lista8[-1]\r\n risultato_virgola8 = lista8[-2] + ',' + lista8[-1]\r\n \r\n if minimo8 == risultato8:\r\n andamento8 = '😴 (fermo)'\r\n elif minimo8 < risultato8:\r\n andamento8 = '📉 (sta calando da ' + risultato_virgola8 + ' €)'\r\n else:\r\n andamento8 = '📈 (sta salendo da ' + risultato_virgola8 + ' €)'\r\n \r\n testo_messaggio8_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_8_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento8\r\n testo_messaggio8_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_8_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento8\r\n \r\n if prezzo_finale_8_3 == minimo8:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio8_3}')\r\n elif prezzo_finale_8_1 == minimo8:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio8_1}')\r\n \r\n database8 = open (\"Minimi8.txt\", \"a\")\r\n database8.write (\"\\n\" + minimo8)\r\n database8.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Corsair RM850i'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '4830652/corsair-rm850i-850w.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result9_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_9_1 = result9_1[0] + '.' + result9_1[1]\r\n prezzo_virgola_9_1 =result9_1[0] + ',' + result9_1[1]\r\n print('.')\r\n print (prezzo_virgola_9_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'Fprezzo_case-alimentatori_corsair_rm850i.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"alert_price_tab\").text\r\n print('.')\r\n result9_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_9_2 = result9_2[0] + '.' + result9_2[1]\r\n prezzo_virgola_9_2 =result9_2[0] + ',' + result9_2[1]\r\n print('.')\r\n print (prezzo_virgola_9_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-Alimentatore-Completamente-Modulare-Digital/dp/B00ZRL7LUE/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=RM850i&qid=1611053830&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result9_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_9_3 = result9_3[0] + '.' 
+ result9_3[1]\r\n prezzo_virgola_9_3 =result9_3[0] + ',' + result9_3[1]\r\n print('.')\r\n print (prezzo_virgola_9_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo9 = min (prezzo_finale_9_1, prezzo_finale_9_2, prezzo_finale_9_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo9 + ' €')\r\n print('Inviato!')\r\n \r\n database9 = open (\"Minimi9.txt\", \"r\").read()\r\n lista9 = re.findall(r'\\b\\d+\\b', database9)\r\n risultato9 = lista9[-2] + '.' + lista9[-1]\r\n risultato_virgola9 = lista9[-2] + ',' + lista9[-1]\r\n \r\n if minimo9 == risultato9:\r\n andamento9 = '😴 (fermo)'\r\n elif minimo9 < risultato9:\r\n andamento9 = '📉 (sta calando da ' + risultato_virgola9 + ' €)'\r\n else:\r\n andamento9 = '📈 (sta salendo da ' + risultato_virgola9 + ' €)'\r\n \r\n testo_messaggio9_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_9_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento9\r\n testo_messaggio9_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_9_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento9\r\n testo_messaggio9_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_9_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento9\r\n \r\n if prezzo_finale_9_3 == minimo9:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio9_3}')\r\n elif prezzo_finale_9_1 == minimo9:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio9_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio9_2}')\r\n \r\n database9 = open (\"Minimi9.txt\", \"a\")\r\n database9.write (\"\\n\" + minimo9)\r\n database9.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef main(event={}, context={}):\r\n name = 'Corsair RM1000X'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '4867188/corsair-rm1000x-1000w.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result10_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_10_1 = result10_1[0] + '.' + result10_1[1]\r\n prezzo_virgola_10_1 =result10_1[0] + ',' + result10_1[1]\r\n print('.')\r\n print (prezzo_virgola_10_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'Fprezzo_case-alimentatori_corsair_rm1000x.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"alert_price_tab\").text\r\n print('.')\r\n result10_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_10_2 = result10_2[0] + '.' 
+ result10_2[1]\r\n prezzo_virgola_10_2 =result10_2[0] + ',' + result10_2[1]\r\n print('.')\r\n print (prezzo_virgola_10_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-RM1000x-Alimentatore-Completamente-Modulare/dp/B015Q7F5AQ/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=RM1000x&qid=1611054269&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result10_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_10_3 = result10_3[0] + '.' + result10_3[1]\r\n prezzo_virgola_10_3 =result10_3[0] + ',' + result10_3[1]\r\n print('.')\r\n print (prezzo_virgola_10_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo10 = min (prezzo_finale_10_1, prezzo_finale_10_2, prezzo_finale_10_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo10 + ' €')\r\n print('Inviato!')\r\n \r\n database10 = open (\"Minimi10.txt\", \"r\").read()\r\n lista10 = re.findall(r'\\b\\d+\\b', database10)\r\n risultato10 = lista10[-2] + '.' + lista10[-1]\r\n risultato_virgola10 = lista10[-2] + ',' + lista10[-1]\r\n \r\n if minimo10 == risultato10:\r\n andamento10 = '😴 (fermo)'\r\n elif minimo10 < risultato10:\r\n andamento10 = '📉 (sta calando da ' + risultato_virgola10 + ' €)'\r\n else:\r\n andamento10 = '📈 (sta salendo da ' + risultato_virgola10 + ' €)'\r\n \r\n testo_messaggio10_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_10_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento10\r\n testo_messaggio10_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_10_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento10\r\n testo_messaggio10_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_10_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento10\r\n \r\n if prezzo_finale_10_3 == minimo10:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio10_3}')\r\n elif prezzo_finale_10_1 == minimo10:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio10_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio10_2}')\r\n \r\n database10 = open (\"Minimi10.txt\", \"a\")\r\n database10.write (\"\\n\" + minimo10)\r\n database10.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Corsair RM1000i'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '4794385/corsair-rm1000i-1000w.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result11_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_11_1 = result11_1[0] + '.' 
+ result11_1[1]\r\n prezzo_virgola_11_1 =result11_1[0] + ',' + result11_1[1]\r\n print('.')\r\n print (prezzo_virgola_11_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Corsair-RM1000i-Alimentatore-Completamente-Modulare/dp/B00ZRL7WYY/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=Corsair+RM1000i&qid=1611054481&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result11_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_11_3 = result11_3[0] + '.' + result11_3[1]\r\n prezzo_virgola_11_3 =result11_3[0] + ',' + result11_3[1]\r\n print('.')\r\n print (prezzo_virgola_11_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo11 = min (prezzo_finale_11_1, prezzo_finale_11_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo11 + ' €')\r\n print('Inviato!')\r\n \r\n database11 = open (\"Minimi11.txt\", \"r\").read()\r\n lista11 = re.findall(r'\\b\\d+\\b', database11)\r\n risultato11 = lista11[-2] + '.' + lista11[-1]\r\n risultato_virgola11 = lista11[-2] + ',' + lista11[-1]\r\n \r\n if minimo11 == risultato11:\r\n andamento11 = '😴 (fermo)'\r\n elif minimo11 < risultato11:\r\n andamento11 = '📉 (sta calando da ' + risultato_virgola11 + ' €)'\r\n else:\r\n andamento11 = '📈 (sta salendo da ' + risultato_virgola11 + ' €)'\r\n \r\n testo_messaggio11_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_11_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento11\r\n testo_messaggio11_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_11_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento11\r\n \r\n if prezzo_finale_11_3 == minimo11:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio11_3}')\r\n elif prezzo_finale_11_1 == minimo11:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio11_1}')\r\n \r\n database11 = open (\"Minimi11.txt\", \"a\")\r\n database11.write (\"\\n\" + minimo11)\r\n database11.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Dissipatore cpu a liquido Msi Mag Core Liquid 240R'\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_dissipatori-e-ventole_msi_mag_core_liquid_240r.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result12_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_12_2 = result12_2[0] + '.' 
+ result12_2[1]\r\n prezzo_virgola_12_2 =result12_2[0] + ',' + result12_2[1]\r\n print('.')\r\n print (prezzo_virgola_12_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'MSI-CoreLiquid-Dissipatore-radiatore-Compatibile/dp/B089QZ9GX8/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=msi+mag+coreliquid+240r&qid=1611055106&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result12_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_12_3 = result12_3[0] + '.' + result12_3[1]\r\n prezzo_virgola_12_3 =result12_3[0] + ',' + result12_3[1]\r\n print('.')\r\n print (prezzo_virgola_12_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo12 = min (prezzo_finale_12_2, prezzo_finale_12_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo12 + ' €')\r\n print('Inviato!')\r\n \r\n database12 = open (\"Minimi12.txt\", \"r\").read()\r\n lista12 = re.findall(r'\\b\\d+\\b', database12)\r\n risultato12 = lista12[-2] + '.' + lista12[-1]\r\n risultato_virgola12 = lista12[-2] + ',' + lista12[-1]\r\n \r\n if minimo12 == risultato12:\r\n andamento12 = '😴 (fermo)'\r\n elif minimo12 < risultato12:\r\n andamento12 = '📉 (sta calando da ' + risultato_virgola12 + ' €)'\r\n else:\r\n andamento12 = '📈 (sta salendo da ' + risultato_virgola12 + ' €)'\r\n \r\n testo_messaggio12_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_12_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento12\r\n testo_messaggio12_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_12_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento12\r\n \r\n if prezzo_finale_12_3 == minimo12:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio12_3}')\r\n elif prezzo_finale_12_2 == minimo12:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio12_2}')\r\n \r\n database12 = open (\"Minimi12.txt\", \"a\")\r\n database12.write (\"\\n\" + minimo12)\r\n database12.close ()\r\n \r\n time.sleep(3)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Dissipatore cpu a liquido Msi Mag Core Liquid 360R'\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_dissipatori-e-ventole_msi_mag_core_liquid_360r.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result13_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_13_2 = result13_2[0] + '.' 
+ result13_2[1]\r\n prezzo_virgola_13_2 =result13_2[0] + ',' + result13_2[1]\r\n print('.')\r\n print (prezzo_virgola_13_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'MSI-CoreLiquid-360R-Dissipatore-Compatibile/dp/B089QYGNFK/ref=sr_1_1?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=msi+mag+coreliquid+360r&qid=1611055475&s=pc&sr=1-1'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result13_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_13_3 = result13_3[0] + '.' + result13_3[1]\r\n prezzo_virgola_13_3 =result13_3[0] + ',' + result13_3[1]\r\n print('.')\r\n print (prezzo_virgola_13_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo13 = min (prezzo_finale_13_2, prezzo_finale_13_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo13 + ' €')\r\n print('Inviato!')\r\n \r\n database13 = open (\"Minimi13.txt\", \"r\").read()\r\n lista13 = re.findall(r'\\b\\d+\\b', database13)\r\n risultato13 = lista13[-2] + '.' + lista13[-1]\r\n risultato_virgola13 = lista13[-2] + ',' + lista13[-1]\r\n \r\n if minimo13 == risultato13:\r\n andamento13 = '😴 (fermo)'\r\n elif minimo13 < risultato13:\r\n andamento13 = '📉 (sta calando da ' + risultato_virgola13 + ' €)'\r\n else:\r\n andamento13 = '📈 (sta salendo da ' + risultato_virgola13 + ' €)'\r\n \r\n testo_messaggio13_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_13_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento13\r\n testo_messaggio13_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_13_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento13\r\n \r\n if prezzo_finale_13_3 == minimo13:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio13_3}')\r\n elif prezzo_finale_13_2 == minimo13:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio13_2}')\r\n \r\n database13 = open (\"Minimi13.txt\", \"a\")\r\n database13.write (\"\\n\" + minimo13)\r\n database13.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Ssd Crucial P2 M.2 Nvme 500 GB'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '200241235/crucial-p2-500gb-m-2.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result14_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_14_1 = result14_1[0] + '.' 
+ result14_1[1]\r\n prezzo_virgola_14_1 =result14_1[0] + ',' + result14_1[1]\r\n print('.')\r\n print (prezzo_virgola_14_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_hard-disk_ssd_crucial_p2_pcie_m.2_nvme_500gb.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result14_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_14_2 = result14_2[0] + '.' + result14_2[1]\r\n prezzo_virgola_14_2 =result14_2[0] + ',' + result14_2[1]\r\n print('.')\r\n print (prezzo_virgola_14_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo14 = min (prezzo_finale_14_1, prezzo_finale_14_2)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo14 + ' €')\r\n print('Inviato!')\r\n \r\n database14 = open (\"Minimi14.txt\", \"r\").read()\r\n lista14 = re.findall(r'\\b\\d+\\b', database14)\r\n risultato14 = lista14[-2] + '.' + lista14[-1]\r\n risultato_virgola14 = lista14[-2] + ',' + lista14[-1]\r\n \r\n if minimo14 == risultato14:\r\n andamento14 = '😴 (fermo)'\r\n elif minimo14 < risultato14:\r\n andamento14 = '📉 (sta calando da ' + risultato_virgola14 + ' €)'\r\n else:\r\n andamento14 = '📈 (sta salendo da ' + risultato_virgola14 + ' €)'\r\n \r\n testo_messaggio14_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_14_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento14\r\n testo_messaggio14_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_14_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento14\r\n \r\n if prezzo_finale_14_1 == minimo14:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio14_1}')\r\n elif prezzo_finale_14_2 == minimo14:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio14_2}')\r\n \r\n database14 = open (\"Minimi14.txt\", \"a\")\r\n database14.write (\"\\n\" + minimo14)\r\n database14.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\ndef main(event={}, context={}):\r\n name = 'Ssd Samsung Evo M.2 Nvme 500 GB'\r\n \r\n print ('XXXXXXXXXXXXXXX')\r\n print ('IDEALO')\r\n print('Inviando')\r\n item = '6143328/samsung-970-evo-500gb-m-2.html'\r\n url = BASE_URL_1 + item\r\n r1 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r1.text, 'html.parser')\r\n print('.')\r\n price = soup.find('a', class_='productOffers-listItemOfferPrice').text\r\n print('.')\r\n result15_1 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_15_1 = result15_1[0] + '.' + result15_1[1]\r\n prezzo_virgola_15_1 =result15_1[0] + ',' + result15_1[1]\r\n print('.')\r\n print (prezzo_virgola_15_1 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('TROVAPREZZI')\r\n print('Inviando')\r\n item = 'prezzo_hard-disk_ssd_nvme_samsung_evo_500gb.aspx'\r\n url = BASE_URL_2 + item\r\n r2 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r2.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', style= \"white-space:nowrap\").text\r\n print('.')\r\n result15_2 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_15_2 = result15_2[0] + '.' 
+ result15_2[1]\r\n prezzo_virgola_15_2 =result15_2[0] + ',' + result15_2[1]\r\n print('.')\r\n print (prezzo_virgola_15_2 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n \r\n print('AMAZON')\r\n print('Inviando')\r\n item = 'Samsung-MZ-V7E500BW-NVMe-Nero-Arancione/dp/B07CGGP7SV/ref=sr_1_2?__mk_it_IT=ÅMÅŽÕÑ&dchild=1&keywords=ssd+nvme+samsung+evo+500gb&qid=1611056278&s=pc&sr=1-2'\r\n url = BASE_URL_3 + item\r\n r3 = requests.get(url, headers=HEADER)\r\n soup = BeautifulSoup(r3.text, 'html.parser')\r\n print('.')\r\n price = soup.find('span', class_= \"a-size-medium a-color-price priceBlockBuyingPriceString\").text\r\n print('.')\r\n result15_3 = re.findall(r'\\b\\d+\\b', price)\r\n prezzo_finale_15_3 = result15_3[0] + '.' + result15_3[1]\r\n prezzo_virgola_15_3 =result15_3[0] + ',' + result15_3[1]\r\n print('.')\r\n print (prezzo_virgola_15_3 + ' €')\r\n print('Inviato!')\r\n print ('XXXXXXXXXXXXXXX')\r\n\r\n print('Inviando')\r\n print('.')\r\n minimo15 = min (prezzo_finale_15_1, prezzo_finale_15_2, prezzo_finale_15_3)\r\n print('.')\r\n print('.')\r\n print('Il prezzo più basso è: ' + minimo15 + ' €')\r\n print('Inviato!')\r\n \r\n database15 = open (\"Minimi15.txt\", \"r\").read()\r\n lista15 = re.findall(r'\\b\\d+\\b', database15)\r\n risultato15 = lista15[-2] + '.' + lista15[-1]\r\n risultato_virgola15 = lista15[-2] + ',' + lista15[-1]\r\n \r\n if minimo15 == risultato15:\r\n andamento15 = '😴 (fermo)'\r\n elif minimo15 < risultato15:\r\n andamento15 = '📉 (sta calando da ' + risultato_virgola15 + ' €)'\r\n else:\r\n andamento15 = '📈 (sta salendo da ' + risultato_virgola15 + ' €)'\r\n \r\n testo_messaggio15_1 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_15_1 + ' €' + \"*\" + \" su Idealo \" + \"\\n\" + andamento15\r\n testo_messaggio15_2 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_15_2 + ' €' + \"*\" + \" su Trovaprezzi \" + \"\\n\" + andamento15\r\n testo_messaggio15_3 = \"Vedi che \" + name + \" sta a \" + \"\\n\" + \"*\" + prezzo_virgola_15_3 + ' €' + \"*\" + \" su Amazon \" + \"\\n\" + andamento15\r\n \r\n if prezzo_finale_15_3 == minimo15:\r\n r3 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio15_3}')\r\n elif prezzo_finale_15_1 == minimo15:\r\n r1 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio15_1}')\r\n else:\r\n r2 = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_messaggio15_2}')\r\n \r\n database15 = open (\"Minimi15.txt\", \"a\")\r\n database15.write (\"\\n\" + minimo15)\r\n database15.close ()\r\n \r\n time.sleep(3)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\nprint('Inviando')\r\nprint('.')\r\ntesto_finale = \"⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️\"\r\nprint('.')\r\nf = requests.post(f'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={CHAT_ID}&parse_mode=Markdown&text={testo_finale}')\r\nprint('.')\r\nprint('Inviato!')","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":55373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"193502258","text":"from PIL import Image\n\nimport numpy\nimport cv2\nimport os\nimport pytesseract\n\n\ndef transcript(file_input):\n # convert the image to a NumPy array and then read it into\n # OpenCV format\n image = numpy.asarray(bytearray(file_input.read()), dtype=\"uint8\")\n 
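    # cv2.IMREAD_COLOR decodes the buffer into a 3-channel BGR array regardless of the source format\n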
image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n\n # convert the image to grayscale and flip the foreground\n # and background to ensure foreground is now \"white\" and\n # the background is \"black\"\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n\n # threshold the image, setting all foreground pixels to\n # 255 and all background pixels to 0\n thresh = cv2.threshold(\n gray, 0, 255,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n # grab the (x, y) coordinates of all pixel values that\n # are greater than zero, then use these coordinates to\n # compute a rotated bounding box that contains all\n # coordinates\n coords = numpy.column_stack(numpy.where(thresh > 0))\n angle = cv2.minAreaRect(coords)[-1]\n\n # the `cv2.minAreaRect` function returns values in the\n # range [-90, 0); as the rectangle rotates clockwise the\n # returned angle trends to 0 -- in this special case we\n # need to add 90 degrees to the angle\n if angle < -45:\n angle = -(90 + angle)\n # otherwise, just take the inverse of the angle to make\n # it positive\n else:\n angle = -angle\n\n # rotate the image to deskew it\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(\n image, M, (w, h),\n flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_REPLICATE)\n\n # load the rotated image and convert it to grayscale\n gray_rotated = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)\n gray_rotated = cv2.threshold(\n gray_rotated, 0, 255,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n # write the rotated grayscale image to disk as a temporary file so we can\n # apply OCR to it\n filename = \"/tmp/tmp_{}\".format(file_input.name)\n cv2.imwrite(filename, gray_rotated)\n\n # load the image as a PIL/Pillow image, apply OCR, and then delete\n # the temporary file\n text = pytesseract.image_to_string(Image.open(filename))\n os.remove(filename)\n return text\n","sub_path":"tesseractfield/tesseract.py","file_name":"tesseract.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"186738457","text":"def svoboda(vk):\n \"\"\"Выводит список свободных мест в вагоне\"\"\"\n sv = 0\n for kupe in vk:\n for i in range(1, 5):\n if kupe[str(i)] == None:\n sv += 1\n return sv\n\n\ndef supersvoboda(vk):\n \"\"\"Выводит полностью свободные купе\"\"\"\n ss = []\n tikkupe = 0\n for kupe in vk:\n tikkupe += 1\n tik = 1\n for i in range(1, 5):\n if kupe[str(i)] == None:\n tik += 1\n if tik == 4:\n ss.append(tikkupe)\n return ss\n\n\ndef sex(vk, s):\n \"\"\"Выводит купе в которых однополые пассажиры\"\"\"\n ss = []\n tikkupe = 0\n for kupe in vk:\n tikkupe += 1\n tik = 0\n tiks = 0\n for i in range(1, 5):\n tik += 1\n if kupe[str(i)] != s and kupe[str(i)] != None:\n break\n if kupe[str(i)] == s:\n tiks += 1\n if tik == 4 and tiks > 0:\n ss.append(tikkupe)\n return ss\n\n\ndef verch(vk):\n \"\"\"Выводит список пустых верхних мест\"\"\"\n ss = []\n tikkupe = 0\n for kupe in vk:\n tikkupe += 1\n tik = 0\n tikv = 0\n for i in range(1, 5):\n tik += 1\n if i % 2 == 0 and kupe[str(i)] == None:\n tikv += 1\n if tikv > 0 and tik == 4:\n ss.append(tikkupe)\n return ss\n\n\ndef niz(vk):\n \"\"\"Выводит список пустых нижних мест\"\"\"\n ss = []\n tikkupe = 0\n for kupe in vk:\n tikkupe += 1\n tik = 0\n tikv = 0\n for i in range(1, 5):\n tik += 1\n if i % 2 != 0 and kupe[str(i)] == None:\n tikv += 1\n if tikv > 0 and tik == 4:\n ss.append(tikkupe)\n return ss\n\n\nvk = [{\"1\": 
\"м\", \"2\": \"ж\", \"3\": \"м\", \"4\": \"ж\"},\n {\"1\": \"м\", \"2\": \"м\", \"3\": \"м\", \"4\": None},\n {\"1\": None, \"2\": None, \"3\": None, \"4\": None},\n {\"1\": \"ж\", \"2\": None, \"3\": \"ж\", \"4\": \"ж\"},\n {\"1\": \"м\", \"2\": \"м\", \"3\": \"м\", \"4\": None},\n {\"1\": \"м\", \"2\": \"м\", \"3\": None, \"4\": \"ж\"},\n {\"1\": None, \"2\": None, \"3\": \"м\", \"4\": \"ж\"},\n {\"1\": \"ж\", \"2\": \"ж\", \"3\": None, \"4\": None}]\n\nprint (\"Всего свободных мест: \", svoboda(vk))\nprint (\"Полностью свободные купе: \", supersvoboda(vk))\nprint (\"Полностью мужской сотав в купе: \", sex(vk, \"м\"))\nprint (\"Полностью женкский сотав в купе: \", sex(vk, \"ж\"))\nprint (\"Верхние места свободны в купе: \", verch(vk))\nprint (\"Нижние места свободны в купе: \", niz(vk))\n","sub_path":"05-chapter/5_2_12_vagonkupe.py","file_name":"5_2_12_vagonkupe.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647150759","text":"def reverse_file(oldfile, newfile):\n f = open(oldfile, \"r\")\n g = open(newfile, \"w\")\n t = f.readlines()\n f.close()\n t.reverse()\n for v in t:\n g.writelines(v)\n g.close()\nprint(reverse_file(\"zadatak\",\"novi_zadatak\"))","sub_path":"ex_13_11_1.py","file_name":"ex_13_11_1.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"26757844","text":"import requests\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport gspread\nfrom gspread_dataframe import set_with_dataframe\nimport time\n\ndef obtain_data(country):\n\tprint(country)\n\t\n\tx = requests.get('http://tarea-4.2021-1.tallerdeintegracion.cl/gho_{}.xml'.format(country))\n\tfile = open('gho_{}.xml'.format(country), 'w')\n\tfile.write(x.text)\n\tfile.close()\n\n\ttree = ET.parse('gho_{}.xml'.format(country))\n\troot = tree.getroot()\n\n\tcol = ['GHO', 'COUNTRY', 'SEX', 'YEAR', 'GHECAUSES', 'AGEGROUP', \n\t\t\t 'Display', 'Numeric', 'Low', 'High']\n\tdata = []\n\tfor fact in root.findall('Fact'):\n\t\tline = []\n\t\tfor c in col:\n\t\t\ttry:\n\t\t\t\tnode = fact.find(c).text #.replace('.',',')\n\t\t\texcept AttributeError:\n\t\t\t\tnode = None\n\t\t\tline.append(node)\n\t\tdata.append(line)\n\n\tdf = pd.DataFrame(data=data, columns=col)\n\tprint(df.shape)\n\treturn df\n\ndef export_spreadsheets(df):\n\tgc = gspread.service_account(filename='tarea4-angelacp-5be54505e12d.json')\n\tsh = gc.open_by_key('1F6NJ-ATTHuBmraMWI1S_ub4XA6mDb28YX503zDxLzB8')\n\tworksheet = sh.get_worksheet(0)\n\tworksheet.clear()\n\tset_with_dataframe(worksheet, df)\n\n# México MEX, Chile CHL, Japón JPN, Sudáfrica ZAF, Rusia RUS y España ESP\ndf_mex = obtain_data('MEX')\ndf_chl = obtain_data('CHL')\ndf_jpn = obtain_data('JPN')\ndf_zaf = obtain_data('ZAF')\ndf_rus = obtain_data('RUS')\ndf_esp = obtain_data('ESP')\n\ndf = pd.concat([df_mex, df_chl, df_jpn, df_zaf, df_rus, df_esp])\n\nexport_spreadsheets(df)\n\n","sub_path":"tarea4-angelacp/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"493579958","text":"from tkinter import *\nfrom tkinter import messagebox\nimport time\n\ndef licz():\n x=0\n m = int(Entry.get(n1)) # minuty\n s = int(Entry.get(n2))\n t=m*60\n t+=s\n for x in range(t):\n time.sleep(1)\n x+=1\n print(\"Waiting\")\n messagebox.showinfo(\"UWAGA\", \"JUZ CZAS\")\n\n\nroot = Tk()\nx= 
Entry(root)\nLabel(root, text=\"Ile minut:\").grid(row=0, column=0)\nLabel(root, text=\"Ile sekund:\").grid(row=1, column=0)\nn1= Entry(root)\nn1.grid(row=0, column=2)\nn2= Entry(root)\nn2.grid(row=1, column=2)\nButton(root, text=\"Start\", command=licz).grid(row=2, column=2)\ncheck = StringVar()\nroot.mainloop()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"57778832","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадание 5.2\n\nЗапросить у пользователя ввод IP-сети в формате: 10.1.1.0/24\n\nЗатем вывести информацию о сети и маске в таком формате:\n\nNetwork:\n10 1 1 0\n00001010 00000001 00000001 00000000\n\nMask:\n/24\n255 255 255 0\n11111111 11111111 11111111 00000000\n\nПроверить работу скрипта на разных комбинациях сеть/маска.\n\nПодсказка: Получить маску в двоичном формате можно так:\nIn [1]: \"1\" * 28 + \"0\" * 4\nOut[1]: '11111111111111111111111111110000'\n\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\"\"\"\n\nip = input('Ведите ip адрес и маску в формате ip/mask: ')\n\nip_split = ip.split('.')\n \nip_last = ip_split.pop(-1)\n\nip_last = ip_last.split('/')\n\nip_addr = '.'.join(ip_split) + '.' + ip_last[0]\n\nipa = ip_addr.split('.')\n\nmask = int(ip_last[1])\n\nmask0 = 32 - mask\n\nonezero_mask = '1' * mask + '0' * mask0\n\nfirst_oct = onezero_mask[0:8]\nsec_oct = onezero_mask[8:16]\nthird_oct = onezero_mask[16:24]\nforth_oct = onezero_mask[24:]\nall_mask = first_oct + '.' + sec_oct + '.' + third_oct + '.' + forth_oct\nall_mask = all_mask.split('.')\n\n#print(ip_addr)\n#print(mask)\n#print(mask0)\n#print(ipa)\n#print(onezero_mask)\n#print(all_mask)\n\nprint(f'''\nNetwork:\n{int(ipa[0]):<8} {int(ipa[1]):<8} {int(ipa[2]):<8} {int(ipa[3]):<8}\n{int(ipa[0]):08b} {int(ipa[1]):08b} {int(ipa[2]):08b} {int(ipa[3]):08b}''')\n\nprint(f'''\nMask:\n/{mask}\n{int(all_mask[0], 2):<8} {int(all_mask[1], 2):<8} {int(all_mask[2], 2):<8} {int(all_mask[3], 2):<8}\n{first_oct} {sec_oct} {third_oct} {forth_oct}\n''')\n","sub_path":"exercises/05_basic_scripts/task_5_2.py","file_name":"task_5_2.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"521679072","text":"#importing base module in order to prepare LSTM model\nimport build_model as bm\nimport pandas as pd\nimport numpy as np\nimport csv\n\nFILE_NAME = \"preprocessed_471.csv\"\nDISTANCE = 7 * 24 * 60\nTIME_BACK = 15\nTIME_FORWARD = 15\nSAMPLE_FREQUENCY = 5\nMODEL_ID = 1\nRUSH_SPEED = 40\n\n# index values of months (used for given start of sets for test and training)\nJAN = 1\nFEB = 2\nMAR = 3\nAPR = 4\nMAY = 5\nJUN = 6\nJUL = 7\nAUG = 8\nSEP = 9\nOCT = 10\nNOV = 11\nDEC = 12 \n\n\n\n\n#Read and scale data\ndata = bm.read_data(FILE_NAME)\n\n#CODE FOR CHECKING THE MISSING DATA\n'''\ndata = data[data.index.month == MARCH_INDEX]\ndata = data[data['Speed'] == -1]\nprint(data.groupby([data.index.day]).count())\n'''\n\n\ndata['Scaled'], sc = bm.scale_Data(data)\n\n#add one hots to data\ndata = bm.join_weekday_one_hot(data)\ndata = bm.join_daypart_one_hot(data)\n\n#drop the speed column which includes real speed values (scaled values will be used instead)\ndata.drop(['Speed'], axis = 'columns' ,inplace = True)\n\n#build trainig and test sets\nindexes = bm.find_indexes_of_month(data, APR)\n#indexes.extend(bm.find_indexes_of_month(data, MAY))\nx_train, y_train = bm.build_sets(data, 
indexes, DISTANCE, TIME_BACK,\n TIME_FORWARD, SAMPLE_FREQUENCY)\n\nindexes = bm.find_indexes_of_month(data, MAY)\nx_test, y_test = bm.build_sets(data, indexes, DISTANCE, TIME_BACK,\n TIME_FORWARD, SAMPLE_FREQUENCY)\n\n#one week from test set starting from may 2 (cause may 1 is holiday)\nx_test = x_test[2016:4032,:,:]\ny_test = y_test[2016:4032]\n\n#importing keras model and layers to construct LSTM model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Dropout\n\n#initializing regression model\nregressor = Sequential()\n\n#adding layer(s) to model\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (x_train.shape[1], x_train.shape[2])))\nregressor.add(Dropout(0.5))\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (x_train.shape[1], x_train.shape[2])))\nregressor.add(Dropout(0.5))\nregressor.add(LSTM(units = 33, return_sequences = True, input_shape = (x_train.shape[1], x_train.shape[2])))\n\nregressor.add(Flatten())\nregressor.add(Dense(units = 1))\n\n#compiling the model with mean_absolute_percentage_error and adam optimizer\nregressor.compile(optimizer = 'adam', loss = 'mean_absolute_percentage_error')\n#fitting model with training sets and validation set\nregressor.fit(x_train, y_train, epochs = 1, batch_size = 32, validation_data = (x_test, y_test))\nresults = regressor.predict(x_test)\n\n#extracting daily errors\ndaily_error = []\nfor i in range(0, results.shape[0] - 288, 288):\n error = bm.mean_absolute_percentage_error(y_test[i:i + 288], results[i:i + 288])\n daily_error.append(error)\n\n#extracting errors in rush hours\nunscaled = bm.inverse_scale(sc, results)\nrush_hour_errors = []\nfor i in range(0, results.shape[0] - 24, 12):\n trimmed_res = results[i:i + 24]\n if unscaled[i:i+24].mean() < RUSH_SPEED:\n error = bm.mean_absolute_percentage_error(y_test[i:i + 24], trimmed_res)\n rush_hour_errors.append(error)\n\n#saving daily errors and errors in rush hours\nnp.savetxt('daily_error_#'+str(MODEL_ID)+' .csv', daily_error, delimiter = \",\", fmt = '%s')\nnp.savetxt('rush_hours_errors_#'+str(MODEL_ID)+' .csv', rush_hour_errors, delimiter = \",\", fmt = '%s')\n\n\n#saving estimated values for test data\ndata = data[data.index.month == MAY]\ndata = pd.DataFrame(index = data.index[2016:4032], data = sc.inverse_transform(y_test.reshape(-1,1)), columns = ['actual speed'])\npreds = pd.DataFrame(data = sc.inverse_transform(results), columns = ['predicted speed'], index = data.index)\ndt = pd.concat([data, preds], axis = 1)\ndt.to_csv(\"Model_#\"+str(MODEL_ID)+\"_Estimations.csv\")\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237935128","text":"from discord.ext import commands\r\nimport requests\r\nimport discord\r\nimport os\r\nimport math\r\nimport datetime\r\nfrom staticmap import StaticMap, CircleMarker, IconMarker\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nfrom BravoOscarTango import get_prefix\r\n\r\navail_checklists = []\r\nfor checklists in os.listdir('./Utils/Checklists'):\r\n if checklists.endswith('.pdf'):\r\n avail_checklists.append(checklists[:-4])\r\n\r\nwith open('Private/WxAPI.txt', 'r') as x:\r\n xapi: str = x.read()\r\nhdr = {\"X-API-Key\": xapi}\r\n\r\nwith open('Private/FPD.com API.txt', 'r') as q:\r\n fpd_api: str = q.read()\r\n\r\nfltplan_id = 0\r\n\r\ncorrect_sim_types = ['xplane11', 'xplane', 'fsx', 'fs9', 'pmdg', 'pdf']\r\n\r\n\r\nclass 
FlightSim(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.command(aliases=[\"wx\"])\r\n @commands.cooldown(1, 3)\r\n async def metar(self, ctx, icao: str):\r\n\r\n global gustbool\r\n if len(icao) != 4:\r\n await ctx.reply(\"ICAO code must be composed of 4 characters\", mention_author=False)\r\n return\r\n try:\r\n req = requests.get('https://api.checkwx.com/metar/{}/decoded'.format(icao), headers=hdr, timeout=5)\r\n except Exception as err:\r\n await ctx.reply(\"Timeout error while requesting METAR, API servers are probably offline. Please try again later\", mention_author=False)\r\n return print(err)\r\n\r\n print(f\"Requested METAR for {icao.upper()}\")\r\n\r\n if req.status_code != 200:\r\n print(f\"Failed to retrieve METAR for {icao.upper()}, error {req.status_code}\")\r\n return await ctx.reply(\r\n f\"Error {req.status_code} while retrieving info for {icao.upper()}, either the website is down or the airport you chose is not available\",\r\n mention_author=False)\r\n else:\r\n\r\n resp = req.json()\r\n\r\n if resp[\"results\"] == 0:\r\n await ctx.send(\"No results, please check for typos or try a different ICAO\")\r\n return\r\n\r\n # with open('./Utils/wx.json', 'w') as f:\r\n # json.dump(resp, f, indent=4)\r\n\r\n layerint = len(resp[\"data\"][0][\"clouds\"]) # integer for number of cloud layers\r\n\r\n wxint = 0\r\n if \"conditions\" in resp[\"data\"][0]:\r\n wxint = len(resp[\"data\"][0][\"conditions\"]) # presence of wx conditions\r\n\r\n visbool = \"visibility\" in resp[\"data\"][0] # presence of vis data\r\n\r\n if 'wind' in resp['data'][0]:\r\n gustbool = \"gust_kts\" in resp[\"data\"][0][\"wind\"] # presence of gusts\r\n wind_bool = True\r\n else:\r\n wind_bool = False\r\n\r\n name = resp[\"data\"][0][\"station\"][\"name\"]\r\n if wind_bool:\r\n degrees = resp[\"data\"][0][\"wind\"][\"degrees\"]\r\n speed = resp[\"data\"][0][\"wind\"][\"speed_kts\"]\r\n temp = resp[\"data\"][0][\"temperature\"][\"celsius\"]\r\n dew = resp[\"data\"][0][\"dewpoint\"][\"celsius\"]\r\n humidity = resp[\"data\"][0][\"humidity\"][\"percent\"]\r\n inhg = resp[\"data\"][0][\"barometer\"][\"hg\"]\r\n hpa = resp[\"data\"][0][\"barometer\"][\"hpa\"]\r\n obs = resp[\"data\"][0][\"observed\"]\r\n cond = resp[\"data\"][0][\"flight_category\"]\r\n raw = resp[\"data\"][0][\"raw_text\"]\r\n\r\n points = []\r\n\r\n lat: float = resp[\"data\"][0][\"station\"][\"geometry\"][\"coordinates\"][1]\r\n long: float = resp[\"data\"][0][\"station\"][\"geometry\"][\"coordinates\"][0]\r\n\r\n points.append(tuple([lat, long]))\r\n\r\n marker_outline = CircleMarker((long, lat), 'white', 18)\r\n marker = CircleMarker((long, lat), '#0036FF', 12)\r\n icon_flag = IconMarker((long + 0.008, lat), './Utils/icon-flag.png', 12, 32)\r\n\r\n m = StaticMap(700, 300, 10, 10)\r\n m.add_marker(marker_outline)\r\n m.add_marker(marker)\r\n m.add_marker(icon_flag)\r\n\r\n image = m.render(zoom=8)\r\n image.save('Utils/metar.png')\r\n file = discord.File('Utils/metar.png')\r\n\r\n metar = discord.Embed(\r\n title=\"Requested METAR for {} - {}\".format(icao.upper(), name),\r\n description=\"Raw: {}\".format(raw),\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n if wind_bool:\r\n if not gustbool:\r\n metar.add_field(name=\"Wind:\", value=\"{}° at {} kts\".format(degrees, speed))\r\n else:\r\n gust = resp[\"data\"][0][\"wind\"][\"gust_kts\"]\r\n metar.add_field(name=\"Wind:\", value=\"{}° at {} kts, gusts {} kts\".format(degrees, speed, gust))\r\n else:\r\n metar.add_field(name=\"Wind:\", 
value=\"Calm\")\r\n\r\n metar.add_field(name=\"Temp/Dewpoint:\", value=\"{}°C/ {}°C\".format(temp, dew))\r\n metar.add_field(name=\"Altimeter:\", value=\"{} hPa/ {} inHg\".format(hpa, inhg))\r\n if visbool:\r\n vismil = resp[\"data\"][0][\"visibility\"][\"miles\"]\r\n vismet = resp[\"data\"][0][\"visibility\"][\"meters\"]\r\n metar.add_field(name=\"Visibility:\", value=\"{} meters/ {} miles\".format(vismet, vismil))\r\n\r\n metar.add_field(name=\"Humidity:\", value=\"{}%\".format(humidity))\r\n metar.set_thumbnail(\r\n url=\"https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif\")\r\n metar.set_footer(text=\"Observed at {}. Flight category: {}\".format(obs, cond))\r\n if wxint > 0:\r\n weather = resp[\"data\"][0][\"conditions\"][0][\"text\"]\r\n metar.add_field(name=\"Weather condition:\", value=\"{}\".format(weather))\r\n\r\n if layerint == 1:\r\n clouds = resp[\"data\"][0][\"clouds\"][0][\"text\"]\r\n if 'feet' in resp[\"data\"][0][\"clouds\"][0]:\r\n clofeet = resp[\"data\"][0][\"clouds\"][0][\"feet\"]\r\n metar.add_field(name=\"Cloud condition:\", value=\"{}ft {}\".format(clofeet, clouds))\r\n else:\r\n metar.add_field(name=\"Cloud condition:\", value=\"{}\".format(clouds))\r\n elif layerint == 2:\r\n clouds = resp[\"data\"][0][\"clouds\"][0][\"text\"]\r\n clofeet = resp[\"data\"][0][\"clouds\"][0][\"feet\"]\r\n clouds1 = resp[\"data\"][0][\"clouds\"][1][\"text\"]\r\n clofeet1 = resp[\"data\"][0][\"clouds\"][1][\"feet\"]\r\n metar.add_field(name=\"Cloud condition:\",\r\n value=\"{}ft {}/ {}ft {}\".format(clofeet, clouds, clofeet1, clouds1))\r\n elif layerint == 3:\r\n clouds = resp[\"data\"][0][\"clouds\"][0][\"text\"]\r\n clofeet = resp[\"data\"][0][\"clouds\"][0][\"feet\"]\r\n clouds1 = resp[\"data\"][0][\"clouds\"][1][\"text\"]\r\n clofeet1 = resp[\"data\"][0][\"clouds\"][1][\"feet\"]\r\n clouds2 = resp[\"data\"][0][\"clouds\"][2][\"text\"]\r\n clofeet2 = resp[\"data\"][0][\"clouds\"][2][\"feet\"]\r\n metar.add_field(name=\"Cloud condition:\",\r\n value=\"{}ft {}/ {}ft {}/ {}ft {}\".format(clofeet, clouds, clofeet1, clouds1, clofeet2,\r\n clouds2))\r\n else:\r\n metar.add_field(name=\"Cloud condition:\", value=\"Not Specified / Cloud data error\")\r\n\r\n metar.set_image(url='attachment://metar.png')\r\n\r\n await ctx.send(embed=metar, file=file)\r\n\r\n @commands.command(aliases=[\"chart\", \"ch\"])\r\n async def charts(self, ctx, icao1: str):\r\n\r\n prfx = get_prefix(client=self, message=ctx.message)\r\n\r\n with ctx.typing():\r\n print(f\"Requested charts for {icao1.upper()}\")\r\n\r\n if len(icao1) != 4:\r\n await ctx.reply(\"ICAO code must be composed of 4 characters\", mention_author=False)\r\n return\r\n\r\n url = f\"http://www.uvairlines.com/admin/resources/{icao1.upper()}.pdf\"\r\n\r\n try:\r\n request = requests.get(url, headers=hdr, timeout=5)\r\n except Exception as err:\r\n await ctx.reply(\r\n \"Timeout error while requesting charts, website is probably offline. 
Please try again later\",\r\n mention_author=False)\r\n return print(err)\r\n\r\n if request.status_code != 200:\r\n print(f'Failed to retrieve charts for {icao1.upper()}, error: {request.status_code}')\r\n await ctx.reply(\r\n f\"Error {request.status_code} while retrieving charts for {icao1.upper()}, either the website is down or the airport you chose is not available\",\r\n mention_author=False)\r\n else:\r\n\r\n charts = discord.Embed(\r\n title=f\"Requested charts for {icao1.upper()}\",\r\n description=f\"Download or view the charts at: [Link]({url})\",\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n charts.set_footer(\r\n text=\"You can also check the METAR for this airport with `{}metar {}`\".format(prfx, icao1.upper()))\r\n charts.set_thumbnail(\r\n url=\"https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif\")\r\n await ctx.reply(embed=charts, mention_author=False)\r\n return\r\n\r\n @commands.command(aliases=[\"cl\"])\r\n async def checklist(self, ctx, plane='avail'):\r\n\r\n prfx = get_prefix(client=self, message=ctx.message)\r\n\r\n if plane == 'avail':\r\n avplanes = ', '.join(avail_checklists)\r\n await ctx.reply(f'Checklists are currenly available for these planes: **{avplanes}**', mention_author=False)\r\n\r\n elif len(plane) != 4:\r\n await ctx.reply(\r\n f\"You must input a correct 4 character ICAO code for your plane, check available checklists with {prfx}cl\",\r\n mention_author=False)\r\n elif plane.upper() in avail_checklists:\r\n clfile = discord.File(f'./Utils/Checklists/{plane.upper()}.pdf')\r\n await ctx.reply(f\"Here's your requested checklist for the {plane.upper()}:\", file=clfile,\r\n mention_author=False)\r\n else:\r\n await ctx.reply(\r\n f\"Unfortunately the checklist you requested is not available, check a list of available checklists with {prfx}cl\",\r\n mention_author=False)\r\n\r\n @commands.command(aliases=['fl', 'flp', 'fltplan', 'fp'])\r\n @commands.cooldown(1, 10)\r\n async def flightplan(self, ctx, dep='lmfao', arr='lmfao'):\r\n\r\n if dep == 'lmfao' or arr == 'lmfao':\r\n await ctx.send('You need to specify two valid airport ICAO codes to create a flight plan')\r\n self.flightplan.reset_cooldown(ctx)\r\n return\r\n elif len(dep) != 4 or len(arr) != 4:\r\n await ctx.send('You need to specify two valid airport ICAO codes to create a flight plan')\r\n self.flightplan.reset_cooldown(ctx)\r\n return\r\n\r\n loading = discord.Embed(\r\n title='Your flight plan is loading',\r\n description='Creating flight plan...'\r\n )\r\n loading.set_thumbnail(\r\n url='https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif')\r\n loading.set_footer(text='Using data from the Flight Plan Database (https://flightplandatabase.com)')\r\n message = await ctx.send(embed=loading)\r\n\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'Authorization': 'Basic ' + fpd_api\r\n }\r\n data = '{\"fromICAO\":\"immensecock\",\"toICAO\":\"giganticpenis\"}'\r\n data = data.replace(\"immensecock\", dep)\r\n data = data.replace(\"giganticpenis\", arr)\r\n print(f'Requested flight plan: {dep.upper()} to {arr.upper()}, generating...')\r\n\r\n try:\r\n response = requests.post('https://api.flightplandatabase.com/auto/generate', headers=headers, data=data, timeout=5)\r\n except Exception as err:\r\n await ctx.reply(\"Timeout error while requesting flight plan, API servers are probably offline. 
Please try again later\", mention_author=False)\r\n return print(err)\r\n\r\n if response.status_code != 201:\r\n await message.delete()\r\n await ctx.send(\r\n f\"Error {response.status_code} while generating the flight plan, try with different ICAO codes```{response.json()['errors'][0]['message']}```\")\r\n self.flightplan.reset_cooldown(ctx)\r\n return\r\n\r\n plan = (str(response.json()))[7:14]\r\n print('Requesting...')\r\n\r\n loading1 = discord.Embed(\r\n title='Your flight plan is loading',\r\n description='Flight plan created successfully, uploading...'\r\n )\r\n loading1.set_thumbnail(\r\n url='https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif')\r\n loading1.set_footer(text='Using data from the Flight Plan Database (https://flightplandatabase.com)')\r\n await message.delete()\r\n message1 = await ctx.send(embed=loading1)\r\n\r\n retrieve = requests.get(f'https://api.flightplandatabase.com/plan/{plan}')\r\n if retrieve.status_code != 200:\r\n await message1.delete()\r\n await ctx.send(\r\n f'There was an error while processing the flight plan: **{retrieve.status_code}**, try with different ICAO codes')\r\n self.flightplan.reset_cooldown(ctx)\r\n return\r\n print('Received flight plan')\r\n\r\n flp = retrieve.json()\r\n flpid = flp['id']\r\n dep_icao = flp['fromICAO']\r\n arr_icao = flp['toICAO']\r\n dep_name = flp['fromName']\r\n arr_name = flp['toName']\r\n dist = int(flp['distance'])\r\n cr_alt = flp['maxAltitude']\r\n wpt_num = flp['waypoints'] # important\r\n link = f'https://flightplandatabase.com/plan/{flpid}'\r\n airac = flp['cycle']['ident']\r\n\r\n global fltplan_id\r\n\r\n fltplan_id = flpid\r\n\r\n i = 1\r\n route = f'***{dep_icao}***'\r\n route += ' ' + '[SID]'\r\n\r\n while i <= wpt_num:\r\n\r\n if flp['route']['nodes'][i]['ident'] != f'{arr_icao}':\r\n\r\n if flp['route']['nodes'][i]['ident'] == flp['route']['nodes'][i + 1]['ident']:\r\n i += 1\r\n continue\r\n elif flp['route']['nodes'][i]['via'] is None:\r\n del flp['route']['nodes'][i]['via']\r\n flp['route']['nodes'][i]['via'] = {}\r\n flp['route']['nodes'][i]['via']['ident'] = 'None'\r\n if i != 1:\r\n if flp['route']['nodes'][i - 1]['via']['ident'] == 'None':\r\n route += f\" **DCT** {flp['route']['nodes'][i]['ident']}\"\r\n else:\r\n route += f\" {flp['route']['nodes'][i - 1]['ident']} **DCT** \" + \\\r\n flp['route']['nodes'][i]['ident']\r\n else:\r\n route += f\" {flp['route']['nodes'][i]['ident']}\"\r\n i += 1\r\n continue\r\n elif flp['route']['nodes'][i]['via']['ident'] != flp['route']['nodes'][i - 1]['via']['ident']:\r\n if flp['route']['nodes'][i - 1]['via']['ident'] == 'None':\r\n route += ' ' + '**DCT**' + ' ' + flp['route']['nodes'][i]['ident'] + ' ' + '**' + \\\r\n flp['route']['nodes'][i]['via']['ident'] + '**'\r\n else:\r\n route += ' ' + flp['route']['nodes'][i]['ident'] + ' ' + '**' + flp['route']['nodes'][i]['via'][\r\n 'ident'] + '**'\r\n i += 1\r\n continue\r\n else:\r\n i += 1\r\n else:\r\n route += ' ' + flp['route']['nodes'][i - 1]['ident'] + ' ' + '[STAR]' + ' '\r\n route += f'***{arr_icao}***'\r\n break\r\n\r\n base_url = 'https://www.flightplandatabase.com/plan/{fltplan_id}/download/'\r\n\r\n flp_embed = discord.Embed(\r\n title=f\"Here's your flight plan: **{dep_icao} → {arr_icao}**\",\r\n description=f\"Flight plan: {route}\",\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n flp_embed.set_thumbnail(\r\n url='https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif')\r\n 
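        # populate the embed with the route summary fields and the per-simulator download links\r\n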
flp_embed.add_field(name='Departure Airport:', value=dep_name)\r\n flp_embed.add_field(name='Arrival Airport:', value=arr_name)\r\n flp_embed.add_field(name='Distance / Cruise Altitude:', value=f'{dist}nm/{cr_alt}ft')\r\n flp_embed.add_field(name='Downloads:', value=f'[X-PLane 11]({base_url}xplane11) | [X-Plane 10/9]({base_url}xplane) | [FSX]({base_url}fsx) | [FS9]({base_url}fs9) | [PMDG]({base_url}pmdg) | [PDF]({base_url}pdf)')\r\n flp_embed.add_field(name='Link to full flight plan:', value=link)\r\n flp_embed.set_footer(\r\n text=f'Using data from the Flight Plan Database (https://flightplandatabase.com), AIRAC Cycle: {airac[3:]}')\r\n\r\n print('Success!')\r\n await message1.delete()\r\n await ctx.send(embed=flp_embed)\r\n\r\n # @commands.command()\r\n # async def download(self, ctx, sim_type='None'):\r\n #\r\n # prfx = get_prefix(client=self, message=ctx.message)\r\n #\r\n # if fltplan_id == 0:\r\n # await ctx.reply(f'You need to first create a flight plan with {prfx}flightplan', mention_author=False)\r\n # return\r\n # elif sim_type.lower() not in correct_sim_types:\r\n # await ctx.reply(\r\n # 'You need to specify a correct file format, supported formats are: xplane11 [X-Plane 11], xplane [X-Plane 10], fsx [FSX], fs9 [FS2004], pmdg [PMDG], pdf [PDF]',\r\n # mention_author=False)\r\n # return\r\n # else:\r\n # download_embed = discord.Embed(\r\n # title=f'Requested flight plan for {sim_type.upper()}',\r\n # description=f'To download the file click [here](https://www.flightplandatabase.com/plan/{fltplan_id}/download/{sim_type.lower()})',\r\n # colour=discord.Colour.from_rgb(97, 0, 215)\r\n # )\r\n # download_embed.set_thumbnail(\r\n # url='https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif')\r\n # download_embed.set_footer(text=f'Using data from the Flight Plan Database (https://flightplandatabase.com)')\r\n # await ctx.reply(embed=download_embed, mention_author=False)\r\n\r\n @commands.command()\r\n async def info(self, ctx, icao: str = 'NO INPUT'):\r\n\r\n prfx = get_prefix(client=self, message=ctx.message)\r\n\r\n if len(icao) != 4:\r\n await ctx.reply(\"Please input a correct ICAO code\", mention_author=False)\r\n return\r\n\r\n try:\r\n request = requests.get(f'https://api.flightplandatabase.com/nav/airport/{icao.upper()}', timeout=5)\r\n except Exception as err:\r\n await ctx.reply(\"Timeout error while requesting info, API servers are probably offline. 
Please try again later\", mention_author=False)\r\n return print(err)\r\n\r\n if request.status_code != 200:\r\n print(f'Failed to fetch info for {icao.upper()}, error {request.status_code}')\r\n return await ctx.reply(\r\n f\"Error {request.status_code} while retrieving info for {icao.upper()}, either the website is down or the airport you chose is not available ```{request.json()['message']}```\",\r\n mention_author=False)\r\n\r\n else:\r\n async with ctx.typing():\r\n info = request.json()\r\n # with open('./Utils/info.json', 'w') as f:\r\n # json.dump(info, f, indent=4)\r\n\r\n json_icao = info['ICAO']\r\n iata = info['IATA']\r\n name = info['name']\r\n lat: float = info[\"lat\"]\r\n lon: float = info[\"lon\"]\r\n runway_int = info['runwayCount']\r\n elevation = int(info['elevation'])\r\n mag_var = round(info['magneticVariation'], 2)\r\n timezone = info['timezone']['name']\r\n metar = info['weather']['METAR']\r\n taf = info['weather']['TAF']\r\n\r\n offset1 = info['timezone']['offset'] / 3600\r\n\r\n if offset1 == 1:\r\n offset1 = 0.0\r\n\r\n offset = str(offset1)\r\n offset = offset.replace('.', ':')\r\n offset += '0'\r\n if -10 < offset1 < 10:\r\n if '-' not in offset:\r\n offset = '+0' + offset\r\n else:\r\n offset = list(offset)\r\n del offset[0]\r\n offset = \"\".join(offset)\r\n offset = '-0' + offset\r\n if '-' not in offset and '+' not in offset:\r\n offset = '+' + offset\r\n offset = list(offset)\r\n if len(offset) == 7:\r\n del offset[6]\r\n if offset[4] != 0:\r\n if offset[4] == '5':\r\n offset[4] = '3'\r\n elif offset[4] == '7':\r\n offset[4] = '4'\r\n offset = \"\".join(offset)\r\n\r\n epoch = datetime.datetime.utcnow().timestamp()\r\n if info['timezone']['offset'] != 3600:\r\n local = epoch + info['timezone']['offset']\r\n else:\r\n local = epoch\r\n utc = datetime.datetime.fromtimestamp(epoch).strftime('%d-%m-%Y %H:%M:%S')\r\n local = datetime.datetime.fromtimestamp(local).strftime('%d-%m-%Y %H:%M:%S')\r\n\r\n marker_outline = CircleMarker((lon, lat), 'white', 18)\r\n marker = CircleMarker((lon, lat), '#0036FF', 12)\r\n m = StaticMap(500, 300, 10, 10, url_template='https://tiles.wmflabs.org/osm-no-labels/{z}/{x}/{y}.png')\r\n m.add_marker(marker_outline)\r\n m.add_marker(marker)\r\n\r\n image = m.render(zoom=13)\r\n image.save('Utils/info.png')\r\n\r\n info_embed = discord.Embed(\r\n title=f'{json_icao} - {iata}',\r\n description=name,\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n\r\n runways = ''\r\n\r\n i = 0\r\n # j = 0\r\n\r\n if runway_int == 1:\r\n runways += f\"{info['runways'][i]['ends'][0]['ident']} / {info['runways'][i]['ends'][1]['ident']}\"\r\n len_width = f\"{int(info['runways'][0]['length'])}ft x {int(info['runways'][0]['width'])}ft\"\r\n hdg = int(info['runways'][0]['bearing'])\r\n if hdg + 180 > 360:\r\n rwy_hdg = f'{hdg} - {hdg - 180}'\r\n else:\r\n rwy_hdg = f'{hdg}°/{hdg + 180}°'\r\n info_embed.add_field(name='Runways:', value=runways)\r\n info_embed.add_field(name='Airport Elevation:', value=f'{str(elevation)}ft')\r\n info_embed.add_field(name='Magnetic Variation:', value=f'{str(mag_var)}°')\r\n info_embed.add_field(name='Timezone:', value=f'{offset} UTC, {timezone}')\r\n # for ils in info['runways'][i]['navaids']:\r\n # if ils == \"LOC-ILS\":\r\n # print(info['runways'][i]['navaids'][j]['name'])\r\n # break\r\n # else:\r\n # j += 1\r\n img = Image.open('Utils/info.png')\r\n draw = ImageDraw.Draw(img)\r\n font = ImageFont.truetype('./Utils/font.ttf', 25)\r\n draw.text((0, 275), f'{runways} - {len_width} - {rwy_hdg}', fill=(0, 0, 0), 
font=font)\r\n img.save('Utils/info_done.png')\r\n else:\r\n img = Image.open('Utils/info.png')\r\n draw = ImageDraw.Draw(img)\r\n font = ImageFont.truetype('./Utils/font.ttf', 20)\r\n while i < runway_int:\r\n px = 280 - i * 19\r\n curr_rwy = f\"{info['runways'][i]['ends'][0]['ident']} / {info['runways'][i]['ends'][1]['ident']}\"\r\n runways += f\"{info['runways'][i]['ends'][0]['ident']} / {info['runways'][i]['ends'][1]['ident']} - \"\r\n len_width = f\"{int(info['runways'][i]['length'])}ft x {int(info['runways'][i]['width'])}ft\"\r\n\r\n draw.text((0, px), f'{curr_rwy} - {len_width}', (0, 0, 0), font=font)\r\n i += 1\r\n info_embed.add_field(name='Runways:', value=runways[:-3])\r\n info_embed.add_field(name='Airport Elevation:', value=f'{str(elevation)}ft')\r\n info_embed.add_field(name='Magnetic Variation:', value=f'{str(mag_var)}°')\r\n info_embed.add_field(name='Timezone:', value=f'{offset} UTC, {timezone}')\r\n img.save('Utils/info_done.png')\r\n\r\n info_embed.add_field(name='Zulu/Local Time:', value=f'Zulu: {utc}\\nLocal: {local}')\r\n info_embed.add_field(name='METAR:', value=metar)\r\n info_embed.add_field(name='TAF:', value=taf)\r\n info_embed.set_thumbnail(\r\n url='https://cdn.discordapp.com/attachments/356779184393158657/729351510974267513/plane-travel-icon-rebound2.gif')\r\n\r\n info_embed.set_image(url='attachment://info_done.png')\r\n info_embed.set_footer(\r\n text=f'Using data from the Flight Plan Database (https://flightplandatabase.com), check the full weather for this airport with {prfx}metar {icao.upper()}')\r\n\r\n file = discord.File('Utils/info_done.png')\r\n\r\n await ctx.send(embed=info_embed, file=file)\r\n\r\n @commands.command()\r\n async def notam(self, ctx, icao: str = 'NO INPUT', page=1):\r\n\r\n if len(icao) != 4:\r\n return await ctx.reply(\"Please input a correct ICAO code\", mention_author=False)\r\n\r\n if not isinstance(page, int):\r\n return await ctx.reply('Please insert a number for the embed page', mention_author=False)\r\n\r\n try:\r\n request = requests.get(f'https://api.autorouter.aero/v1.0/notam?itemas=[\"{icao.upper()}\"]&offset=0&limit=200', timeout=5)\r\n except Exception as err:\r\n await ctx.reply(\r\n \"Timeout error while requesting NOTAMs, API servers are probably offline. 
Please try again later\",\r\n mention_author=False)\r\n return print(err)\r\n\r\n if request.status_code != 200:\r\n print(f'Failed to fetch info for {icao.upper()}, error {request.status_code}')\r\n return await ctx.reply(\r\n f\"Error {request.status_code} while retrieving NOTAMs for {icao.upper()}, either the website is down or the airport you chose is not available\",\r\n mention_author=False)\r\n\r\n else:\r\n\r\n prfx = get_prefix(client=self, message=ctx.message)\r\n\r\n notam = request.json()\r\n\r\n notamemb = discord.Embed(\r\n title=f'NOTAM list for **{icao.upper()}**',\r\n description=f\"Number of active NOTAMs: {notam['total']}\",\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n\r\n # with open('./Utils/notam.json', 'w') as f:\r\n # json.dump(notam, f, indent=4)\r\n\r\n total = notam['total']\r\n\r\n numb = 0\r\n page -= 1\r\n i = page * 5\r\n\r\n for _ in notam['rows']:\r\n numb += 1\r\n\r\n total_pages = math.ceil(total / 5)\r\n\r\n if total_pages == 0:\r\n return await ctx.reply(f'No NOTAMs were found for {icao.upper()}!', mention_author=False)\r\n\r\n if page + 1 > total_pages:\r\n return await ctx.reply(f'Please select a page number between 1 and {total_pages}', mention_author=False)\r\n\r\n try:\r\n while page * 5 <= i < (page * 5) + 5:\r\n lon = (notam['rows'][i]['lon'] * 90) / (1 << 30)\r\n lat = (notam['rows'][i]['lat'] * 90) / (1 << 30)\r\n radius = str(notam['rows'][i]['radius'])\r\n lon_str = str(int(abs(lon)))\r\n lat_str = str(int(abs(lat)))\r\n\r\n if len(radius) < 3:\r\n radius = '0' + radius\r\n if len(radius) < 3:\r\n radius = '0' + radius\r\n\r\n if len(lon_str) == 2:\r\n lon_str = '0' + lon_str\r\n elif len(lon_str) == 1:\r\n lon_str = '00' + lon_str\r\n\r\n elif len(lat_str) == 1:\r\n lat_str = '0' + lat_str\r\n\r\n if lon < 0:\r\n lon = abs(lon)\r\n lon_str += str(int(round((lon - int(lon)) * 60, 0)))\r\n lon_str += 'W'\r\n else:\r\n lon_str += str(int(round((lon - int(lon)) * 60, 0)))\r\n lon_str += 'E'\r\n\r\n if lat < 0:\r\n lat = abs(lat)\r\n lat_str += str(int(round((lat - int(lat)) * 60, 0)))\r\n lat_str += 'S'\r\n else:\r\n lat_str += str(int(round((lat - int(lat)) * 60, 0)))\r\n lat_str += 'N'\r\n\r\n coordinates = lat_str + lon_str + radius\r\n\r\n first_line = str(notam['rows'][i]['series']) + str(notam['rows'][i]['number']) + '/' + str(\r\n notam['rows'][i]['year']) + ' ' + 'NOTAM' + str(notam['rows'][i]['type']) + ' '\r\n\r\n if notam['rows'][i]['lower'] == 0:\r\n notam['rows'][i]['lower'] = '000'\r\n\r\n line_q = 'Q) ' + notam['rows'][i]['fir'] + '/' + notam['rows'][i]['code23'] + notam['rows'][i][\r\n 'code45'] + '/' + notam['rows'][i]['traffic'] + '/' + notam['rows'][i]['purpose'] + '/' + \\\r\n notam['rows'][i]['scope'] + '/' + str(notam['rows'][i]['lower']) + '/' + str(\r\n notam['rows'][i]['upper']) + '/' + coordinates\r\n\r\n if notam['rows'][i]['estimation'] is not None:\r\n line_abc = f'A) {icao.upper()}' + ' B) ' + datetime.datetime.fromtimestamp(\r\n notam['rows'][i]['startvalidity']).strftime(\r\n '%Y-%m-%d %H:%M') + ' C) ' + datetime.datetime.fromtimestamp(\r\n notam['rows'][i]['endvalidity']).strftime('%Y-%m-%d %H:%M') + ' ' + notam['rows'][i][\r\n 'estimation']\r\n else:\r\n line_abc = f'A) {icao.upper()}' + ' B) ' + datetime.datetime.fromtimestamp(\r\n notam['rows'][i]['startvalidity']).strftime(\r\n '%Y-%m-%d %H:%M') + ' C) ' + datetime.datetime.fromtimestamp(\r\n notam['rows'][i]['endvalidity']).strftime('%Y-%m-%d %H:%M')\r\n\r\n if notam['rows'][i]['referredseries'] is not None:\r\n first_line += 
str(notam['rows'][i]['referredseries']) + str(\r\n notam['rows'][i]['referrednumber']) + '/' + str(notam['rows'][i]['referredyear'])\r\n\r\n notamemb.add_field(\r\n name=f\"{notam['rows'][i]['id']} | Modified at {datetime.datetime.fromtimestamp(notam['rows'][i]['modified']).strftime('%Y-%m-%d %H:%M')}\",\r\n value=f'{first_line}\\n'\r\n f'{line_q}\\n'\r\n f'{line_abc}\\n'\r\n f\"E) {notam['rows'][i]['iteme']}\", inline=False)\r\n i += 1\r\n\r\n except IndexError:\r\n print('index')\r\n\r\n notamemb.set_thumbnail(\r\n url='https://cdn.discordapp.com/attachments/356779184393158657/729351510974267513/plane-travel-icon-rebound2.gif')\r\n notamemb.set_footer(\r\n text=f'Page {page + 1} of {total_pages}, use {prfx}notam {icao.upper()} [page number] to select different pages')\r\n\r\n try:\r\n await ctx.send(embed=notamemb)\r\n except discord.HTTPException:\r\n await ctx.reply(\r\n f'Unfortunately this page has more characters than the max allowed limit of 2000, please try with a different ICAO or a different page',\r\n mention_author=False)\r\n\r\n @commands.command(aliases=['sb'])\r\n async def simbrief(self, ctx, username: str):\r\n\r\n request = requests.get(f'https://www.simbrief.com/api/xml.fetcher.php?username={username}&json=1')\r\n if request.status_code == 400:\r\n print(f'Failed to fetch latest flight plan for {username}, error {request.status_code}')\r\n error = request.json()\r\n return await ctx.reply(\r\n f\"Error {request.status_code} while retrieving flight plan for {username}, make sure you entered a correct username ```{error['fetch']['status']}```\",\r\n mention_author=False)\r\n\r\n else:\r\n\r\n info = request.json()\r\n # with open('./Utils/simbrief.json', 'w') as f:\r\n # json.dump(info, f, indent=4)\r\n # print(info)\r\n\r\n i = 0\r\n toc = False\r\n\r\n for _ in info:\r\n if info['navlog']['fix'][i]['name'] == 'TOP OF CLIMB':\r\n toc = True\r\n break\r\n else:\r\n i += 1\r\n\r\n desc = f\"Generated at: **{datetime.datetime.fromtimestamp(int(info['params']['time_generated'])).strftime('%H:%M %Y-%m-%d')}** | AIRAC: **{info['params']['airac']}** | Units: **{info['params']['units']}**\\n\\n\" \\\r\n f\"FL STEPS: **{info['general']['stepclimb_string']}**\\n\\n\" \\\r\n f\"ROUTE: **{info['origin']['icao_code']}/{info['origin']['plan_rwy']}** {info['general']['route']} **{info['destination']['icao_code']}/{info['destination']['plan_rwy']}**\"\r\n\r\n fuel = f\"TRIP {info['fuel']['enroute_burn']}\\n\" \\\r\n f\"CONT {info['fuel']['contingency']}\\n\" \\\r\n f\"ALTN {info['fuel']['alternate_burn']}\\n\" \\\r\n f\"FINRES {info['fuel']['reserve']}\\n\" \\\r\n f\"EXTRA {info['fuel']['extra']}\\n\" \\\r\n f\"TAXI {info['fuel']['taxi']}\\n\" \\\r\n f\"BLOCK FUEL {info['fuel']['plan_ramp']}\\n\"\r\n\r\n payload = f\"PAX {info['weights']['pax_count']}\\n\" \\\r\n f\"CARGO {round((int(info['weights']['cargo']) / 1000), 1)}\\n\" \\\r\n f\"PAYLOAD {round((int(info['weights']['payload']) / 1000), 1)}\\n\" \\\r\n f\"ZFW {round((int(info['weights']['est_zfw']) / 1000), 1)}\\n\" \\\r\n f\"FUEL {round((int(info['fuel']['plan_ramp']) / 1000), 1)}\\n\" \\\r\n f\"TOW {round((int(info['weights']['est_tow']) / 1000), 1)}\\n\" \\\r\n f\"LAW {round((int(info['weights']['est_ldw']) / 1000), 1)}\\n\"\r\n if toc:\r\n general = f\"Cost Index: {info['general']['cruise_profile']}\\n\" \\\r\n f\"Route Distance: {info['general']['route_distance']}nm\\n\" \\\r\n f\"Average Wind: {info['general']['avg_wind_dir']}°/{info['general']['avg_wind_comp']}kt\\n\" \\\r\n f\"Aircraft: {info['aircraft']['name']}\\n\" 
\\\r\n f\"Est. Time Enroute: {round((int(info['times']['est_time_enroute']) / 3600), 2)}hrs\\n\" \\\r\n f\"TOC Conditions: {info['navlog']['fix'][i]['wind_dir']}°/{info['navlog']['fix'][i]['wind_spd']}kt | OAT: {info['navlog']['fix'][i]['oat']} | ISA DEV: {info['navlog']['fix'][i]['oat_isa_dev']}\\n\"\r\n else:\r\n general = f\"Cost Index: {info['general']['cruise_profile']}\\n\" \\\r\n f\"Route Distance: {info['general']['route_distance']}\\n\" \\\r\n f\"Average Wind: {info['general']['avg_wind_dir']}°/{info['general']['avg_wind_comp']}kt\\n\" \\\r\n f\"Aircraft: {info['aircraft']['name']}\\n\" \\\r\n f\"Est. Time Enroute: {info['times']['est_time_enroute']}\\n\"\r\n\r\n directory = info['files']['directory']\r\n files = f\"[X-Plane 11]({directory}{info['fms_downloads']['xpe']['link']}) | [MSFS 2020]({directory}{info['fms_downloads']['mfs']['link']}) | \" \\\r\n f\"[FSX/P3D]({directory}{info['fms_downloads']['fsx']['link']}) | [PMDG]({directory}{info['fms_downloads']['pmr']['link']})\"\r\n\r\n sb_embed = discord.Embed(\r\n title=f\"Retrieved SimBrief flight plan: **{info['origin']['icao_code']} → {info['destination']['icao_code']}** ALTN: {info['alternate']['icao_code']}\",\r\n description=desc,\r\n colour=discord.Colour.from_rgb(97, 0, 215)\r\n )\r\n\r\n # sb_embed.set_thumbnail(\r\n # url=\"https://cdn.discordapp.com/attachments/651086904925749252/802617703809548298/ezgif.com-gif-maker_3.gif\")\r\n sb_embed.set_footer(\r\n text='If you prefer to keep your username private, you can use this command in a private chat')\r\n sb_embed.add_field(name='Fuel:', value=fuel)\r\n sb_embed.add_field(name='Weights:', value=payload)\r\n sb_embed.add_field(name='Info:', value=general)\r\n sb_embed.add_field(name='Departure METAR:', value=info['weather']['orig_metar'], inline=False)\r\n sb_embed.add_field(name='Destination METAR:', value=info['weather']['dest_metar'], inline=False)\r\n sb_embed.add_field(name='FMC Files:', value=files, inline=False)\r\n try:\r\n sb_embed.set_image(url=f\"{info['images']['directory']}{info['images']['map'][0]['link']}\")\r\n except Exception as err:\r\n print(err)\r\n await ctx.send(embed=sb_embed)\r\n return print('Sent SimBrief flight plan')\r\n\r\n @flightplan.error\r\n async def flp_error(self, ctx, error):\r\n if isinstance(error, discord.ext.commands.CommandOnCooldown):\r\n await ctx.reply(\r\n 'Command on cooldown, please wait a few seconds or wait for the current request to be processed',\r\n mention_author=False, delete_after=10)\r\n\r\n @metar.error\r\n async def metar_error(self, ctx, error):\r\n if isinstance(error, discord.ext.commands.CommandOnCooldown):\r\n await ctx.reply(\r\n 'Command on cooldown, please wait a few seconds or wait for the current request to be processed',\r\n mention_author=False, delete_after=5)\r\n\r\n\r\ndef setup(client):\r\n client.add_cog(FlightSim(client))\r\n","sub_path":"Cogs/FlightSim.py","file_name":"FlightSim.py","file_ext":"py","file_size_in_byte":39836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"625538418","text":"import pandas as pd \nimport os,sys\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport datetime as dt\n\n\n#===============define data path =============================#\nrootPath = os.getcwd()\n\nsales= pd.read_csv(os.path.join(rootPath,\"sales_train_validation.csv\"))\nsell_prices= pd.read_csv(os.path.join(rootPath,\"sell_prices.csv\"))\ncalendar= pd.read_csv(os.path.join(rootPath,\"calendar.csv\"))\n\ndef 
reduce_mem_usage(df, verbose=True):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2 \n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics: \n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64) \n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max< np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64) \n end_mem = df.memory_usage().sum() / 1024**2\n if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df\n\ndef create_dataset(dataset, look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-look_back-1):\n a = dataset[i:(i+look_back), 0]\n dataX.append(a)\n dataY.append(dataset[i + look_back, 0])\n return np.array(dataX), np.array(dataY)\n\ndef dataSet():\n global sales,sell_prices,calendar\n train = reduce_mem_usage(sales)\n calendar = reduce_mem_usage(calendar)\n date_index = calendar['date']\n dates = date_index[0:1913]\n dates_list = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n\n train['item_store_id'] = train.apply(lambda x: x['item_id']+'_'+x['store_id'],axis=1)\n DF_Sales = train.loc[:,'d_1':'d_1913'].T\n DF_Sales.columns = train['item_store_id'].values\n DF_Sales = pd.DataFrame(DF_Sales).set_index([dates_list])\n DF_Sales.index = pd.to_datetime(DF_Sales.index)\n\n halfData=int(len(DF_Sales.columns)/2)\n firstHalfDF=DF_Sales[DF_Sales.columns[0:halfData]]\n data = np.array(firstHalfDF)\n scaler = MinMaxScaler(feature_range=(0, 1))\n dataset = scaler.fit_transform(data.reshape(-1, 1))\n\n train_size = int(len(dataset) * 0.7)\n test_size = len(dataset) - train_size\n train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\n X_train,y_train = create_dataset(train,28)\n X_test,y_test = create_dataset(test,28)\n return X_train,y_train,X_test,y_test \n\n\ndef createModel(trainX, y_train): \n import tensorflow as tf\n from tensorflow.keras.models import Sequential\n from tensorflow.keras.layers import Dense\n from tensorflow.keras.layers import LSTM\n from keras.callbacks import ModelCheckpoint, EarlyStopping\n\n model = Sequential()\n model.add(LSTM(512, input_shape=(28,1)))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',metrics=[\"accuracy\"])\n history=model.fit(trainX, y_train, epochs=30, batch_size=1, verbose=2,callbacks=[ckptCallback])\n\n traning_pred = model.predict(trainX)\n train_pred = pd.Series(scaler.inverse_transform(traning_pred).flatten())\n print(train_pred)\n\n plt.figure(num=None, figsize=(19, 6), dpi=80, facecolor='w', edgecolor='k')\n plt.plot(train_pred)\n plt.plot(train)\n plt.legend([\"Predicted\",\"Real\"])\n plt.savefig('1.png')\n\n\n test_pred = scaler.inverse_transform(model.predict(testX)).flatten()\n plt.figure(num=None, figsize=(19, 6), dpi=80, 
facecolor='w', edgecolor='k')\n plt.plot(test_pred)\n plt.plot(test)\n plt.legend([\"Predicted\",\"Real\"])\n plt.savefig('2.png')\n\nif __name__ == '__main__':\n X_train,y_train,X_test,y_test= dataSet()\n #below start to train process...\n createModel(trainX, y_train)","sub_path":"0427_dataset.py","file_name":"0427_dataset.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"443409083","text":"from __future__ import absolute_import\n\n\ndef test_index_page_loads(testapp):\n response = testapp.get('/')\n assert response.status_code == 200\n\n\ndef test_form_loads(testapp):\n response = testapp.get('/handbag')\n assert 'form' in response\n\n\ndef test_add_and_delete_metadata_field(testapp):\n handbag_page = testapp.get('/handbag')\n form = handbag_page.form\n assert 'metadata-item' in handbag_page\n delete = form.submit('delMetadata')\n assert 'metadata-item' not in delete\n add = form.submit('addMetadata')\n assert 'metadata-item' in add\n\n\ndef test_submit_valid_form(app, testapp):\n form = testapp.get('/handbag').form\n form['name'] = 'test'\n form['destinationName'] = 'test'\n form['destinationUrl'] = 'file:///safadf'\n form['destinationEmail'] = 'test@test.com'\n form['bagNameGenerator'] = 'test'\n form['packageFormat'] = 'zip'\n results = form.submit()\n print(results)\n assert results.status_code == 302\n","sub_path":"tests/test_handbag_form.py","file_name":"test_handbag_form.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"239512205","text":"from rest_framework import filters\n\n\nclass DjangoMappingFilterBackend(filters.DjangoFilterBackend):\n \"\"\"\n A filter backend that uses django-filter that fixes order_by.\n\n This backend supports additional attribute of a FilterSet named `order_by_mapping`.\n It maps ordering fields from user friendly ones to the ones that depend on\n the model relation innards.\n\n See https://github.com/alex/django-filter/issues/178#issuecomment-62129586\n\n Example usage:\n\n # models.py\n\n class Project(models.Model):\n name = models.CharField(max_length=10)\n\n\n class Instance(models.Model):\n hostname = models.CharField(max_length=10)\n instance = models.ForeignKey(Project)\n\n # filters.py\n\n class InstanceFilter(django_filters.FilterSet):\n class Meta(object):\n model = models.Instance\n\n # Filter fields go here\n order_by = [\n 'hostname',\n '-hostname',\n 'project__name',\n '-project__name',\n ]\n order_by_mapping = {\n # Fix order by parameters\n 'project_name': 'project__name',\n # '-project_name' mapping is handled automatically\n }\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n filter_class = self.get_filter_class(view, queryset)\n\n if filter_class:\n # XXX: The proper way would be to redefine FilterSetOptions,\n # but it's too much of a boilerplate\n mapping = getattr(filter_class.Meta, 'order_by_mapping', None)\n order_by_field = getattr(filter_class, 'order_by_field')\n\n if mapping:\n transform = lambda o: self._transform_ordering(mapping, o)\n\n params = request.QUERY_PARAMS.copy()\n ordering = map(transform, params.getlist(order_by_field))\n params.setlist(order_by_field, ordering)\n else:\n params = request.QUERY_PARAMS\n\n return filter_class(params, queryset=queryset).qs\n\n return queryset\n\n # noinspection PyMethodMayBeStatic\n def _transform_ordering(self, mapping, ordering):\n if ordering.startswith('-'):\n 
ordering = ordering[1:]\n reverse = True\n else:\n reverse = False\n\n try:\n ordering = mapping[ordering]\n except KeyError:\n pass\n\n if reverse:\n return '-' + ordering\n\n return ordering\n","sub_path":"nodeconductor/core/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"110627978","text":"#!/usr/bin/python\r\n\r\n\"\"\"\r\nThe goal is to get familar with parameter tuning with spark ml\r\nplease ref to\r\nhttps://rawgit.com/jnwang/cmpt733/master/Assignments/A1/A1-instruction.html#Task-C:-Parameter-Tuning\r\nA preliminary trainning model is provided\r\n\r\nhow to run:\r\ntwo arguments (input and output file) should be provided together with script\r\n/usr/spark2.0.1/bin/spark-submit --master yarn --deploy-mode client ml_pipeline.py /path/of/traininput /path/of/testinput\r\nthe output will be print at stdout\r\nSpark: 2.0.1\r\nPython: 2.7.5\r\n\r\n\"\"\"\r\n\r\nimport sys\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.ml import Pipeline\r\nfrom pyspark.ml.classification import LogisticRegression\r\nfrom pyspark.ml.feature import HashingTF, Tokenizer\r\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\r\nfrom pyspark.ml.tuning import ParamGridBuilder, CrossValidator\r\n\r\ntraininput = sys.argv[1]\r\ntestinput = sys.argv[2]\r\n\r\ndef main():\r\n\r\n spark = SparkSession.builder.appName(\"ml pipeline\").getOrCreate()\r\n sc = spark.sparkContext\r\n\r\n # Read training data as a DataFrame\r\n trainDF = spark.read.parquet(traininput)\r\n\r\n # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.\r\n tokenizer = Tokenizer(inputCol=\"text\", outputCol=\"words\")\r\n # hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(),\r\n # outputCol=\"features\",\r\n # numFeatures=1000)\r\n hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(),\r\n outputCol=\"features\")\r\n # lr = LogisticRegression(maxIter=20, regParam=0.1)\r\n lr = LogisticRegression(maxIter=20)\r\n pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])\r\n\r\n # cross validation\r\n paramGrid = ParamGridBuilder()\\\r\n .addGrid(hashingTF.numFeatures, [1000, 5000, 10000])\\\r\n .addGrid(lr.regParam, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\\\r\n .build()\r\n pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])\r\n evaluator = BinaryClassificationEvaluator()\r\n cv = CrossValidator(estimator=pipeline,\r\n estimatorParamMaps=paramGrid,\r\n evaluator=evaluator,\r\n numFolds=5)\r\n cvModel = cv.fit(trainDF)\r\n\r\n\r\n # Fit the pipeline to training data.\r\n model = pipeline.fit(trainDF)\r\n\r\n # Evaluate the model on testing data\r\n testDF = spark.read.parquet(testinput)\r\n prediction = model.transform(testDF)\r\n output = evaluator.evaluate(prediction)\r\n print(\"without parameter tuning output is:\", output)\r\n\r\n cvPrediction = cvModel.transform(testDF)\r\n cvOutput = evaluator.evaluate(cvPrediction)\r\n print(\"areaUnderROC with parameter tuning:\", cvOutput)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"ml_pipeline.py","file_name":"ml_pipeline.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"82034856","text":"# -*- coding: UTF-8 -*-\n\nimport shelve\nfrom person import Person, Manager\n\n\ndef makeDB():\n bob = Person('Bob Smith', 42, 30000, 'software')\n sue = Person('Sue Jones', 47, 40000, 'hardware')\n 
tom = Manager('Tom Doe', 50, 50000)\n\n db = shelve.open('class-shelve')\n db['bob'] = bob\n db['sue'] = sue\n db['tom'] = tom\n db.close()\n\ndef printDB():\n db = shelve.open('class-shelve')\n for key in db:\n print(db[key])\n db.close()\n\ndef updateDBSample():\n db = shelve.open('class-shelve')\n sue = db['sue']\n sue.job = '软件工程师'\n #sue.giveRaise(0.5)\n #Person.giveRaise(sue,0.5)\n db['sue'] = sue\n db.close()\n\nif __name__ == '__main__':\n makeDB()\n printDB()\n updateDBSample()\n printDB()\n","sub_path":"make_db_class.py","file_name":"make_db_class.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259023950","text":"import numpy as np\nfrom lmfit import Parameters, minimize, fit_report\n\nclass Calibrate:\n def __init__(self, model):\n self.model = model\n self.lmfitparams = Parameters()\n self.parameterdict = {}\n self.seriesdict = {}\n def parameter(self, name, par=None, layer=0, initial=0, pmin=None, pmax=None, vary=True):\n if par is not None:\n assert type(par) == np.ndarray, \"Error: par needs to be array\"\n p = par[layer:layer + 1]\n else:\n if name[:3] == 'kaq':\n layer = int(name[3:])\n p = self.model.aq.kaq[layer:layer + 1]\n elif name[:3] == 'Saq':\n layer = int(name[3:])\n p = self.model.aq.Saq[layer:layer + 1]\n else:\n print('parameter name not recognized or no par reference supplied')\n return\n self.lmfitparams.add(name, value=initial, min=pmin, max=pmax, vary=vary)\n self.parameterdict[name] = p\n def series(self, name, x, y, layer, t, h):\n s = Series(x, y, layer, t, h)\n self.seriesdict[name] = s\n def residuals(self, p):\n # p is lmfit.Parameters object\n print('.', end='')\n vals = p.valuesdict()\n for k in vals:\n self.parameterdict[k][:] = vals[k] # [:] needed to do set value in array\n # do something else when it is the storage coefficient\n # this needs to be replaced when Saq computation is moved to initialize\n if len(k) > 3:\n if k[:3] == 'Saq': \n layer = int(k[3:])\n if layer == 0:\n if self.model.aq.phreatictop:\n self.parameterdict[k][:] = vals[k]\n else:\n self.parameterdict[k][:] = vals[k] * self.model.aq.Haq[0]\n else:\n self.parameterdict[k][:] = vals[k] * self.model.aq.Haq[layer]\n self.model.solve(silent=True)\n rv = np.empty(0)\n for key in self.seriesdict:\n s = self.seriesdict[key]\n h = self.model.head(s.x, s.y, s.t, layers=s.layer)\n rv = np.append(rv, s.h - h)\n return rv\n def fit(self, report=True):\n self.fitresult = minimize(self.residuals, self.lmfitparams, epsfcn=1e-4)\n if report:\n print(fit_report(self.fitresult))\n \nclass Series:\n def __init__(self, x, y, layer, t, h):\n self.x = x\n self.y = y\n self.layer = layer\n self.t = t\n self.h = h\n\n\n","sub_path":"ttim/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"207421259","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/frauddetector/delete-event-type.html\nif __name__ == '__main__':\n \"\"\"\n\tget-event-types : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/frauddetector/get-event-types.html\n\tput-event-type : 
https://awscli.amazonaws.com/v2/documentation/api/latest/reference/frauddetector/put-event-type.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # name : The name of the event type to delete.\n \"\"\"\n add_option_dict = {}\n\n #######################################################################\n # parameter display string\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_one_parameter(\"frauddetector\", \"delete-event-type\", \"name\", add_option_dict)\n\n\n\n\n\n","sub_path":"frauddetector_write_1/event-type_delete.py","file_name":"event-type_delete.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"393869213","text":"class MyHashNode:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass MyHashMap:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.__bufLen = 999983 # 99877\n self.__buf = [None for i in range(self.__bufLen)]\n\n def put(self, key, value):\n \"\"\"\n value will always be non-negative.\n :type key: int\n :type value: int\n :rtype: void\n \"\"\"\n idx = self.__getIdx(key)\n node = self.__buf[idx]\n if not node:\n node = MyHashNode(None, None)\n self.__buf[idx] = node\n while node.next and node.next.key != key:\n node = node.next\n if node.next:\n node.next.value = value\n else:\n node.next = MyHashNode(key, value)\n\n def get(self, key):\n \"\"\"\n Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key\n :type key: int\n :rtype: int\n \"\"\"\n node = self.__buf[self.__getIdx(key)]\n if not node:\n return -1\n while node and node.key != key:\n node = node.next\n return node.value if node else -1\n\n def remove(self, key):\n \"\"\"\n Removes the mapping of the specified value key if this map contains a mapping for the key\n :type key: int\n :rtype: void\n \"\"\"\n node = self.__buf[self.__getIdx(key)]\n if not node:\n return\n while node.next and node.next.key != key:\n node = node.next\n if node.next:\n node.next = node.next.next\n\n def __getIdx(self, key):\n return key % self.__bufLen\n\n\n# Your MyHashMap object will be instantiated and called as such:\n# obj = MyHashMap()\n# obj.put(key,value)\n# param_2 = obj.get(key)\n# obj.remove(key)\n\nmap = MyHashMap()\nmap.remove(14)\nmap.get(4)\nmap.put(7, 3)\nmap.put(11, 1)\nmap.put(12, 1)\nmap.get(7)\nmap.put(1, 19)\nmap.put(0, 3)\nmap.put(1, 8)\nmap.put(2, 6)\n","sub_path":"src/design-hashmap.py","file_name":"design-hashmap.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"256616091","text":"import abc\nimport os\nimport time\nimport logging\nimport subprocess\nimport threading\nimport socket\nimport errno\nimport shlex\n\nimport mbed_lstools\nfrom pexpect.popen_spawn import PopenSpawn\nfrom mcutk.appbase import APPBase\nfrom mcutk.gdb_session import GDBSession\nfrom mcutk.exceptions import GDBServerStartupError\n\n\nclass DebuggerBase(APPBase):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs):\n super(DebuggerBase, self).__init__(*args, **kwargs)\n self.gdbpath = kwargs.get(\"gdbpath\", \"\")\n self.version = kwargs.get(\"version\", \"unknown\")\n self._board = None\n self._callback_map = {\n \"before-load\": None\n }\n\n def set_board(self, board):\n 
self._board = board\n\n def gdb_init_template(self):\n \"\"\"Return a string about gdb init template.\n \"\"\"\n return \"\"\n\n @abc.abstractmethod\n def reset(self):\n \"\"\"Used to reset target CPU.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def erase(self):\n \"\"\"Used to erase flash.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def flash(self):\n \"\"\"Binary image programming.\n .bin\n .hex\n \"\"\"\n pass\n\n def get_gdbserver(self, board=None):\n \"\"\"Return a string about the command line of gdbserver\n \"\"\"\n pass\n\n def start_gdbserver(self, **kwargs):\n gdbserver_cmd = self.get_gdbserver(**kwargs)\n print (gdbserver_cmd)\n # start gdb server\n subprocess.call(gdbserver_cmd, shell=True)\n\n def list_connected_devices(self):\n mbeds = mbed_lstools.create()\n devices = mbeds.list_mbeds()\n for device in devices:\n device['usbid'] = device.pop('target_id_usb_id')\n device['type'] = device.pop('device_type')\n device['name'] = device.pop('platform_name')\n if device['type'] == 'daplink':\n device['debugger'] = 'pyocd'\n\n return devices\n\n def gdb_program(self,\n filename,\n gdbserver_cmdline=None,\n gdbinit_commands=None,\n board=None,\n timeout=200,\n **kwargs):\n \"\"\"Using gdb & gdbserver to programming image.\n Steps:\n 1> Start gdbserver at port: board.gdbport\n 2> Render gdbinit_template\n 3> Start gdb.exe:\n gdb.exe -x -se \n\n Arguments:\n filename - {str}: path to image file.\n gdbserver_cmdline - {str}: gdb server command line, used for starting gdb server.\n gdbinit_commands - {str}: gdb init commands to control gdb behaviour.\n timeout - {int}: set timeout for gdb & gdb server process. default 200 seconds.\n\n Returns:\n tuple --- (returncode, console-output)\n \"\"\"\n timer = None\n try:\n session, timer, gdb_output = self._start_gdb_session(\n filename,\n gdbserver_cmdline,\n gdbinit_commands,\n board,\n timeout,\n **kwargs)\n except GDBServerStartupError:\n return 1, \"\"\n\n finally:\n # Stop timeout timer when communicate call returns.\n if timeout is not None and timer:\n timer.cancel()\n\n if not session:\n return 1, gdb_output\n\n # gdb client disconnect the connection,\n # and gdbsever will automaticlly close\n session.close()\n session.gdb_server_proc.wait()\n logging.info(\"> gdb server exit! 
exit code: %s\", session.gdb_server_proc.returncode)\n\n # get gdb console output\n output = ''.join(gdb_output)\n output += session.console_output\n retcode = session.gdb_server_proc.returncode\n return retcode, output\n\n def _start_gdb_session(self,\n filename,\n gdbserver_cmdline=None,\n gdbinit_commands=None,\n board=None,\n timeout=None,\n **kwargs):\n \"\"\"Return a attached gdb session object\"\"\"\n\n if board is None:\n board = self._board\n\n if board is None:\n raise ValueError('no board is associated with debugger!')\n\n timer = None\n gdb_errorcode = 0\n\n # load gdb init template\n gdb_init_template = board.gdb_init_commands\n if gdb_init_template is None:\n gdb_init_template = self.gdb_init_template()\n\n gdbcommands = gdbinit_commands if gdbinit_commands else \\\n render_gdbinit(gdb_init_template, board)\n\n gdbserver_cmd = gdbserver_cmdline if gdbserver_cmdline else self.get_gdbserver(**kwargs)\n\n logging.info(\"> starting gdb server.\")\n logging.info(gdbserver_cmd)\n\n # start gdb server\n if os.name != \"nt\":\n gdbserver_cmd = shlex.split(gdbserver_cmd)\n gdbserver_proc = subprocess.Popen(\n gdbserver_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=False)\n\n gdbport = board.gdbport\n if not _validate_port_is_ready(gdbserver_proc, gdbport):\n logging.error(\"gdb server start failure\")\n if gdbserver_proc.poll() is None:\n _kill_popen_process(gdbserver_proc)\n output, _ = gdbserver_proc.communicate()\n logging.error(\">>>> gdb server console output: \\n\\n%s\", output)\n raise GDBServerStartupError(\"gdb server start failure\")\n\n logging.info(\"> gdb server is ready, pid: %s, port: %s.\", gdbserver_proc.pid, gdbport)\n gdb_output = list()\n\n def stdout_reader(process):\n for line in iter(process.stdout.readline, b''):\n gdb_output.append(line)\n\n # To resolve large output from subprocess PIPE, use a background thread to continues read\n # data from stdout.\n reader_thread = threading.Thread(target=stdout_reader, args=(gdbserver_proc, ))\n reader_thread.start()\n\n # start gdb client\n output = \"\"\n logging.info(\"> start gdb client to connect to server.\")\n gdb_cmd = \"{} --exec {} --silent\".format(self.gdbpath, filename)\n session = GDBSession.start(gdb_cmd)\n session.gdb_server_proc = gdbserver_proc\n\n # set timeout\n # Use a timer to stop the subprocess if the timeout is exceeded.\n if timeout is not None:\n ps_list = [gdbserver_proc, session]\n timer = threading.Timer(timeout, timeout_exceeded, (ps_list, ))\n timer.start()\n\n # convert string commands to a list\n _gdb_actions = [line.strip() for line in gdbcommands.split(\"\\n\") if line.strip()]\n _gdb_actions.remove(\"q\")\n for act in _gdb_actions:\n # call registerd callback function before-load command\n if act.startswith(\"load\"):\n self._call_registered_callback(\"before-load\")\n try:\n c = session.run_cmd(act).lower()\n if \"No connection could be made\" in c:\n gdb_errorcode = 1\n logging.error(c)\n break\n elif '\"monitor\" command not supported by this target' in c:\n gdb_errorcode = 1\n logging.error('gdb command execute error!')\n break\n except:\n logging.exception('gdb command run error, CMD: %s', act)\n gdb_errorcode = 1\n\n if gdb_errorcode == 1:\n logging.error(\"gdb init failed, close gdb session\")\n session.close()\n gdbserver_proc.wait()\n reader_thread.join()\n session = None\n\n return session, timer, gdb_output\n\n\n def start_gdb_debug_session(self,\n filename,\n gdbserver_cmdline=None,\n gdbinit_commands=None,\n board=None,\n **kwargs):\n \"\"\"Return a 
attached gdb session object\"\"\"\n session, _, _ = self._start_gdb_session(\n filename, gdbserver_cmdline, gdbinit_commands,\n board, timeout=None, **kwargs)\n\n return session\n\n def register(self, name):\n \"\"\"Declare a decorator to register callback to debugger instance.\n\n Arguments:\n name {str} -- before-load\n \"\"\"\n def func_wrapper(func, *args, **kwagrs):\n self._callback_map[name] = (func, args, kwagrs)\n return func\n return func_wrapper\n\n def _call_registered_callback(self, name=None):\n value = self._callback_map.get(name)\n if type(value) is tuple:\n func, args, kwargs = value\n if func:\n return func(*args, **kwargs)\n\n return None\n\ndef _kill_popen_process(process):\n # process.kill() just killed the parent process, and cannot kill the child process\n # that caused the popen process in running state.\n # force to use windows command to kill that the process!\n if not isinstance(process, subprocess.Popen):\n raise TypeError(\"argument is not a instance of subprocess.Popen\")\n\n if os.name == \"nt\":\n os.system(\"TASKKILL /F /PID {pid} /T\".format(pid=process.pid))\n else:\n process.kill()\n\n\ndef timeout_exceeded(ps):\n \"\"\"subprocess tiemout exceeded handler.\"\"\"\n for p in ps:\n if isinstance(p, PopenSpawn):\n p.kill(None)\n else:\n _kill_popen_process(p)\n logging.warning('pid: %s exceeded timeout!', p.pid)\n\n\ndef render_gdbinit(template, board):\n \"\"\"\n Render gdbinit template with board object.\n\n Render used '.foramt()' syntax:\n 'target remote localhost: {gdbport}'\n\n Example:\n 1. jlink\n\n \"\"\"\n dicta = board.__dict__\n # dicta[\"file\"] = executable\n return template.format(**dicta)\n\n\ndef _validate_port_is_ready(server_process, port, tiemout=30):\n \"\"\"Validate the port is open on localhost\"\"\"\n\n is_ready = False\n port = int(port)\n s = None\n\n assert server_process != None\n\n # delay 1 seconds wait server up\n time.sleep(1)\n for _ in range(tiemout):\n print(\" Wait for gdb server ready.\")\n time.sleep(0.5)\n\n if server_process.poll() is None:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((\"127.0.0.1\", port))\n except socket.error as e:\n if e.errno == errno.EADDRINUSE:\n is_ready = True\n break\n else:\n print(e)\n else:\n break\n\n if s is not None:\n s.close()\n\n return is_ready\n","sub_path":"mcutk/debugger/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"428177563","text":"\"\"\"\n给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。\n如果你最多只允许完成一笔交易(即买入和卖出一支股票),设计一个算法来计算你所能获取的最大利润。\n注意你不能在买入股票前卖出股票。\n\n示例 1:\n输入: [7,1,5,3,6,4]\n输出: 5\n解释: 在第 2 天(股票价格 = 1)的时候买入,在第 5 天(股票价格 = 6)的时候卖出,最大利润 = 6-1 = 5 。\n 注意利润不能是 7-1 = 6, 因为卖出价格需要大于买入价格。\n\n示例 2:\n输入: [7,6,4,3,1]\n输出: 0\n解释: 在这种情况下, 没有交易完成, 所以最大利润为 0。\n\"\"\"\n\n\nclass Solution:\n def maxProfit(self, prices):\n length = len(prices)\n if length < 2:\n return 0\n dp1 = [0] * length\n min_val = prices[0]\n for i in range(1, length):\n dp1[i] = max(dp1[i - 1], prices[i] - min_val)\n min_val = min(min_val, prices[i])\n return max(dp1)\n","sub_path":"LeedCode/数组/121. 买卖股票的最佳时机.py","file_name":"121. 
买卖股票的最佳时机.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"84020196","text":"#!/usr/bin/env python2.6\r\n#-*- coding: utf-8 -*-\r\n\r\n# Filename: slate.py\r\n# Author: Xiangquan\r\n# Created: 2013/04/28/\r\n# Latest Modified: 2013/04/28/\r\n# Platform: Windows7\r\n# Copyright: Illumina ltd, PTD department, 2012\r\n\r\nimport os, sys\r\n\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nfrom dirAnalyse import DirAnalyse\r\n\r\nfrom PIL import Image #, ImageEnhance\r\n\r\nclass SlateInfo(object):\r\n def __init__(self, vfxcode = '', shot = '', duration = '', handles = '', timecode = '', status = '', date = '', \\\r\n lut = '', maskRatio = '', resolution = '', format = '', description = [], feedback = []):\r\n \"\"\" Constructor \"\"\"\r\n self.vfxcode = vfxcode\r\n self.shot = shot\r\n self.duration = duration\r\n self.handles = handles\r\n self.timecode = timecode\r\n self.status = status\r\n self.date = date\r\n self.lut = lut\r\n self.maskRatio = maskRatio\r\n self.resolution = resolution\r\n self.format = format\r\n self.description = description\r\n self.feedback = feedback\r\n self.dirAnalyse = ''\r\n \r\nif __name__ == '__main__':\r\n import sys\r\n app = QApplication(sys.argv)\r\n input = 'D:/testJpeg/fst_rig_wangcy_dementor_master/fst_rig_wangcy_dementor_master_04.$F4.jpg'\r\n dirAnalyse = DirAnalyse(input)\r\n \r\n sys.exit(app.exec_())\r\n\r\n\r\n","sub_path":"tronPipelineScript/IlluminaConverter_v002/slateInfo.py","file_name":"slateInfo.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"235549499","text":"# -*- coding: utf-8 -*-\n\nfrom requests.auth import AuthBase\nfrom requests.structures import CaseInsensitiveDict\nfrom datetime import datetime\nimport hashlib\nimport hmac\nimport base64\nimport re\n\n\n\n# Python 2/3 support\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nBUCKET_VHOST_MATCH = re.compile(r'^([a-z0-9\\-]+\\.)?s3\\.amazonaws\\.com$', flags=re.IGNORECASE)\n\nAWS_QUERY_PARAMS = ['versioning', 'location', 'acl', 'torrent', 'lifecycle', 'versionid',\n 'response-content-type', 'response-content-language', 'response-expires', 'response-cache-control',\n 'response-content-disposition', 'response-content-encoding', 'delete']\n\n\nclass S3Auth(AuthBase):\n \"\"\"\n Authenticate S3 requests\n \"\"\"\n\n def __init__(self, secret_key, access_key):\n \"\"\"\n\n \"\"\"\n self.secret_key = secret_key\n self.access_key = access_key\n\n def sign(self, string_to_sign):\n digest = hmac.new(self.secret_key.encode('utf8'),\n msg=string_to_sign.encode('utf8'),\n digestmod=hashlib.sha1).digest()\n\n return base64.b64encode(digest).strip().decode('ascii')\n\n\n def string_to_sign(self, request):\n h = CaseInsensitiveDict()\n h.update(request.headers)\n\n # Try to use\n\n if b'x-amz-date' in h or 'x-amz-date' in h:\n date = ''\n else:\n date = h.get('Date') or self._get_date()\n request.headers['Date'] = date\n\n # Set the date header\n request.headers['Date'] = date\n\n # A fix for the content type header extraction in python 3\n # This have to be done because requests will try to set application/www-url-encoded herader\n # if we pass bytes as the content, and the content-type is set with a key that is b'Content-Type' and not\n # 'Content-Type'\n content_type = ''\n if b'Content-Type' in request.headers:\n # Fix content 
type\n content_type = h.get(b'Content-Type')\n del request.headers[b'Content-Type']\n request.headers['Content-Type'] = content_type\n\n msg = [\n request.method,\n h.get(b'Content-MD5', '') or h.get('Content-MD5', ''),\n content_type or h.get('Content-Type', ''),\n date,\n self._get_canonicalized_amz_headers(h) + self._get_canonicalized_resource(request)\n ]\n\n return '\\n'.join(msg)\n\n def _get_canonicalized_amz_headers(self, headers):\n \"\"\"\n Collect the special Amazon headers, prepare them for signing\n \"\"\"\n\n amz_dict = {}\n\n for k, v in headers.items():\n if isinstance(k, bytes):\n k = k.decode('ascii')\n\n k = k.lower()\n\n if k.startswith('x-amz'):\n amz_dict[k] = v\n\n result = \"\"\n for k in sorted(amz_dict.keys()):\n result += \"%s:%s\\n\" % (k.strip(), amz_dict[k].strip().replace('\\n', ' '))\n\n return result\n\n def _get_canonicalized_resource(self, request):\n\n r = \"\"\n\n # parse our url\n parts = urlparse(request.url)\n\n # get the host, remove any port identifiers\n host = parts.netloc.split(':')[0]\n\n if host:\n # try to match our host to .s3.amazonaws.com/s3.amazonaws.com\n m = BUCKET_VHOST_MATCH.match(host)\n if m:\n bucket = (m.groups()[0] or '').rstrip('.')\n\n if bucket:\n r += ('/' + bucket)\n else:\n # It's a virtual host, add it to the result\n r += ('/' + host)\n\n # Add the path string\n r += parts.path or '/'\n\n # add the special query strings\n r += self._get_subresource(parts.query)\n\n return r\n\n def _get_subresource(self, qs):\n r = []\n\n keys = qs.split('&')\n for i in keys:\n item = i.split('=')\n k = item[0].lower()\n\n if k in AWS_QUERY_PARAMS:\n r.append(i)\n\n if r:\n return '?' + '&'.join(r)\n\n return ''\n\n def _get_date(self):\n return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n\n def __call__(self, r):\n msg = self.string_to_sign(r)\n r.headers['Authorization'] = \"AWS %s:%s\" % (self.access_key, self.sign(msg))\n return r","sub_path":"tinys3/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"415367827","text":"# -*- coding: utf-8 -*-\n\"\"\"\nkorp.py is a WSGI application for querying corpora available on the server.\nCurrently it acts as a wrapper for the CQP Query Language of Corpus Workbench.\n\nConfiguration is done by editing config.py.\n\nhttps://spraakbanken.gu.se/korp/\n\"\"\"\n\n# Skip monkey patching if run through gunicorn (which does the patching for us)\nimport os\nif \"gunicorn\" not in os.environ.get(\"SERVER_SOFTWARE\", \"\"):\n from gevent import monkey\n monkey.patch_all(subprocess=False) # Patching needs to be done as early as possible, before other imports\n\nfrom gevent.pywsgi import WSGIServer\nfrom gevent.threadpool import ThreadPool\nfrom gevent.queue import Queue, Empty\n\n# gunicorn patches everything, and gevent's subprocess module can't be used in\n# native threads other than the main one, so we need to un-patch the subprocess module.\nfrom importlib import reload\nimport subprocess\nreload(subprocess)\n\nfrom concurrent import futures\nfrom concurrent.futures import ThreadPoolExecutor\nfrom collections import defaultdict\nfrom dateutil.relativedelta import relativedelta\nfrom copy import deepcopy\nimport datetime\nimport uuid\nimport binascii\nimport sys\nimport glob\nimport time\nimport re\nimport json\nimport zlib\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport base64\nimport hashlib\nimport itertools\nimport traceback\nimport 
functools\nimport math\nimport random\nimport markdown\nimport config\ntry:\n import pylibmc\nexcept ImportError:\n print(\"Could not load pylibmc. Caching will be disabled.\")\n cache_disabled = True\nelse:\n cache_disabled = False\nfrom flask import Flask, request, Response, stream_with_context, copy_current_request_context\nfrom flask_mysqldb import MySQL\nfrom flask_cors import CORS\n\n################################################################################\n# Nothing needs to be changed in this file. Use config.py for configuration.\n\n# The version of this script\nKORP_VERSION = \"7.1.1\"\n\n# URL for Språkbanken's Korp API (used for examples in documentation)\nSB_API_URL = \"https://ws.spraakbanken.gu.se/ws/korp/v7/\"\n\n# Special symbols used by this script; they must NOT be in the corpus\nEND_OF_LINE = \"-::-EOL-::-\"\nLEFT_DELIM = \"---:::\"\nRIGHT_DELIM = \":::---\"\n\n# Regular expressions for parsing parameters\nIS_NUMBER = re.compile(r\"^\\d+$\")\nIS_IDENT = re.compile(r\"^[\\w\\-,|]+$\")\n\nQUERY_DELIM = \",\"\n\n################################################################################\n\napp = Flask(__name__)\nCORS(app)\n\n# Configure database connection\napp.config[\"MYSQL_HOST\"] = config.DBHOST\napp.config[\"MYSQL_USER\"] = config.DBUSER\napp.config[\"MYSQL_PASSWORD\"] = config.DBPASSWORD\napp.config[\"MYSQL_DB\"] = config.DBNAME\napp.config[\"MYSQL_PORT\"] = config.DBPORT\napp.config[\"MYSQL_USE_UNICODE\"] = True\napp.config[\"MYSQL_CURSORCLASS\"] = \"DictCursor\"\nmysql = MySQL(app)\n\n\ndef main_handler(generator):\n \"\"\"Decorator wrapping all WSGI endpoints, handling errors and formatting.\n\n Global parameters are\n - callback: an identifier that the result should be wrapped in\n - encoding: the encoding for interacting with the corpus (default: UTF-8)\n - indent: pretty-print the result with a specific indentation\n - debug: if set, return some extra information (for debugging)\n \"\"\"\n @functools.wraps(generator) # Copy original function's information, needed by Flask\n def decorated(args=None, *pargs, **kwargs):\n internal = args is not None\n if not internal:\n if request.is_json:\n args = request.get_json()\n else:\n args = request.values.to_dict()\n\n args[\"internal\"] = internal\n\n if not isinstance(args.get(\"cache\"), bool):\n args[\"cache\"] = bool(not cache_disabled and\n not args.get(\"cache\", \"\").lower() == \"false\" and\n config.CACHE_DIR and os.path.exists(config.CACHE_DIR) and\n config.MEMCACHED_SERVERS)\n\n if internal:\n # Function is internally used\n return generator(args, *pargs, **kwargs)\n else:\n # Function is called externally\n def error_handler():\n \"\"\"Format exception info for output to user.\"\"\"\n exc = sys.exc_info()\n if isinstance(exc[1], CustomTracebackException):\n exc = exc[1].exception\n error = {\"ERROR\": {\"type\": exc[0].__name__,\n \"value\": str(exc[1])\n }}\n if \"debug\" in args:\n error[\"ERROR\"][\"traceback\"] = \"\".join(traceback.format_exception(*exc)).splitlines()\n return error\n\n def incremental_json(ff):\n \"\"\"Incrementally yield result as JSON.\"\"\"\n if callback:\n yield callback + \"(\"\n yield \"{\\n\"\n\n try:\n for response in ff:\n if not response:\n # Yield whitespace to prevent timeout\n yield \" \\n\"\n else:\n yield json.dumps(response)[1:-1] + \",\\n\"\n except GeneratorExit:\n raise\n except:\n error = error_handler()\n yield json.dumps(error)[1:-1] + \",\\n\"\n\n yield json.dumps({\"time\": time.time() - starttime})[1:] + \"\\n\"\n if callback:\n yield \")\"\n\n def 
full_json(ff):\n \"\"\"Yield full JSON at end, but keep returning newlines to prevent timeout.\"\"\"\n result = {}\n\n try:\n for response in ff:\n if not response:\n # Yield whitespace to prevent timeout\n yield \" \\n\"\n else:\n result.update(response)\n except GeneratorExit:\n raise\n except:\n result = error_handler()\n\n result[\"time\"] = time.time() - starttime\n\n if callback:\n result = callback + \"(\" + json.dumps(result, indent=indent) + \")\"\n else:\n result = json.dumps(result, indent=indent)\n yield result\n\n starttime = time.time()\n incremental = parse_bool(args, \"incremental\", False)\n callback = args.get(\"callback\")\n indent = int(args.get(\"indent\", 0))\n\n if incremental:\n # Incremental response\n return Response(stream_with_context(incremental_json(generator(args, *pargs, **kwargs))),\n mimetype=\"application/json\")\n else:\n # We still use a streaming response even when non-incremental, to prevent timeouts\n return Response(stream_with_context(full_json(generator(args, *pargs, **kwargs))),\n mimetype=\"application/json\")\n\n return decorated\n\n\ndef prevent_timeout(generator):\n \"\"\"Decorator for long-running functions that might otherwise timeout.\"\"\"\n @functools.wraps(generator)\n def decorated(args=None, *pargs, **kwargs):\n if args[\"internal\"]:\n # Internally used\n yield from generator(args, *pargs, **kwargs)\n return\n\n def f(queue):\n for response in generator(args, *pargs, **kwargs):\n queue.put(response)\n queue.put(\"DONE\")\n\n timeout = 15\n q = Queue()\n\n @copy_current_request_context\n def error_catcher(g, *pargs, **kwargs):\n try:\n g(*pargs, **kwargs)\n except Exception as e:\n q.put(sys.exc_info())\n\n pool = ThreadPool(1)\n pool.spawn(error_catcher, f, q)\n\n while True:\n try:\n msg = q.get(block=True, timeout=timeout)\n if msg == \"DONE\":\n break\n elif isinstance(msg, tuple):\n raise CustomTracebackException(msg)\n else:\n yield msg\n except Empty:\n yield {}\n\n return decorated\n\n\n################################################################################\n# ARGUMENT PARSING\n################################################################################\n\ndef parse_corpora(args):\n corpora = args.get(\"corpus\", [])\n if isinstance(corpora, str):\n corpora = corpora.upper().split(QUERY_DELIM)\n return sorted(set(corpora))\n\n\ndef parse_within(args):\n within = defaultdict(lambda: args.get(\"default_within\", args.get(\"defaultwithin\")))\n\n if args.get(\"within\"):\n if \":\" not in args.get(\"within\"):\n raise ValueError(\"Malformed value for key 'within'.\")\n within.update({x.split(\":\")[0].upper(): x.split(\":\")[1] for x in args.get(\"within\").split(QUERY_DELIM)})\n return within\n\n\ndef parse_cqp_subcqp(args):\n cqp = [args.get(key) for key in sorted([k for k in args.keys() if k.startswith(\"cqp\")],\n key=lambda x: int(x[3:]) if len(x) > 3 else 0)]\n subcqp = [args.get(key) for key in sorted([k for k in args.keys() if k.startswith(\"subcqp\")],\n key=lambda x: int(x[6:]) if len(x) > 6 else 0)]\n return cqp, subcqp\n\n\n################################################################################\n# INFO\n################################################################################\n\n@app.route(\"/sleep\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef sleep(args):\n t = int(args.get(\"t\", 5))\n for x in range(t):\n time.sleep(1)\n yield {\"%d\" % x: x}\n\n\n@app.route(\"/info\", methods=[\"GET\", \"POST\"])\n@main_handler\ndef info(args):\n \"\"\"Return information, either 
about a specific corpus\n or general information about the available corpora.\n \"\"\"\n if args.get(\"corpus\"):\n yield corpus_info(args)\n else:\n yield general_info(args)\n\n\ndef general_info(args):\n \"\"\"Return information about the available corpora.\"\"\"\n if args[\"cache\"]:\n with mc_pool.reserve() as mc:\n result = mc.get(\"%s:info\" % cache_prefix())\n if result:\n if \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"][\"cache_read\"] = True\n return result\n\n corpora = run_cqp(\"show corpora;\")\n version = next(corpora)\n protected = []\n\n if config.PROTECTED_FILE:\n with open(config.PROTECTED_FILE) as infile:\n protected = [x.strip() for x in infile.readlines()]\n\n result = {\"version\": KORP_VERSION, \"cqp-version\": version, \"corpora\": list(corpora), \"protected_corpora\": protected}\n\n if args[\"cache\"]:\n with mc_pool.reserve() as mc:\n added = mc.add(\"%s:info\" % cache_prefix(), result)\n if added and \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"][\"cache_saved\"] = True\n\n return result\n\n\ndef corpus_info(args, no_combined_cache=False):\n \"\"\"Return information about a specific corpus or corpora.\"\"\"\n assert_key(\"corpus\", args, IS_IDENT, True)\n\n corpora = parse_corpora(args)\n\n # Check if whole query is cached\n if args[\"cache\"]:\n checksum_combined = get_hash((sorted(corpora),))\n save_cache = []\n combined_cache_key = \"%s:info_%s\" % (cache_prefix(), checksum_combined)\n with mc_pool.reserve() as mc:\n result = mc.get(combined_cache_key)\n if result:\n if \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"][\"cache_read\"] = True\n result[\"DEBUG\"][\"checksum\"] = checksum_combined\n return result\n\n result = {\"corpora\": {}}\n total_size = 0\n total_sentences = 0\n\n cmd = []\n\n for corpus in corpora:\n # Check if corpus is cached\n if args[\"cache\"]:\n with mc_pool.reserve() as mc:\n corpus_result = mc.get(\"%s:info\" % cache_prefix(corpus))\n if corpus_result:\n result[\"corpora\"][corpus] = corpus_result\n else:\n save_cache.append(corpus)\n if corpus not in result[\"corpora\"]:\n cmd += [\"%s;\" % corpus]\n cmd += show_attributes()\n cmd += [\"info; .EOL.;\"]\n\n if cmd:\n cmd += [\"exit;\"]\n\n # Call the CQP binary\n lines = run_cqp(cmd)\n\n # Skip CQP version\n next(lines)\n\n for corpus in corpora:\n if corpus in result[\"corpora\"]:\n total_size += int(result[\"corpora\"][corpus][\"info\"][\"Size\"])\n sentences = result[\"corpora\"][corpus][\"info\"].get(\"Sentences\", \"\")\n if sentences.isdigit():\n total_sentences += int(sentences)\n continue\n\n # Read attributes\n attrs = read_attributes(lines)\n\n # Corpus information\n info = {}\n\n for line in lines:\n if line == END_OF_LINE:\n break\n if \":\" in line and not line.endswith(\":\"):\n infokey, infoval = (x.strip() for x in line.split(\":\", 1))\n info[infokey] = infoval\n if infokey == \"Size\":\n total_size += int(infoval)\n elif infokey == \"Sentences\" and infoval.isdigit():\n total_sentences += int(infoval)\n\n result[\"corpora\"][corpus] = {\"attrs\": attrs, \"info\": info}\n if args[\"cache\"]:\n if corpus in save_cache:\n with mc_pool.reserve() as mc:\n mc.add(\"%s:info\" % cache_prefix(corpus), result[\"corpora\"][corpus])\n\n result[\"total_size\"] = total_size\n result[\"total_sentences\"] = total_sentences\n\n if args[\"cache\"] and not no_combined_cache:\n # Cache whole query\n with mc_pool.reserve() as mc:\n try:\n saved = mc.add(combined_cache_key, result)\n except pylibmc.TooBig:\n 
pass\n else:\n if saved and \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"][\"cache_saved\"] = True\n\n return result\n\n\n################################################################################\n# QUERY\n################################################################################\n\n@app.route(\"/query_sample\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef query_sample(args):\n \"\"\"Run a sequential query in the selected corpora in random order until at least one\n hit is found, and then abort the query. Use to get a random sample sentence.\"\"\"\n\n corpora = parse_corpora(args)\n # Randomize corpus order\n random.shuffle(corpora)\n\n for i in range(len(corpora)):\n corpus = corpora[i]\n check_authentication([corpus])\n\n args[\"corpus\"] = corpus\n args[\"sort\"] = \"random\"\n\n result = generator_to_dict(query(args))\n if result[\"hits\"] > 0:\n yield result\n return\n\n yield result\n\n\n@app.route(\"/query\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef query(args):\n \"\"\"Perform a CQP query and return a number of matches.\"\"\"\n assert_key(\"cqp\", args, r\"\", True)\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key(\"start\", args, IS_NUMBER)\n assert_key(\"end\", args, IS_NUMBER)\n # assert_key(\"context\", args, r\"^\\d+ [\\w-]+$\")\n assert_key(\"show\", args, IS_IDENT)\n assert_key(\"show_struct\", args, IS_IDENT)\n # assert_key(\"within\", args, IS_IDENT)\n assert_key(\"cut\", args, IS_NUMBER)\n assert_key(\"sort\", args, r\"\")\n assert_key(\"incremental\", args, r\"(true|false)\")\n\n incremental = parse_bool(args, \"incremental\", False)\n free_search = not parse_bool(args, \"in_order\", True)\n use_cache = args[\"cache\"]\n cut = args.get(\"cut\")\n\n corpora = parse_corpora(args)\n\n check_authentication(corpora)\n\n show = args.get(\"show\") or [] # We don't use .get(\"show\", []) since \"show\" might be the empty string.\n if isinstance(show, str):\n show = show.split(QUERY_DELIM)\n show = set(show + [\"word\"])\n\n show_structs = args.get(\"show_struct\") or []\n if isinstance(show_structs, str):\n show_structs = show_structs.split(QUERY_DELIM)\n show_structs = set(show_structs)\n\n expand_prequeries = parse_bool(args, \"expand_prequeries\", True)\n\n start, end = int(args.get(\"start\") or 0), int(args.get(\"end\") or 9)\n\n if config.MAX_KWIC_ROWS and end - start >= config.MAX_KWIC_ROWS:\n raise ValueError(\"At most %d KWIC rows can be returned per call.\" % config.MAX_KWIC_ROWS)\n\n within = parse_within(args)\n\n # Parse \"context\"/\"left_context\"/\"right_context\"/\"default_context\"\n default_context = args.get(\"default_context\", args.get(\"defaultcontext\")) or \"10 words\"\n context = defaultdict(lambda: (default_context,))\n contexts = {}\n\n for c in (\"left_context\", \"right_context\", \"context\"):\n cv = args.get(c, \"\")\n if cv:\n if \":\" not in cv:\n raise ValueError(\"Malformed value for key '%s'.\" % c)\n contexts[c] = {x.split(\":\")[0].upper(): x.split(\":\")[1] for x in cv.split(QUERY_DELIM)}\n else:\n contexts[c] = {}\n\n for corpus in set(k for v in contexts.values() for k in v.keys()):\n if corpus in contexts[\"left_context\"] or corpus in contexts[\"right_context\"]:\n context[corpus] = (contexts[\"left_context\"].get(corpus, default_context),\n contexts[\"right_context\"].get(corpus, default_context))\n else:\n context[corpus] = (contexts[\"context\"].get(corpus, default_context),)\n\n sort = args.get(\"sort\")\n sort_random_seed = 
args.get(\"random_seed\")\n\n # Sort numbered CQP-queries numerically\n cqp, _ = parse_cqp_subcqp(args)\n\n if len(cqp) > 1 and expand_prequeries and not all(within[c] for c in corpora):\n raise ValueError(\"Multiple CQP queries requires 'within' or 'expand_prequeries=false'\")\n\n # Parameters used for all queries\n queryparams = {\"free_search\": free_search,\n \"use_cache\": use_cache,\n \"show\": show,\n \"show_structs\": show_structs,\n \"expand_prequeries\": expand_prequeries,\n \"cut\": cut,\n \"cqp\": cqp,\n \"sort\": sort,\n \"random_seed\": sort_random_seed\n }\n\n result = {\"kwic\": []}\n\n # Checksum for whole query, used to verify 'query_data' from the client\n checksum = get_hash((sorted(corpora),\n cqp,\n sorted(within.items()),\n cut,\n expand_prequeries,\n free_search))\n\n debug = {}\n if \"debug\" in args:\n debug[\"checksum\"] = checksum\n\n ns = Namespace()\n ns.total_hits = 0\n statistics = {}\n\n saved_statistics = {}\n query_data = args.get(\"query_data\", args.get(\"querydata\"))\n\n if query_data:\n try:\n query_data = zlib.decompress(base64.b64decode(\n query_data.replace(\"\\\\n\", \"\\n\").replace(\"-\", \"+\").replace(\"_\", \"/\"))).decode(\"UTF-8\")\n except:\n if \"debug\" in args:\n debug[\"query_data_unparseable\"] = True\n else:\n if \"debug\" in args:\n debug[\"query_data_read\"] = True\n saved_checksum, stats_temp = query_data.split(\";\", 1)\n if saved_checksum == checksum:\n for pair in stats_temp.split(\";\"):\n corpus, hits = pair.split(\":\")\n saved_statistics[corpus] = int(hits)\n elif \"debug\" in args:\n debug[\"query_data_checksum_mismatch\"] = True\n\n if use_cache and not saved_statistics:\n # Query data parsing failed or was missing, so look for cached hits instead\n for corpus in corpora:\n corpus_checksum = get_hash((cqp,\n within[corpus],\n cut,\n expand_prequeries,\n free_search))\n with mc_pool.reserve() as mc:\n cached_corpus_hits = mc.get(\"%s:query_size_%s\" % (cache_prefix(corpus.split(\"|\")[0]), corpus_checksum))\n if cached_corpus_hits is not None:\n saved_statistics[corpus] = cached_corpus_hits\n\n ns.start_local = start\n ns.end_local = end\n\n if saved_statistics:\n if \"debug\" in args:\n debug[\"cache_coverage\"] = \"%d/%d\" % (len(saved_statistics), len(corpora))\n complete_hits = set(corpora) == set(saved_statistics.keys())\n else:\n complete_hits = False\n\n if complete_hits:\n # We have saved_statistics available for all corpora, so calculate which\n # corpora need to be queried and then query them in parallel.\n corpora_hits = which_hits(corpora, saved_statistics, start, end)\n ns.total_hits = sum(saved_statistics.values())\n statistics = saved_statistics\n corpora_kwics = {}\n ns.progress_count = 0\n\n if len(corpora_hits) == 0:\n pass\n elif len(corpora_hits) == 1:\n # If only hits in one corpus, it is faster to not use threads\n corpus, hits = list(corpora_hits.items())[0]\n result[\"kwic\"], _ = query_and_parse(corpus, within=within[corpus], context=context[corpus],\n start=hits[0], end=hits[1], **queryparams)\n else:\n if incremental:\n yield {\"progress_corpora\": list(corpora_hits.keys())}\n\n with ThreadPoolExecutor(max_workers=config.PARALLEL_THREADS) as executor:\n future_query = dict(\n (executor.submit(query_and_parse, corpus, within=within[corpus], context=context[corpus],\n start=corpora_hits[corpus][0], end=corpora_hits[corpus][1], **queryparams),\n corpus)\n for corpus in corpora_hits)\n\n for future in futures.as_completed(future_query):\n corpus = future_query[future]\n if future.exception() is not 
None:\n raise CQPError(future.exception())\n else:\n kwic, _ = future.result()\n corpora_kwics[corpus] = kwic\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: {\"corpus\": corpus,\n \"hits\": corpora_hits[corpus][1] -\n corpora_hits[corpus][0] + 1}}\n ns.progress_count += 1\n\n for corpus in corpora:\n if corpus in corpora_hits.keys():\n result[\"kwic\"].extend(corpora_kwics[corpus])\n else:\n # saved_statistics is missing or incomplete, so we need to query the corpora in\n # serial until we have the needed rows, and then query the remaining corpora\n # in parallel to get number of hits.\n if incremental:\n yield {\"progress_corpora\": corpora}\n ns.progress_count = 0\n ns.rest_corpora = []\n\n # Serial until we've got all the requested rows\n for i, corpus in enumerate(corpora):\n if ns.end_local < 0:\n ns.rest_corpora = corpora[i:]\n break\n skip_corpus = False\n if corpus in saved_statistics:\n nr_hits = saved_statistics[corpus]\n if nr_hits - 1 < ns.start_local:\n kwic = []\n skip_corpus = True\n\n if not skip_corpus:\n kwic, nr_hits = query_and_parse(corpus, within=within[corpus], context=context[corpus],\n start=ns.start_local, end=ns.end_local, **queryparams)\n\n statistics[corpus] = nr_hits\n ns.total_hits += nr_hits\n\n # Calculate which hits from next corpus we need, if any\n ns.start_local -= nr_hits\n ns.end_local -= nr_hits\n if ns.start_local < 0:\n ns.start_local = 0\n\n result[\"kwic\"].extend(kwic)\n\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: {\"corpus\": corpus, \"hits\": nr_hits}}\n ns.progress_count += 1\n\n if incremental:\n yield result\n result = {}\n\n if ns.rest_corpora:\n if saved_statistics:\n for corpus in ns.rest_corpora:\n if corpus in saved_statistics:\n statistics[corpus] = saved_statistics[corpus]\n ns.total_hits += saved_statistics[corpus]\n\n with ThreadPoolExecutor(max_workers=config.PARALLEL_THREADS) as executor:\n future_query = dict(\n (executor.submit(query_corpus, corpus, within=within[corpus],\n context=context[corpus], start=0, end=0, no_results=True, **queryparams),\n corpus)\n for corpus in ns.rest_corpora if corpus not in saved_statistics)\n\n for future in futures.as_completed(future_query):\n corpus = future_query[future]\n if future.exception() is not None:\n raise CQPError(future.exception())\n else:\n _, nr_hits, _ = future.result()\n statistics[corpus] = nr_hits\n ns.total_hits += nr_hits\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: {\"corpus\": corpus, \"hits\": nr_hits}}\n ns.progress_count += 1\n\n if \"debug\" in args:\n debug[\"cqp\"] = cqp\n\n result[\"hits\"] = ns.total_hits\n result[\"corpus_hits\"] = statistics\n result[\"corpus_order\"] = corpora\n result[\"query_data\"] = binascii.b2a_base64(zlib.compress(\n bytes(checksum + \";\" + \";\".join(\"%s:%d\" % (c, h) for c, h in statistics.items()),\n \"utf-8\"))).decode(\"utf-8\").replace(\"+\", \"-\").replace(\"/\", \"_\")\n result[\"querydata\"] = result[\"query_data\"] # For backward compatibility\n\n if debug:\n result[\"DEBUG\"] = debug\n\n yield result\n\n\n@app.route(\"/optimize\", methods=[\"GET\", \"POST\"])\n@main_handler\ndef optimize(args):\n assert_key(\"cqp\", args, r\"\", True)\n\n cqpparams = {\"within\": args.get(\"within\") or \"sentence\"}\n if args.get(\"cut\"):\n cqpparams[\"cut\"] = args[\"cut\"]\n\n free_search = not parse_bool(args, \"in_order\", True)\n\n cqp = args[\"cqp\"]\n result = {\"cqp\": query_optimize(cqp, cqpparams, find_match=False, expand=False, free_search=free_search)}\n yield 
result\n\n\ndef query_optimize(cqp, cqpparams, find_match=True, expand=True, free_search=False):\n \"\"\"Optimize simple queries with multiple words by converting them to MU queries.\n Optimization only works for queries with at least two tokens, or one token preceded\n by one or more wildcards. The query also must use \"within\".\n Return a tuple (return code, query)\n 0 = optimization successful\n 1 = optimization not needed (e.g. single word searches)\n 2 = optimization not possible (e.g. searches with repetition of non-wildcards)\n \"\"\"\n # Split query into tokens\n tokens, rest = parse_cqp(cqp)\n within = cqpparams.get(\"within\")\n\n leading_wildcards = False\n\n # Don't allow wildcards in free searches\n if free_search:\n if any([token.startswith(\"[]\") for token in tokens]):\n raise CQPError(\"Wildcards not allowed in free order query.\")\n else:\n # Remove leading and trailing wildcards since they will only slow us down\n while tokens and tokens[0].startswith(\"[]\"):\n leading_wildcards = True\n del tokens[0]\n while tokens and tokens[-1].startswith(\"[]\"):\n del tokens[-1]\n\n if len(tokens) == 0 or (len(tokens) == 1 and not leading_wildcards):\n # Query doesn't benefit from optimization\n return 1, make_query(make_cqp(cqp, **cqpparams))\n elif rest or not within:\n # Couldn't optimize this query\n return 2, make_query(make_cqp(cqp, **cqpparams))\n\n cmd = [\"MU\"]\n wildcards = {}\n\n for i in range(len(tokens) - 1):\n if tokens[i].startswith(\"[]\"):\n n1 = n2 = None\n if tokens[i] == \"[]\":\n n1 = n2 = 1\n elif re.search(r\"{\\s*(\\d+)\\s*,\\s*(\\d*)\\s*}$\", tokens[i]):\n n = re.search(r\"{\\s*(\\d+)\\s*,\\s*(\\d*)\\s*}$\", tokens[i]).groups()\n n1 = int(n[0])\n n2 = int(n[1]) if n[1] else 9999\n elif re.search(r\"{\\s*(\\d*)\\s*}$\", tokens[i]):\n n1 = n2 = int(re.search(r\"{\\s*(\\d*)\\s*}$\", tokens[i]).groups()[0])\n if n1 is not None:\n wildcards[i] = (n1, n2)\n continue\n elif re.search(r\"{.*?}$\", tokens[i]):\n # Repetition for anything other than wildcards can't be optimized\n return 2, make_query(make_cqp(cqp, **cqpparams))\n cmd[0] += \" (meet %s\" % (tokens[i])\n\n if re.search(r\"{.*?}$\", tokens[-1]):\n # Repetition for anything other than wildcards can't be optimized\n return 2, make_query(make_cqp(cqp, **cqpparams))\n\n cmd[0] += \" %s\" % tokens[-1]\n\n wildcard_range = [1, 1]\n for i in range(len(tokens) - 2, -1, -1):\n if i in wildcards:\n wildcard_range[0] += wildcards[i][0]\n wildcard_range[1] += wildcards[i][1]\n continue\n elif i + 1 in wildcards:\n if wildcard_range[1] >= 9999:\n cmd[0] += \" %s)\" % within\n else:\n cmd[0] += \" %d %d)\" % (wildcard_range[0], wildcard_range[1])\n wildcard_range = [1, 1]\n elif free_search:\n cmd[0] += \" %s)\" % within\n else:\n cmd[0] += \" 1 1)\"\n\n if find_match and not free_search:\n # MU searches only highlight the first keyword of each hit. 
To highlight all keywords we need to\n # do a new non-optimized search within the results, and to be able to do that we first need to expand the rows.\n # Most of the times we only need to expand to the right, except for when leading wildcards are used.\n if leading_wildcards:\n cmd[0] += \" expand to %s;\" % within\n else:\n cmd[0] += \" expand right to %s;\" % within\n cmd += [\"Last;\"]\n cmd += make_query(make_cqp(cqp, **cqpparams))\n elif expand or free_search:\n cmd[0] += \" expand to %s;\" % within\n else:\n cmd[0] += \";\"\n\n return 0, cmd\n\n\ndef query_corpus(corpus, cqp, within=None, cut=None, context=None, show=None, show_structs=None, start=0, end=10,\n sort=None, random_seed=None,\n no_results=False, expand_prequeries=True, free_search=False, use_cache=False):\n if use_cache:\n # Calculate checksum\n # Needs to contain all arguments that may influence the results\n checksum_data = (cqp,\n within,\n cut,\n expand_prequeries,\n free_search)\n\n checksum = get_hash(checksum_data)\n unique_id = str(uuid.uuid4())\n\n cache_query = \"query_data_%s\" % checksum\n cache_query_temp = cache_query + \"_\" + unique_id\n\n cache_filename = os.path.join(config.CACHE_DIR, \"%s:query_data_%s\" % (corpus.split(\"|\")[0], checksum))\n cache_filename_temp = cache_filename + \"_\" + unique_id\n\n cache_size_key = \"%s:query_size_%s\" % (cache_prefix(corpus.split(\"|\")[0]), checksum)\n\n with mc_pool.reserve() as mc:\n cache_hits = mc.get(cache_size_key)\n is_cached = cache_hits is not None and os.path.isfile(cache_filename)\n cached_no_hits = cache_hits == 0\n else:\n is_cached = False\n\n # Optimization\n do_optimize = True\n\n show = show.copy() # To not edit the original\n\n cqpparams = {\"within\": within,\n \"cut\": cut}\n\n # Handle aligned corpora\n if \"|\" in corpus:\n linked = corpus.split(\"|\")\n cqpnew = []\n\n for c in cqp:\n cs = c.split(\"LINKED_CORPUS:\")\n\n # In a multi-language query, the \"within\" argument must be placed directly\n # after the main (first language) query\n if len(cs) > 1 and within:\n cs[0] = \"%s within %s : \" % (cs[0].rstrip()[:-1], within)\n del cqpparams[\"within\"]\n\n c = [cs[0]]\n\n for d in cs[1:]:\n linked_corpora, link_cqp = d.split(None, 1)\n if linked[1] in linked_corpora.split(\"|\"):\n c.append(\"%s %s\" % (linked[1], link_cqp))\n\n cqpnew.append(\"\".join(c).rstrip(\": \"))\n\n cqp = cqpnew\n corpus = linked[0]\n show.add(linked[1].lower())\n\n # Sorting\n if sort == \"left\":\n sortcmd = [\"sort by word on match[-1] .. match[-3];\"]\n elif sort == \"keyword\":\n sortcmd = [\"sort by word;\"]\n elif sort == \"right\":\n sortcmd = [\"sort by word on matchend[1] .. matchend[3];\"]\n elif sort == \"random\":\n sortcmd = [\"sort randomize %s;\" % (random_seed or \"\")]\n elif sort:\n # Sort by positional attribute\n sortcmd = [\"sort by %s;\" % sort]\n else:\n sortcmd = []\n\n # Build the CQP query\n cmd = []\n\n if use_cache:\n cmd += ['set DataDirectory \"%s\";' % config.CACHE_DIR]\n\n cmd += [\"%s;\" % corpus]\n\n # This prints the attributes and their relative order:\n cmd += show_attributes()\n\n retcode = 0\n\n if is_cached:\n # This exact query has been done before. 
Read corpus positions from cache.\n if not cached_no_hits:\n cmd += [\"Last = %s;\" % cache_query]\n # Touch cache file to delay its removal\n os.utime(cache_filename)\n else:\n for i, c in enumerate(cqp):\n cqpparams_temp = cqpparams.copy()\n pre_query = i + 1 < len(cqp)\n\n if pre_query and expand_prequeries:\n cqpparams_temp[\"expand\"] = \"to \" + within\n\n if free_search:\n retcode, free_query = query_optimize(c, cqpparams_temp, free_search=True)\n if retcode == 2:\n raise CQPError(\"Couldn't convert into free order query.\")\n cmd += free_query\n elif do_optimize and expand_prequeries:\n # If expand_prequeries is False, we can't use optimization\n cmd += query_optimize(c, cqpparams_temp, find_match=(not pre_query))[1]\n else:\n cmd += make_query(make_cqp(c, **cqpparams_temp))\n\n if pre_query:\n cmd += [\"Last;\"]\n\n if use_cache and cached_no_hits:\n # Print EOL if no hits\n cmd += [\".EOL.;\"]\n else:\n # This prints the size of the query (i.e., the number of results):\n cmd += [\"size Last;\"]\n\n if use_cache and not is_cached:\n cmd += [\"%s = Last; save %s;\" % (cache_query_temp, cache_query_temp)]\n\n if not no_results and not (use_cache and cached_no_hits):\n if free_search and retcode == 0:\n tokens, _ = parse_cqp(cqp[-1])\n cmd += [\"Last;\"]\n cmd += [\"cut %s %s;\" % (start, end)]\n cmd += make_query(make_cqp(\"(%s)\" % \" | \".join(set(tokens)), **cqpparams))\n\n cmd += [\"show +%s;\" % \" +\".join(show)]\n if len(context) == 1:\n cmd += [\"set Context %s;\" % context[0]]\n else:\n cmd += [\"set LeftContext %s;\" % context[0]]\n cmd += [\"set RightContext %s;\" % context[1]]\n cmd += [\"set LeftKWICDelim '%s '; set RightKWICDelim ' %s';\" % (LEFT_DELIM, RIGHT_DELIM)]\n if show_structs:\n cmd += [\"set PrintStructures '%s';\" % \", \".join(show_structs)]\n cmd += [\"set ExternalSort yes;\"]\n cmd += sortcmd\n if free_search:\n cmd += [\"cat Last;\"]\n else:\n cmd += [\"cat Last %s %s;\" % (start, end)]\n\n cmd += [\"exit;\"]\n\n ######################################################################\n # Then we call the CQP binary, and read the results\n\n lines = run_cqp(cmd, attr_ignore=True)\n\n # Skip the CQP version\n next(lines)\n\n # Read the attributes and their relative order\n attrs = read_attributes(lines)\n\n # Read the size of the query, i.e., the number of results\n nr_hits = next(lines)\n nr_hits = 0 if nr_hits == END_OF_LINE else int(nr_hits)\n\n if use_cache and not is_cached and not cached_no_hits:\n # Save number of hits\n with mc_pool.reserve() as mc:\n mc.add(cache_size_key, nr_hits)\n\n try:\n os.rename(cache_filename_temp, cache_filename)\n except FileNotFoundError:\n pass\n\n return lines, nr_hits, attrs\n\n\ndef query_parse_lines(corpus, lines, attrs, show, show_structs, free_matches=False):\n \"\"\"Parse concordance lines from CWB.\"\"\"\n\n # Filter out unavailable attributes\n p_attrs = [attr for attr in attrs[\"p\"] if attr in show]\n nr_splits = len(p_attrs) - 1\n s_attrs = set(attr for attr in attrs[\"s\"] if attr in show)\n ls_attrs = set(attr for attr in attrs[\"s\"] if attr in show_structs)\n # a_attrs = set(attr for attr in attrs[\"a\"] if attr in shown)\n\n last_line_span = ()\n\n kwic = []\n for line in lines:\n linestructs = {}\n match = {}\n\n header, line = line.split(\":\", 1)\n if header[:3] == \"-->\":\n # For aligned corpora, every other line is the aligned result\n aligned = header[3:]\n else:\n # This is the result row for the query corpus\n aligned = None\n match[\"position\"] = int(header)\n\n # Handle PrintStructures\n if 
ls_attrs and not aligned:\n if \": \" in line:\n lineattr, line = line.rsplit(\": \", 1)\n else:\n # Sometimes, depending on context, CWB uses only one space instead of two as a separator\n lineattr, line = line.split(\">: \", 1)\n lineattr += \">\"\n\n lineattrs = lineattr[2:-1].split(\"><\")\n\n # Handle \"><\" in attribute values\n if not len(lineattrs) == len(ls_attrs):\n new_lineattrs = []\n for la in lineattrs:\n if not la.split(\" \", 1)[0] in ls_attrs:\n new_lineattrs[-1] += \"><\" + la\n else:\n new_lineattrs.append(la)\n lineattrs = new_lineattrs\n\n for s in lineattrs:\n if s in ls_attrs:\n s_key = s\n s_val = None\n else:\n s_key, s_val = s.split(\" \", 1)\n\n linestructs[s_key] = s_val\n\n words = line.split()\n tokens = []\n n = 0\n structs = defaultdict(list)\n struct = None\n struct_value = []\n\n try:\n for word in words:\n if struct:\n # Structural attrs can be split in the middle (<s_name attr_name=\"attr value\">),\n # so we need to finish the structure here\n if \">\" not in word:\n struct_value.append(word)\n continue\n\n struct_v, word = word.split(\">\", 1)\n structs[\"open\"].append(struct + \" \" + \" \".join(struct_value + [struct_v]))\n struct = None\n struct_value = []\n\n # We use special delimiters to see when we enter and leave the match region\n if word == LEFT_DELIM:\n match[\"start\"] = n\n continue\n elif word == RIGHT_DELIM:\n match[\"end\"] = n\n continue\n\n # We read all structural attributes that are opening (from the left)\n while word[0] == \"<\":\n if word[1:] in s_attrs:\n # We have found a structural attribute with a value (<s_name attr_name=\"attr value\">).\n # We continue to the next word to get the value\n struct = word[1:]\n break\n elif \">\" in word and word[1:word.find(\">\")] in s_attrs:\n # We have found a structural attribute without a value (<s_name>)\n struct, word = word[1:].split(\">\", 1)\n structs[\"open\"].append(struct)\n struct = None\n else:\n # What we've found is not a structural attribute\n break\n\n if struct:\n # If we stopped in the middle of a struct (<s_name attr_name=\"attr value\">),\n # we need to continue with the next word\n continue\n\n # Now we read all s-attrs that are closing (from the right)\n while word[-1] == \">\" and \"</\" in word:\n word, struct_close = word.rsplit(\"</\", 1)\n structs[\"close\"].insert(0, struct_close[:-1])\n\n # What remains is the token itself, with its p-attrs separated by \"/\"\n values = word.rsplit(\"/\", nr_splits)\n token = dict(zip(p_attrs, values))\n if structs:\n token[\"structs\"] = structs\n structs = defaultdict(list)\n tokens.append(token)\n\n n += 1\n except IndexError:\n # Attributes containing \">\" or \"<\" can make some lines unparseable. 
We skip them\n # until we come up with better a solution.\n continue\n\n if aligned:\n # If this was an aligned row, we add it to the previous kwic row\n if words != [\"(no\", \"alignment\", \"found)\"]:\n kwic[-1].setdefault(\"aligned\", {})[aligned] = tokens\n else:\n if \"start\" not in match:\n # TODO: CQP bug - CQP can't handle too long sentences, skipping\n continue\n # Otherwise we add a new kwic row\n kwic_row = {\"corpus\": corpus, \"match\": match if not free_matches else [match]}\n if linestructs:\n kwic_row[\"structs\"] = linestructs\n kwic_row[\"tokens\"] = tokens\n\n if free_matches:\n line_span = (match[\"position\"] - match[\"start\"], match[\"position\"] - match[\"start\"] + len(tokens) - 1)\n if line_span == last_line_span:\n kwic[-1][\"match\"].append(match)\n else:\n kwic.append(kwic_row)\n last_line_span = line_span\n else:\n kwic.append(kwic_row)\n\n return kwic\n\n\ndef query_and_parse(corpus, cqp, within=None, cut=None, context=None, show=None, show_structs=None, start=0, end=10,\n sort=None, random_seed=None, no_results=False, expand_prequeries=True, free_search=False,\n use_cache=False):\n lines, nr_hits, attrs = query_corpus(corpus, cqp, within, cut, context, show, show_structs, start, end, sort,\n random_seed, no_results, expand_prequeries, free_search, use_cache)\n kwic = query_parse_lines(corpus, lines, attrs, show, show_structs, free_matches=free_search)\n return kwic, nr_hits\n\n\ndef which_hits(corpora, stats, start, end):\n corpus_hits = {}\n for corpus in corpora:\n hits = stats[corpus]\n if hits > start:\n corpus_hits[corpus] = (start, min(hits - 1, end))\n\n start -= hits\n end -= hits\n if start < 0:\n start = 0\n if end < 0:\n break\n\n return corpus_hits\n\n\n@app.route(\"/struct_values\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef struct_values(args):\n \"\"\"Get all available values for one or more structural attributes.\"\"\"\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key(\"struct\", args, re.compile(r\"^[\\w_\\d,>]+$\"), True)\n assert_key(\"incremental\", args, r\"(true|false)\")\n\n incremental = parse_bool(args, \"incremental\", False)\n include_count = parse_bool(args, \"count\", False)\n\n per_corpus = parse_bool(args, \"per_corpus\", True)\n combined = parse_bool(args, \"combined\", True)\n corpora = parse_corpora(args)\n check_authentication(corpora)\n\n structs = args.get(\"struct\")\n if isinstance(structs, str):\n structs = structs.split(QUERY_DELIM)\n\n split = args.get(\"split\", \"\")\n if isinstance(split, str):\n split = split.split(QUERY_DELIM)\n\n ns = Namespace() # To make variables writable from nested functions\n\n result = {\"corpora\": defaultdict(dict), \"combined\": {}}\n total_stats = defaultdict(set)\n\n from_cache = set() # Keep track of what has been read from cache\n\n if args[\"cache\"]:\n all_cache = True\n for corpus in corpora:\n for struct in structs:\n checksum = get_hash((corpus, struct, split, include_count))\n with mc_pool.reserve() as mc:\n data = mc.get(\"%s:struct_values_%s\" % (cache_prefix(corpus), checksum))\n if data is not None:\n result[\"corpora\"].setdefault(corpus, {})\n result[\"corpora\"][corpus][struct] = data\n if \"debug\" in args:\n result.setdefault(\"DEBUG\", {\"caches_read\": []})\n result[\"DEBUG\"][\"caches_read\"].append(\"%s:%s\" % (corpus, struct))\n from_cache.add((corpus, struct))\n else:\n all_cache = False\n else:\n all_cache = False\n\n if not all_cache:\n ns.progress_count = 0\n if incremental:\n yield ({\"progress_corpora\": 
list(corpora)})\n\n with ThreadPoolExecutor(max_workers=config.PARALLEL_THREADS) as executor:\n future_query = dict((executor.submit(count_query_worker_simple, corpus, cqp=None,\n group_by=[(s, True) for s in struct.split(\">\")],\n use_cache=args[\"cache\"]), (corpus, struct))\n for corpus in corpora for struct in structs if not (corpus, struct) in from_cache)\n\n for future in futures.as_completed(future_query):\n corpus, struct = future_query[future]\n if future.exception() is not None:\n raise CQPError(future.exception())\n else:\n lines, nr_hits, corpus_size = future.result()\n\n corpus_stats = {} if include_count else set()\n vals_dict = {}\n struct_list = struct.split(\">\")\n\n for line in lines:\n freq, val = line.lstrip().split(\" \", 1)\n\n if \">\" in struct:\n vals = val.split(\"\\t\")\n\n if split:\n vals = [[x for x in n.split(\"|\") if x] if struct_list[i] in split and n else [n] for\n i, n in enumerate(vals)]\n vals_prod = itertools.product(*vals)\n else:\n vals_prod = [vals]\n\n for val in vals_prod:\n prev = vals_dict\n for i, n in enumerate(val):\n if include_count and i == len(val) - 1:\n prev.setdefault(n, 0)\n prev[n] += int(freq)\n break\n elif not include_count and i == len(val) - 1:\n prev.append(n)\n break\n elif not include_count and i == len(val) - 2:\n prev.setdefault(n, [])\n else:\n prev.setdefault(n, {})\n prev = prev[n]\n else:\n if struct in split:\n vals = [x for x in val.split(\"|\") if x] if val else [\"\"]\n else:\n vals = [val]\n for val in vals:\n if include_count:\n corpus_stats[val] = int(freq)\n else:\n corpus_stats.add(val)\n\n if \">\" in struct:\n result[\"corpora\"][corpus][struct] = vals_dict\n elif corpus_stats:\n result[\"corpora\"][corpus][struct] = corpus_stats if include_count else sorted(corpus_stats)\n\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: corpus}\n ns.progress_count += 1\n\n def merge(d1, d2):\n merged = deepcopy(d1)\n for key in d2:\n if key in d1:\n if isinstance(d1[key], dict) and isinstance(d2[key], dict):\n merged[key] = merge(d1[key], d2[key])\n elif isinstance(d1[key], int):\n merged[key] += d2[key]\n elif isinstance(d1[key], list):\n merged[key].extend(d2[key])\n merged[key] = sorted(set(merged[key]))\n else:\n merged[key] = d2[key]\n return merged\n\n if combined:\n for corpus in result[\"corpora\"]:\n result[\"combined\"] = merge(result[\"combined\"], result[\"corpora\"][corpus])\n else:\n del result[\"combined\"]\n\n if args[\"cache\"] and not all_cache:\n for corpus in corpora:\n for struct in structs:\n if (corpus, struct) in from_cache:\n continue\n checksum = get_hash((corpus, struct, split, include_count))\n cache_key = \"%s:struct_values_%s\" % (cache_prefix(corpus), checksum)\n try:\n with mc_pool.reserve() as mc:\n mc.add(cache_key, result[\"corpora\"][corpus].get(struct, {}))\n except pylibmc.TooBig:\n pass\n else:\n if \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"].setdefault(\"caches_saved\", [])\n result[\"DEBUG\"][\"caches_saved\"].append(\"%s:%s\" % (corpus, struct))\n\n if not per_corpus:\n del result[\"corpora\"]\n\n yield result\n\n\n################################################################################\n# COUNT\n################################################################################\n\n@app.route(\"/count\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef count(args):\n \"\"\"Perform a CQP query and return a count of the given words/attributes.\"\"\"\n assert_key(\"cqp\", args, r\"\", True)\n assert_key(\"corpus\", 
args, IS_IDENT, True)\n assert_key(\"group_by\", args, IS_IDENT, False)\n assert_key(\"group_by_struct\", args, IS_IDENT, False)\n assert_key(\"cut\", args, IS_NUMBER)\n assert_key(\"ignore_case\", args, IS_IDENT)\n assert_key(\"incremental\", args, r\"(true|false)\")\n\n incremental = parse_bool(args, \"incremental\", False)\n\n corpora = parse_corpora(args)\n check_authentication(corpora)\n\n group_by = args.get(\"group_by\", args.get(\"groupby\")) or []\n if isinstance(group_by, str):\n group_by = sorted(set(group_by.split(QUERY_DELIM)))\n\n group_by_struct = args.get(\"group_by_struct\", args.get(\"groupby_struct\")) or []\n if isinstance(group_by_struct, str):\n group_by_struct = sorted(set(group_by_struct.split(QUERY_DELIM)))\n\n if not group_by and not group_by_struct:\n group_by = [\"word\"]\n\n group_by = [(g, False) for g in group_by] + [(g, True) for g in group_by_struct]\n\n ignore_case = args.get(\"ignore_case\") or []\n if isinstance(ignore_case, str):\n ignore_case = ignore_case.split(QUERY_DELIM)\n ignore_case = set(ignore_case)\n\n within = parse_within(args)\n\n relative_to_struct = args.get(\"relative_to_struct\") or []\n if isinstance(relative_to_struct, str):\n relative_to_struct = sorted(set(relative_to_struct.split(QUERY_DELIM)))\n assert all(r in group_by_struct for r in\n relative_to_struct), \"All 'relative_to_struct' values also need to be present in 'group_by_struct'.\"\n\n relative_to = [(r, True) for r in relative_to_struct]\n\n start = int(args.get(\"start\") or 0)\n end = int(args.get(\"end\") or -1)\n\n split = args.get(\"split\") or []\n if isinstance(split, str):\n split = split.split(QUERY_DELIM)\n\n strip_pointer = args.get(\"strip_pointer\", \"\")\n if isinstance(strip_pointer, str):\n strip_pointer = strip_pointer.split(QUERY_DELIM)\n\n top = args.get(\"top\", \"\")\n if isinstance(top, str):\n if \":\" in top:\n top = dict((x.split(\":\")[0], int(x.split(\":\")[1])) for x in top.split(QUERY_DELIM))\n else:\n top = dict((x, 1) for x in top.split(QUERY_DELIM))\n\n expand_prequeries = parse_bool(args, \"expand_prequeries\", True)\n\n # Sort numbered CQP-queries numerically\n cqp, subcqp = parse_cqp_subcqp(args)\n\n if len(cqp) > 1 and expand_prequeries and not all(within[c] for c in corpora):\n raise ValueError(\"Multiple CQP queries requires 'within' or 'expand_prequeries=false'\")\n\n if subcqp:\n cqp.append(subcqp)\n\n simple = parse_bool(args, \"simple\", False)\n\n if cqp == [\"[]\"]:\n simple = True\n\n result = {\"corpora\": {}}\n debug = {}\n zero_hits = []\n read_from_cache = 0\n\n if args[\"cache\"]:\n for corpus in corpora:\n corpus_checksum = get_hash((cqp,\n group_by,\n within[corpus],\n sorted(ignore_case),\n relative_to,\n expand_prequeries))\n\n with mc_pool.reserve() as mc:\n cached_size = mc.get(\"%s:count_size_%s\" % (cache_prefix(corpus), corpus_checksum))\n if cached_size is not None:\n nr_hits = cached_size[0]\n read_from_cache += 1\n if nr_hits == 0:\n zero_hits.append(corpus)\n\n if \"debug\" in args:\n debug[\"cache_coverage\"] = \"%d/%d\" % (read_from_cache, len(corpora))\n\n total_stats = [{\"absolute\": defaultdict(int),\n \"relative\": defaultdict(float),\n \"sums\": {\"absolute\": 0, \"relative\": 0.0}} for i in range(len(subcqp) + 1)]\n\n ns = Namespace() # To make variables writable from nested functions\n ns.total_size = 0\n\n if relative_to:\n relative_args = {\n \"cqp\": \"[]\",\n \"corpus\": args.get(\"corpus\"),\n \"group_by_struct\": relative_to_struct,\n \"split\": split\n }\n\n relative_to_result = 
generator_to_dict(count(relative_args))\n relative_to_freqs = {\"total\": {}, \"corpora\": defaultdict(dict)}\n\n for row in relative_to_result[\"total\"][\"absolute\"]:\n relative_to_freqs[\"total\"][tuple(v for k, v in sorted(row[\"value\"].items()))] = row[\"freq\"]\n\n for corpus in relative_to_result[\"corpora\"]:\n for row in relative_to_result[\"corpora\"][corpus][\"absolute\"]:\n relative_to_freqs[\"corpora\"][corpus][tuple(v for k, v in sorted(row[\"value\"].items()))] = row[\"freq\"]\n\n count_function = count_query_worker if not simple else count_query_worker_simple\n\n ns.progress_count = 0\n if incremental:\n yield {\"progress_corpora\": list(c for c in corpora if c not in zero_hits)}\n\n for corpus in zero_hits:\n result[\"corpora\"][corpus] = [{\"absolute\": {},\n \"relative\": {},\n \"sums\": {\"absolute\": 0, \"relative\": 0.0}} for i in range(len(subcqp) + 1)]\n for i in range(len(subcqp)):\n result[\"corpora\"][corpus][i + 1][\"cqp\"] = subcqp[i]\n\n with ThreadPoolExecutor(max_workers=config.PARALLEL_THREADS) as executor:\n future_query = dict((executor.submit(count_function, corpus=corpus, cqp=cqp, group_by=group_by,\n within=within[corpus], ignore_case=ignore_case,\n expand_prequeries=expand_prequeries,\n use_cache=args[\"cache\"]), corpus)\n for corpus in corpora if corpus not in zero_hits)\n\n for future in futures.as_completed(future_query):\n corpus = future_query[future]\n if future.exception() is not None:\n raise CQPError(future.exception())\n else:\n lines, nr_hits, corpus_size = future.result()\n\n ns.total_size += corpus_size\n corpus_stats = [{\"absolute\": defaultdict(int),\n \"relative\": defaultdict(float),\n \"sums\": {\"absolute\": 0, \"relative\": 0.0}} for i in range(len(subcqp) + 1)]\n\n query_no = 0\n for line in lines:\n if line == END_OF_LINE:\n # EOL means the start of a new subcqp result\n query_no += 1\n if subcqp:\n corpus_stats[query_no][\"cqp\"] = subcqp[query_no - 1]\n continue\n freq, ngram = line.lstrip().split(\" \", 1)\n\n if len(group_by) > 1:\n ngram_groups = ngram.split(\"\\t\")\n else:\n ngram_groups = [ngram]\n\n all_ngrams = []\n relative_to_pos = []\n\n for i, ngram in enumerate(ngram_groups):\n # Split value sets and treat each value as a hit\n if group_by[i][0] in split:\n tokens = [t + \"|\" for t in ngram.split(\n \"| \")] # We can't split on just space due to spaces in annotations\n tokens[-1] = tokens[-1][:-1]\n if group_by[i][0] in top:\n split_tokens = [[x for x in token.split(\"|\") if x][:top[group_by[i][0]]]\n if not token == \"|\" else [\"|\"] for token in tokens]\n else:\n split_tokens = [[x for x in token.split(\"|\") if x] if not token == \"|\" else [\"\"]\n for token in tokens]\n ngrams = itertools.product(*split_tokens)\n ngrams = tuple(x for x in ngrams)\n else:\n if not group_by[i][1]:\n ngrams = (tuple(ngram.split(\" \")),)\n else:\n ngrams = (ngram,)\n\n # Remove multi word pointers\n if group_by[i][0] in strip_pointer:\n for j in range(len(ngrams)):\n for k in range(len(ngrams[j])):\n if \":\" in ngrams[j][k]:\n ngramtemp, pointer = ngrams[j][k].rsplit(\":\", 1)\n if pointer.isnumeric():\n ngrams[j][k] = ngramtemp\n\n all_ngrams.append(ngrams)\n\n if relative_to and group_by[i] in relative_to:\n relative_to_pos.append(i)\n\n cross = list(itertools.product(*all_ngrams))\n\n for ngram in cross:\n corpus_stats[query_no][\"absolute\"][ngram] += int(freq)\n corpus_stats[query_no][\"sums\"][\"absolute\"] += int(freq)\n total_stats[query_no][\"absolute\"][ngram] += int(freq)\n 
total_stats[query_no][\"sums\"][\"absolute\"] += int(freq)\n\n if relative_to:\n relativeto_ngram = tuple(ngram[pos] for pos in relative_to_pos)\n corpus_stats[query_no][\"relative\"][ngram] += int(freq) / float(\n relative_to_freqs[\"corpora\"][corpus][relativeto_ngram]) * 1000000\n corpus_stats[query_no][\"sums\"][\"relative\"] += int(freq) / float(\n relative_to_freqs[\"corpora\"][corpus][relativeto_ngram]) * 1000000\n total_stats[query_no][\"relative\"][ngram] += int(freq) / float(\n relative_to_freqs[\"total\"][relativeto_ngram]) * 1000000\n else:\n corpus_stats[query_no][\"relative\"][ngram] += int(freq) / float(corpus_size) * 1000000\n corpus_stats[query_no][\"sums\"][\"relative\"] += int(freq) / float(corpus_size) * 1000000\n\n result[\"corpora\"][corpus] = corpus_stats\n\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: corpus}\n ns.progress_count += 1\n\n result[\"count\"] = len(total_stats[0][\"absolute\"])\n\n # Calculate relative numbers for the total\n for query_no in range(len(subcqp) + 1):\n if end > -1 and (start > 0 or len(total_stats[0][\"absolute\"]) > (end - start) + 1):\n # Only a selected range of results requested\n total_stats[query_no][\"absolute\"] = dict(\n sorted(total_stats[query_no][\"absolute\"].items(), key=lambda x: x[1], reverse=True)[start:end + 1])\n\n for corpus in corpora:\n result[\"corpora\"][corpus][query_no][\"absolute\"] = {k: v for k, v in result[\"corpora\"][corpus][query_no][\n \"absolute\"].items() if k in total_stats[query_no][\"absolute\"]}\n result[\"corpora\"][corpus][query_no][\"relative\"] = {k: v for k, v in result[\"corpora\"][corpus][query_no][\n \"relative\"].items() if k in total_stats[query_no][\"absolute\"]}\n\n if not relative_to:\n for ngram, freq in total_stats[query_no][\"absolute\"].items():\n total_stats[query_no][\"relative\"][ngram] = freq / float(ns.total_size) * 1000000\n\n for corpus in corpora:\n for relabs in (\"absolute\", \"relative\"):\n new_list = []\n for ngram, freq in result[\"corpora\"][corpus][query_no][relabs].items():\n row = {\"value\": {key[0]: ngram[i] for i, key in enumerate(group_by)},\n \"freq\": freq}\n new_list.append(row)\n result[\"corpora\"][corpus][query_no][relabs] = new_list\n\n total_stats[query_no][\"sums\"][\"relative\"] = (total_stats[query_no][\"sums\"][\"absolute\"] / float(ns.total_size)\n * 1000000 if ns.total_size > 0 else 0.0)\n\n if subcqp and query_no > 0:\n total_stats[query_no][\"cqp\"] = subcqp[query_no - 1]\n\n for relabs in (\"absolute\", \"relative\"):\n new_list = []\n for ngram, freq in total_stats[query_no][relabs].items():\n row = {\"value\": dict((key[0], ngram[i]) for i, key in enumerate(group_by)),\n \"freq\": freq}\n new_list.append(row)\n total_stats[query_no][relabs] = new_list\n\n result[\"total\"] = total_stats if len(total_stats) > 1 else total_stats[0]\n\n if not subcqp:\n for corpus in corpora:\n result[\"corpora\"][corpus] = result[\"corpora\"][corpus][0]\n\n if \"debug\" in args:\n debug.update({\"cqp\": cqp, \"simple\": simple})\n result[\"DEBUG\"] = debug\n\n yield result\n\n\n@app.route(\"/count_all\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef count_all(args):\n \"\"\"Like /count but for every single value of the given attributes.\"\"\"\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key((\"group_by\", \"group_by_struct\", \"groupby\"), args, IS_IDENT, True)\n assert_key(\"cut\", args, IS_NUMBER)\n assert_key(\"ignore_case\", args, IS_IDENT)\n assert_key(\"incremental\", args, r\"(true|false)\")\n\n 
args[\"cqp\"] = \"[]\" # Dummy value, not used\n args[\"simple\"] = \"true\"\n\n yield generator_to_dict(count(args))\n\n\ndef remap_keys(mapping):\n return [{'key': k, 'value': v} for k, v in mapping.items()]\n\n\ndef strptime(date):\n \"\"\"Take a date in string format and return a datetime object.\n Input must be on the format \"YYYYMMDDhhmmss\".\n We need this since the built in strptime isn't thread safe (and this is much faster).\"\"\"\n year = int(date[:4])\n month = int(date[4:6]) if len(date) > 4 else 1\n day = int(date[6:8]) if len(date) > 6 else 1\n hour = int(date[8:10]) if len(date) > 8 else 0\n minute = int(date[10:12]) if len(date) > 10 else 0\n second = int(date[12:14]) if len(date) > 12 else 0\n return datetime.datetime(year, month, day, hour, minute, second)\n\n\n@app.route(\"/count_time\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef count_time(args):\n \"\"\"Count occurrences per time period.\"\"\"\n assert_key(\"cqp\", args, r\"\", True)\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key(\"cut\", args, IS_NUMBER)\n assert_key(\"incremental\", args, r\"(true|false)\")\n assert_key(\"granularity\", args, r\"[ymdhnsYMDHNS]\")\n assert_key(\"from\", args, r\"^\\d{14}$\")\n assert_key(\"to\", args, r\"^\\d{14}$\")\n assert_key(\"strategy\", args, r\"^[123]$\")\n\n incremental = parse_bool(args, \"incremental\", False)\n\n corpora = parse_corpora(args)\n check_authentication(corpora)\n within = parse_within(args)\n expand_prequeries = parse_bool(args, \"expand_prequeries\", True)\n\n # Sort numbered CQP-queries numerically\n cqp, subcqp = parse_cqp_subcqp(args)\n\n if len(cqp) > 1 and expand_prequeries and not all(within[c] for c in corpora):\n raise ValueError(\"Multiple CQP queries requires 'within' or 'expand_prequeries=false'\")\n\n if subcqp:\n cqp.append(subcqp)\n granularity = (args.get(\"granularity\") or \"y\").lower()\n fromdate = args.get(\"from\", \"\")\n todate = args.get(\"to\", \"\")\n\n # Check that we have a suitable date range for the selected granularity\n df = None\n dt = None\n\n if fromdate or todate:\n if not fromdate or not todate:\n raise ValueError(\"When using 'from' or 'to', both need to be specified.\")\n\n # Get date range of selected corpora\n corpus_data = corpus_info({\"corpus\": QUERY_DELIM.join(corpora), \"cache\": args[\"cache\"]}, no_combined_cache=True)\n corpora_copy = corpora.copy()\n\n if fromdate and todate:\n df = strptime(fromdate)\n dt = strptime(todate)\n\n # Remove corpora not within selected date span\n for c in corpus_data[\"corpora\"]:\n firstdate = corpus_data[\"corpora\"][c][\"info\"].get(\"FirstDate\")\n lastdate = corpus_data[\"corpora\"][c][\"info\"].get(\"LastDate\")\n if firstdate and lastdate:\n firstdate = strptime(firstdate.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\"))\n lastdate = strptime(lastdate.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\"))\n\n if not (firstdate <= dt and lastdate >= df):\n corpora.remove(c)\n else:\n # If no date range was provided, use whole date range of the selected corpora\n for c in corpus_data[\"corpora\"]:\n firstdate = corpus_data[\"corpora\"][c][\"info\"].get(\"FirstDate\")\n lastdate = corpus_data[\"corpora\"][c][\"info\"].get(\"LastDate\")\n if firstdate and lastdate:\n firstdate = strptime(firstdate.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\"))\n lastdate = strptime(lastdate.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\"))\n\n if not df or firstdate < df:\n df = firstdate\n if not dt 
or lastdate > dt:\n dt = lastdate\n\n if df and dt:\n maxpoints = 3600\n\n if granularity == \"y\":\n add = relativedelta(years=maxpoints)\n elif granularity == \"m\":\n add = relativedelta(months=maxpoints)\n elif granularity == \"d\":\n add = relativedelta(days=maxpoints)\n elif granularity == \"h\":\n add = relativedelta(hours=maxpoints)\n elif granularity == \"n\":\n add = relativedelta(minutes=maxpoints)\n elif granularity == \"s\":\n add = relativedelta(seconds=maxpoints)\n\n if dt > (df + add):\n raise ValueError(\"The date range is too large for the selected granularity. \"\n \"Use 'to' and 'from' to limit the range.\")\n\n strategy = int(args.get(\"strategy\") or 1)\n\n if granularity in \"hns\":\n group_by = [(v, True) for v in (\"text_datefrom\", \"text_timefrom\", \"text_dateto\", \"text_timeto\")]\n else:\n group_by = [(v, True) for v in (\"text_datefrom\", \"text_dateto\")]\n\n result = {\"corpora\": {}}\n corpora_sizes = {}\n\n ns = Namespace()\n total_rows = [[] for i in range(len(subcqp) + 1)]\n ns.total_size = 0\n\n ns.progress_count = 0\n if incremental:\n yield {\"progress_corpora\": corpora}\n\n with ThreadPoolExecutor(max_workers=config.PARALLEL_THREADS) as executor:\n future_query = dict((executor.submit(count_query_worker, corpus=corpus, cqp=cqp, group_by=group_by,\n within=within[corpus],\n expand_prequeries=expand_prequeries,\n use_cache=args[\"cache\"]), corpus)\n for corpus in corpora)\n\n for future in futures.as_completed(future_query):\n corpus = future_query[future]\n if future.exception() is not None:\n if \"Can't find attribute ``text_datefrom''\" not in str(future.exception()):\n raise CQPError(future.exception())\n else:\n lines, _, corpus_size = future.result()\n\n corpora_sizes[corpus] = corpus_size\n ns.total_size += corpus_size\n\n query_no = 0\n for line in lines:\n if line == END_OF_LINE:\n query_no += 1\n continue\n count, values = line.lstrip().split(\" \", 1)\n values = values.strip(\" \")\n if granularity in \"hns\":\n datefrom, timefrom, dateto, timeto = values.split(\"\\t\")\n # Only use the value from the first token\n timefrom = timefrom.split(\" \")[0]\n timeto = timeto.split(\" \")[0]\n else:\n datefrom, dateto = values.split(\"\\t\")\n timefrom = \"\"\n timeto = \"\"\n\n # Only use the value from the first token\n datefrom = datefrom.split(\" \")[0]\n dateto = dateto.split(\" \")[0]\n\n total_rows[query_no].append({\"corpus\": corpus, \"df\": datefrom + timefrom, \"dt\": dateto + timeto,\n \"sum\": int(count)})\n\n if incremental:\n yield {\"progress_%d\" % ns.progress_count: corpus}\n ns.progress_count += 1\n\n corpus_timedata = generator_to_dict(timespan({\"corpus\": corpora, \"granularity\": granularity, \"from\": fromdate,\n \"to\": todate, \"strategy\": str(strategy), \"cache\": args[\"cache\"]},\n no_combined_cache=True))\n search_timedata = []\n search_timedata_combined = []\n for total_row in total_rows:\n temp = timespan_calculator(total_row, granularity=granularity, strategy=strategy)\n search_timedata.append(temp[\"corpora\"])\n search_timedata_combined.append(temp[\"combined\"])\n\n for corpus in corpora:\n corpus_stats = [{\"absolute\": defaultdict(int),\n \"relative\": defaultdict(float),\n \"sums\": {\"absolute\": 0, \"relative\": 0.0}} for i in range(len(subcqp) + 1)]\n\n basedates = dict([(date, None if corpus_timedata[\"corpora\"][corpus][date] == 0 else 0)\n for date in corpus_timedata[\"corpora\"].get(corpus, {})])\n\n for i, s in enumerate(search_timedata):\n\n prevdate = None\n for basedate in sorted(basedates):\n 
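# Pre-fill the result with placeholder points from basedates (0 for dates the corpus has data for,\n # None for dates with no tokens at all), adding a point only where the value changes between dates.\n 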
if not basedates[basedate] == prevdate:\n corpus_stats[i][\"absolute\"][basedate] = basedates[basedate]\n corpus_stats[i][\"relative\"][basedate] = basedates[basedate]\n prevdate = basedates[basedate]\n\n for row in s.get(corpus, {}).items():\n date, count = row\n corpus_date_size = float(corpus_timedata[\"corpora\"].get(corpus, {}).get(date, 0))\n if corpus_date_size > 0.0:\n corpus_stats[i][\"absolute\"][date] += count\n corpus_stats[i][\"relative\"][date] += (count / corpus_date_size * 1000000)\n corpus_stats[i][\"sums\"][\"absolute\"] += count\n corpus_stats[i][\"sums\"][\"relative\"] += (count / corpus_date_size * 1000000)\n\n if subcqp and i > 0:\n corpus_stats[i][\"cqp\"] = subcqp[i - 1]\n\n result[\"corpora\"][corpus] = corpus_stats if len(corpus_stats) > 1 else corpus_stats[0]\n\n total_stats = [{\"absolute\": defaultdict(int),\n \"relative\": defaultdict(float),\n \"sums\": {\"absolute\": 0, \"relative\": 0.0}} for i in range(len(subcqp) + 1)]\n\n basedates = dict([(date, None if corpus_timedata[\"combined\"][date] == 0 else 0)\n for date in corpus_timedata.get(\"combined\", {})])\n\n for i, s in enumerate(search_timedata_combined):\n\n prevdate = None\n for basedate in sorted(basedates):\n if not basedates[basedate] == prevdate:\n total_stats[i][\"absolute\"][basedate] = basedates[basedate]\n total_stats[i][\"relative\"][basedate] = basedates[basedate]\n prevdate = basedates[basedate]\n\n if s:\n for row in s.items():\n date, count = row\n combined_date_size = float(corpus_timedata[\"combined\"].get(date, 0))\n if combined_date_size > 0.0:\n total_stats[i][\"absolute\"][date] += count\n total_stats[i][\"relative\"][date] += (\n count / combined_date_size * 1000000) if combined_date_size else 0\n total_stats[i][\"sums\"][\"absolute\"] += count\n\n total_stats[i][\"sums\"][\"relative\"] = total_stats[i][\"sums\"][\"absolute\"] / float(\n ns.total_size) * 1000000 if ns.total_size > 0 else 0.0\n if subcqp and i > 0:\n total_stats[i][\"cqp\"] = subcqp[i - 1]\n\n result[\"combined\"] = total_stats if len(total_stats) > 1 else total_stats[0]\n\n # Add zero values for the corpora we removed because of the selected date span\n for corpus in set(corpora_copy).difference(set(corpora)):\n result[\"corpora\"][corpus] = {\"absolute\": 0, \"relative\": 0.0, \"sums\": {\"absolute\": 0, \"relative\": 0.0}}\n\n if \"debug\" in args:\n result[\"DEBUG\"] = {\"cqp\": cqp}\n\n yield result\n\n\ndef count_query_worker(corpus, cqp, group_by, within, ignore_case=[], cut=None, expand_prequeries=True,\n use_cache=False):\n subcqp = None\n if isinstance(cqp[-1], list):\n subcqp = cqp[-1]\n cqp = cqp[:-1]\n\n if use_cache:\n checksum = get_hash((cqp,\n subcqp,\n group_by,\n within,\n sorted(ignore_case),\n expand_prequeries))\n cache_key = \"%s:count_data_%s\" % (cache_prefix(corpus), checksum)\n cache_size_key = \"%s:count_size_%s\" % (cache_prefix(corpus), checksum)\n\n with mc_pool.reserve() as mc:\n cached_size = mc.get(cache_size_key)\n if cached_size is not None:\n corpus_hits, corpus_size = cached_size\n if corpus_hits == 0:\n return [END_OF_LINE] * len(subcqp) if subcqp else [], corpus_hits, corpus_size\n\n cached_result = mc.get(cache_key)\n if cached_result is not None:\n return cached_result, corpus_hits, corpus_size\n\n do_optimize = True\n cqpparams = {\"within\": within,\n \"cut\": cut}\n\n cmd = [\"%s;\" % corpus]\n for i, c in enumerate(cqp):\n cqpparams_temp = cqpparams.copy()\n pre_query = i + 1 < len(cqp)\n\n if pre_query and expand_prequeries:\n cqpparams_temp[\"expand\"] = \"to \" + 
cqpparams[\"within\"]\n\n if do_optimize:\n cmd += query_optimize(c, cqpparams_temp, find_match=(not pre_query))[1]\n else:\n cmd += make_query(make_cqp(c, **cqpparams_temp))\n\n if pre_query:\n cmd += [\"Last;\"]\n\n cmd += [\"size Last;\"]\n cmd += [\"info; .EOL.;\"]\n\n # TODO: Match targets in a better way\n has_target = any(\"@[\" in x for x in cqp)\n\n cmd += [\"\"\"tabulate Last %s > \"| sort | uniq -c | sort -nr\";\"\"\" % \", \".join(\"%s %s%s\" % (\n \"target\" if has_target else (\"match\" if g[1] else \"match .. matchend\"), g[0], \" %c\" if g[0] in ignore_case else \"\") for g in group_by)]\n\n if subcqp:\n cmd += [\"mainresult=Last;\"]\n if \"expand\" in cqpparams_temp:\n del cqpparams_temp[\"expand\"]\n for c in subcqp:\n cmd += [\".EOL.;\"]\n cmd += [\"mainresult;\"]\n cmd += query_optimize(c, cqpparams_temp, find_match=True)[1]\n cmd += [\"\"\"tabulate Last %s > \"| sort | uniq -c | sort -nr\";\"\"\" % \", \".join(\n \"match .. matchend %s\" % g[0] for g in group_by)]\n\n cmd += [\"exit;\"]\n\n lines = run_cqp(cmd)\n\n # Skip CQP version\n next(lines)\n\n # Size of the query result\n nr_hits = int(next(lines))\n\n # Get corpus size\n for line in lines:\n if line.startswith(\"Size:\"):\n _, corpus_size = line.split(\":\")\n corpus_size = int(corpus_size.strip())\n elif line == END_OF_LINE:\n break\n\n if use_cache:\n with mc_pool.reserve() as mc:\n mc.add(cache_size_key, (nr_hits, corpus_size))\n\n # Only save actual data if number of lines doesn't exceed the limit\n if nr_hits <= config.CACHE_MAX_STATS:\n lines = tuple(lines)\n with mc_pool.reserve() as mc:\n try:\n mc.add(cache_key, lines)\n except pylibmc.TooBig:\n pass\n\n return lines, nr_hits, corpus_size\n\n\ndef count_query_worker_simple(corpus, cqp, group_by, within=None, ignore_case=[], expand_prequeries=True,\n use_cache=False):\n \"\"\"Worker for simple statistics queries which can be run using cwb-scan-corpus.\n Currently only used for searches on [] (any word).\"\"\"\n lines = list(run_cwb_scan(corpus, [g[0] for g in group_by]))\n nr_hits = 0\n\n ic_index = []\n new_lines = {}\n if ignore_case:\n ic_index = [i for i, g in enumerate(group_by) if g[0] in ignore_case]\n\n for i in range(len(lines)):\n c, v = lines[i].split(\"\\t\", 1)\n nr_hits += int(c)\n\n if ic_index:\n v = \"\\t\".join(vv.lower() if i in ic_index else vv for i, vv in enumerate(v.split(\"\\t\")))\n new_lines[v] = new_lines.get(v, 0) + int(c)\n else:\n # Convert result to the same format as the regular CQP count\n lines[i] = \"%s %s\" % (c, v)\n\n if ic_index:\n lines = []\n for v, c in new_lines.items():\n # Convert result to the same format as the regular CQP count\n lines.append(\"%s %s\" % (c, v))\n\n # Corpus size equals number of hits since we count all tokens\n corpus_size = nr_hits\n return lines, nr_hits, corpus_size\n\n\n@app.route(\"/loglike\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef loglike(args):\n \"\"\"Do a log-likelihood comparison on two queries.\"\"\"\n def expected(total, wordtotal, sumtotal):\n \"\"\" The expected is that the words are uniformely distributed over the corpora. \"\"\"\n return wordtotal * (float(total) / sumtotal)\n\n def compute_loglike(wf1_tot1, wf2_tot2):\n \"\"\" Compute log-likelihood for a single pair. 
\"\"\"\n wf1, tot1 = wf1_tot1\n wf2, tot2 = wf2_tot2\n e1 = expected(tot1, wf1 + wf2, tot1 + tot2)\n e2 = expected(tot2, wf1 + wf2, tot1 + tot2)\n (l1, l2) = (0, 0)\n if wf1 > 0:\n l1 = wf1 * math.log(wf1 / e1)\n if wf2 > 0:\n l2 = wf2 * math.log(wf2 / e2)\n loglike = 2 * (l1 + l2)\n return round(loglike, 2)\n\n def critical(val):\n # 95th percentile; 5% level; p < 0.05; critical value = 3.84\n # 99th percentile; 1% level; p < 0.01; critical value = 6.63\n # 99.9th percentile; 0.1% level; p < 0.001; critical value = 10.83\n # 99.99th percentile; 0.01% level; p < 0.0001; critical value = 15.13\n return val > 15.13\n\n def select(w, ls):\n \"\"\" Split annotations on | and returns as list. If annotation is missing, returns the word instead. \"\"\"\n # for c in w:\n # if not (c.isalpha() or (len(w) > 1 and c in '-:')):\n # return []\n xs = [l for l in ls.split('|') if len(l) > 0]\n return xs or [w]\n\n def wf_frequencies(texts):\n freqs = []\n for (name, text) in texts:\n d = defaultdict(int) # Lemgram frequency\n tc = 0 # Total number of tokens\n for w in [r for s in text for (w, a) in s for r in select(w, a['lex'])]:\n tc += 1\n d[w] += 1\n freqs.append((name, d, tc))\n return freqs\n\n def reference_material(filename):\n d = defaultdict(int)\n tot = 0\n with open(filename, encoding='utf8') as f:\n for l in f:\n (wf, msd, lemgram, comp, af, rf) = l[:-1].split('\\t')\n for ll in select(wf, lemgram):\n tot += int(af) # Total number of tokens\n d[ll] += int(af) # Lemgram frequency\n return d, tot\n\n def compute_list(d1, tot1, ref, reftot):\n \"\"\" Compute log-likelyhood for lists. \"\"\"\n result = []\n all_w = set(d1.keys()).union(set(ref.keys()))\n for w in all_w:\n ll = compute_loglike((d1.get(w, 0), tot1), (ref.get(w, 0), reftot))\n result.append((ll, w))\n result.sort(reverse=True)\n return result\n\n def compute_ll_stats(ll_list, count, sets):\n \"\"\" Calculate max, min, average, and truncates word list. 
\"\"\"\n tot = len(ll_list)\n new_list = []\n\n set1count, set2count = 0, 0\n for ll_w in ll_list:\n ll, w = ll_w\n\n if (sets[0][\"freq\"].get(w) and not sets[1][\"freq\"].get(w)) or sets[0][\"freq\"].get(w) and (\n sets[0][\"freq\"].get(w, 0) / (sets[0][\"total\"] * 1.0)) > (\n sets[1][\"freq\"].get(w, 0) / (sets[1][\"total\"] * 1.0)):\n set1count += 1\n if set1count <= count or not count:\n new_list.append((ll * -1, w))\n else:\n set2count += 1\n if set2count <= count or not count:\n new_list.append((ll, w))\n\n if count and (set1count >= count and set2count >= count):\n break\n\n nums = [ll for (ll, _) in ll_list]\n return (\n new_list,\n round(sum(nums) / float(tot), 2) if tot else 0.0,\n min(nums) if nums else 0.0,\n max(nums) if nums else 0.0\n )\n\n assert_key(\"set1_cqp\", args, r\"\", True)\n assert_key(\"set2_cqp\", args, r\"\", True)\n assert_key(\"set1_corpus\", args, r\"\", True)\n assert_key(\"set2_corpus\", args, r\"\", True)\n assert_key((\"group_by\", \"group_by_struct\", \"groupby\"), args, IS_IDENT, True)\n assert_key(\"ignore_case\", args, IS_IDENT)\n assert_key(\"max\", args, IS_NUMBER, False)\n\n maxresults = int(args.get(\"max\") or 15)\n\n set1 = args.get(\"set1_corpus\").upper()\n if isinstance(set1, str):\n set1 = set1.split(QUERY_DELIM)\n set1 = set(set1)\n set2 = args.get(\"set2_corpus\").upper()\n if isinstance(set2, str):\n set2 = set2.split(QUERY_DELIM)\n set2 = set(set2)\n\n corpora = set1.union(set2)\n check_authentication(corpora)\n\n same_cqp = args.get(\"set1_cqp\") == args.get(\"set2_cqp\")\n\n result = {}\n\n # If same CQP for both sets, handle as one query for better performance\n if same_cqp:\n args[\"cqp\"] = args.get(\"set1_cqp\")\n args[\"corpus\"] = QUERY_DELIM.join(corpora)\n count_result = generator_to_dict(count(args))\n\n sets = [{\"total\": 0, \"freq\": defaultdict(int)}, {\"total\": 0, \"freq\": defaultdict(int)}]\n for i, cset in enumerate((set1, set2)):\n for corpus in cset:\n sets[i][\"total\"] += count_result[\"corpora\"][corpus][\"sums\"][\"absolute\"]\n if len(cset) == 1:\n sets[i][\"freq\"] = dict((tuple(\n (y[0], y[1] if isinstance(y[1], tuple) else (y[1],)) for y in sorted(x[\"value\"].items())),\n x[\"freq\"])\n for x in count_result[\"corpora\"][corpus][\"absolute\"])\n else:\n for w, f in ((tuple(\n (y[0], y[1] if isinstance(y[1], tuple) else (y[1],)) for y in sorted(x[\"value\"].items())),\n x[\"freq\"])\n for x in count_result[\"corpora\"][corpus][\"absolute\"]):\n sets[i][\"freq\"][w] += f\n\n else:\n args1, args2 = args.copy(), args.copy()\n args1[\"corpus\"] = QUERY_DELIM.join(set1)\n args1[\"cqp\"] = args.get(\"set1_cqp\")\n args2[\"corpus\"] = QUERY_DELIM.join(set2)\n args2[\"cqp\"] = args.get(\"set2_cqp\")\n count_result = [generator_to_dict(count(args1)), generator_to_dict(count(args2))]\n\n sets = [{}, {}]\n for i, cset in enumerate((set1, set2)):\n sets[i][\"total\"] = count_result[i][\"total\"][\"sums\"][\"absolute\"]\n sets[i][\"freq\"] = dict((tuple(\n (y[0], y[1] if isinstance(y[1], tuple) else (y[1],)) for y in sorted(x[\"value\"].items())), x[\"freq\"])\n for x in count_result[i][\"total\"][\"absolute\"])\n\n ll_list = compute_list(sets[0][\"freq\"], sets[0][\"total\"], sets[1][\"freq\"], sets[1][\"total\"])\n (ws, avg, mi, ma) = compute_ll_stats(ll_list, maxresults, sets)\n\n result[\"loglike\"] = {}\n result[\"average\"] = avg\n result[\"set1\"] = {}\n result[\"set2\"] = {}\n\n for (ll, w) in ws:\n w_formatted = \" \".join(w[0][1])\n result[\"loglike\"][w_formatted] = ll\n result[\"set1\"][w_formatted] = 
sets[0][\"freq\"].get(w, 0)\n result[\"set2\"][w_formatted] = sets[1][\"freq\"].get(w, 0)\n\n yield result\n\n\n################################################################################\n# LEMGRAM_COUNT\n################################################################################\n\n@app.route(\"/lemgram_count\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef lemgram_count(args):\n \"\"\"Return lemgram statistics per corpus.\"\"\"\n assert_key(\"lemgram\", args, r\"\", True)\n assert_key(\"corpus\", args, IS_IDENT)\n assert_key(\"count\", args, r\"(lemgram|prefix|suffix)\")\n\n corpora = parse_corpora(args)\n check_authentication(corpora)\n\n lemgram = args.get(\"lemgram\")\n if isinstance(lemgram, str):\n lemgram = lemgram.split(QUERY_DELIM)\n lemgram = set(lemgram)\n\n count = args.get(\"count\") or \"lemgram\"\n if isinstance(count, str):\n count = count.split(QUERY_DELIM)\n count = set(count)\n\n counts = {\"lemgram\": \"freq\",\n \"prefix\": \"freq_prefix\",\n \"suffix\": \"freq_suffix\"}\n\n sums = \" + \".join(\"SUM(%s)\" % counts[c] for c in count)\n\n lemgram_sql = \" lemgram IN (%s)\" % \", \".join(\"'%s'\" % sql_escape(l) for l in lemgram)\n corpora_sql = \" AND corpus IN (%s)\" % \", \".join(\"'%s'\" % sql_escape(c) for c in corpora) if corpora else \"\"\n\n sql = \"SELECT lemgram, \" + sums + \" AS freq FROM lemgram_index WHERE\" + lemgram_sql + corpora_sql + \\\n \" GROUP BY lemgram COLLATE utf8_bin;\"\n\n result = {}\n with app.app_context():\n cursor = mysql.connection.cursor()\n cursor.execute(sql)\n\n for row in cursor:\n # We need this check here, since a search for \"hår\" also returns \"här\" and \"har\".\n if row[\"lemgram\"] in lemgram and int(row[\"freq\"]) > 0:\n result[row[\"lemgram\"]] = int(row[\"freq\"])\n\n cursor.close()\n\n yield result\n\n\ndef sql_escape(s):\n with app.app_context():\n return mysql.connection.escape_string(s).decode(\"utf-8\") if isinstance(s, str) else s\n\n\n################################################################################\n# TIMESPAN\n################################################################################\n\n@app.route(\"/timespan\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef timespan(args, no_combined_cache=False):\n \"\"\"Calculate timespan information for corpora.\n The time information is retrieved from the database.\n \"\"\"\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key(\"granularity\", args, r\"[ymdhnsYMDHNS]\")\n assert_key(\"combined\", args, r\"(true|false)\")\n assert_key(\"per_corpus\", args, r\"(true|false)\")\n assert_key(\"strategy\", args, r\"^[123]$\")\n assert_key(\"from\", args, r\"^(\\d{8}\\d{6}?|\\d{4}-\\d{2}-\\d{2}( \\d{2}:\\d{2}:\\d{2})?)$\")\n assert_key(\"to\", args, r\"^(\\d{8}\\d{6}?|\\d{4}-\\d{2}-\\d{2}( \\d{2}:\\d{2}:\\d{2})?)$\")\n\n corpora = parse_corpora(args)\n # check_authentication(corpora)\n\n granularity = (args.get(\"granularity\") or \"y\").lower()\n combined = parse_bool(args, \"combined\", True)\n per_corpus = parse_bool(args, \"per_corpus\", True)\n strategy = int(args.get(\"strategy\") or 1)\n fromdate = args.get(\"from\")\n todate = args.get(\"to\")\n\n if fromdate or todate:\n if not fromdate or not todate:\n raise ValueError(\"When using 'from' or 'to', both need to be specified.\")\n\n shorten = {\"y\": 4, \"m\": 7, \"d\": 10, \"h\": 13, \"n\": 16, \"s\": 19}\n\n cached_data = []\n corpora_rest = corpora[:]\n\n if args[\"cache\"]:\n # Check if whole query is cached\n combined_checksum = 
get_hash((granularity,\n combined,\n per_corpus,\n fromdate,\n todate,\n sorted(corpora)))\n cache_combined_key = \"%s:timespan_%s\" % (cache_prefix(), get_hash(combined_checksum))\n with mc_pool.reserve() as mc:\n result = mc.get(cache_combined_key)\n if result is not None:\n if \"debug\" in args:\n result.setdefault(\"DEBUG\", {})\n result[\"DEBUG\"][\"cache_read\"] = True\n yield result\n return\n\n # Look for per-corpus caches\n for corpus in corpora:\n corpus_checksum = get_hash((fromdate, todate, granularity, strategy))\n cache_key = \"%s:timespan_%s\" % (cache_prefix(corpus), corpus_checksum)\n with mc_pool.reserve() as mc:\n corpus_cached_data = mc.get(cache_key)\n\n if corpus_cached_data is not None:\n cached_data.extend(corpus_cached_data)\n corpora_rest.remove(corpus)\n\n ns = {}\n\n with app.app_context():\n if corpora_rest:\n corpora_sql = \"(%s)\" % \", \".join(\"'%s'\" % sql_escape(c) for c in corpora_rest)\n fromto = \"\"\n\n if strategy == 1:\n if fromdate and todate:\n fromto = \" AND ((datefrom >= %s AND dateto <= %s) OR (datefrom <= %s AND dateto >= %s))\" % (\n sql_escape(fromdate), sql_escape(todate), sql_escape(fromdate), sql_escape(todate))\n elif strategy == 2:\n if todate:\n fromto += \" AND datefrom <= '%s'\" % sql_escape(todate)\n if fromdate:\n fromto = \" AND dateto >= '%s'\" % sql_escape(fromdate)\n elif strategy == 3:\n if fromdate:\n fromto = \" AND datefrom >= '%s'\" % sql_escape(fromdate)\n if todate:\n fromto += \" AND dateto <= '%s'\" % sql_escape(todate)\n\n # TODO: Skip grouping on corpus when we only are after the combined results.\n # We do the granularity truncation and summation in the DB query if we can (depending on strategy),\n # since it's much faster than doing it afterwards\n\n timedata_corpus = \"timedata_date\" if granularity in (\"y\", \"m\", \"d\") else \"timedata\"\n if strategy == 1:\n # We need the full dates for this strategy, so no truncating of the results\n sql = \"SELECT corpus, datefrom AS df, dateto AS dt, SUM(tokens) AS sum FROM \" + timedata_corpus + \\\n \" WHERE corpus IN \" + corpora_sql + fromto + \" GROUP BY corpus, df, dt ORDER BY NULL;\"\n else:\n sql = \"SELECT corpus, LEFT(datefrom, \" + str(shorten[granularity]) + \") AS df, LEFT(dateto, \" + \\\n str(shorten[granularity]) + \") AS dt, SUM(tokens) AS sum FROM \" + timedata_corpus + \\\n \" WHERE corpus IN \" + corpora_sql + fromto + \" GROUP BY corpus, df, dt ORDER BY NULL;\"\n cursor = mysql.connection.cursor()\n cursor.execute(sql)\n else:\n cursor = tuple()\n\n if args[\"cache\"]:\n def save_cache(corpus, data):\n corpus_checksum = get_hash((fromdate, todate, granularity, strategy))\n cache_key = \"%s:timespan_%s\" % (cache_prefix(corpus), corpus_checksum)\n with mc_pool.reserve() as mc:\n try:\n mc.add(cache_key, data)\n except pylibmc.TooBig:\n pass\n\n corpus = None\n corpus_data = []\n for row in cursor:\n if corpus is None:\n corpus = row[\"corpus\"]\n elif not row[\"corpus\"] == corpus:\n save_cache(corpus, corpus_data)\n corpus_data = []\n corpus = row[\"corpus\"]\n corpus_data.append(row)\n cached_data.append(row)\n if corpus is not None:\n save_cache(corpus, corpus_data)\n\n ns[\"result\"] = timespan_calculator(itertools.chain(cached_data, cursor), granularity=granularity,\n combined=combined, per_corpus=per_corpus, strategy=strategy)\n\n if corpora_rest:\n cursor.close()\n\n if args[\"cache\"] and not no_combined_cache:\n # Save cache for whole query\n with mc_pool.reserve() as mc:\n try:\n mc.add(cache_combined_key, ns[\"result\"])\n except 
pylibmc.TooBig:\n pass\n\n yield ns[\"result\"]\n\n\ndef timespan_calculator(timedata, granularity=\"y\", combined=True, per_corpus=True, strategy=1):\n \"\"\"Calculate timespan information for corpora.\n\n The required parameters are\n - timedata: the time data to be processed\n\n The optional parameters are\n - granularity: granularity of result (y = year, m = month, d = day, h = hour, n = minute, s = second)\n (default: year)\n - combined: include combined results\n (default: true)\n - per_corpus: include results per corpus\n (default: true)\n \"\"\"\n\n gs = {\"y\": 4, \"m\": 6, \"d\": 8, \"h\": 10, \"n\": 12, \"s\": 14}\n\n def strftime(dt, fmt):\n \"\"\"Python datetime.strftime < 1900 workaround, taken from https://gist.github.com/2000837\"\"\"\n\n TEMPYEAR = 9996 # We need to use a leap year to support feb 29th\n\n if dt.year < 1900:\n # Create a copy of this datetime, just in case, then set the year to\n # something acceptable, then replace that year in the resulting string\n tmp_dt = datetime.datetime(TEMPYEAR, dt.month, dt.day,\n dt.hour, dt.minute,\n dt.second, dt.microsecond,\n dt.tzinfo)\n\n tmp_fmt = fmt\n tmp_fmt = re.sub('(?= t1' AND t2 <= t2') OR (t1 <= t1' AND t2 >= t2')\n if not datefrom_short == dateto_short:\n if not datefrom[gs[granularity]:] == datemin[gs[granularity]:]:\n # Add 1 to datefrom_short\n datefrom_short = plusminusone(str(datefrom_short), add, df)\n\n if not dateto[gs[granularity]:] == datemax[gs[granularity]:]:\n # Subtract 1 from dateto_short\n dateto_short = plusminusone(str(dateto_short), add, df, negative=True)\n\n # Check that datefrom is still before dateto\n if not datefrom < dateto:\n continue\n elif strategy == 2:\n # All overlaps permitted\n # t1 <= t2' AND t2 >= t1'\n pass\n elif strategy == 3:\n # Strict matching. 
No overlaps tolerated.\n # t1 >= t1' AND t2 <= t2'\n\n if not datefrom_short == dateto_short:\n continue\n\n r = {\"datefrom\": datefrom_short, \"dateto\": dateto_short, \"corpus\": corpus, \"freq\": int(row[\"sum\"])}\n if combined:\n rows[\"__combined__\"].append(r)\n nodes[\"__combined__\"].add((\"f\", datefrom_short))\n nodes[\"__combined__\"].add((\"t\", dateto_short))\n if per_corpus:\n rows[corpus].append(r)\n nodes[corpus].add((\"f\", datefrom_short))\n nodes[corpus].add((\"t\", dateto_short))\n\n corpusnodes = dict((k, sorted(v, key=lambda x: (x[1] if x[1] else 0, x[0])))\n for k, v in nodes.items())\n result = {}\n if per_corpus:\n result[\"corpora\"] = {}\n if combined:\n result[\"combined\"] = {}\n\n for corpus, nodes in corpusnodes.items():\n data = defaultdict(int)\n\n for i in range(0, len(nodes) - 1):\n start = nodes[i]\n end = nodes[i + 1]\n if start[0] == \"t\":\n start = plusminusone(str(start[1]), add, df) if start[1] else 0\n if start == end[1] and end[0] == \"f\":\n continue\n else:\n start = start[1]\n\n if not end[1]:\n end = 0\n else:\n end = end[1] if end[0] == \"t\" else plusminusone(str(end[1]), add, df, True)\n\n if start:\n data[\"%d\" % start] = 0\n\n for row in rows[corpus]:\n if row[\"datefrom\"] <= start and row[\"dateto\"] >= end:\n data[str(start if start else \"\")] += row[\"freq\"]\n\n if end:\n data[\"%d\" % plusminusone(str(end), add, df, False)] = 0\n\n if combined and corpus == \"__combined__\":\n result[\"combined\"] = data\n else:\n result[\"corpora\"][corpus] = data\n\n return result\n\n\n################################################################################\n# RELATIONS\n################################################################################\n\n@app.route(\"/relations\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef relations(args):\n \"\"\"Calculate word picture data.\"\"\"\n assert_key(\"corpus\", args, IS_IDENT, True)\n assert_key(\"word\", args, \"\", True)\n assert_key(\"type\", args, r\"(word|lemgram)\", False)\n assert_key(\"min\", args, IS_NUMBER, False)\n assert_key(\"max\", args, IS_NUMBER, False)\n assert_key(\"incremental\", args, r\"(true|false)\")\n\n corpora = parse_corpora(args)\n check_authentication(corpora)\n\n incremental = parse_bool(args, \"incremental\", False)\n\n word = args.get(\"word\")\n search_type = args.get(\"type\", \"\")\n minfreq = args.get(\"min\")\n sort = args.get(\"sort\") or \"mi\"\n maxresults = int(args.get(\"max\") or 15)\n minfreqsql = \" AND freq >= %s\" % minfreq if minfreq else \"\"\n\n result = {}\n\n with app.app_context():\n cursor = mysql.connection.cursor()\n cursor.execute(\"SET @@session.long_query_time = 1000;\")\n\n # Get available tables\n cursor.execute(\"SHOW TABLES LIKE '\" + config.DBWPTABLE + \"_%';\")\n tables = set(list(x.values())[0] for x in cursor)\n # Filter out corpora which don't exist in database\n corpora = [x for x in corpora if config.DBWPTABLE + \"_\" + x.upper() in tables]\n if not corpora:\n yield {}\n return\n\n relations_data = []\n corpora_rest = corpora[:]\n\n if args[\"cache\"]:\n for corpus in corpora:\n corpus_checksum = get_hash((word,\n search_type,\n minfreq))\n with mc_pool.reserve() as mc:\n cached_data = mc.get(\"%s:relations_%s\" % (cache_prefix(corpus), corpus_checksum))\n if cached_data is not None:\n relations_data.extend(cached_data)\n corpora_rest.remove(corpus)\n\n selects = []\n\n if search_type == \"lemgram\":\n lemgram_sql = \"'%s'\" % sql_escape(word)\n\n for corpus in corpora_rest:\n corpus_sql = \"'%s'\" 
% sql_escape(corpus).upper()\n corpus_table = config.DBWPTABLE + \"_\" + corpus.upper()\n\n selects.append((corpus.upper(),\n \"(SELECT S1.string AS head, S1.pos AS headpos, F.rel, S2.string AS dep, S2.pos AS deppos, S2.stringextra AS depextra, F.freq, R.freq AS rel_freq, HR.freq AS head_rel_freq, DR.freq AS dep_rel_freq, \" + corpus_sql + \" AS corpus, F.id \" +\n \"FROM `\" + corpus_table + \"_strings` AS S1, `\" + corpus_table + \"_strings` AS S2, `\" + corpus_table + \"` AS F, `\" + corpus_table + \"_rel` AS R, `\" + corpus_table + \"_head_rel` AS HR, `\" + corpus_table + \"_dep_rel` AS DR \" +\n \"WHERE S1.string = \" + lemgram_sql + \" COLLATE utf8_bin AND F.head = S1.id AND S2.id = F.dep \" +\n minfreqsql +\n \"AND F.bfhead = 1 AND F.bfdep = 1 AND F.rel = R.rel AND F.head = HR.head AND F.rel = HR.rel AND F.dep = DR.dep AND F.rel = DR.rel)\"\n ))\n selects.append((None,\n \"(SELECT S1.string AS head, S1.pos AS headpos, F.rel, S2.string AS dep, S2.pos AS deppos, S2.stringextra AS depextra, F.freq, R.freq AS rel_freq, HR.freq AS head_rel_freq, DR.freq AS dep_rel_freq, \" + corpus_sql + \" AS corpus, F.id \" +\n \"FROM `\" + corpus_table + \"_strings` AS S1, `\" + corpus_table + \"_strings` AS S2, `\" + corpus_table + \"` AS F, `\" + corpus_table + \"_rel` AS R, `\" + corpus_table + \"_head_rel` AS HR, `\" + corpus_table + \"_dep_rel` AS DR \" +\n \"WHERE S2.string = \" + lemgram_sql + \" COLLATE utf8_bin AND F.dep = S2.id AND S1.id = F.head \" +\n minfreqsql +\n \"AND F.bfhead = 1 AND F.bfdep = 1 AND F.rel = R.rel AND F.head = HR.head AND F.rel = HR.rel AND F.dep = DR.dep AND F.rel = DR.rel)\"\n ))\n else:\n word_sql = \"'%s'\" % sql_escape(word)\n word = word\n\n for corpus in corpora_rest:\n corpus_sql = \"'%s'\" % sql_escape(corpus).upper()\n corpus_table = config.DBWPTABLE + \"_\" + corpus.upper()\n\n selects.append((corpus.upper(),\n \"(SELECT S1.string AS head, S1.pos AS headpos, F.rel, S2.string AS dep, S2.pos AS deppos, S2.stringextra AS depextra, F.freq, R.freq AS rel_freq, HR.freq AS head_rel_freq, DR.freq AS dep_rel_freq, \" + corpus_sql + \" AS corpus, F.id \" +\n \"FROM `\" + corpus_table + \"_strings` AS S1, `\" + corpus_table + \"_strings` AS S2, `\" + corpus_table + \"` AS F, `\" + corpus_table + \"_rel` AS R, `\" + corpus_table + \"_head_rel` AS HR, `\" + corpus_table + \"_dep_rel` AS DR \" +\n \"WHERE S1.string = \" + word_sql + \" AND F.head = S1.id AND F.wfhead = 1 AND S2.id = F.dep \" +\n minfreqsql +\n \"AND F.rel = R.rel AND F.head = HR.head AND F.rel = HR.rel AND F.dep = DR.dep AND F.rel = DR.rel)\"\n ))\n selects.append((None,\n \"(SELECT S1.string AS head, S1.pos AS headpos, F.rel, S2.string AS dep, S2.pos AS deppos, S2.stringextra AS depextra, F.freq, R.freq AS rel_freq, HR.freq AS head_rel_freq, DR.freq AS dep_rel_freq, \" + corpus_sql + \" AS corpus, F.id \" +\n \"FROM `\" + corpus_table + \"_strings` AS S1, `\" + corpus_table + \"_strings` AS S2, `\" + corpus_table + \"` AS F, `\" + corpus_table + \"_rel` AS R, `\" + corpus_table + \"_head_rel` AS HR, `\" + corpus_table + \"_dep_rel` AS DR \" +\n \"WHERE S2.string = \" + word_sql + \" AND F.dep = S2.id AND F.wfdep = 1 AND S1.id = F.head \" +\n minfreqsql +\n \"AND F.rel = R.rel AND F.head = HR.head AND F.rel = HR.rel AND F.dep = DR.dep AND F.rel = DR.rel)\"\n ))\n\n cursor_result = []\n if corpora_rest:\n if incremental:\n yield {\"progress_corpora\": list(corpora_rest)}\n progress_count = 0\n for sql in selects:\n cursor.execute(sql[1])\n cursor_result.extend(list(cursor))\n if sql[0]:\n yield 
{\"progress_%d\" % progress_count: {\"corpus\": sql[0]}}\n progress_count += 1\n else:\n sql = \" UNION ALL \".join(x[1] for x in selects)\n cursor.execute(sql)\n cursor_result = cursor\n\n rels = {}\n counter = {}\n freq_rel = {}\n freq_head_rel = {}\n freq_rel_dep = {}\n\n do_caching = False\n corpus = None\n corpus_data = []\n\n def save_cache(corpus, data):\n corpus_checksum = get_hash((word, search_type, minfreq))\n with mc_pool.reserve() as mc:\n try:\n mc.add(\"%s:relations_%s\" % (cache_prefix(corpus), corpus_checksum), data)\n except pylibmc.TooBig:\n pass\n\n for row in itertools.chain(relations_data, (None,), cursor_result):\n if row is None and args[\"cache\"]:\n do_caching = True\n continue\n\n if do_caching:\n if corpus is None:\n corpus = row[\"corpus\"]\n elif not row[\"corpus\"] == corpus:\n save_cache(corpus, corpus_data)\n corpus_data = []\n corpus = row[\"corpus\"]\n corpus_data.append(row)\n\n head = (row[\"head\"], row[\"headpos\"])\n dep = (row[\"dep\"], row[\"deppos\"], row[\"depextra\"])\n rels.setdefault((head, row[\"rel\"], dep), {\"freq\": 0, \"source\": set()})\n rels[(head, row[\"rel\"], dep)][\"freq\"] += row[\"freq\"]\n rels[(head, row[\"rel\"], dep)][\"source\"].add(\"%s:%d\" % (row[\"corpus\"], row[\"id\"]))\n freq_rel.setdefault(row[\"rel\"], {})[(row[\"corpus\"], row[\"rel\"])] = row[\"rel_freq\"]\n freq_head_rel.setdefault((head, row[\"rel\"]), {})[(row[\"corpus\"], row[\"rel\"])] = row[\"head_rel_freq\"]\n freq_rel_dep.setdefault((row[\"rel\"], dep), {})[(row[\"corpus\"], row[\"rel\"])] = row[\"dep_rel_freq\"]\n\n if corpus is not None:\n save_cache(corpus, corpus_data)\n del corpus_data\n\n cursor.close()\n\n # Calculate MI\n for rel in rels:\n f_rel = sum(freq_rel[rel[1]].values())\n f_head_rel = sum(freq_head_rel[(rel[0], rel[1])].values())\n f_rel_dep = sum(freq_rel_dep[(rel[1], rel[2])].values())\n rels[rel][\"mi\"] = rels[rel][\"freq\"] * math.log((f_rel * rels[rel][\"freq\"]) / (f_head_rel * f_rel_dep * 1.0), 2)\n\n sortedrels = sorted(rels.items(), key=lambda x: (x[0][1], x[1][sort]), reverse=True)\n\n for rel in sortedrels:\n counter.setdefault((rel[0][1], \"h\"), 0)\n counter.setdefault((rel[0][1], \"d\"), 0)\n if search_type == \"lemgram\" and rel[0][0][0] == word:\n counter[(rel[0][1], \"h\")] += 1\n if maxresults and counter[(rel[0][1], \"h\")] > maxresults:\n continue\n else:\n counter[(rel[0][1], \"d\")] += 1\n if maxresults and counter[(rel[0][1], \"d\")] > maxresults:\n continue\n\n r = {\"head\": rel[0][0][0],\n \"headpos\": rel[0][0][1],\n \"rel\": rel[0][1],\n \"dep\": rel[0][2][0],\n \"deppos\": rel[0][2][1],\n \"depextra\": rel[0][2][2],\n \"freq\": rel[1][\"freq\"],\n \"mi\": rel[1][\"mi\"],\n \"source\": list(rel[1][\"source\"])\n }\n result.setdefault(\"relations\", []).append(r)\n\n yield result\n\n\n################################################################################\n# RELATIONS_SENTENCES\n################################################################################\n\n@app.route(\"/relations_sentences\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef relations_sentences(args):\n \"\"\"Execute a CQP query to find sentences with a given relation from a word picture.\"\"\"\n assert_key(\"source\", args, \"\", True)\n assert_key(\"start\", args, IS_NUMBER, False)\n assert_key(\"end\", args, IS_NUMBER, False)\n\n temp_source = args.get(\"source\")\n if isinstance(temp_source, str):\n temp_source = temp_source.split(QUERY_DELIM)\n source = defaultdict(set)\n for s in temp_source:\n c, i = 
s.split(\":\")\n source[c].add(i)\n\n check_authentication(source.keys())\n\n start = int(args.get(\"start\") or 0)\n end = int(args.get(\"end\") or 9)\n shown = args.get(\"show\") or \"word\"\n shown_structs = args.get(\"show_struct\") or []\n if isinstance(shown_structs, str):\n shown_structs = shown_structs.split(QUERY_DELIM)\n shown_structs = set(shown_structs)\n\n default_context = args.get(\"default_context\", args.get(\"defaultcontext\")) or \"1 sentence\"\n\n querystarttime = time.time()\n\n with app.app_context():\n cursor = mysql.connection.cursor()\n cursor.execute(\"SET @@session.long_query_time = 1000;\")\n selects = []\n counts = []\n\n # Get available tables\n cursor.execute(\"SHOW TABLES LIKE '\" + config.DBWPTABLE + \"_%';\")\n tables = set(list(x.values())[0] for x in cursor)\n # Filter out corpora which doesn't exist in database\n source = sorted([x for x in iter(source.items()) if config.DBWPTABLE + \"_\" + x[0].upper() in tables])\n if not source:\n yield {}\n return\n corpora = [x[0] for x in source]\n\n for s in source:\n corpus, ids = s\n ids = [int(i) for i in ids]\n ids_list = \"(\" + \", \".join(\"%d\" % i for i in ids) + \")\"\n\n corpus_table_sentences = config.DBWPTABLE + \"_\" + corpus.upper() + \"_sentences\"\n\n selects.append(\"(SELECT S.sentence, S.start, S.end, '\" + sql_escape(corpus.upper()) + \"' AS corpus \" +\n \"FROM `\" + corpus_table_sentences + \"` as S \" +\n \" WHERE S.id IN \" + ids_list + \")\"\n )\n counts.append(\"(SELECT '\" + sql_escape(corpus.upper()) + \"' AS corpus, COUNT(*) AS freq FROM `\" +\n corpus_table_sentences + \"` as S WHERE S.id IN \" + ids_list + \")\")\n\n sql_count = \" UNION ALL \".join(counts)\n cursor.execute(sql_count)\n\n corpus_hits = {}\n for row in cursor:\n corpus_hits[row[\"corpus\"]] = int(row[\"freq\"])\n\n sql = \" UNION ALL \".join(selects) + (\" LIMIT %d, %d\" % (start, end - start + 1))\n cursor.execute(sql)\n\n querytime = time.time() - querystarttime\n corpora_dict = {}\n for row in cursor:\n corpora_dict.setdefault(row[\"corpus\"], {}).setdefault(row[\"sentence\"], []).append(\n (row[\"start\"], row[\"end\"]))\n\n cursor.close()\n\n total_hits = sum(corpus_hits.values())\n\n if not corpora_dict:\n yield {\"hits\": 0}\n return\n\n cqpstarttime = time.time()\n result = {}\n\n for corp, sids in sorted(corpora_dict.items(), key=lambda x: x[0]):\n cqp = u' []* within sentence' % \"|\".join(set(sids.keys()))\n q = {\"cqp\": cqp,\n \"corpus\": corp,\n \"start\": \"0\",\n \"end\": str(end - start),\n \"show_struct\": [\"sentence_id\"] + list(shown_structs),\n \"default_context\": default_context}\n if shown:\n q[\"show\"] = shown\n result_temp = generator_to_dict(query(q))\n\n # Loop backwards since we might be adding new items\n for i in range(len(result_temp[\"kwic\"]) - 1, -1, -1):\n s = result_temp[\"kwic\"][i]\n sid = s[\"structs\"][\"sentence_id\"]\n r = sids[sid][0]\n sentence_start = s[\"match\"][\"start\"]\n s[\"match\"][\"start\"] = sentence_start + min(map(int, r)) - 1\n s[\"match\"][\"end\"] = sentence_start + max(map(int, r))\n\n # If the same relation appears more than once in the same sentence,\n # append copies of the sentence as separate results\n for r in sids[sid][1:]:\n s2 = deepcopy(s)\n s2[\"match\"][\"start\"] = sentence_start + min(map(int, r)) - 1\n s2[\"match\"][\"end\"] = sentence_start + max(map(int, r))\n result_temp[\"kwic\"].insert(i + 1, s2)\n\n result.setdefault(\"kwic\", []).extend(result_temp[\"kwic\"])\n\n result[\"hits\"] = total_hits\n result[\"corpus_hits\"] = 
corpus_hits\n result[\"corpus_order\"] = corpora\n result[\"querytime\"] = querytime\n result[\"cqptime\"] = time.time() - cqpstarttime\n\n yield result\n\n\n################################################################################\n# CACHE HANDLING\n################################################################################\n\n@app.route(\"/cache\", methods=[\"GET\", \"POST\"])\n@main_handler\n@prevent_timeout\ndef cache_handler(args):\n \"\"\"Check for updated corpora and invalidate caches where needed. Also remove old disk cache.\"\"\"\n if not config.CACHE_DIR or not config.MEMCACHED_SERVERS or cache_disabled:\n return {}\n\n result = {}\n\n # Set up caching if needed\n initial_setup = setup_cache()\n\n if initial_setup:\n result[\"initial_setup\"] = True\n else:\n result = {\"multi_invalidated\": False,\n \"corpora_invalidated\": 0,\n \"files_removed\": 0}\n now = time.time()\n\n # Get modification time of corpus registry files\n corpora = get_corpus_timestamps()\n\n with mc_pool.reserve() as mc:\n # Invalidate cache for updated corpora\n for corpus in corpora:\n if mc.get(\"%s:last_update\" % corpus, 0) < corpora[corpus]:\n mc.set(\"%s:version\" % corpus, mc.get(\"%s:version\" % corpus, 0) + 1)\n mc.set(\"%s:last_update\" % corpus, corpora[corpus])\n result[\"corpora_invalidated\"] += 1\n\n # Remove outdated query data\n for cachefile in glob.glob(os.path.join(config.CACHE_DIR, \"%s:*\" % corpus)):\n if os.path.getmtime(cachefile) < corpora[corpus]:\n os.remove(cachefile)\n result[\"files_removed\"] += 1\n\n # If any corpus has been updated or added, increase version to invalidate all combined caches\n if result[\"corpora_invalidated\"]:\n mc.set(\"multi:version\", mc.get(\"multi:version\", 0) + 1)\n result[\"multi_invalidated\"] = True\n\n # Remove old query data\n for cachefile in glob.glob(os.path.join(config.CACHE_DIR, \"*:query_data_*\")):\n if os.path.getmtime(cachefile) < (now - config.CACHE_LIFESPAN * 60):\n os.remove(cachefile)\n result[\"files_removed\"] += 1\n yield result\n\n\ndef cache_prefix(corpus=\"multi\"):\n with mc_pool.reserve() as mc:\n return \"%s:%d\" % (corpus, mc.get(\"%s:version\" % corpus, 0))\n\n\ndef get_corpus_timestamps():\n \"\"\"Get modification time of corpus registry files.\"\"\"\n corpora = dict((os.path.basename(f).upper(), os.path.getmtime(f)) for f in\n glob.glob(os.path.join(config.CWB_REGISTRY, \"*\")))\n return corpora\n\n\ndef setup_cache():\n \"\"\"Setup disk cache and Memcached if needed.\"\"\"\n if cache_disabled:\n return False\n\n action_needed = False\n\n # Create cache dir if needed\n if config.CACHE_DIR and not os.path.exists(config.CACHE_DIR):\n os.makedirs(config.CACHE_DIR)\n action_needed = True\n\n # Set up Memcached if needed\n with mc_pool.reserve() as mc:\n if \"multi:version\" not in mc:\n corpora = get_corpus_timestamps()\n mc.set(\"multi:version\", 1)\n for corpus in corpora:\n mc.set(\"%s:version\" % corpus, 1)\n mc.set(\"%s:last_update\" % corpus, corpora[corpus])\n action_needed = True\n\n return action_needed\n\n\n################################################################################\n# DOCUMENTATION\n################################################################################\n\n@app.route(\"/\")\ndef documentation():\n \"\"\"Render API documentation.\"\"\"\n if not os.path.isfile(\"docs/api.md\"):\n return \"API documentation missing.\"\n with open(\"docs/api.md\", encoding=\"UTF-8\") as doc:\n md_text = doc.read()\n\n # Replace placeholders\n md_text = md_text.replace(\"[SBURL]\", 
SB_API_URL)\n md_text = md_text.replace(\"[VERSION]\", KORP_VERSION)\n\n # Convert Markdown to HTML\n md = markdown.Markdown(extensions=[\"markdown.extensions.toc\",\n \"markdown.extensions.smarty\",\n \"markdown.extensions.def_list\",\n \"markdown.extensions.fenced_code\"])\n md_html = md.convert(md_text)\n\n html = [\"\"\"\n \n \n \n Korp API v%s\n \n \n \n \n \n \n \n \n
\n
\n
\n Korp API v%s\n
\n %s\n
\n
\n \"\"\" % (KORP_VERSION, KORP_VERSION, md.toc), md_html, \"
\"]\n\n return \"\\n\".join(html)\n\n\n################################################################################\n# Helper functions\n################################################################################\n\ndef parse_cqp(cqp):\n \"\"\"Try to parse a CQP query, returning identified tokens and a\n boolean indicating partial failure if True.\n \"\"\"\n sections = []\n last_start = 0\n in_bracket = 0\n in_quote = False\n in_curly = False\n escaping = False\n quote_type = \"\"\n\n for i in range(len(cqp)):\n c = cqp[i]\n\n if in_quote and not escaping and c == \"\\\\\":\n # Next character is being escaped\n escaping = True\n elif escaping:\n # Current character is being escaped\n escaping = False\n elif c in '\"\\'':\n if in_quote and quote_type == c:\n if i < len(cqp) - 1 and cqp[i + 1] == quote_type:\n # First character of a quote escaped by doubling\n escaping = True\n else:\n # End of a quote\n in_quote = False\n if not in_bracket:\n sections.append([last_start, i])\n elif not in_quote:\n # Beginning of a qoute\n in_quote = True\n quote_type = c\n if not in_bracket:\n last_start = i\n elif c == \"[\":\n if not in_bracket and not in_quote:\n # Beginning of a token\n last_start = i\n in_bracket = True\n if len(cqp) > i + 1 and cqp[i + 1] == \":\":\n # Zero-width assertion encountered, which can not be handled by MU query\n return [], True\n elif c == \"]\":\n if in_bracket and not in_quote:\n # End of a token\n sections.append([last_start, i])\n in_bracket = False\n elif c == \"{\" and not in_bracket and not in_quote:\n in_curly = True\n elif c == \"}\" and not in_bracket and not in_quote and in_curly:\n in_curly = False\n sections[-1][1] = i\n\n last_section = (0, 0)\n sections.append([len(cqp), len(cqp)])\n tokens = []\n rest = False\n\n for section in sections:\n if last_section[1] < section[0]:\n if cqp[last_section[1] + 1:section[0]].strip():\n rest = True\n last_section = section\n if cqp[section[0]:section[1] + 1]:\n tokens.append(cqp[section[0]:section[1] + 1])\n\n return tokens, rest\n\n\ndef make_cqp(cqp, within=None, cut=None, expand=None):\n \"\"\" Combine CQP query and extra options. 
\"\"\"\n for arg in ((\"within\", within), (\"cut\", cut), (\"expand\", expand)):\n if arg[1]:\n cqp += \" %s %s\" % arg\n return cqp\n\n\ndef make_query(cqp):\n \"\"\"Create web-safe commands for a CQP query.\n \"\"\"\n querylock = random.randrange(10 ** 8, 10 ** 9)\n return [\"set QueryLock %s;\" % querylock,\n \"%s;\" % cqp,\n \"unlock %s;\" % querylock]\n\n\ndef translate_undef(s):\n \"\"\"Translate '__UNDEF__' to None.\"\"\"\n return None if s == \"__UNDEF__\" else s\n\n\ndef get_hash(values):\n \"\"\"Get a hash for a list of values.\"\"\"\n return hashlib.sha256(bytes(\";\".join(v if isinstance(v, str) else str(v) for v in values), \"UTF-8\")).hexdigest()\n\n\nclass CQPError(Exception):\n pass\n\n\nclass KorpAuthenticationError(Exception):\n pass\n\n\nclass Namespace:\n pass\n\n\ndef run_cqp(command, encoding=None, executable=config.CQP_EXECUTABLE, registry=config.CWB_REGISTRY, attr_ignore=False):\n \"\"\"Call the CQP binary with the given command, and the request data.\n Yield one result line at the time, disregarding empty lines.\n If there is an error, raise a CQPError exception.\n \"\"\"\n env = os.environ.copy()\n env[\"LC_COLLATE\"] = config.LC_COLLATE\n encoding = encoding or config.CQP_ENCODING\n if not isinstance(command, str):\n command = \"\\n\".join(command)\n command = \"set PrettyPrint off;\\n\" + command\n command = command.encode(encoding)\n process = subprocess.Popen([executable, \"-c\", \"-r\", registry],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n reply, error = process.communicate(command)\n if error:\n error = error.decode(encoding)\n # Remove newlines from the error string:\n error = re.sub(r\"\\s+\", r\" \", error)\n # Keep only the first CQP error (the rest are consequences):\n error = re.sub(r\"^CQP Error: *\", r\"\", error)\n error = re.sub(r\" *(CQP Error:).*$\", r\"\", error)\n # Ignore certain errors:\n # 1) \"show +attr\" for unknown attr,\n # 2) querying unknown structural attribute,\n # 3) calculating statistics for empty results\n if not (attr_ignore and \"No such attribute:\" in error) \\\n and \"is not defined for corpus\" not in error \\\n and \"cl->range && cl->size > 0\" not in error \\\n and \"neither a positional/structural attribute\" not in error \\\n and \"CL: major error, cannot compose string: invalid UTF8 string passed to cl_string_canonical...\" not in error:\n raise CQPError(error)\n for line in reply.decode(encoding, errors=\"ignore\").split(\n \"\\n\"): # We don't use splitlines() since it might split on special characters in the data\n if line:\n yield line\n\n\ndef run_cwb_scan(corpus, attrs, encoding=config.CQP_ENCODING, executable=config.CWB_SCAN_EXECUTABLE,\n registry=config.CWB_REGISTRY):\n \"\"\"Call the cwb-scan-corpus binary with the given arguments.\n Yield one result line at the time, disregarding empty lines.\n If there is an error, raise a CQPError exception.\n \"\"\"\n process = subprocess.Popen([executable, \"-q\", \"-r\", registry, corpus] + attrs,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n reply, error = process.communicate()\n if error:\n # Remove newlines from the error string:\n error = re.sub(r\"\\s+\", r\" \", error.decode())\n # Ignore certain errors:\n # 1) \"show +attr\" for unknown attr,\n # 2) querying unknown structural attribute,\n # 3) calculating statistics for empty results\n raise CQPError(error)\n for line in reply.decode(encoding, errors=\"ignore\").split(\n \"\\n\"): # We don't use splitlines() since it might split on special characters in the 
data\n if line and len(line) < 65536:\n yield line\n\n\ndef show_attributes():\n \"\"\"Command sequence for returning the corpus attributes.\"\"\"\n return [\"show cd; .EOL.;\"]\n\n\ndef read_attributes(lines):\n \"\"\"Read the CQP output from the show_attributes() command.\"\"\"\n attrs = {'p': [], 's': [], 'a': []}\n for line in lines:\n if line == END_OF_LINE:\n break\n (typ, name, _rest) = (line + \" X\").split(None, 2)\n attrs[typ[0]].append(name)\n return attrs\n\n\ndef assert_key(key, attrs, regexp, required=False):\n \"\"\"Check that the value of the attribute 'key' in the request data\n matches the specification 'regexp'. If 'required' is True, then\n the key has to be in the form.\n \"\"\"\n if isinstance(key, (tuple, list)):\n for k in key:\n value = attrs.get(k)\n if value is not None:\n break\n else:\n value = attrs.get(key, \"\")\n key = (key,)\n if value and not isinstance(value, list):\n value = [value]\n if required and not value:\n raise KeyError(\"Key is required: <%s>\" % \"|\".join(key))\n if value and not all(re.match(regexp, x) for x in value):\n pattern = regexp.pattern if hasattr(regexp, \"pattern\") else regexp\n raise ValueError(\"Value(s) for key <%s> do(es) not match /%s/: %s\" % (\"|\".join(key), pattern, value))\n\n\n@app.route(\"/authenticate\", methods=[\"GET\", \"POST\"])\n@main_handler\ndef authenticate(_=None):\n \"\"\"Authenticate a user against an authentication server.\"\"\"\n\n auth_data = request.authorization\n\n if auth_data:\n postdata = {\n \"username\": auth_data[\"username\"],\n \"password\": auth_data[\"password\"],\n \"checksum\": hashlib.md5(bytes(auth_data[\"username\"] + auth_data[\"password\"] +\n config.AUTH_SECRET, \"utf-8\")).hexdigest()\n }\n\n try:\n contents = urllib.request.urlopen(config.AUTH_SERVER,\n urllib.parse.urlencode(postdata).encode(\"utf-8\")).read().decode(\"utf-8\")\n auth_response = json.loads(contents)\n except urllib.error.HTTPError:\n raise KorpAuthenticationError(\"Could not contact authentication server.\")\n except ValueError:\n raise KorpAuthenticationError(\"Invalid response from authentication server.\")\n except:\n raise KorpAuthenticationError(\"Unexpected error during authentication.\")\n\n if auth_response[\"authenticated\"]:\n permitted_resources = auth_response[\"permitted_resources\"]\n result = {\"corpora\": []}\n if \"corpora\" in permitted_resources:\n for c in permitted_resources[\"corpora\"]:\n if permitted_resources[\"corpora\"][c][\"read\"]:\n result[\"corpora\"].append(c.upper())\n yield result\n return\n\n yield {}\n\n\ndef check_authentication(corpora):\n \"\"\"Take a list of corpora, and if any of them are protected, run authentication.\n Raises an error if authentication fails.\"\"\"\n\n if config.PROTECTED_FILE:\n # Split parallel corpora\n corpora = [cc for c in corpora for cc in c.split(\"|\")]\n with open(config.PROTECTED_FILE) as infile:\n protected = [x.strip() for x in infile.readlines()]\n c = [c for c in corpora if c.upper() in protected]\n if c:\n auth = generator_to_dict(authenticate({}))\n unauthorized = [x for x in c if x.upper() not in auth.get(\"corpora\", [])]\n if not auth or unauthorized:\n raise KorpAuthenticationError(\"You do not have access to the following corpora: %s\" %\n \", \".join(unauthorized))\n\n\ndef generator_to_dict(generator):\n d = next(generator)\n for v in generator:\n d.update(v)\n return d\n\n\ndef parse_bool(args, key, default=True):\n if default:\n return args.get(key, \"\").lower() != \"false\"\n else:\n return args.get(key, \"\").lower() == 
\"true\"\n\n\nclass CustomTracebackException(Exception):\n def __init__(self, exception):\n self.exception = exception\n\n\n# Set up Memcached client pool\nif config.MEMCACHED_SERVERS and not cache_disabled:\n mc_client = pylibmc.Client(config.MEMCACHED_SERVERS)\n mc_pool = pylibmc.ClientPool(mc_client, config.MEMCACHED_POOL_SIZE or 1)\n with mc_pool.reserve() as mc:\n try:\n mc.get(\"test_connection\")\n except:\n print(\"Could not connect to Memcached. Caching will be disabled.\")\n cache_disabled = True\n\n# Set up caching\nsetup_cache()\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2 and sys.argv[1] == \"dev\":\n # Run using Flask (use only for development)\n app.run(debug=True, threaded=True, host=config.WSGI_HOST, port=config.WSGI_PORT)\n else:\n # Run using gevent\n print(\"Serving using gevent\")\n http = WSGIServer((config.WSGI_HOST, config.WSGI_PORT), app.wsgi_app)\n http.serve_forever()\n","sub_path":"korp.py","file_name":"korp.py","file_ext":"py","file_size_in_byte":134820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"623896683","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Generate the synthetic data for fibinet model training\"\"\"\nimport time\nimport numpy as np\nfrom src.model_utils.config import config\n\ndef generate_data(output_path, label_dim, number_examples, dense_dim, slot_dim, vocabulary_size, random_slot_values):\n \"\"\"\n This function generates the synthetic data of the web clicking data. Each row in the output file is as follows\n 'label\\tdense_feature[0] dense_feature[1] ... sparse_feature[0]...sparse_feature[1]...'\n Each value is dilimited by '\\t'.\n Args:\n output_path: string. The output file path of the synthetic data\n label_dim: int. The category of the label. For 0-1 clicking problem, it's value is 2\n number_examples: int. The row numbers of the synthetic dataset\n dense_dim: int. The number of continue features.\n slot_dim: int. The number of the category features\n vocabulary_size: int. The value of vocabulary size\n random_slot_values: bool. If true, the id is geneted by the random. 
If false, the id is set by the row_index\n        mod part_size, where part_size is the vocab size for each slot\n    \"\"\"\n\n    part_size = (vocabulary_size - dense_dim) // slot_dim\n\n    if random_slot_values is True:\n        print('Each field size is supposed to be {}, so number of examples should be no less than this value'.format(\n            part_size))\n\n    start = time.time()\n\n    buffer_data = []\n\n    with open(output_path, 'w') as fp:\n        for i in range(number_examples):\n            example = []\n            label = i % label_dim\n            example.append(label)\n\n            dense_feature = [\"{:.3f}\".format(j + 0.01 * i % 10) for j in range(dense_dim)]\n            example.extend(dense_feature)\n\n            if random_slot_values is True:\n                for j in range(slot_dim):\n                    example.append(dense_dim + np.random.randint(j * part_size, min((j + 1) * part_size,\n                                                                 vocabulary_size - dense_dim - 1)))\n            else:\n                sp = i % part_size\n                example.extend(\n                    [dense_dim + min(sp + j * part_size, vocabulary_size - dense_dim - 1) for j in range(slot_dim)])\n\n            buffer_data.append(\"\\t\".join([str(item) for item in example]))\n\n            if (i + 1) % 10000 == 0:\n                end = time.time()\n                speed = 10000 / (end - start)\n                start = time.time()\n                print(\"Processed {} examples with speed {:.2f} examples/s\".format(i + 1, speed))\n                fp.write('\\n'.join(buffer_data) + '\\n')\n                buffer_data = []\n\n\nif __name__ == '__main__':\n    config.random_slot_values = bool(config.random_slot_values)\n\n    generate_data(output_path=config.output_file, label_dim=config.label_dim, number_examples=config.number_examples,\n                  dense_dim=config.dense_dim, slot_dim=config.slot_dim, vocabulary_size=config.vocabulary_size,\n                  random_slot_values=config.random_slot_values)\n","sub_path":"research/recommend/fibinet/src/generate_synthetic_data.py","file_name":"generate_synthetic_data.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"484548204","text":"import paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\nimport time\n\n\ndef mqtt_publish(result):\n    broker = \"192.168.43.215\"\n\n    client = mqtt.Client()\n    print(\"Connect to broker\", broker)\n\n    client.connect(broker, 1883, 60)\n\n    client.publish(\"prediction\", result)\n\n    client.disconnect()\n\n","sub_path":"AuthorizeUserConnection/src/Services/MqttConnectionSettings.py","file_name":"MqttConnectionSettings.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"55534907","text":"import pygame\nimport random\nimport time\n\npygame.font.init()\nstartTime = time.time()\nn = 151\nscreen = pygame.display.set_mode(\n    (1024, 768)\n)\n\npygame.display.set_caption(\"SORTING VISUALISER\")\n\nrun = True\n\nwidth = 1024\nlength = 768\narray = [0] * n\narr_clr = [(0, 204, 102)] * n\nclr_ind = 0\nclr = [(0, 204, 102), (255, 0, 0), \\\n       (0, 0, 153), (255, 102, 0)]\nfnt = pygame.font.SysFont(\"comicsans\", 30)\nfnt1 = pygame.font.SysFont(\"comicsans\", 20)\n\ndef generate_arr():\n    for i in range(1, n):\n        arr_clr[i] = clr[0]\n        array[i] = random.randrange(1, 100)\n\ngenerate_arr()\n\ndef refill():\n    screen.fill((255, 255, 255))\n    draw()\n    pygame.display.update()\n    pygame.time.delay(10)\n\ndef insertionSort(arr):\n    for i in range(1, len(arr)): #Check from 1 up to the length of the array\n        pygame.event.pump()\n        refill()\n        key = arr[i] #Key represents the value currently being compared\n        arr_clr[i] = clr[2]\n        j = i - 1 #j represents the position of the value being compared against\n        while j >= 0 and key < arr[j]:\n            arr_clr[j] = 
clr[2]\n            arr[j+1] = arr[j] #Move the elements of arr[0..i-1] that are greater than key\n            refill()\n            arr_clr[j] = clr[3]\n            j -= 1 #move one position ahead of the current position\n        arr[j+1] = key\n        refill()\n        arr_clr[i] = clr[0]\n\ndef draw():\n    txt = fnt.render(\"SORT: PRESS 'ENTER'\", \\\n                     1, (0, 0, 0))\n    screen.blit(txt, (20, 20))\n    txt1 = fnt.render(\"NEW ARRAY: PRESS 'R'\", \\\n                      1, (0, 0, 0))\n    screen.blit(txt1, (20, 40))\n    txt2 = fnt1.render(\"ALGORITHM USED:\" \\\n                       \"INSERTION SORT\", 1, (0, 0, 0))\n    screen.blit(txt2, (600, 60))\n    text3 = fnt1.render(\"Running Time(sec): \" + \\\n                        str(int(time.time() - startTime)), \\\n                        1, (0, 0, 0))\n    screen.blit(text3, (600, 20))\n    element_width = (width - 200) // 200\n    boundry_arr = 1024 / 200\n    boundry_grp = 728 / 150\n    pygame.draw.line(screen, (0, 0, 0), (0, 95), \\\n                     (1024, 95), 6)\n\n    for i in range(1, n):\n        pygame.draw.line(screen, arr_clr[i], \\\n                         (boundry_arr * i - 3, 100), \\\n                         (boundry_arr * i - 3, \\\n                         array[i] * boundry_grp + 100), element_width)\n\nwhile run:\n    screen.fill((255, 255, 255))\n\n    for event in pygame.event.get():\n\n        if event.type == pygame.QUIT:\n            run = False\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_r:\n                generate_arr()\n            if event.key == pygame.K_RETURN:\n                insertionSort(array)\n    draw()\n    pygame.display.update()\n\npygame.quit()","sub_path":"InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"450977209","text":"class Portfolio:\n\tdef __init__(self, budget, symbols):\n\t\tself.budget = budget\n\t\tself.symbols = symbols\n\t\tself.symbols_index = {symbols[i]: i for i in range(len(symbols))}\n\t\tself.portfolio = [0 for i in range(len(symbols))]\n\tdef buy_stock(self, symbol, stock_price, shares):\n\t\tif stock_price * shares > self.budget:\n\t\t\tprint ('The price of the stock exceeds the budget')\n\t\t\treturn 0\n\t\telse:\n\t\t\tself.budget -= stock_price * shares\n\t\t\tself.portfolio[self.symbols_index[symbol]] += shares\n\t\t\treturn shares\n\n\tdef sell_stock(self, symbol, stock_price, shares):\n\t\tif self.portfolio[self.symbols_index[symbol]] < shares:\n\t\t\tprint ('We cannot sell more stocks than we have')\n\t\t\treturn 0\n\t\telse:\n\t\t\tself.budget += stock_price * shares\n\t\t\tself.portfolio[self.symbols_index[symbol]] -= shares\n\t\t\treturn shares\n\tdef evaluate_portfolio(self, stock_prices_today):\n\t\tportval = self.budget\n\t\tfor i in range(len(stock_prices_today)):\n\t\t\tportval += stock_prices_today[i] * self.portfolio[i]\n\t\treturn portval\n","sub_path":"portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"601086180","text":"from flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n\treturnText = \"Welcome, you are using KeepHerSafe Services... 
:)\\n\" + \\\n\t\t\"Sample Request Url: \" + \\\n\t\"\\\"http://keephersafe.azurewebsites.net/isEmergency/currentheartrate=92.32&heartratemean=85&heartratestdDev=20&scaleOfElimination=1\\\"\" + \\\n\t\"\\n\\n\" \n\treturn returnText\n\n\n#sample request\n#http://127.0.0.1:5000/isEmergency/currentheartrate=92.32&heartratemean=85&heartratestdDev=20&scaleOfElimination=1\n@app.route(\"/isEmergency/currentheartrate=<hrv>&heartratemean=<mean>&heartratestdDev=<stdDev>&scaleOfElimination=<scaleOfElimination>\")\ndef checkCondition(hrv, mean, stdDev, scaleOfElimination):\n\thrv = float(hrv)\n\tmean = float(mean)\n\tstdDev = float(stdDev)\n\tscaleOfElimination = float(scaleOfElimination)\n\n\tif not 60 < hrv < 110:\n\t\treturn { \"status\" : 200 , \"isInputInRange\" : False}\n\n\tisLessThanLowerBound = hrv < (mean - stdDev* scaleOfElimination);\n\tisGreaterThanUpperBound = hrv > (mean + stdDev * scaleOfElimination);\n\tisOutOfBounds = isLessThanLowerBound or isGreaterThanUpperBound;\n\treturn { \"status\" : 200 , \"isInputInRange\" : True, \"isEmergency\" : isOutOfBounds}\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"17406025","text":"import pandas as pd\n\n\nheaders = ['symboling', 'normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location', 'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price']\n\ndf = pd.read_csv('../Data/autos.csv', header=None)\n\ndf.columns = headers\n\npath = '../Data/del.csv'\ndf.to_csv(path)\n\nprint(df.dtypes)","sub_path":"Introduction/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} {"seq_id":"467459900","text":"#!/usr/bin/env python3\n#\n# Advent of Code 2018 - Day 3\n#\n\nimport re\n\nINPUTFILE = 'input.txt'\n\ndef load_input(infile):\n\tlines = []\n\twith open(infile, 'r') as fp:\n\t\tfor line in fp:\n\t\t\tline = line.strip()\n\t\t\tif line:\n\t\t\t\tlines.append(line)\n\n\treturn lines\n\n# PART 1\nclass Rectangle:\n\t\"\"\"A rectangle in four numbers... 
maybe a few more.\"\"\"\n\n\tdef __init__(self, id = None, x = 0, y = 0, w = 0 , h = 0):\n\t\tself.id = id\n\t\tself.x = int(x)\n\t\tself.y = int(y)\n\t\tself.w = int(w)\n\t\tself.h = int(h)\n\n\tdef __str__(self):\n\t\tresult = str(self.id) + \" (\" + str(self.x) + \",\" + str(self.y) + \") \"\n\t\tresult += str(self.w) + \"x\" + str(self.h)\n\t\treturn result\n\t\n\tdef left(self):\n\t\treturn self.x\n\t\n\tdef right(self):\n\t\treturn self.x + self.w\n\t\n\tdef top(self):\n\t\treturn self.y\n\n\tdef bottom(self):\n\t\treturn self.y + self.h\n\n\tdef area(self):\n\t\treturn self.w * self.h\n\ndef part1(arg):\n\toutput = 0\n\t# Construct a list of rectangles from the input lines.\n\tlist_rectangles = []\n\tfor line in arg:\n\t\tline = re.sub('[@:]', '', line)\n\t\tid,coords,dimensions = line.split()\n\t\tx,y = coords.split(',')\n\t\tw,h = dimensions.split('x')\n\n\t\tr = Rectangle(id, x, y, w, h)\n\t\tlist_rectangles.append(r)\n\n\tgrid = [[0 for x in range(1000)] for y in range(1000)]\n\t\n\t# Add 1 to each space in the grid occupied by a rectangle.\n\tfor rect in list_rectangles:\n\t\tfor x in range(rect.left(), rect.right()):\n\t\t\tfor y in range(rect.top(), rect.bottom()):\n\t\t\t\tgrid[x][y] += 1\n\t\t\t\n\t# Count up the 1s in the grid:\n\tfor x in grid:\n\t\tfor y in x:\n\t\t\tif y > 1:\n\t\t\t\toutput += 1\n\n\tprint(\"PART 1: \" + str(output))\n\t\n\t# Check each rectangle to see if the grid has a 1 in it for all the spaces in that rect.\n\tsingle_claim_rect_id = \"\"\n\tfor rect in list_rectangles:\n\t\tconflict = False\n\t\tfor x in range(rect.left(), rect.right()):\n\t\t\tfor y in range(rect.top(), rect.bottom()):\n\t\t\t\tif grid[x][y] > 1:\n\t\t\t\t\tconflict = True\n\t\tif not conflict:\n\t\t\tprint(rect.id)\t\n\t\t\tsingle_claim_rect_id = rect.id\n\t\n\tprint(\"PART 2: \", single_claim_rect_id)\n\nif __name__ == '__main__':\n\tprint(\"EXAMPLE 1: \")\n\tr1 = Rectangle(\"#1\", 1, 3, 4, 4)\n\tr2 = Rectangle(\"#2\", 3, 1, 4, 4)\n\tr3 = Rectangle(\"#3\", 5, 5, 2, 2)\n\tr4 = Rectangle(\"#4\", 0, 0, 10, 10)\n\tr5 = Rectangle(\"#5\", 9, 9, 1, 1)\n\texample1 = [\"#1 @ 1,3: 4x4\", \"#2 @ 3,1: 4x4\", \"#3 @ 5,5: 2x2\", \"#4 @ 0,0: 10x10\", \"#5 @ 9,9: 1x1\"]\n\tpart1(example1)\n\tprint(\"EXAMPLE 2: \")\n\tprint(\"END OF EXAMPLES\")\n\tinput = load_input(INPUTFILE)\n\tpart1(input)\n","sub_path":"day03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
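
All of the records in this file share the same schema: one JSON object per entry, with the raw source code in "text" and metadata such as "seq_id", "sub_path", "file_name", "program_lang" and "dataset". The following is a minimal sketch (not part of the dataset itself) of how the records can be read back, assuming the file is available as a plain JSON-lines file named 4223.jsonl as the diff header suggests; iter_records is a hypothetical helper name introduced here for illustration.

import json

def iter_records(path="4223.jsonl"):
    # Yield one decoded record per non-empty line of the JSON-lines file.
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    # Print a short summary of each record: id, original file name, language, code size.
    for rec in iter_records():
        print(rec["seq_id"], rec["file_name"], rec["program_lang"], len(rec["text"]))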