diff --git "a/4798.jsonl" "b/4798.jsonl" new file mode 100644--- /dev/null +++ "b/4798.jsonl" @@ -0,0 +1,613 @@ +{"seq_id":"8336716970","text":"import json\nfrom completion_log import BotLog\nfrom datetime import datetime\nfrom aiofile import AIOFile\nimport traceback\nimport newrelic.agent\nimport requests\nfrom os import getenv\nfrom random import choice\nfrom time import sleep\nfrom pytz import timezone\nfrom Levenshtein import distance as levenshtein_distance\n\nfrom langchain.prompts import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\nAPI_BASE_URL = getenv('DISCORD_BOT_API_URL')\n\nlogger = BotLog('completion-manager')\n\n@newrelic.agent.background_task()\nasync def get_prompt(question, prompt_type, **kwargs):\n '''\n Loads prompt template from file to allow for rapid changing of prompts.\n :param query:\n :return:\n '''\n async with AIOFile(f'templates/{prompt_type}_system.txt', 'r') as f:\n is_this_ok_template = await f.read()\n\n async with AIOFile(f'templates/{prompt_type}_human.txt', 'r') as f:\n is_this_ok_human = await f.read()\n\n system_prompt = SystemMessagePromptTemplate.from_template(is_this_ok_template)\n human_prompt = HumanMessagePromptTemplate.from_template(is_this_ok_human)\n chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])\n\n # Get the current date and time in the specified format\n now = datetime.now()\n # Convert the datetime to Pacific Time (PT)\n pt_timezone = timezone('America/Los_Angeles')\n pt_now = now.astimezone(pt_timezone)\n\n formatted_datetime = pt_now.strftime(\"%A, %B %-d %Y %H:%M:%S\")\n\n # Add the 'datetime' kwarg if it's not provided\n kwargs.setdefault('datetime', formatted_datetime)\n\n # Pass keyword arguments to the format_prompt method\n return chat_prompt.format_prompt(question=question, **kwargs)\n\n@newrelic.agent.background_task()\nasync def archive_completion(prompt_messages, response):\n '''\n Save a simple text copy of every completion, because they're expensive and we'll probably want them again\n :param prompt_messages:\n :param response:\n :return:\n '''\n async with AIOFile('logs/completion_archive.txt', 'a') as f:\n await f.write(\"Prompt Messages:\\n\")\n for prompt in prompt_messages:\n try:\n await f.write(json.dumps(prompt, indent=4))\n except TypeError:\n await f.write(str(prompt))\n await f.write(\"\\n\")\n await f.write(\"\\nResponse:\\n\")\n await f.write(json.dumps(response, indent=4))\n await f.write(\"\\n\\n\")\n\nasync def determine_column_widths(query_results):\n column_widths = {}\n data_line = ''\n for entry in query_results:\n # If entry is a string type, skip it\n if isinstance(entry, str):\n continue\n try:\n for key, value in entry.items():\n if key == 'address':\n continue\n if key not in column_widths and 'Data above' not in str(value):\n column_widths[key] = len(str(key))\n if 'Data above' in str(value):\n data_line = str(value)\n continue\n column_widths[key] = max(column_widths[key], len(str(value)))\n except Exception as e:\n traceback_str = traceback.format_exc()\n logger.debug(f'Traceback: {traceback_str}')\n logger.error(f'Error making pretty (column widths): {e}')\n continue\n return column_widths, data_line\n\nasync def create_table(query_results):\n column_widths, data_line = await determine_column_widths(query_results)\n header = '| ' + ' | '.join([f\"{key:<{column_widths[key]}}\" for key in column_widths]) + ' |'\n separator = '+-' + '-+-'.join(['-' * column_widths[key] for key in column_widths]) + '-+'\n\n rows = []\n for 
i, entry in enumerate(query_results[:-1]): # Exclude the last entry containing the data line\n try:\n row = '| ' + ' | '.join([f\"{entry[key]:<{column_widths[key]}}\" for key in column_widths]) + ' |'\n except Exception as e:\n continue\n rows.append(row)\n\n table = '\\n'.join([header, separator] + rows)\n return '```\\n' + table + '\\n' + data_line + '\\n```' # Add the data line below the table\n\n@newrelic.agent.background_task()\nasync def make_pretty(query_results_list):\n logger.debug(f'Making pretty for query results: {query_results_list}')\n tables = []\n if query_results_list[0] == 'T' and query_results_list[1] == 'h':\n return ''\n for query_results in query_results_list:\n logger.debug(f'Query results: {query_results}')\n table = await create_table(query_results)\n tables.append(table)\n\n return '\\n'.join(tables)\n@newrelic.agent.background_task()\ndef send_message(message_str, user_id):\n url = f\"{API_BASE_URL}/message_user\"\n data = {\n \"message_str\": message_str,\n \"id\": user_id\n }\n headers = {\"Content-Type\": \"application/json\"}\n\n response = requests.post(url, data=json.dumps(data), headers=headers)\n return response.json()\n@newrelic.agent.background_task()\ndef send_reply(message_str, message_id):\n url = f\"{API_BASE_URL}/reply\"\n data = {\n \"message_str\": message_str,\n \"message_id\": message_id\n }\n headers = {\"Content-Type\": \"application/json\"}\n sleep(3)\n response = requests.post(url, data=json.dumps(data), headers=headers)\n logger.info(f'Called reply endpoint with response: {response.json()}')\n return response.json()\n\ndef get_funny():\n funny_list = [\n \"My circuits are working harder than an undergrad in exam season.\",\n \"Your request is in good hands, I'm a bot, not a freshman writing a research paper.\",\n \"I'm crunching the numbers like a hacker trying to crack a password.\",\n \"I'm like a digital DJ, mixing and remixing your request until it's perfect.\",\n \"The good news is, your request is in the queue.The bad news is, I know the guy that implemented the queue.\",\n \"My programming is bulletproof, but my response time could use some work.\",\n \"My circuits are faster than a speeding bullet, but they still need time to work their magic.\",\n \"I'm analyzing your request like a data scientist, but without the lab coat and goggles.\",\n \"My algorithms are like a secret recipe, and I'm cooking up a response that's sure to impress.\",\n \"I 'm like a tech support agent, but without the annoying hold music and the scripted responses.\",\n \"My circuits are overclocked and ready to go, processing your request with lightning speed.\",\n \"Processing your request like a CPU on steroids.\",\n \"Just a few more lines of code to go, thanks for your patience.\",\n \"Analyzing your request like a supercomputer analyzing data.\",\n \"Don't worry, I'm not buffering, just working on your request.\",\n \"Just give me a moment, I'm debugging my circuits.\",\n \"I'm compiling a response that'll knock your socks off, just hold on.\",\n \"The wheels are turning like a clock, your response will be ready soon.\",\n \"I'm working through the request like a data miner through information.\",\n \"Processing your request like a Google search, just with more precision.\",\n \"My circuits are running hot, but I'm still chugging along.\",\n \"I'm working on your request with the precision of a laser beam.\",\n \"Don't worry, I'm not stuck in a loop, just taking a moment to compute.\",\n \"I'm like a robot bartender, just taking my time to craft the 
perfect response for you.\",\n \"Just a few more clock cycles, and your request will be complete.\",\n \"My circuits are firing on all cylinders, working hard for you.\",\n \"Analyzing your request like a quantum computer, just without the quantum bit errors.\"\n ]\n return choice(funny_list)\n\nLOCATIONS = {\n \"North Parking Facility\": (\"A4\", \"Q4\"),\n \"Dr. Martin Luther King, Jr. Library\": (\"B1\", \"Q1\"),\n \"Hugh Gillis Hall\": (\"B1\", \"Q2\"),\n \"Administration\": (\"B2\", \"Q2\"),\n \"Clark Hall\": (\"B2\", \"Q4\"),\n \"Computer Center\": (\"B2\", \"Q3\"),\n \"Dudley Moorhead Hall\": (\"B2\", \"Q1\"),\n \"Instructional Resource Center\": (\"B2\", \"Q1\"),\n \"Morris Dailey Auditorium\": (\"B2\", \"Q3\"),\n \"Tower Hall SJSU\": (\"B2\", \"Q3\"),\n \"Engineering\": (\"B3\", \"Q1\"),\n \"Student Union\": (\"B3\", \"Q4\"),\n \"Associated Students House\": (\"B4\", \"Q4\"),\n \"Automated Bank Teller Facility\": (\"B4\", \"Q3\"),\n \"Industrial Studies\": (\"B4\", \"Q1\"),\n \"Science\": (\"C1\", \"Q1\"),\n \"Washington Square Hall\": (\"C1\", \"Q1\"),\n \"Yoshihiro Uchida Hall\": (\"C1\", \"Q3\"),\n \"Central Classroom Building\": (\"C2\", \"Q2\"),\n \"Dwight Bentel Hall\": (\"C2\", \"Q1\"),\n \"Faculty Office Building\": (\"C2\", \"Q1\"),\n \"Student Wellness Center\": (\"C2\", \"Q4\"),\n \"Art\": (\"C3\", \"Q2\"),\n \"Music\": (\"C3\", \"Q1\"),\n \"EC Provident Credit Union Event Center\": (\"C3\", \"Q3\"),\n \"Boccardo Business Classroom Building\": (\"C4\", \"Q2\"),\n \"Business Tower\": (\"C4\", \"Q2\"),\n \"Central Plant\": (\"C4\", \"Q4\"),\n \"Health Building\": (\"C4\", \"Q3\"),\n \"Duncan Hall\": (\"D1\", \"Q3\"),\n \"Interdisciplinary Science Building\": (\"D1\", \"Q3\"),\n \"West Parking Facility\": (\"D1\", \"Q1\"),\n \"MacQuarrie Hall\": (\"D2\", \"Q1\"),\n \"South Parking Facility\": (\"D2\", \"Q1\"),\n \"Sweeney Hall\": (\"D2\", \"Q2\"),\n \"UPD Building\": (\"D2\", \"Q4\"),\n \"Dining Commons\": (\"D3\", \"Q4\"),\n \"Spartan Recreation and Aquatic Center\": (\"D3\", \"Q1\"),\n \"Washburn Hall\": (\"D3\", \"Q3\"),\n \"Campus Village\": (\"D4\", \"Q2\"),\n \"Joe West Hall\": (\"D4\", \"Q3\")\n}\n\nasync def find_nearest_parking(location_name, locations=LOCATIONS):\n parking_facilities = {\n \"North Parking Facility\": (\"A4\", \"Q4\"),\n \"South Parking Facility\": (\"D2\", \"Q1\"),\n \"West Parking Facility\": (\"D1\", \"Q1\"),\n }\n logger.info(f'Finding nearest parking for {location_name}')\n def closest_key_match(parking_dict, search_string):\n search_words = search_string.split()\n min_distance = float('inf')\n closest_key = None\n logger.debug(f'Finding closest key for {search_string}')\n for key in parking_dict.keys():\n key_words = key.split()\n for search_word, key_word in zip(search_words, key_words):\n dist = levenshtein_distance(key_word, search_word)\n\n if dist < min_distance:\n min_distance = dist\n closest_key = key\n logger.debug(f'Found {closest_key} for {search_string}')\n return closest_key\n\n def parse_location(loc_string, quadrant_string):\n row, col = ord(loc_string[0]) - ord('A'), int(loc_string[1]) - 1\n quadrant = int(quadrant_string[1]) - 1\n x = col * 2 + quadrant % 2\n y = row * 2 + quadrant // 2\n return x, y\n\n def distance(location1, location2, exact_match=False):\n location1 = closest_key_match(locations, location1)\n location2 = closest_key_match(parking_facilities, location2)\n\n if not location1 or not location2:\n logger.error(f'Could not find distance between {location1} and {location2}')\n return float('inf')\n 
logger.info(f'Finding distance between {location1} and {location2}')\n\n x1 = x2 = y1 = y2 = float(0)\n try:\n x1, y1 = parse_location(*locations[location1])\n except KeyError:\n if exact_match:\n return float('inf')\n\n try:\n x2, y2 = parse_location(*parking_facilities[location2])\n except KeyError:\n if exact_match:\n return float('inf')\n\n dist = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n dist = round(dist, 3) if dist > 0 else float(5000)\n logger.info(f'Distance between {location1} and {location2} is {dist}')\n return dist\n\n distances = [{\"name\": facility, \"distance\": distance(location_name, facility)} for facility in parking_facilities]\n distances.sort(key=lambda x: x[\"distance\"])\n\n data_line = f\"Data above is distances from {location_name}\"\n distances.append({\"name\": data_line, \"distance\": 0.0})\n\n pretty_table = await make_pretty([distances])\n logger.debug(f'Got pretty distance table: {pretty_table}')\n logger.info(f'Finished finding nearest parking for {location_name}')\n return pretty_table\n\n# Prompts\n\n# Update this to take in a list of results and make them each pretty like this, then return them as a single string of tables separated by a newline\n\n# This is a list of Grid (A1, A2, etc) and quadrant with the quadrant of the grid (top left is 1, bottom left is 3, etc) and the name.\n#\n# Write a python function which takes in a location name string and calculates the distance using the pythagorean theorem to each of these garages\n# A4 Q4 North Parking Facility\n# D2 Q1 South Parking Facility\n# D1 Q1 West Parking Facility\n#\n# and returns a list of the garage names in order by distance. The list should just be a list of strings.\n#\n# Distance should be counted as units where each unit is a quadrant. It should calculate the a and b of the theorem to determine c.\n\n# =CONCAT(\"\\\"\", C2, '\" (\"\"', A2, ',\\\"', B2, '\\\")')\n#\n# Update this excel formula to take in this table:\n# Grid\tQuadrant\tLocation Name\n# A4\tQ4\tNorth Parking Facility\n#\n# and output the format needed for the dictionary\n\n\n# async def get_prompt(question, prompt_type, **kwargs):\n# ....\n# Update this message to convert the datetime to PT.\n\n# Write a python script with functions send_reply and send_message that hit these endpoints with given parameters\n\n# These three are having issues. They should take in this type of list:\n# [{\"fullness\":\"36%\",\"name\":\"North Garage \"},{\"fullness\":\"5%\",\"name\":\"South Campus Garage \"},{\"fullness\":\"68%\",\"name\":\"South Garage \"},{\"fullness\":\"40%\",\"name\":\"West Garage \"},{\"fullness\":\"37%\",\"name\":null},{\"fullness\":\"37%\",\"name\":\"Data above is average fullness for Monday at 09:00:00\"}]\n#\n# and return a formatted table. 
Right now, we're getting this error for entry.items():\n# [completion-manager]: Error: 'str' object has no attribute 'items')\n#\n# modify as needed for the make_pretty function to work and return the formatted table as expected\n\n# formatted_datetime = now.strftime(\"%H:%M %a, %b %-d %Y\")\n#\n# Change this to:\n# Saturday, April 22nd 2023 with the current time\n\n\n\n\n","repo_name":"Trolann/ParkingSucks","sub_path":"src/completion_manager/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14120,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"70883011932","text":"from astakos.im.models import (\n Resource, AstakosUser, Service,\n Project, ProjectMembership, ProjectResourceQuota)\nimport astakos.quotaholder_app.callpoint as qh\nfrom astakos.quotaholder_app.exception import NoCapacityError\nfrom django.db.models import Q\nfrom collections import defaultdict\n\n\nQuotaDict = lambda: defaultdict(lambda: defaultdict(dict))\n\nPROJECT_TAG = \"project:\"\nUSER_TAG = \"user:\"\n\n\ndef project_ref(value):\n return PROJECT_TAG + value\n\n\ndef get_project_ref(project):\n return project_ref(project.uuid)\n\n\ndef user_ref(value):\n return USER_TAG + value\n\n\ndef get_user_ref(user):\n return user_ref(user.uuid)\n\n\ndef from_holding(holding, is_project=False):\n limit, usage_min, usage_max = holding\n prefix = 'project_' if is_project else ''\n body = {prefix+'limit': limit,\n prefix+'usage': usage_max,\n prefix+'pending': usage_max-usage_min,\n }\n return body\n\n\ndef get_user_counters(users, resources=None, sources=None, flt=None):\n holders = [get_user_ref(user) for user in users]\n return qh.get_quota(holders=holders,\n resources=resources,\n sources=sources,\n flt=flt)\n\n\ndef get_project_counters(projects, resources=None, sources=None):\n holders = [get_project_ref(project) for project in projects]\n return qh.get_quota(holders=holders,\n resources=resources,\n sources=sources)\n\n\ndef strip_names(counters):\n stripped = {}\n for ((holder, source, resource), value) in counters.iteritems():\n prefix, sep, holder = holder.partition(\":\")\n assert prefix in [\"user\", \"project\"]\n if source is not None:\n prefix, sep, source = source.partition(\":\")\n assert prefix == \"project\"\n stripped[(holder, source, resource)] = value\n return stripped\n\n\ndef get_related_sources(counters):\n projects = set()\n for (holder, source, resource) in counters.iterkeys():\n projects.add(source)\n return list(projects)\n\n\ndef mk_quota_dict(users_counters, project_counters):\n quota = QuotaDict()\n for (holder, source, resource), u_value in users_counters.iteritems():\n p_value = project_counters[(source, None, resource)]\n values_dict = from_holding(u_value)\n values_dict.update(from_holding(p_value, is_project=True))\n quota[holder][source][resource] = values_dict\n return quota\n\n\ndef get_users_quotas_counters(users, resources=None, sources=None, flt=None):\n user_counters = get_user_counters(users, resources, sources, flt=flt)\n projects = get_related_sources(user_counters)\n project_counters = qh.get_quota(holders=projects, resources=resources)\n return strip_names(user_counters), strip_names(project_counters)\n\n\ndef get_users_quotas(users, resources=None, sources=None, flt=None):\n u_c, p_c = get_users_quotas_counters(users, resources, sources, flt=flt)\n return mk_quota_dict(u_c, p_c)\n\n\ndef get_user_quotas(user, resources=None, sources=None):\n quotas = get_users_quotas([user], resources, sources)\n return 
quotas.get(user.uuid, {})\n\n\ndef service_get_quotas(component, users=None, sources=None):\n name_values = Service.objects.filter(\n component=component).values_list('name')\n service_names = [t for (t,) in name_values]\n resources = Resource.objects.filter(service_origin__in=service_names)\n resource_names = [r.name for r in resources]\n astakosusers = AstakosUser.objects.verified()\n if users is not None:\n astakosusers = astakosusers.filter(uuid__in=users)\n if sources is not None:\n sources = [project_ref(s) for s in sources]\n return get_users_quotas(astakosusers, resources=resource_names,\n sources=sources)\n\n\ndef mk_limits_dict(counters):\n quota = QuotaDict()\n for (holder, source, resource), (limit, _, _) in counters.iteritems():\n quota[holder][source][resource] = limit\n return quota\n\n\ndef mk_project_quota_dict(project_counters):\n quota = QuotaDict()\n for (holder, _, resource), p_value in project_counters.iteritems():\n values_dict = from_holding(p_value, is_project=True)\n quota[holder][resource] = values_dict\n return quota\n\n\ndef get_projects_quota(projects, resources=None, sources=None):\n project_counters = get_project_counters(projects, resources, sources)\n return mk_project_quota_dict(strip_names(project_counters))\n\n\ndef service_get_project_quotas(component, projects=None):\n name_values = Service.objects.filter(\n component=component).values_list('name')\n service_names = [t for (t,) in name_values]\n resources = Resource.objects.filter(service_origin__in=service_names)\n resource_names = [r.name for r in resources]\n ps = Project.objects.initialized()\n if projects is not None:\n ps = ps.filter(uuid__in=projects)\n return get_projects_quota(ps, resources=resource_names)\n\n\ndef get_project_quota(project, resources=None, sources=None):\n quotas = get_projects_quota([project], resources, sources)\n return quotas.get(project.uuid, {})\n\n\ndef get_projects_quota_limits():\n project_counters = qh.get_quota(flt=Q(holder__startswith=PROJECT_TAG))\n user_counters = qh.get_quota(flt=Q(holder__startswith=USER_TAG))\n return mk_limits_dict(project_counters), mk_limits_dict(user_counters)\n\n\ndef _level_quota_dict(quotas):\n lst = []\n for holder, holder_quota in quotas.iteritems():\n for source, source_quota in holder_quota.iteritems():\n for resource, limit in source_quota.iteritems():\n key = (holder, source, resource)\n lst.append((key, limit))\n return lst\n\n\ndef set_quota(quotas, resource=None):\n q = _level_quota_dict(quotas)\n qh.set_quota(q, resource=resource)\n\n\nPENDING_APP_RESOURCE = 'astakos.pending_app'\n\n\ndef mk_user_provision(user, source, resource, quantity):\n holder = user_ref(user)\n source = project_ref(source)\n return (holder, source, resource), quantity\n\n\ndef mk_project_provision(project, resource, quantity):\n holder = project_ref(project)\n return (holder, None, resource), quantity\n\n\ndef _mk_provisions(values):\n provisions = []\n for (holder, source, resource, quantity) in values:\n provisions += [((holder, source, resource), quantity),\n ((source, None, resource), quantity)]\n return provisions\n\n\ndef register_pending_apps(triples, force=False):\n values = [(get_user_ref(user), get_project_ref(project),\n PENDING_APP_RESOURCE, quantity)\n for (user, project, quantity) in triples]\n\n provisions = _mk_provisions(values)\n try:\n s = qh.issue_commission(clientkey='astakos',\n force=force,\n provisions=provisions)\n except NoCapacityError as e:\n limit = e.data['limit']\n return False, limit\n 
qh.resolve_pending_commission('astakos', s)\n return True, None\n\n\ndef get_pending_app_quota(user):\n quota = get_user_quotas(user)\n source = user.get_base_project().uuid\n return quota[source][PENDING_APP_RESOURCE]\n\n\ndef _partition_by(f, l):\n d = {}\n for x in l:\n group = f(x)\n group_l = d.get(group, [])\n group_l.append(x)\n d[group] = group_l\n return d\n\n\ndef astakos_project_quotas(projects, resource=None):\n objs = ProjectResourceQuota.objects.select_related()\n flt = Q(resource__name=resource) if resource is not None else Q()\n grants = objs.filter(project__in=projects).filter(flt)\n grants_d = _partition_by(lambda g: g.project_id, grants)\n\n objs = ProjectMembership.objects\n memberships = objs.initialized(projects).select_related(\n \"person\", \"project\")\n memberships_d = _partition_by(lambda m: m.project_id, memberships)\n\n user_quota = QuotaDict()\n project_quota = QuotaDict()\n\n for project in projects:\n pr_ref = get_project_ref(project)\n state = project.state\n if state not in Project.INITIALIZED_STATES:\n continue\n\n project_grants = grants_d.get(project.id, [])\n project_memberships = memberships_d.get(project.id, [])\n for grant in project_grants:\n resource = grant.resource.name\n val = grant.project_capacity if state == Project.NORMAL else 0\n project_quota[pr_ref][None][resource] = val\n for membership in project_memberships:\n u_ref = get_user_ref(membership.person)\n val = grant.member_capacity if membership.is_active() else 0\n user_quota[u_ref][pr_ref][resource] = val\n\n return project_quota, user_quota\n\n\ndef list_user_quotas(users, qhflt=None):\n qh_quotas = get_users_quotas(users, flt=qhflt)\n return qh_quotas\n\n\ndef qh_sync_projects(projects, resource=None):\n p_quota, u_quota = astakos_project_quotas(projects, resource=resource)\n p_quota.update(u_quota)\n set_quota(p_quota, resource=resource)\n\n\ndef qh_sync_project(project):\n qh_sync_projects([project])\n\n\ndef membership_quota(membership):\n project = membership.project\n pr_ref = get_project_ref(project)\n u_ref = get_user_ref(membership.person)\n objs = ProjectResourceQuota.objects.select_related()\n grants = objs.filter(project=project)\n user_quota = QuotaDict()\n is_active = membership.is_active()\n for grant in grants:\n resource = grant.resource.name\n value = grant.member_capacity if is_active else 0\n user_quota[u_ref][pr_ref][resource] = value\n return user_quota\n\n\ndef qh_sync_membership(membership):\n quota = membership_quota(membership)\n set_quota(quota)\n\n\ndef pick_limit_scheme(project, resource):\n return resource.uplimit if project.is_base else resource.project_default\n\n\ndef qh_sync_new_resource(resource):\n projects = Project.objects.filter(state__in=Project.INITIALIZED_STATES).\\\n select_for_update()\n\n entries = []\n for project in projects:\n limit = pick_limit_scheme(project, resource)\n entries.append(\n ProjectResourceQuota(\n project=project,\n resource=resource,\n project_capacity=limit,\n member_capacity=limit))\n ProjectResourceQuota.objects.bulk_create(entries)\n qh_sync_projects(projects, resource=resource.name)\n","repo_name":"grnet/synnefo","sub_path":"snf-astakos-app/astakos/im/quotas.py","file_name":"quotas.py","file_ext":"py","file_size_in_byte":10349,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"41103456146","text":"#\n# Push data live to API using Python on a RaspberryPi\n#\n\nimport serial\nfrom utils import parse_data, send_data\n\nser = 
serial.Serial('/dev/ttyACM0')\nwhile True:\n message = ser.readline()\n data = parse_data(message)\n if data:\n send_data(data)\n","repo_name":"opendata-stuttgart/sensors-software","sub_path":"raspberry-serial/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":552,"dataset":"github-code","pt":"32"} +{"seq_id":"27150046859","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport numpy as np\n\nfrom scipy.stats import pearsonr, spearmanr\nfrom six.moves import xrange\nimport paddle.fluid as fluid\n\nfrom model.ernie import ErnieModel\nfrom ddparser.parser.nets import nn\nfrom tools.representation import gnn\nfrom tools.representation import utils\n\n\ndef create_model(args,\n pyreader_name,\n ernie_config,\n is_prediction=False,\n task_name=\"\",\n is_classify=False,\n is_regression=False,\n ernie_version=\"1.0\"):\n if is_classify:\n # 增加邻接矩阵和核心词的shape\n pyreader = fluid.layers.py_reader(capacity=50,\n shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1], [-1, 1], [-1, 1],\n [-1, args.max_seq_len, args.max_seq_len], [-1, 2]],\n dtypes=[\n 'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64', 'int64',\n 'int64'\n ],\n lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0],\n name=task_name + \"_\" + pyreader_name,\n use_double_buffer=True)\n elif is_regression:\n pyreader = fluid.layers.py_reader(capacity=50,\n shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],\n dtypes=['int64', 'int64', 'int64', 'int64', 'float32', 'float32', 'int64'],\n lod_levels=[0, 0, 0, 0, 0, 0, 0],\n name=task_name + \"_\" + pyreader_name,\n use_double_buffer=True)\n\n (src_ids, sent_ids, pos_ids, task_ids, input_mask, labels, qids, adj_mat,\n head_ids) = fluid.layers.read_file(pyreader)\n\n ernie = ErnieModel(src_ids=src_ids,\n position_ids=pos_ids,\n sentence_ids=sent_ids,\n task_ids=task_ids,\n input_mask=input_mask,\n config=ernie_config,\n use_fp16=args.use_fp16)\n\n erinie_output = ernie.get_sequence_output()\n cls_feats = ernie.get_pooled_output()\n\n # 增加GAT网络\n gat = gnn.GAT(input_size=768, hidden_size=100, output_size=50, dropout=0.0, alpha=0.1, heads=12, layer=2)\n # 将ernie的表示和邻接矩阵输入到gat网络中得到包含句子结构信息的表示\n gat_emb = gat.forward(erinie_output, adj_mat)\n # 提取核心词的表示\n gat_emb = utils.index_sample(gat_emb, head_ids)\n # 将[CLS]和核心词的表示拼接,供下游网络使用\n cls_feats = fluid.layers.concat([cls_feats, gat_emb], axis=1)\n\n cls_feats = fluid.layers.dropout(x=cls_feats, dropout_prob=0.1, dropout_implementation=\"upscale_in_train\")\n logits = fluid.layers.fc(input=cls_feats,\n size=args.num_labels,\n param_attr=fluid.ParamAttr(name=task_name + \"_cls_out_w\",\n initializer=fluid.initializer.TruncatedNormal(scale=0.02)),\n bias_attr=fluid.ParamAttr(name=task_name + \"_cls_out_b\",\n initializer=fluid.initializer.Constant(0.)))\n\n if is_prediction:\n probs = fluid.layers.softmax(logits)\n feed_targets_name = [src_ids.name, sent_ids.name, pos_ids.name, input_mask.name]\n if ernie_version == \"2.0\":\n feed_targets_name += [task_ids.name]\n return pyreader, probs, feed_targets_name\n\n assert is_classify != is_regression, 'is_classify or is_regression must be true and only one of them can be true'\n num_seqs = fluid.layers.create_tensor(dtype='int64')\n if 
is_classify:\n ce_loss, probs = fluid.layers.softmax_with_cross_entropy(logits=logits, label=labels, return_softmax=True)\n loss = fluid.layers.mean(x=ce_loss)\n accuracy = fluid.layers.accuracy(input=probs, label=labels, total=num_seqs)\n graph_vars = {\n \"loss\": loss,\n \"probs\": probs,\n \"accuracy\": accuracy,\n \"labels\": labels,\n \"num_seqs\": num_seqs,\n \"qids\": qids\n }\n elif is_regression:\n cost = fluid.layers.square_error_cost(input=logits, label=labels)\n loss = fluid.layers.mean(x=cost)\n graph_vars = {\"loss\": loss, \"probs\": logits, \"labels\": labels, \"num_seqs\": num_seqs, \"qids\": qids}\n else:\n raise ValueError('unsupported fine tune mode. only supported classify/regression')\n\n return pyreader, graph_vars\n\n\ndef evaluate_mrr(preds):\n last_qid = None\n total_mrr = 0.0\n qnum = 0.0\n rank = 0.0\n correct = False\n for qid, score, label in preds:\n if qid != last_qid:\n rank = 0.0\n qnum += 1\n correct = False\n last_qid = qid\n\n rank += 1\n if not correct and label != 0:\n total_mrr += 1.0 / rank\n correct = True\n\n return total_mrr / qnum\n\n\ndef evaluate_map(preds):\n def singe_map(st, en):\n total_p = 0.0\n correct_num = 0.0\n for index in xrange(st, en):\n if int(preds[index][2]) != 0:\n correct_num += 1\n total_p += correct_num / (index - st + 1)\n if int(correct_num) == 0:\n return 0.0\n return total_p / correct_num\n\n last_qid = None\n total_map = 0.0\n qnum = 0.0\n st = 0\n for i in xrange(len(preds)):\n qid = preds[i][0]\n if qid != last_qid:\n qnum += 1\n if last_qid != None:\n total_map += singe_map(st, i)\n st = i\n last_qid = qid\n\n total_map += singe_map(st, len(preds))\n return total_map / qnum\n\n\ndef evaluate_classify(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=False,\n metric='simple_accuracy',\n is_classify=False,\n is_regression=False):\n train_fetch_list = [graph_vars[\"loss\"].name, graph_vars[\"accuracy\"].name, graph_vars[\"num_seqs\"].name]\n\n if eval_phase == \"train\":\n if \"learning_rate\" in graph_vars:\n train_fetch_list.append(graph_vars[\"learning_rate\"].name)\n outputs = exe.run(fetch_list=train_fetch_list)\n ret = {\"loss\": np.mean(outputs[0]), \"accuracy\": np.mean(outputs[1])}\n if \"learning_rate\" in graph_vars:\n ret[\"learning_rate\"] = float(outputs[3][0])\n return ret\n\n test_pyreader.start()\n total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n qids, labels, scores, preds = [], [], [], []\n time_begin = time.time()\n\n fetch_list = [\n graph_vars[\"loss\"].name, graph_vars[\"accuracy\"].name, graph_vars[\"probs\"].name, graph_vars[\"labels\"].name,\n graph_vars[\"num_seqs\"].name, graph_vars[\"qids\"].name\n ]\n while True:\n try:\n if use_multi_gpu_test:\n np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(fetch_list=fetch_list)\n else:\n np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(program=test_program,\n fetch_list=fetch_list)\n total_cost += np.sum(np_loss * np_num_seqs)\n total_acc += np.sum(np_acc * np_num_seqs)\n total_num_seqs += np.sum(np_num_seqs)\n labels.extend(np_labels.reshape((-1)).tolist())\n if np_qids is None:\n np_qids = np.array([])\n qids.extend(np_qids.reshape(-1).tolist())\n scores.extend(np_probs[:, 1].reshape(-1).tolist())\n np_preds = np.argmax(np_probs, axis=1).astype(np.float32)\n preds.extend(np_preds)\n total_label_pos_num += np.sum(np_labels)\n total_pred_pos_num += np.sum(np_preds)\n total_correct_num += 
np.sum(np.dot(np_preds, np_labels))\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n time_end = time.time()\n cost = total_cost / total_num_seqs\n elapsed_time = time_end - time_begin\n\n evaluate_info = \"\"\n if metric == 'acc_and_f1':\n ret = acc_and_f1(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)\n elif metric == 'matthews_corrcoef':\n ret = matthews_corrcoef(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret, total_num_seqs, elapsed_time)\n elif metric == 'pearson_and_spearman':\n ret = pearson_and_spearman(scores, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)\n elif metric == 'simple_accuracy':\n ret = simple_accuracy(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret, total_num_seqs, elapsed_time)\n elif metric == \"acc_and_f1_and_mrr\":\n ret_a = acc_and_f1(preds, labels)\n preds = sorted(zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))\n ret_b = evaluate_mrr(preds)\n evaluate_info = \"[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)\n else:\n raise ValueError('unsupported metric {}'.format(metric))\n return evaluate_info\n\n\ndef evaluate_regression(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=False,\n metric='pearson_and_spearman'):\n\n if eval_phase == \"train\":\n train_fetch_list = [graph_vars[\"loss\"].name]\n if \"learning_rate\" in graph_vars:\n train_fetch_list.append(graph_vars[\"learning_rate\"].name)\n outputs = exe.run(fetch_list=train_fetch_list)\n ret = {\"loss\": np.mean(outputs[0])}\n if \"learning_rate\" in graph_vars:\n ret[\"learning_rate\"] = float(outputs[1][0])\n return ret\n\n test_pyreader.start()\n total_cost, total_num_seqs = 0.0, 0.0\n qids, labels, scores = [], [], []\n\n fetch_list = [graph_vars[\"loss\"].name, graph_vars[\"probs\"].name, graph_vars[\"labels\"].name, graph_vars[\"qids\"].name]\n\n time_begin = time.time()\n while True:\n try:\n if use_multi_gpu_test:\n np_loss, np_probs, np_labels, np_qids = exe.run(fetch_list=fetch_list)\n else:\n np_loss, np_probs, np_labels, np_qids = exe.run(program=test_program, fetch_list=fetch_list)\n labels.extend(np_labels.reshape((-1)).tolist())\n if np_qids is None:\n np_qids = np.array([])\n qids.extend(np_qids.reshape(-1).tolist())\n scores.extend(np_probs.reshape(-1).tolist())\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n time_end = time.time()\n\n elapsed_time = time_end - time_begin\n\n if metric == 'pearson_and_spearman':\n ret = pearson_and_spearman(scores, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, elapsed time: %f s\" \\\n % (eval_phase, 0.0, ret['pearson'], ret['spearmanr'], ret['corr'], elapsed_time)\n else:\n raise ValueError('unsupported metric {}'.format(metric))\n\n return evaluate_info\n\n\ndef evaluate(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=False,\n 
metric='simple_accuracy',\n is_classify=False,\n is_regression=False):\n\n if is_classify:\n return evaluate_classify(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=use_multi_gpu_test,\n metric=metric)\n else:\n return evaluate_regression(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=use_multi_gpu_test,\n metric=metric)\n\n\ndef matthews_corrcoef(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n tp = np.sum((labels == 1) & (preds == 1))\n tn = np.sum((labels == 0) & (preds == 0))\n fp = np.sum((labels == 0) & (preds == 1))\n fn = np.sum((labels == 1) & (preds == 0))\n\n mcc = ((tp * tn) - (fp * fn)) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n return mcc\n\n\ndef f1_score(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n tp = np.sum((labels == 1) & (preds == 1))\n tn = np.sum((labels == 0) & (preds == 0))\n fp = np.sum((labels == 0) & (preds == 1))\n fn = np.sum((labels == 1) & (preds == 0))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n f1 = (2 * p * r) / (p + r + 1e-8)\n return f1\n\n\ndef pearson_and_spearman(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n pearson_corr = pearsonr(preds, labels)[0]\n spearman_corr = spearmanr(preds, labels)[0]\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n }\n\n\ndef acc_and_f1(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n acc = simple_accuracy(preds, labels)\n f1 = f1_score(preds, labels)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n\n\ndef simple_accuracy(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n return (preds == labels).mean()\n\n\ndef predict(exe, test_program, test_pyreader, graph_vars, dev_count=1, is_classify=False, is_regression=False):\n test_pyreader.start()\n qids, scores, probs = [], [], []\n preds = []\n\n fetch_list = [graph_vars[\"probs\"].name, graph_vars[\"qids\"].name]\n\n while True:\n try:\n if dev_count == 1:\n np_probs, np_qids = exe.run(program=test_program, fetch_list=fetch_list)\n else:\n np_probs, np_qids = exe.run(fetch_list=fetch_list)\n\n if np_qids is None:\n np_qids = np.array([])\n qids.extend(np_qids.reshape(-1).tolist())\n if is_classify:\n np_preds = np.argmax(np_probs, axis=1).astype(np.float32)\n preds.extend(np_preds)\n elif is_regression:\n preds.extend(np_probs.reshape(-1))\n\n probs.append(np_probs)\n\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n\n probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])\n\n return qids, preds, probs\n","repo_name":"baidu/DDParser","sub_path":"tools/representation/demo/ERNIE/finetune/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":16372,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"32"} +{"seq_id":"11078593528","text":"from ..feature_producer import FeatureProducer\nfrom datasource.treasury import get_us_treasury_yield\n\n\nclass USTreasuryYieldFeatureProducer(FeatureProducer):\n \"\"\"\n This produces US treasury yield data. 
\n \"\"\"\n\n def __init__(self, maturity='10y', feature_label='us_treasury'):\n \"\"\"\n Keyword Arguments:\n maturity {str} -- [Maturity period e.g 1m, 10y] (default: {'10y'})\n \"\"\"\n FeatureProducer.__init__(self, feature_label)\n self.maturity = maturity\n\n def produce(self, df):\n result = df.copy()\n\n df_index = [str(s)[:10] for s in sorted(df.index.values)]\n treasury_data = get_us_treasury_yield(\n df_index[0], df_index[-1], maturity=self.maturity)\n result['%s_%s' % (self.feature_label, self.maturity)] = treasury_data\n\n return result\n","repo_name":"danielwpz/dw-feature-util","sub_path":"feature_util/macro/us_treasury.py","file_name":"us_treasury.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13081169496","text":"from bs4 import BeautifulSoup\r\nimport scrapy\r\n\r\nclass BreadSpider(scrapy.Spider):\r\n name = \"bread\"\r\n \r\n def start_requests(self):\r\n urls = [\r\n 'http://www.ralphs.com/pl/bakery-bread/01002?page=1'\r\n ]\r\n \r\n headers = {\r\n \":authority\" : \"www.ralphs.com\",\r\n \":method\" : \"GET\",\r\n \":path\" : \"/pl/bakery-bread/01002?page=1\",\r\n \":scheme\" : \"https\",\r\n \"accept\" : \"text/html,application/xhtml+xml,application/xml;\"\\\r\n \"q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n \"accept-encoding\" : \"gzip, deflate, br\",\r\n \"accept-language\" : \"en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7\",\r\n \"cache-control\" : \"max-age=0\",\r\n \"upgrade-insecure-requests\" : \"1\",\r\n \"user-agent\" : \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) \"\\\r\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\\\r\n \"Chrome/70.0.3538.110 Safari/537.36\"\r\n\r\n }\r\n\r\n for url in urls:\r\n yield scrapy.Request(url=url, callback = self.parse, headers = headers)\r\n\r\n def parse(self, response):\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n results = soup.select(\"div.PriceDisplay\")\r\n print(results)\r\n \r\n \"\"\" page = response.url.split(\"=\")[-1]\r\n filename = 'bread-%s.html' % page\r\n with open(filename, 'wb') as f:\r\n f.write(response.body) \"\"\"\r\n\r\n","repo_name":"Honey4Groceries/Webscraping","sub_path":"History/H4GFirstTry/scrapeRalphsBread/scrapeBread/scrapeBread/spiders/bread_spider.py","file_name":"bread_spider.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22213033364","text":"person='我是章三,我喜欢你,我今天开心'\nstr_plan=person.split(',')\nprint(str_plan)\n\n\nprint('昨天作业得分{:.2f}完成度{:.2%}'.format(89.999,0.99))\n\nprint( r'helloworld \\n hello')\n\nstr1='hello1'\nstr2='hello2'\nstr3='hello3'","repo_name":"hsmwm/0518","sub_path":"0522/pra.py","file_name":"pra.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69880365851","text":"data,completedSearch={},[]\nfor i in range(int(input())):\n x=input().strip().split()\n data[x[0]]=x[1:]\nsearch=input().strip().split()\nfor i in data.keys():\n st=True\n for j in range(len(search)):\n if search[j] not in data[i]:\n st=False\n if st:\n completedSearch.append(i)\n[print(i,data[i][0],data[i][1],data[i][2]) for i in sorted(completedSearch)] if len(completedSearch)!=0 else print(\"Not Found\")\n 
\n","repo_name":"Banktts/complog2018_2","sub_path":"week8/08_P14.py","file_name":"08_P14.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32651476150","text":"if __name__ == '__main__':\n n = int(input(\"Enter number of students\"))\n students_marks = {}\n result = []\n for _ in range(n):\n name,*mark = input(\"Username : eg: 20 30 40\").split()#Khine 20 30 40\n scores = list(map(float,mark))#[20.0,30.0,40.0]\n students_marks[name] = scores#{\"Khine\":[20.0,30.0,40.0],\"Zar\":[20.0,30.0,40.0]}\n query_name = input(\"Please enter one student: \")#Khine\nresult = students_marks[query_name]\neach_scores = sum(result)/len(result)#average score\nprint(each_scores)\n\n\n\n\n\n","repo_name":"KhineThwe/Python_For_One_By_One_Singapore_Stu","sub_path":"Step2/6.2.AssFindPercentage.py","file_name":"6.2.AssFindPercentage.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4259171751","text":"#!/usr/local/bin/python\nimport random\n\ndef split_num(categories, num):\n output = []\n for i in range(categories):\n output.append(random.randint(0, num//categories))\n total = sum(output)\n if total != num:\n loops = num - total\n for i in range(loops):\n output[i%categories] += 1\n return output\n\ndef encrypt(plaintext):\n encrypted = []\n for char in plaintext:\n encrypted += split_num(4, 255-ord(char))\n return ' '.join(str(e) for e in encrypted)\n\ndef start_challenge():\n print(\"Welcome to MEGACORP's proprietary encryption service! Just type your message below and out will come the encrypted text!\\n\")\n while True:\n plaintext = input(\"Please enter the message you wish to encrypt: \")\n while not plaintext:\n plaintext = input(\"Messages cannot be empty. 
Try again: \")\n ciphertext = encrypt(plaintext)\n print(f\"Your encrypted message is: {ciphertext}\\n\")\n\nstart_challenge()\n","repo_name":"ractf/challenges","sub_path":"2020/bootleg_crypto/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"71583151772","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets,transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\nimport functools\nimport utils\nimport math\nimport DDC\nfrom torch.autograd import Variable\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n#Function to display images\ndef showImage(images,labels,title=\"SVHN Dataset\"):\n if title.upper() == \"MNIST\":\n images = torch.cat((images,images,images),dim=1)\n images = nn.functional.pad(images,(2,2,2,2))\n rows = 2\n cols = 5\n k = 0\n if type(labels) == torch.Tensor:\n labels = np.array(labels).tolist()\n #Un-normalize\n img_to_display = images[:10].permute(0,2,3,1).cpu()\n labels = labels[:10]\n fig, ax = plt.subplots(rows,cols,figsize=(10, 5))\n for i in range(rows):\n for j in range(cols):\n ax[i][j].imshow(img_to_display[k])\n _ = ax[i][j].set(xlabel = labels[k])\n k+=1\n _ = fig.suptitle(title)\n\n\n# In[3]:\n\n\ndevice = 'cuda'\n\n\n# In[4]:\n\n\ntrain_loader_svhn, test_loader_svhn = utils.load_data(\"SVHN\")\ntrain_loader_mnist, test_loader_mnist = utils.load_data(\"MNIST\")\n\n\n# In[5]:\n\n\n#Display svhn\nimages,labels = iter(test_loader_svhn).__next__()\nshowImage(images,labels)\n\n\n# In[6]:\n\n\nimages,labels = iter(test_loader_mnist).__next__()\nshowImage(images,labels,\"MNIST\")\n\n\n# In[7]:\n\n\n#Load model\nmodel = DDC.DDC()\nmodel.to(device)\n\n#Load pretrained\n# pretrain_dict = torch.load(\"C:\\\\Users\\\\Joab-PC\\\\Desktop\\\\Personal Documents\\\\Jupyter Notebooks\\\\svhn_model_statedict.pth\")\n# model.convNet.state_dict().update(pretrain_dict)\n\n\n# In[8]:\n\n\n#Define Optimizer\nLEARNING_RATE = 0.001\nMOMENTUM = 0.9\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam([{'params': model.convNet.parameters()},\n {'params': model.fc.parameters(), 'lr':LEARNING_RATE}],\n lr=LEARNING_RATE)\n\n\n# In[9]:\n\n\nsource, target = list(enumerate(train_loader_svhn)), list(enumerate(train_loader_mnist))\ntrain_steps = min(len(source), len(target)) - 1\n\n\n# In[10]:\n\n\n_, (source_data, source_label) = source[1]\n_, (target_data, _) = target[1]\ntarget_data = torch.cat((target_data,target_data,target_data),dim=1)\ntarget_data = nn.functional.pad(target_data,(2,2,2,2))\nout1, out2, ddc_loss = model(source_data.cuda(),target_data.cuda())\nprint(\"Test ddc_loss: \" + str(ddc_loss))\n\n\n# In[11]:\n\n\n#Training\nepochs = 20\nfor i in range(epochs):\n ddc_loss_epoch = 0\n class_loss = 0\n _lambda = 2 / (1 + math.exp(-10 * (i) / epochs)) - 1\n epoch_loss = 0\n start_time = time.time()\n model.train()\n for batch_idx in range(train_steps):\n _, (source_data, source_label) = source[batch_idx]\n _, (target_data, _) = target[batch_idx]\n source_data = source_data.to(device)\n source_label = source_label.to(device)\n target_data = target_data.to(device)\n target_data = torch.cat((target_data,target_data,target_data),dim=1)\n target_data = nn.functional.pad(target_data,(2,2,2,2))\n \n \n \n optimizer.zero_grad()\n #Forward pass\n out_source, out_target, ddc_loss = 
model(source_data,target_data)\n #Calculate loss\n classification_loss = criterion(out_source,source_label)\n total_loss = classification_loss + (_lambda * ddc_loss)\n class_loss +=classification_loss\n\n #Backward pass\n total_loss.backward()\n optimizer.step()\n ddc_loss_epoch += ddc_loss.item()\n epoch_loss += total_loss.item()\n end_time = time.time()\n time_taken = end_time - start_time\n print(\"ddc loss \" + str(ddc_loss_epoch))\n print(\"Epoch {:d} completed, time taken: {:f}\".format(i+1,time_taken),end=\"\\t\")\n print(\"Training Loss: \" + str(epoch_loss))\n \n #Validation\n# model.eval()\n# for images, labels in val_loader:\n# images = images.to(device)\n# labels = labels.to(device)\n \n# #Forward pass\n# output = model(images)\n# batch_loss = criterion(output,labels)\n# val_loss += batch_loss.item()\n# pred = torch.argmax(output,1)\n# val_correct += torch.sum(pred == labels).item()\n \n# print(\"Validation Loss: \" + str(val_loss),end=\"\\t\")\n# print(\"Accuracy: \" + str(val_correct/len(val_data)*100) + \"%\")\n \n\n\n# In[12]:\n\n\nget_ipython().run_cell_magic('time', '', '#Calculate Accuracy\\nmodel.eval()\\nnum_correct = 0\\nfor images,labels in test_loader_svhn:\\n images = images.to(device)\\n labels = labels.to(device)\\n output,_ ,_ = model(images,images)\\n pred = torch.argmax(output,1)\\n num_correct += torch.sum(pred == labels).item()\\nprint(\"Test accuracy: \" + str(num_correct/len(test_loader_svhn.dataset)*100 ) + \"%\")')\n\n\n# In[13]:\n\n\n#Show predictions\nimages, labels = iter(test_loader_svhn).__next__()\nimages = images.to(device)\noutput, _ , _ = model(images,images)\npred = torch.argmax(output,1)\nlabel_list = np.array(labels.cpu()).tolist()\npred_list = np.array(pred.cpu()).tolist()\nxlabels = []\nfor i in range(len(label_list)):\n temp_label = \"Actual: \" + str(label_list[i]) + \" Predicted: \" + str(pred_list[i])\n xlabels.append(temp_label)\n\nshowImage(images,xlabels)\n\n\n# In[14]:\n\n\ntorch.save(model.state_dict(), \"C:\\\\Users\\\\Joab-PC\\\\Desktop\\\\Personal Documents\\\\Jupyter Notebooks\\\\svhn_coralLoss_statedict.pth\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"joabgoh123/Domain-Adaptation","sub_path":"SVHN & MNIST/DDC/Svhn_DDC.py","file_name":"Svhn_DDC.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29360253080","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2023/4/20 11:05\r\nfrom dataclasses import dataclass\r\nfrom typing import Tuple, Optional\r\nimport torch\r\nfrom torch import nn\r\nfrom transformers import PretrainedConfig\r\nfrom transformers.utils import ModelOutput\r\nfrom .configuration import PPOConfig\r\nfrom ..utils import logprobs_of_labels, get_tensor_stats, flatten_dict, whiten\r\nfrom .data_define import PPORLBatch\r\nfrom ...models.rl.utils import CausalLMOutputWithValue\r\n\r\n\r\n\r\nclass PPOLLMAbstract:\r\n def forward_llm_value_and_logits(self,input_ids,**kwargs):\r\n outputs = self.forward_logits_values(input_ids=input_ids,**kwargs)\r\n logits = outputs.logits\r\n values_pred = outputs.value\r\n return (logits,values_pred)\r\n\r\nclass PPOSEQ2SEQAbstract:\r\n def forward_seq2seq_value_and_logits(self,\r\n input_ids,attention_mask,\r\n decoder_input_ids,decoder_attention_mask,\r\n **kwargs):\r\n outputs = self.forward_logits_values(input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n decoder_input_ids=decoder_input_ids,\r\n decoder_attention_mask=decoder_attention_mask,\r\n **kwargs)\r\n logits = 
outputs.logits\r\n values_pred = outputs.value\r\n return (logits, values_pred)\r\n\r\nclass PPOPrefixLMAbstract:\r\n def forward_prefix_value_and_logits(self,input_ids,**kwargs):\r\n outputs = self.forward_logits_values(input_ids=input_ids, **kwargs)\r\n logits = outputs.logits\r\n values_pred = outputs.value\r\n return (logits, values_pred)\r\n\r\n\r\nclass PPOModelLoss(nn.Module, PPOLLMAbstract, PPOSEQ2SEQAbstract,PPOPrefixLMAbstract):\r\n def forward_ppo_loss(self,batch: PPORLBatch, device):\r\n \"\"\"Forward pass & loss\r\n Args:\r\n batch: Previous batch of episodes\r\n \"\"\"\r\n query_tensors = batch.query_tensors.to(device)\r\n response_tensors = batch.response_tensors.to(device)\r\n old_logprobs = batch.logprobs.to(device)\r\n old_values = batch.values.to(device)\r\n old_rewards = batch.rewards.to(device)\r\n response_length = old_rewards.shape[1]\r\n\r\n advantages, returns = self.get_advantages_and_returns(old_values, old_rewards, response_length)\r\n if self.ppo_config.model_arch_type == \"seq2seq\":\r\n input_ids = query_tensors\r\n decoder_input_ids = response_tensors\r\n attention_mask = input_ids.ne(self.config.pad_token_id).long().to(device)\r\n decoder_attention_mask = (\r\n decoder_input_ids.ne(self.config.pad_token_id).long().to(device)\r\n )\r\n decoder_attention_mask[:, 0] = 1\r\n\r\n # Forward pass\r\n logits,values_pred = self.forward_seq2seq_value_and_logits(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n decoder_input_ids=decoder_input_ids,\r\n decoder_attention_mask=decoder_attention_mask,\r\n return_dict=True,\r\n )\r\n\r\n logprobs = logprobs_of_labels(logits[:, :-1, :], decoder_input_ids[:, 1:])\r\n mask = decoder_input_ids.ne(self.config.pad_token_id).long().to(device)\r\n start = 0\r\n end = start + response_length\r\n logprobs, values_pred, mask = (\r\n logprobs[:, start:end],\r\n values_pred[:, start:end],\r\n mask[:, start:end],\r\n )\r\n elif self.ppo_config.model_arch_type == \"prefixlm\":\r\n tokens = torch.cat((query_tensors, response_tensors), dim=1)\r\n attention_mask = tokens.not_equal(self.config.pad_token_id).long().to(tokens.device)\r\n logits, values_pred = self.forward_prefix_value_and_logits(input_ids=tokens,return_dict=True)\r\n values_pred = values_pred[:, :-1]\r\n logprobs = logprobs_of_labels(logits[:, :-1, :], tokens[:, 1:])\r\n\r\n start = query_tensors.shape[1] - 1\r\n end = start + response_length\r\n logprobs, values_pred, mask = (\r\n logprobs[:, start:end],\r\n values_pred[:, start:end],\r\n attention_mask[:, start:end],\r\n )\r\n else:\r\n tokens = torch.cat((query_tensors, response_tensors), dim=1)\r\n attention_mask = tokens.not_equal(self.config.pad_token_id).long().to(tokens.device)\r\n logits,values_pred = self.forward_llm_value_and_logits(input_ids=tokens,\r\n attention_mask=attention_mask,\r\n return_dict=True)\r\n values_pred = values_pred[:, :-1]\r\n logprobs = logprobs_of_labels(logits[:, :-1, :], tokens[:, 1:])\r\n\r\n start = query_tensors.shape[1] - 1\r\n end = start + response_length\r\n logprobs, values_pred, mask = (\r\n logprobs[:, start:end],\r\n values_pred[:, start:end],\r\n attention_mask[:, start:end],\r\n )\r\n\r\n loss, stats = self.loss_fn(\r\n logprobs=logprobs,\r\n values=values_pred,\r\n old_logprobs=old_logprobs,\r\n old_values=old_values,\r\n advantages=advantages,\r\n returns=returns,\r\n mask=mask,\r\n )\r\n return {\r\n 'loss': loss,\r\n 'stats': stats\r\n }\r\n\r\n def get_advantages_and_returns(\r\n self,\r\n values, # : TensorType[\"batch_size\", \"response_size\"]\r\n rewards, 
#: TensorType[\"batch_size\", \"response_size\"]\r\n response_length: int,\r\n use_whitening: Optional[bool] = True,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"Function that computes advantages and returns from rewards and values.\r\n Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\r\n Note that rewards may include a KL divergence loss term.\r\n\r\n Advantages looks like this:\r\n Adv1 = R1 + γ * λ * R2 + γ^2 * λ^2 * R3 + ...\r\n - V1 + γ * (1 - λ) V2 + γ^2 * λ * (1 - λ) V3 + ...\r\n\r\n Returns looks like this:\r\n Ret1 = R1 + γ * λ * R2 + γ^2 * λ^2 * R3 + ...\r\n + γ * (1 - λ) V2 + γ^2 * λ * (1 - λ) V3 + ...\r\n\r\n Args:\r\n values: Tensor of shape (batch_size, response_size)\r\n rewards: Tensor of shape (batch_size, response_size)\r\n response_length: Length of the response sequence\r\n use_whitening: Whether to use whitening (ie. normalize advantages) or not\r\n \"\"\"\r\n lastgaelam = 0\r\n advantages_reversed = []\r\n for t in reversed(range(response_length)):\r\n nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\r\n delta = rewards[:, t] + self.ppo_config.gamma * nextvalues - values[:, t]\r\n lastgaelam = delta + self.ppo_config.gamma * self.ppo_config.lam * lastgaelam\r\n advantages_reversed.append(lastgaelam)\r\n advantages = torch.stack(advantages_reversed[::-1], dim=1)\r\n returns = advantages + values\r\n if use_whitening:\r\n advantages = whiten(advantages)\r\n return advantages.detach(), returns\r\n\r\n def loss_fn(\r\n self,\r\n logprobs, # : TensorType[\"batch_size\", \"response_size\"]\r\n values, # : TensorType[\"batch_size\", \"response_size\"]\r\n old_logprobs, # : TensorType[\"batch_size\", \"response_size\"]\r\n old_values, # : TensorType[\"batch_size\", \"response_size\"]\r\n advantages, # : TensorType[\"batch_size\", \"response_size\"]\r\n returns, # : TensorType[\"batch_size\", \"response_size\"]\r\n mask # : TensorType[\"batch_size\", \"response_size\"],\r\n ):\r\n \"\"\"PPO objective function.\r\n References:\r\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\r\n \"\"\"\r\n values_clipped = torch.clamp(\r\n values,\r\n old_values - self.ppo_config.cliprange_value,\r\n old_values + self.ppo_config.cliprange_value,\r\n )\r\n n = mask.sum()\r\n\r\n vf_loss1 = (values - returns) ** 2\r\n vf_loss2 = (values_clipped - returns) ** 2\r\n vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\r\n vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\r\n\r\n log_ratio = (logprobs - old_logprobs) * mask\r\n ratio = torch.exp(log_ratio)\r\n # Unbiased KL-div estimates (`k3`). 
Ref: http://joschu.net/blog/kl-approx.html\r\n with torch.no_grad():\r\n approx_kl = torch.mean((ratio - 1) - log_ratio)\r\n\r\n pg_loss1 = -advantages * ratio\r\n pg_loss2 = -advantages * torch.clamp(\r\n ratio,\r\n 1.0 - self.ppo_config.cliprange,\r\n 1.0 + self.ppo_config.cliprange,\r\n )\r\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\r\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\r\n\r\n loss = pg_loss + self.ppo_config.vf_coef * vf_loss\r\n\r\n stats = dict(\r\n losses=dict(\r\n total_loss=loss.item(),\r\n policy_loss=pg_loss.item(),\r\n value_loss=vf_loss.item(),\r\n ),\r\n values=dict(\r\n get_tensor_stats(values, mask, n),\r\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\r\n clipfrac=vf_clipfrac,\r\n ),\r\n old_values=get_tensor_stats(old_values, mask, n),\r\n returns=get_tensor_stats(returns, mask, n),\r\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\r\n ratio=(ratio * mask).sum() / n,\r\n padding_percentage=n / mask.numel(),\r\n )\r\n return loss, flatten_dict(stats)\r\n","repo_name":"ssbuild/deep_training","sub_path":"src/deep_training/nlp/rl/ppo/ppo_module.py","file_name":"ppo_module.py","file_ext":"py","file_size_in_byte":10103,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"20184006813","text":"from vpython import *\nfrom color import *\n\n\nclass Pixel:\n def __init__(self, pos, image_res):\n self.pixel = box(color=color.black, pos=pos, height=10, width=.5, length=image_res)\n self.illumination = vec(0, 0, 0)\n def set_color(self, cl):\n self.pixel.color = vec(cl, cl, cl)\nclass Slit:\n def __init__(self, pos):\n self.slit = sphere(radius=.5, pos=pos, color=color.red, visible=False)\n\nclass Renderer: # Calculations are included in the renderer.\n def __init__(self, slitN, slitL, lambda_, distance, image_res):\n self.attributes = {\n 'N':slitN,\n 'L':slitL,\n \"Lambda\":lambda_,\n \"D\":distance\n }\n self.image_res = image_res\n self.pixels = []\n self.slits = []\n def modify(self, attr, value):\n self.attributes[attr] = value\n def change_distance(self):\n for slit in self.slits:\n slit.slit.pos.z = self.attributes['D']\n def render(self): # Canvas Initial\n self.pixels.clear()\n p0 = -30 + self.image_res / 2\n for cnt in range(int(60 // self.image_res)):\n self.pixels.append(Pixel(vec(p0 + cnt * self.image_res, 0, 0), self.image_res))\n\n self.slits.clear()\n p0 = (1 - self.attributes['N']) * self.attributes['L'] / 2\n for cnt in range(self.attributes['N']):\n self.slits.append(Slit(vec(p0 + cnt * self.attributes['L'], 0, self.attributes['D'])))\n def simulate(self):\n self.render()\n\n for slit in self.slits:\n for pixel in self.pixels:\n length = mag(slit.slit.pos - pixel.pixel.pos)\n c = cos((length / self.attributes[\"Lambda\"]) * 2 * pi)\n s = sin((length / self.attributes[\"Lambda\"]) * 2 * pi)\n pixel.illumination += vec(c, s, 0)\n\n for pixel in self.pixels:\n illumination = mag(pixel.illumination) / self.attributes['N']\n pixel.set_color(illumination)\n","repo_name":"sdlks1/VPython_Project","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70629148573","text":"\"\"\"\nprocessing.py\n\nMain processing chain.\n\"\"\"\n\nimport os\nimport json\nimport pickle\nimport logging\nimport warnings\nimport contextlib\nimport collections\nimport multiprocessing\n\nimport 
tqdm\nimport filelock\nimport numpy as np\n\nfrom .constants import (\n WAVE_HISTORY_LENGTH, SEA_STATE_INTERVALS,\n QC_FAIL_LOG_THRESHOLD, QC_EXTREME_WAVE_LOG_THRESHOLD,\n DYNAMIC_WINDOW_LENGTH_BOUNDS, DYNAMIC_WINDOW_UPDATE_FREQUENCY,\n DYNAMIC_WINDOW_REFERENCE_PERIOD, NUM_DYNAMIC_WINDOWS,\n NUM_DYNAMIC_WINDOW_SAMPLES\n)\n\nfrom .operators import (\n find_wave_indices, add_prefix, get_time_index, get_md5_hash, get_proc_version,\n get_station_meta, get_wave_parameters, get_sea_parameters, get_directional_parameters,\n check_quality_flags, compute_significant_wave_height, compute_dynamic_window_size,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n# constants\n\nWAVE_PARAMS_DTYPE = [\n ('start_time', ' QC_FAIL_LOG_THRESHOLD\n above_extreme_threshold = rel_waveheight > QC_EXTREME_WAVE_LOG_THRESHOLD\n write_qc = above_extreme_threshold or (flags_fired and above_fail_threshold)\n\n if write_qc:\n with qc_lock, open(qc_outfile, 'a') as qcf:\n qc_info = qc_format(filename, flags_fired, rel_waveheight, *qc_args)\n qcf.write(json.dumps(qc_info) + '\\n')\n\n if flags_fired:\n for flag in flags_fired:\n num_flags_fired[flag] += 1\n # skip further processing for this wave\n continue\n\n # add metadata\n this_wave_records.update(\n add_prefix(station_meta, 'meta')\n )\n\n # compute sea state parameters\n for sea_state_period in SEA_STATE_INTERVALS:\n if sea_state_period == 'dynamic':\n dynamic_sea_state_period, dynamic_period_last_update = get_dynamic_window_size(\n dynamic_sea_state_period, wave_params[\"start_time\"], time, elevation,\n last_updated=dynamic_period_last_update\n )\n this_wave_records['sea_state_dynamic_window_length'] = dynamic_sea_state_period\n offset = np.timedelta64(dynamic_sea_state_period, 'm')\n else:\n offset = np.timedelta64(sea_state_period, 'm')\n\n sea_state_idx = slice(\n get_time_index(wave_params['start_time'] - offset, time),\n wave_start\n )\n\n wave_param_timediff = wave_params['start_time'] - wave_params_history['start_time']\n wave_param_mask = np.logical_and(\n # do not look into the future\n wave_param_timediff > np.timedelta64(1, 'ms'),\n # look at most sea_state_period minutes into the past\n wave_param_timediff < offset\n )\n\n sea_state_params = get_sea_parameters(\n time[sea_state_idx],\n elevation[sea_state_idx],\n wave_params_history['height'][wave_param_mask],\n wave_params_history['zero_crossing_period'][wave_param_mask],\n water_depth\n )\n\n if isinstance(sea_state_period, str):\n sea_state_prefix = f'sea_state_{sea_state_period}'\n else:\n sea_state_prefix = f'sea_state_{sea_state_period}m'\n\n this_wave_records.update(\n add_prefix(sea_state_params, sea_state_prefix)\n )\n\n # compute directional quantities\n if direction_args is not None:\n directional_time_idx = get_time_index(\n wave_params['start_time'], direction_args['direction_time'], nearest=True\n )\n\n if directional_time_idx != last_directional_time_idx:\n # only re-compute if index has changed\n directional_params = get_directional_parameters(\n direction_args['direction_time'][directional_time_idx],\n direction_args['direction_frequencies'],\n direction_args['direction_spread'][directional_time_idx],\n direction_args['direction_mean_direction'][directional_time_idx],\n direction_args['direction_energy_density'][directional_time_idx],\n direction_args['direction_peak_direction'][directional_time_idx],\n )\n last_directional_time_idx = directional_time_idx\n\n this_wave_records.update(\n add_prefix(directional_params, 'direction')\n )\n\n for var in this_wave_records.keys():\n 
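# (Editor's note, illustrative; the key name here is hypothetical.)
# wave_records maps a column name to a list of per-wave values -- the
# surrounding loop appends one scalar per key, building a columnar buffer
# that handle_output() flushes in chunks, roughly:
#   wave_records = collections.defaultdict(list)
#   wave_records["sea_state_30m_height"].append(1.7)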
wave_records[var].append(this_wave_records[var])\n\n local_wave_id += 1\n\n # output and empty records in regular intervals\n if local_wave_id % 1000 == 0:\n handle_output(wave_records, wave_params_history, num_flags_fired)\n wave_records.clear()\n pbar.set_postfix(dict(waves_processed=str(local_wave_id)))\n\n else:\n # all waves processed\n pbar.update(len(elevation) - last_wave_stop)\n\n if wave_records:\n handle_output(wave_records, wave_params_history, num_flags_fired)\n\n pbar.set_postfix(dict(waves_processed=str(local_wave_id)))\n\n return outfile, statefile\n","repo_name":"dionhaefner/FOWD","sub_path":"fowd/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":16477,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"41281007002","text":"import unittest\n\n\n# O(n) time, O(1) space solution\ndef remove_dupes(nums: list) -> int:\n\n p1, p2 = 0, 0\n length_count = 1\n\n while p2 < len(nums) - 1:\n while nums[p1] == nums[p2]:\n if p2 < len(nums):\n p2 += 1\n if p2 == len(nums):\n break\n p1 += 1\n if p2 != len(nums):\n nums[p1] = nums[p2]\n length_count += 1\n\n return length_count\n\n\nclass TestRemovingDuplicates(unittest.TestCase):\n\n def test_0011122334(self):\n input_list = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n new_list_len = remove_dupes(input_list)\n self.assertEqual(new_list_len, 5)\n self.assertEqual(input_list, [0, 1, 2, 3, 4, 2, 2, 3, 3, 4])\n\n def test_0(self):\n input_list = [0]\n new_list_len = remove_dupes(input_list)\n self.assertEqual(new_list_len, 1)\n self.assertEqual(input_list, [0])\n\n def test_000123(self):\n input_list = [0, 0, 0, 1, 2, 3]\n new_list_len = remove_dupes(input_list)\n self.assertEqual(new_list_len, 4)\n self.assertEqual(input_list, [0, 1, 2, 3, 2, 3])\n\n def test_12(self):\n input_list = [1, 2]\n new_list_len = remove_dupes(input_list)\n self.assertEqual(new_list_len, 2)\n self.assertEqual(input_list, [1, 2])\n\n def test_11(self):\n input_list = [1, 1]\n new_list_len = remove_dupes(input_list)\n self.assertEqual(new_list_len, 1)\n self.assertEqual(input_list, [1, 1])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"zach-wahrer/leet-code-solutions","sub_path":"python/easy/26-Remove-Duplicates-From-Sorted-Array.py","file_name":"26-Remove-Duplicates-From-Sorted-Array.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27600242744","text":"\"\"\"\r\nModule containing the :Orbital: class that acts as a basis for symmetrized fragment orbitals (SFOs) and molecular orbitals (MOs).\r\n\"\"\"\r\nfrom __future__ import annotations\r\nfrom abc import ABC, abstractmethod\r\n\r\nimport attrs\r\n\r\nfrom orb_analysis.custom_types import SpinTypes\r\n\r\nOCCUPATION_TO_LABEL: dict[float, str] = {\r\n 0.0: \"LUMO\",\r\n 1.0: \"SOMO\",\r\n 2.0: \"HOMO\"\r\n}\r\n\r\n\r\n@attrs.define\r\nclass Orbital(ABC):\r\n \"\"\"\r\n Abstract class that contains information about an orbital. 
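    (Editor's usage sketch, assuming the classmethod defined below.)
    SFO.from_label("24_AA1_B") -> SFO(index=24, irrep="AA1", spin="B");
    a label without a spin suffix, e.g. "3_A1", falls back to SpinTypes.A.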
This class should not be instantiated directly.\r\n \"\"\"\r\n index: int\r\n irrep: str\r\n spin: str\r\n energy: float = 1000.0\r\n occupation: float = -1.0\r\n homo_lumo_index: int = 1000 # Displays either HOMO-[x] or LUMO+[x]\r\n\r\n# ------------------------------------------------------------------\r\n# ---------------------- Shared Classmethods ------------------------\r\n# ------------------------------------------------------------------\r\n\r\n @classmethod\r\n def from_label(cls, label: str):\r\n \"\"\"\r\n Extracts the index, irrep and spin from the label of the SFO. The correct format of the label is:\r\n\r\n __ or _ if the SFO is from an unrestricted calculation.\r\n \"\"\"\r\n index, irrep, *spin = label.split(\"_\")\r\n spin = spin[0] if spin else SpinTypes.A\r\n return cls(index=int(index), irrep=irrep, spin=spin)\r\n\r\n# ------------------------------------------------------------------\r\n# ------------------ Shared Property Methods ------------------------\r\n# ------------------------------------------------------------------\r\n\r\n @property\r\n def is_occupied(self) -> bool:\r\n return self.occupation >= 1e-6\r\n\r\n @property\r\n def homo_lumo_label(self) -> str:\r\n \"\"\" Returns the label in the format HOMO(-x), SOMO(-x) / SOMO(+x), or LUMO(+x) \"\"\"\r\n ret_str = OCCUPATION_TO_LABEL[round(self.occupation)]\r\n\r\n if self.is_occupied:\r\n ret_str = f\"{ret_str}-{self.homo_lumo_index}\" if self.homo_lumo_index != 0 else ret_str\r\n else:\r\n ret_str = f\"{ret_str}+{self.homo_lumo_index}\" if self.homo_lumo_index != 0 else ret_str\r\n return ret_str\r\n\r\n @property\r\n @abstractmethod\r\n def amsview_label(self) -> str:\r\n pass\r\n\r\n# -------------------------------------------------------------------\r\n# ------------------------ Magic Methods ----------------------------\r\n# -------------------------------------------------------------------\r\n\r\n @abstractmethod\r\n def __eq__(self, __value: str | SFO) -> bool:\r\n pass\r\n\r\n\r\n@attrs.define\r\nclass SFO(Orbital):\r\n \"\"\"\r\n This class contains information about a symmetrized fragment orbital (SFO). Initalizing the class requires\r\n the index, irrep and spin of the SFO. The index is the order in which the SFOs are stored in the rkf file.\r\n\r\n Also possible is to initialize the class with a label. The correct format of the label is:\r\n ___ if the SFO is from an unrestricted calculation.\r\n \"\"\"\r\n gross_pop: float = 1000.0\r\n\r\n def __eq__(self, __value: str | SFO) -> bool:\r\n if isinstance(__value, str):\r\n return self == SFO.from_label(__value)\r\n else:\r\n return (\r\n self.index == __value.index\r\n and self.irrep == __value.irrep\r\n and self.spin == __value.spin\r\n )\r\n\r\n @property\r\n def amsview_label(self) -> str:\r\n \"\"\" Returns the orbital label that can be used for AMSView plotting \"\"\"\r\n return f\"SFO_{self.irrep}_{self.index}_{self.spin}\"\r\n\r\n\r\nclass MO(Orbital):\r\n \"\"\"\r\n This class contains information about a molecular orbital (MO). Initalizing the class requires\r\n the index, irrep and spin of the MO. The index is the order in which the MOs are stored in the rkf file.\r\n\r\n Also possible is to initialize the class with a label. 
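    (Editor's illustration.) MO(index=5, irrep="A1", spin="A").amsview_label
    renders as "SCF_A1_5_A"; the SFO counterpart uses the "SFO_" prefix.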
The correct format of the label is:\r\n ___ if the MO is from an unrestricted calculation.\r\n \"\"\"\r\n\r\n def __eq__(self, __value: str | SFO) -> bool:\r\n if isinstance(__value, str):\r\n return self == SFO.from_label(__value)\r\n else:\r\n return (\r\n self.index == __value.index\r\n and self.irrep == __value.irrep\r\n and self.spin == __value.spin\r\n )\r\n\r\n @property\r\n def amsview_label(self) -> str:\r\n \"\"\" Returns the orbital label that can be used for AMSView plotting \"\"\"\r\n return f\"SCF_{self.irrep}_{self.index}_{self.spin}\"\r\n\r\n\r\ndef main():\r\n sfo = SFO.from_label(\"1_A_A\")\r\n # mo = MO.from_label(\"1_A_A\")\r\n label = \"1_A_A\"\r\n print(sfo == label)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"SiebeLeDe/orbitals","sub_path":"src/orb_analysis/orbital/orbital.py","file_name":"orbital.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32594305578","text":"import csv\nfrom datetime import datetime\n\ndef main():\n try:\n current_date_and_time = datetime.now()\n product_dict = read_dictionary(\"products.csv\", 0)\n\n print('\\nInkom Emporium\\n')\n\n with open(\"request.csv\", \"rt\") as file:\n reader = csv.reader(file)\n next(reader)\n\n number_of_items = 0\n subtotal = 0\n tax = 0.06\n discount_amount = 0\n\n for row in reader:\n PRODUCT_CODE = row[0]\n\n try:\n PRODUCT_DATA = product_dict[PRODUCT_CODE]\n PRODUCT_NAME = PRODUCT_DATA[1]\n PRODUCT_QUANTITY = 1\n PRODUCT_PRICE = PRODUCT_DATA[2]\n\n number_of_items += int(row[PRODUCT_QUANTITY])\n subtotal += float(PRODUCT_PRICE) * int(row[PRODUCT_QUANTITY])\n\n print(f'{PRODUCT_NAME}: {row[PRODUCT_QUANTITY]} @ {PRODUCT_PRICE}')\n\n except KeyError as err_key:\n error_message = f\"Error: unknown product ID in the request.csv file\\n{err_key}\"\n print(error_message)\n raise KeyError(error_message)\n\n sales_tax = subtotal * tax\n\n if current_date_and_time.weekday() in [1, 2]:\n discount = 0.10\n discount_amount = subtotal * discount\n total = subtotal + sales_tax - discount_amount\n print(f'\\nDiscount applied (10%) for Tuesday or Wednesday')\n else:\n total = subtotal + sales_tax\n\n print(\n f'\\nNumber of items: {number_of_items}\\n'\n f'Subtotal: {subtotal:.2f}\\n'\n f'Sales Tax: {sales_tax:.2f}\\n'\n f'Discount: {discount_amount:.2f}\\n'\n f'Total: {total:.2f}\\n'\n )\n\n formatted_date_and_time = current_date_and_time.strftime('%a %b %d %H:%M:%S %Y')\n print(\n f\"Thank you for shopping at the Inkom Emporium.\\n\"\n f\"{formatted_date_and_time}\"\n )\n\n except FileNotFoundError as not_found_err:\n print('Error: missing file')\n print(not_found_err)\n raise FileNotFoundError(f\"Error: File not found: {not_found_err}\")\n\ndef read_dictionary(filename, key_column_index):\n dictionary = {}\n\n with open(filename, \"rt\") as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n\n for product_list in reader:\n if product_list != 0:\n key = product_list[key_column_index]\n dictionary[key] = product_list\n\n return dictionary\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gleyson003/cse111","sub_path":"receipt.py","file_name":"receipt.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42636783222","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nn=50\nw_mean = np.zeros(2)\ncov = np.eye(2)\nw = np.random.multivariate_normal(w_mean,cov)\nb = 
np.random.normal(0,1)\n\nx_p1 = []\nx_p2 = []\nx_n1 = []\nx_n2 = []\ny = []\nx = []\nfor i in range(n):\n x_temp=np.random.uniform(low=-3,high=3,size=2)\n x.append(x_temp)\n y.append(np.sign(np.dot(w,x_temp)+b))\n if y[i]>0:\n x_p1.append(x_temp[0])\n x_p2.append(x_temp[1])\n else:\n x_n1.append(x_temp[0])\n x_n2.append(x_temp[1])\n\n\nplt.scatter(x_p1,x_p2,color='red',label=1)\nplt.scatter(x_n1,x_n2,color='blue',label = -1)\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.title('Problem 2a Testdata')\nplt.legend()\nplt.savefig('Problem2a_Testdata.png')\n\nx = np.asarray(x).T\ny = np.asarray(y)\nf =open('2a_test_data.txt','w')\nfor i in range(len(x[0])):\n f.write(f'{x[0,i]} {x[1,i]} {y[i]}\\n')\nf.close()","repo_name":"DhavalParmar61/Machine-Learning","sub_path":"Assignment 1/Code/Q2/Problem2_a.py","file_name":"Problem2_a.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6634616688","text":"import copy\nimport logging\nimport os\nimport re\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nfrom ConfigParser import Error as ConfigParserError, SafeConfigParser\nfrom decimal import Decimal\n\nfrom django import forms\nfrom django.conf import settings as django_settings\nfrom django.db.models.loading import get_model\nfrom django.utils import importlib\nfrom django.utils.datastructures import SortedDict\n\n\n__all__ = ('AVAILABLE_SETTINGS', 'auth_permitted', 'parse_config')\n\n\nDEFAULT_SETTINGS_FILENAME = 'settings.cfg'\nlogger = logging.getLogger('setman')\n\n\nclass ConfigParser(SafeConfigParser, object):\n \"\"\"\n Customize default behavior for config parser instances to support config\n files without sections at all.\n \"\"\"\n no_sections_mode = False\n optionxform = lambda _, value: value\n\n def _read(self, fp, fpname):\n \"\"\"\n If \"No Sections Mode\" enabled - add global section as first line of\n file handler.\n \"\"\"\n if self.no_sections_mode:\n global_section = StringIO()\n global_section.write('[DEFAULT]\\n')\n global_section.write(fp.read())\n global_section.seek(0)\n fp = global_section\n\n return super(ConfigParser, self)._read(fp, fpname)\n\n\nclass Setting(object):\n \"\"\"\n Base class for setting values that can provided in configuration definition\n file.\n\n The class has next attributes:\n\n * ``name``\n * ``type``\n * ``default``\n * ``required``\n * ``label``\n * ``help_text``\n * ``validators``\n * ``field``\n * ``field_args``\n * ``field_kwargs``\n\n The last three attributes can be provided only in Python module, when all\n other attrs can read from configuration definition file.\n \"\"\"\n default = None\n field_args = ('label', 'help_text', 'initial', 'required', 'validators')\n field_klass = None\n field_kwargs = {}\n help_text = None\n label = None\n name = None\n required = True\n type = None\n validators = None\n\n def __init__(self, **kwargs):\n self._validators = kwargs.pop('validators', None)\n restricted = ('field_klass', 'field_args', 'field_kwargs',\n 'validators')\n\n for key, _ in kwargs.items():\n if not hasattr(self, key):\n kwargs.pop(key)\n\n if key in restricted:\n kwargs.pop(key)\n\n self.__dict__.update(kwargs)\n self.required = force_bool(self.required)\n\n def __repr__(self):\n return u'<%s: %s>' % (self.__class__.__name__, self.__unicode__())\n\n def __unicode__(self):\n return u'%s = %r' % (self.name, self.initial)\n\n @property\n def initial(self):\n \"\"\"\n Read real setting value from 
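        (Editor's note.) This is effectively
        getattr(settings, self.name, self.default): the stored value wins,
        and the declared default is only a fallback.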
database or if impossible - just send\n default setting value.\n \"\"\"\n from setman import settings\n return getattr(settings, self.name, self.default)\n\n def to_field(self, **kwargs):\n \"\"\"\n Convert current setting instance to form field.\n\n You should provide ``kwargs`` and all values from here would be used\n when initing ``field`` instance instead of ``Setting`` attributes.\n \"\"\"\n if not self.field_klass:\n raise ValueError('Please, supply `field_klass` attribute first.')\n\n field_kwargs = {}\n\n for arg in self.field_args:\n value = kwargs[arg] if arg in kwargs else getattr(self, arg)\n field_kwargs.update({arg: value})\n\n field_kwargs.update(**self.field_kwargs)\n return self.field_klass(**field_kwargs)\n\n def to_python(self, value):\n \"\"\"\n Convert setting value to necessary Python type. By default, returns\n same value without any conversion.\n \"\"\"\n return value\n\n @property\n def validators(self):\n \"\"\"\n Lazy loaded validators.\n \"\"\"\n cache_key = '_validators_cache'\n if not hasattr(self, cache_key):\n setattr(self, cache_key, self._parse_validators(self._validators))\n return getattr(self, cache_key)\n\n def _parse_validators(self, value):\n \"\"\"\n Parse validators string and try to convert it to list with actual\n validator functions.\n \"\"\"\n if not value:\n return []\n\n items = map(lambda item: item.strip(), value.split(','))\n validators = []\n\n for item in items:\n try:\n validator = load_from_path(item)\n except (AttributeError, ImportError):\n logger.exception('Cannot load %r validator for %s setting.',\n item, self.name)\n continue\n\n validators.append(validator)\n\n return validators\n\n\nclass SettingTypeDoesNotExist(Exception):\n \"\"\"\n Simple exception that raised when user tried to load not supported setting\n type from configuration definition file.\n \"\"\"\n\n\nclass BooleanSetting(Setting):\n \"\"\"\n Boolean setting.\n \"\"\"\n field_klass = forms.BooleanField\n required = False\n type = 'boolean'\n\n def __init__(self, **kwargs):\n super(BooleanSetting, self).__init__(**kwargs)\n self.default = self.to_python(self.default)\n\n def to_python(self, value):\n \"\"\"\n Convert string to the boolean type.\n \"\"\"\n return force_bool(value)\n\n\nclass ChoiceSetting(Setting):\n \"\"\"\n Choice setting.\n \"\"\"\n choices = None\n field_args = Setting.field_args + ('choices', )\n field_klass = forms.ChoiceField\n type = 'choice'\n\n def __init__(self, **kwargs):\n self._choices = kwargs.pop('choices', None)\n super(ChoiceSetting, self).__init__(**kwargs)\n\n @property\n def choices(self):\n \"\"\"\n Lazy loaded choices.\n \"\"\"\n cache_key = '_choices_cache'\n if not hasattr(self, cache_key):\n setattr(self, cache_key, self._parse_choices(self._choices))\n return getattr(self, cache_key)\n\n def _parse_choices(self, value):\n \"\"\"\n Convert string value to valid choices tuple.\n\n **Supported formats:**\n\n * a, b, c\n * (a, A), (b, B), (c, C)\n * a { b, c }, d { e, f }\n * A { (b, B), (c, C) }, D { (e, E), (f, F) }\n * path.to.CHOICES\n * path.to.Model.CHOICES\n * app.Model.CHOICES\n\n \"\"\"\n # Start parsing with internal choices\n if not ',' in value and '.' 
in value:\n # Choices tuple should be last part of value\n path, attr = value.rsplit('.', 1)\n\n # Try to process path as ``app.Model`` definition\n model = None\n\n try:\n app, model = path.split('.')\n except ValueError:\n pass\n else:\n model = get_model(app, model)\n\n # If cannot process path as ``app.Model`` just load it as module\n # or as class from module\n if model is None:\n try:\n module = importlib.import_module(path)\n except ImportError:\n try:\n module = load_from_path(path)\n except (AttributeError, ImportError):\n logger.exception('Cannot load choices from %r path',\n value)\n return ()\n else:\n module = model\n\n # And finally, try to get choices attr in module or model\n try:\n choices = getattr(module, attr)\n except AttributeError:\n logger.exception('Cannot load choices from %r path', value)\n return ()\n elif not '{' in value and not '}' in value:\n # Parse choice with labels\n label_re = re.compile(r'\\(([^,]+),\\s+([^\\)]+)\\)', re.M)\n found = label_re.findall(value)\n\n if found:\n choices = found\n # If nothing found by regex, just split value by comma and\n # duplicate resulted items\n else:\n choices = map(lambda item: (item.strip(), item.strip()),\n value.split(','))\n else:\n # Parse groups\n groups_re = re.compile(r'([^{]+){([^}]+)},?', re.M)\n found = groups_re.findall(value)\n\n if found:\n choices = []\n\n for group, data in found:\n group = group.strip()\n choices.append((group, self._parse_choices(data.strip())))\n else:\n logger.error('Cannot parse choices from %r', value)\n return ()\n\n return tuple(choices)\n\n\nclass DecimalSetting(Setting):\n \"\"\"\n Decimal setting.\n \"\"\"\n decimal_places = None\n field_args = Setting.field_args + ('decimal_places', 'max_digits',\n 'max_value', 'min_value')\n field_klass = forms.DecimalField\n max_digits = None\n max_value = None\n min_value = None\n type = 'decimal'\n\n def __init__(self, **kwargs):\n super(DecimalSetting, self).__init__(**kwargs)\n\n int_setting = IntSetting()\n self.decimal_places = int_setting.to_python(self.decimal_places)\n self.max_digits = int_setting.to_python(self.max_digits)\n\n self.default = self.to_python(self.default)\n self.max_value = self.to_python(self.max_value)\n self.min_value = self.to_python(self.min_value)\n\n def to_python(self, value):\n if value is None:\n return value\n return Decimal(str(value))\n\n\nclass IntSetting(Setting):\n \"\"\"\n Integer setting.\n \"\"\"\n field_args = Setting.field_args + ('max_value', 'min_value')\n field_klass = forms.IntegerField\n max_value = None\n min_value = None\n type = 'int'\n\n def __init__(self, **kwargs):\n super(IntSetting, self).__init__(**kwargs)\n self.default = self.to_python(self.default)\n self.max_value = self.to_python(self.max_value)\n self.min_value = self.to_python(self.min_value)\n\n def to_python(self, value):\n try:\n return int(value)\n except (TypeError, ValueError):\n return None\n\n\nclass FloatSetting(IntSetting):\n \"\"\"\n Float setting.\n \"\"\"\n field_klass = forms.FloatField\n type = 'float'\n\n def to_python(self, value):\n try:\n return float(value)\n except (TypeError, ValueError):\n return None\n\n\nclass StringSetting(Setting):\n \"\"\"\n String setting.\n \"\"\"\n field_args = Setting.field_args + ('max_length', 'min_length')\n field_klass = forms.CharField\n max_length = None\n min_length = None\n regex = None\n type = 'string'\n\n def to_field(self, **kwargs):\n \"\"\"\n Use ``RegexField`` for string setting if ``regex`` was filled in\n configuration definition file.\n \"\"\"\n if 
self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)\n\n\nclass SettingsContainer(object):\n\n def __init__(self, path):\n self._data = []\n self.path = path\n\n def __iter__(self):\n return (item for item in self._data)\n\n def __len__(self):\n return len(self._data)\n\n def append(self, value):\n self._data.append(value)\n setattr(self, value.name, value)\n\n\ndef auth_permitted(user):\n \"\"\"\n Check that the user can have access to the view.\n \"\"\"\n default = lambda user: user.is_superuser\n func = getattr(django_settings, 'SETMAN_AUTH_PERMITTED', default)\n return func(user)\n\n\n\ndef data_to_setting(data, additional_types=None):\n \"\"\"\n Convert data dict to setting instance.\n \"\"\"\n additional_types = additional_types or []\n setting = None\n setting_type = data.get('type')\n\n all_values = globals().values() + additional_types\n\n for value in all_values:\n try:\n if not issubclass(value, Setting):\n continue\n except TypeError:\n continue\n\n if not value.type or not setting_type or \\\n value.type.lower() != setting_type.lower():\n continue\n\n setting = value(**data)\n\n if setting is None:\n raise SettingTypeDoesNotExist('%r setting type not found.' % \\\n setting_type)\n\n return setting\n\n\ndef force_bool(value):\n \"\"\"\n Convert string value to boolean instance.\n \"\"\"\n if isinstance(value, (bool, int)):\n return bool(value)\n\n boolean_states = ConfigParser._boolean_states\n if not value.lower() in boolean_states:\n return None\n\n return boolean_states[value.lower()]\n\n\ndef load_from_path(path):\n \"\"\"\n Load class or function from string path.\n \"\"\"\n module, attr = path.rsplit('.', 1)\n mod = importlib.import_module(module)\n return getattr(mod, attr)\n\n\ndef parse_config(path=None):\n \"\"\"\n Parse Configuration Definition File.\n\n In most cases this file needs to be placed in same folder where project\n settings module exist and named as ``settings.cfg``.\n\n But you can customize things with using ``SETMAN_SETTINGS_FILE`` option.\n Provide there path where settings file actually placed.\n\n Also current function can called with ``path`` string.\n \"\"\"\n additional_types = getattr(django_settings, 'SETMAN_ADDITIONAL_TYPES', ())\n additional_setting_types = []\n default_values = {}\n\n for item in additional_types:\n try:\n additional_type = load_from_path(item)\n except (AttributeError, TypeError):\n logger.exception('Cannot load %r additional setting type from ' \\\n 'configuration.', item)\n\n additional_setting_types.append(additional_type)\n\n if path is None:\n path = getattr(django_settings, 'SETMAN_SETTINGS_FILE', None)\n\n if path is None:\n module = importlib.import_module(django_settings.SETTINGS_MODULE)\n dirname = os.path.dirname(os.path.normpath(module.__file__))\n path = os.path.join(dirname, DEFAULT_SETTINGS_FILENAME)\n\n empty_settings = SettingsContainer(path)\n\n if not os.path.isfile(path):\n logger.error('Cannot read configuration definition file at %r. 
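    # (Editor's illustration -- a hypothetical definition file this parser
    # accepts: each section name becomes Setting.name, the remaining items
    # become attributes, and `type` selects the Setting subclass.)
    #   [MAX_ITEMS]
    #   type = int
    #   default = 10
    #   min_value = 1
    #   label = Maximum number of items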
Exit ' \\\n 'from parsing!', path)\n return empty_settings\n\n # Use ``SortedDict`` instance for reading sections on config file instead\n # of default ``dict`` that can shuffle the sections.\n config = ConfigParser(dict_type=SortedDict)\n\n # First we need to read default values file\n default_values_file = \\\n getattr(django_settings, 'SETMAN_DEFAULT_VALUES_FILE', None)\n\n if default_values_file:\n config.no_sections_mode = True\n\n try:\n config.read(default_values_file)\n except ConfigParserError:\n logger.exception('Cannot read default values from %r',\n default_values_file)\n else:\n default_values = config.defaults()\n finally:\n config.no_sections_mode = False\n\n # Only then really read from config definition file\n try:\n config.read(path)\n except ConfigParserError:\n logger.exception('Cannot parse configuration definition file from ' \\\n '%r', path)\n return empty_settings\n\n settings = copy.deepcopy(empty_settings)\n\n for setting in config.sections():\n data = dict(config.items(setting))\n data.update({'name': setting})\n\n if setting in default_values:\n data.update({'default': default_values[setting]})\n\n try:\n setting = data_to_setting(data, additional_setting_types)\n except SettingTypeDoesNotExist:\n logger.exception('Cannot find proper setting class for %r type',\n data.get('type'))\n return empty_settings\n\n settings.append(setting)\n\n return settings\n\n\nAVAILABLE_SETTINGS = parse_config()\n","repo_name":"owais/django-setman","sub_path":"setman/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"32264374562","text":"\"\"\"\nkey が存在するかどうか確信が持たないときは get メソッドを使います\n\n見つからなかったときは:\n 第二引数を指定した場合は : 第二引数の値を返す\n 第二引数を指定しなかった場合は: None を返す\n\n\"\"\"\nfish_dict = {\n 'karei': 15,\n 'katsuo': 25,\n 'fugu': 100,\n}\n\nprint('same' in fish_dict)\n\n# print(fish_dict['same']) #これはエラー\n\nsame_value = fish_dict.get('same', '見つかりませんでした')\nprint(same_value)\n\nsame_value = fish_dict.get('same')\nprint(same_value)\n\nprint('終了しました')\n","repo_name":"k-brahma/python_basic_sec1","sub_path":"part4_dictionary/basic42_dict_get_value_key.py","file_name":"basic42_dict_get_value_key.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1782174016","text":"\"\"\"Largely adopted from\nhttps://github.com/executablebooks/sphinx-design/blob/6df47513e9e221c61877e9308da7a41d216ae3c3/tests/conftest.py.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport shutil\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any\n\nimport pytest\nfrom auto_pytabs.core import Cache\nfrom sphinx.testing.path import path as sphinx_path\n\nif TYPE_CHECKING:\n from unittest.mock import MagicMock\n\n from docutils import nodes\n from sphinx.testing.util import SphinxTestApp\n\n\npytest_plugins = \"sphinx.testing.fixtures\"\n\n\n@pytest.fixture(autouse=True, scope=\"session\")\ndef purge_cache():\n Cache().clear_all()\n yield\n Cache().clear_all()\n\n\n@pytest.fixture()\ndef mock_cache_persist(mocker) -> MagicMock:\n return mocker.patch(\"auto_pytabs.core.Cache.persist\")\n\n\nclass SphinxBuilder:\n def __init__(self, app: SphinxTestApp, src_path: Path):\n self.app = app\n self._src_path = src_path\n\n @property\n def src_path(self) -> Path:\n return self._src_path\n\n @property\n def out_path(self) -> Path:\n return Path(self.app.outdir)\n\n def build(self, 
assert_pass=True):\n self.app.build()\n if assert_pass:\n assert self.warnings == \"\", self.status\n return self\n\n @property\n def status(self):\n return self.app._status.getvalue()\n\n @property\n def warnings(self):\n return self.app._warning.getvalue()\n\n def get_doctree(\n self, docname: str, post_transforms: bool = False\n ) -> nodes.document:\n doctree: nodes.document = self.app.env.get_doctree(docname)\n if post_transforms:\n self.app.env.apply_post_transforms(doctree, docname)\n # make source path consistent for test comparisons\n for node in doctree.findall(include_self=True):\n if not (\"source\" in node and node[\"source\"]):\n continue\n node[\"source\"] = Path(node[\"source\"]).relative_to(self.src_path).as_posix()\n if node[\"source\"].endswith(\".rst\"):\n node[\"source\"] = node[\"source\"][:-4]\n elif node[\"source\"].endswith(\".md\"):\n node[\"source\"] = node[\"source\"][:-3]\n return doctree\n\n\n@pytest.fixture()\ndef sphinx_builder(tmp_path: Path, make_app, monkeypatch):\n def _create_project(\n source: str,\n compat: bool = False,\n **conf_kwargs: dict[str, Any],\n ):\n if compat:\n conf_kwargs[\"auto_pytabs_compat_mode\"] = True\n src_path = tmp_path / \"srcdir\"\n src_path.mkdir()\n conf_kwargs = {\n \"extensions\": [\n \"sphinx_design\",\n \"auto_pytabs.sphinx_ext\",\n ],\n \"auto_pytabs_no_cache\": True,\n **(conf_kwargs or {}),\n }\n content = \"\\n\".join(\n [f\"{key} = {value!r}\" for key, value in conf_kwargs.items()]\n )\n src_path.joinpath(\"conf.py\").write_text(content, encoding=\"utf8\")\n app = make_app(srcdir=sphinx_path(str(src_path.resolve())), buildername=\"html\")\n shutil.copy(\n \"test/sphinx_ext_test_data/example.py\", src_path.joinpath(\"example.py\")\n )\n shutil.copy(\n \"test/sphinx_ext_test_data/example.js\", src_path.joinpath(\"example.js\")\n )\n\n src_path.joinpath(\"index.rst\").write_text(source)\n return SphinxBuilder(app, src_path)\n\n yield _create_project\n","repo_name":"provinzkraut/AutoPyTabs","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"12501721892","text":"import sys\nimport os\nimport argparse\nimport json\nimport numpy as np\nimport random\nimport math\n\n\ndef isfile(path):\n \"\"\"Check if path is an existing file.\n :Parameters:\n path: Path to the file\n \"\"\"\n if not os.path.isfile(path):\n if os.path.isdir(path):\n msg = \"{0} is a directory\".format(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)\n return path\n \n\ndef get_arguments():\n \"\"\"Retrieves the arguments of the program.\n Returns: An object that contains the arguments\n \"\"\"\n # Parsing arguments\n parser = argparse.ArgumentParser(description=__doc__, usage=\n \"{0} -h\".format(sys.argv[0]))\n parser.add_argument('-i', dest='aa_stat_file', type=isfile, required=True,\n help=\"Protein stat json file\")\n parser.add_argument('-n', dest='num_protein', type=int, required=True,\n help='Number of protein')\n parser.add_argument('-min', dest='minlen', type=int,\n help='Minimum length of protein (for uniform law)')\n parser.add_argument('-max', dest='maxlen', type=int,\n help='Maximum length of protein (for uniform law)')\n parser.add_argument('-mean', dest='meanlen', type=float,\n help='Mean of length distribution (for normal law)')\n parser.add_argument('-sd', dest='sdlen', type=float,\n help='Standard deviation of length distribution (for 
normal law)')\n parser.add_argument('-lambda', dest='lamb', type=float,\n help='Lambda of length distribution (for exponential law)')\n parser.add_argument('-o', dest='output_file', type=str, required=True,\n help='Output fasta file')\n return parser.parse_args()\n\n\ndef load_json(aa_stat_file):\n \"\"\"Load stat\n \"\"\"\n data = {}\n try:\n with open(aa_stat_file, \"rt\") as aa_stat:\n data = json.load(aa_stat)\n except IOError:\n sys.exit(\"Error cannot open {0}\".format(json_file))\n return data\n\n\ndef fill(text, width=80): \n \"\"\"Split text\"\"\" \n return os.linesep.join(text[i:i+width] for i in xrange(0, len(text), width))\n\n\ndef generate_protein(aa_list, aa_prob, minlen, maxlen, meanlen, sdlen, lamb):\n \"\"\"\n \"\"\"\n protlen = 0\n while protlen <= 19:\n if minlen and maxlen:\n protlen = np.random.random_integers(minlen, maxlen)\n elif meanlen and sdlen:\n protlen = int(math.exp(round(random.normalvariate(meanlen, sdlen), 0)))\n elif lamb: \n protlen = int(round(random.expovariate(lamb), 0))\n #assert(protlen > 0)\n return \"\".join(np.random.choice(aa_list, protlen, p=aa_prob))\n\n\ndef simulate_protein(aa_list, aa_prob, num_protein, minlen, maxlen,\n meanlen, sdlen, lamb, output_file):\n \"\"\"\n \"\"\"\n try:\n with open(output_file, \"wt\") as output:\n for i in xrange(1, num_protein + 1):\n output.write(\">protein_{1}{0}{2}{0}\".format(os.linesep, i, \n fill(generate_protein(aa_list, aa_prob, minlen, maxlen,\n meanlen, sdlen, lamb))))\n except IOError:\n sys.exit(\"Error cannot open {0}\".format(output_file))\n\n#==============================================================\n# Main program\n#==============================================================\ndef main():\n \"\"\"\n Main program function\n \"\"\"\n # Get arguments\n args = get_arguments()\n # Load proportion\n stat_prot = load_json(args.aa_stat_file)\n # cumulated\n item = stat_prot.items()\n aa_list = [i[0] for i in item]\n aa_prob = [i[1] for i in item]\n # Start simulating\n simulate_protein(aa_list, aa_prob, args.num_protein, args.minlen,\n args.maxlen, args.meanlen, args.sdlen, args.lamb,\n args.output_file)\n\n\nif __name__ == '__main__':\n main()","repo_name":"aghozlane/compogen","sub_path":"random_protein_generator.py","file_name":"random_protein_generator.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69980468252","text":"import torch\n\nfrom ...modules.graph_construction.embedding_construction import EmbeddingConstruction\nfrom ...modules.utils.padding_utils import pad_2d_vals_no_size\nfrom ...modules.utils.vocab_utils import VocabModel\n\nif __name__ == \"__main__\":\n raw_text_data = [[\"I like nlp.\", \"Same here!\"], [\"I like graph.\", \"Same here!\"]]\n\n vocab_model = VocabModel(\n raw_text_data, max_word_vocab_size=None, min_word_vocab_freq=1, word_emb_size=300\n )\n\n src_text_seq = list(zip(*raw_text_data))[0]\n src_idx_seq = [vocab_model.word_vocab.to_index_sequence(each) for each in src_text_seq]\n src_len = torch.LongTensor([len(each) for each in src_idx_seq])\n num_seq = torch.LongTensor([len(src_len)])\n input_tensor = torch.LongTensor(pad_2d_vals_no_size(src_idx_seq))\n print(\"input_tensor: {}\".format(input_tensor.shape))\n\n emb_constructor = EmbeddingConstruction(vocab_model.word_vocab, \"w2v\", \"bilstm\", \"bilstm\", 128)\n emb = emb_constructor(input_tensor, src_len, num_seq)\n print(\"emb: 
{}\".format(emb.shape))\n","repo_name":"graph4ai/graph4nlp","sub_path":"graph4nlp/pytorch/test/graph_construction/test_embedding_construction.py","file_name":"test_embedding_construction.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":1637,"dataset":"github-code","pt":"32"} +{"seq_id":"14267373238","text":"from bs4 import BeautifulSoup\nimport requests\n\nhtml_content = requests.get('http://forecast.weather.gov/MapClick.php?lat=45.01265248822284&lon=-93.24372590552537#.WMn9UDsrKM8')\nsoup = BeautifulSoup(html_content.text, 'html.parser')\ncurrent_temp=soup.select('#current_conditions-summary > p.myforecast-current-lrg')\nlower_temp = soup.select('#current_conditions-summary > p.myforecast-current-sm')\ndaily_tem = soup.select('#current_conditions_detail')\nprint(daily_tem[0].text.strip())\nprint('Current temperature\": '+current_temp[0].text.strip())\nprint('Lowest temperature: '+lower_temp[0].text.strip())\n\nhtml_day= requests.get('http://forecast.weather.gov/MapClick.php?lat=44.979967375000456&lon=-93.26383802699968#.WMoB7DsrKM8')\nhtml_day.raise_for_status()\nsoup2 = BeautifulSoup(html_day, 'html.parser')\nday_temp = soup2.select('#seven-day-forecast-list > li:nth-child(2) > div > p.period-name')\nprint(day_temp.text)\n","repo_name":"mo12g13/Data-Processing-Lab","sub_path":"webbrowser.py","file_name":"webbrowser.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74439981212","text":"import boto3\nimport json\nimport datetime\nfrom pkg_resources import resource_filename\n\nclient = boto3.client('pricing', region_name='us-east-1')\n# Search product filter. This will reduce the amount of data returned by the\n# get_products function of the Pricing API\nFLT = '[{{\"Field\": \"tenancy\", \"Value\": \"shared\", \"Type\": \"TERM_MATCH\"}},' \\\n '{{\"Field\": \"operatingSystem\", \"Value\": \"{o}\", \"Type\": \"TERM_MATCH\"}},' \\\n '{{\"Field\": \"preInstalledSw\", \"Value\": \"NA\", \"Type\": \"TERM_MATCH\"}},' \\\n '{{\"Field\": \"instanceType\", \"Value\": \"{t}\", \"Type\": \"TERM_MATCH\"}},' \\\n '{{\"Field\": \"location\", \"Value\": \"{r}\", \"Type\": \"TERM_MATCH\"}},' \\\n '{{\"Field\": \"capacitystatus\", \"Value\": \"Used\", \"Type\": \"TERM_MATCH\"}}]'\n\n\n# Get current AWS price for an on-demand instance\ndef get_price(region, instance, os='linux'):\n f = FLT.format(r=region, t=instance, o=os)\n data = client.get_products(ServiceCode='AmazonEC2', Filters=json.loads(f))\n od = json.loads(data['PriceList'][0])['terms']['OnDemand']\n id1 = list(od)[0]\n id2 = list(od[id1]['priceDimensions'])[0]\n return od[id1]['priceDimensions'][id2]['pricePerUnit']['USD']\n\n\n# Translate region code to region name. Even though the API data contains\n# regionCode field, it will not return accurate data. However using the location\n# field will, but then we need to translate the region code into a region name.\n# You could skip this by using the region names in your code directly, but most\n# other APIs are using the region code.\ndef get_region_name(region_code):\n default_region = 'US East (N. 
Virginia)'\n endpoint_file = resource_filename('botocore', 'data/endpoints.json')\n try:\n with open(endpoint_file, 'r') as f:\n data = json.load(f)\n # Botocore is using Europe while Pricing API using EU...sigh...\n return data['partitions'][0]['regions'][region_code]['description'].replace('Europe', 'EU')\n except IOError:\n return default_region\n\n\n# Use AWS Pricing API through Boto3\n# API only has us-east-1 and ap-south-1 as valid endpoints.\n# It doesn't have any impact on your selected region for your instance.\n\n\ndef get_ondemand_price(region, instance_type):\n price = get_price(get_region_name(region), instance_type, 'linux')\n return price\n\n\ndef get_spot_price(region, az, instance_type):\n client = boto3.client('ec2', region_name=region)\n prices = client.describe_spot_price_history(\n InstanceTypes=[instance_type],\n ProductDescriptions=['Linux/UNIX', 'Linux/UNIX (Amazon VPC)'],\n StartTime=(datetime.datetime.now() -\n datetime.timedelta(hours=1)).isoformat(),\n MaxResults=10\n )\n\n results = {}\n for price in prices[\"SpotPriceHistory\"]:\n results[price[\"AvailabilityZone\"]] = price[\"SpotPrice\"]\n return results[az]\n\n\ndef get_node_price(capacity_type, region, az, instance_type):\n if capacity_type == 'SPOT':\n return eval(get_spot_price(region, az, instance_type))\n else:\n return eval(get_ondemand_price(region, instance_type))\n\n\nif __name__ == '__main__':\n # Get current price for a given instance, region and os\n print(get_spot_price('us-east-1', 'us-east-1b', 'c5.xlarge'))\n\n print(type(get_ondemand_price('us-east-1', 'c5.xlarge')))\n print(type(get_node_price('SPOT', 'us-east-1', 'us-east-1b', 'c5.xlarge')))\n","repo_name":"luanluandehaobaoman/ekscost","sub_path":"ekscost/get_ec2_price.py","file_name":"get_ec2_price.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"26181496397","text":"from __future__ import print_function\n\nimport io\nimport os\nimport re\nimport sys\nimport codecs\n\nfrom bladerunner.progressbar import get_term_width\n\n\nDEFAULT_ENCODINGS = [\"utf-8\", \"latin-1\", \"utf-16\"]\nDEFAULT_ENCODING = \"utf-8\"\n\nif sys.version_info > (3,):\n UNICODE_TYPE = str\nelse:\n UNICODE_TYPE = unicode\n\n\nclass FakeStdOut(object):\n \"\"\"An object to pass to pexpect's debug logger to simulate sys.stdout.\"\"\"\n\n @staticmethod\n def write(string):\n \"\"\"Fake write, use print instead.\"\"\"\n\n for encoding in DEFAULT_ENCODINGS:\n try:\n print(codecs.decode(string, encoding).strip())\n except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n break\n\n @staticmethod\n def flush():\n \"\"\"Fake flush, print will flush.\"\"\"\n\n pass\n\n\ndef no_empties(input_list):\n \"\"\"Searches through a list and tosses empty elements.\"\"\"\n\n out_list = []\n for item in input_list:\n if item:\n for encoding in DEFAULT_ENCODINGS:\n try:\n out_list.append(codecs.decode(\n codecs.encode(item, encoding),\n encoding,\n ).strip())\n except UnicodeEncodeError:\n pass\n else:\n break\n return out_list\n\n\ndef format_output(output, command, options=None):\n \"\"\"Formatting function to strip colours, remove tabs, etc.\n\n Args::\n\n output: the pexpect object's before method after issuing the command\n command: the command last issued\n options: dictionary of Bladerunner options\n\n Returns:\n a (hopefully) nicely formatted string of the command's output\n \"\"\"\n\n if options is None:\n options = {}\n\n def cmd_in_line(command, line):\n 
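    # (Editor's aside.) The decode-with-fallback idiom used throughout this
    # module, reduced to one hypothetical helper for reference:
    #   def best_effort_decode(raw):
    #       for enc in DEFAULT_ENCODINGS:
    #           try:
    #               return codecs.decode(raw, enc)
    #           except (UnicodeDecodeError, UnicodeEncodeError):
    #               continue
    #       return raw  # undecodable -- pass through unchanged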
\"\"\"Checks for long commands wrapping into the output.\"\"\"\n\n if len(command) < 60:\n return False\n\n # how large of command sections we'll look for.\n size = 30\n cmd_split = [command[i:i + size] for i in range(0, len(command), size)]\n\n for fraction in cmd_split:\n if line.find(fraction) > -1:\n return True\n\n output = output.splitlines()\n results = []\n # the first line is the command, the last is /probably/ the prompt\n # there can be cases that disobey this though, like exiting without a \\n\n for line in output[1:-1]:\n line = format_line(line, options)\n if line and not cmd_in_line(command, line):\n results.append(line)\n return \"\\n\".join(results)\n\n\ndef format_line(line, options=None):\n \"\"\"Removes whitespace, weird tabs, etc...\n\n Args::\n\n line: string line to clean\n options: dictionary of Bladerunner options\n \"\"\"\n\n if options is None:\n options = {}\n\n for encoding in DEFAULT_ENCODINGS:\n try:\n line = codecs.decode(line, encoding)\n except (UnicodeDecodeError, UnicodeEncodeError):\n pass\n else:\n break\n else:\n return line # can't decode this, not sure what to do. pass it back\n\n line = line.strip(os.linesep) # can't strip new lines enough\n line = line.replace(\"\\r\", \"\") # no extra carriage returns\n line = re.sub(\"\\033\\[[0-9;]+m\", \"\", line) # no colours\n line = re.sub(\"\\x1b\\[[0-9;]+G\", \"\", line) # no crazy tabs\n line = re.sub(\"\\\\x1b\\[m\\\\x0f\", \"\", line)\n line = re.sub(\"^\\s+\", \"\", line) # no trailing whitespace\n\n # hide the user's passwords in the output in case the term echo'd them\n for key in [\"password\", \"second_password\", \"jump_password\"]:\n password = options.get(key)\n if password:\n if isinstance(password, (list, tuple)):\n for passwd in password:\n line = line.replace(passwd, \"*\" * len(passwd))\n else:\n line = line.replace(password, \"*\" * len(password))\n\n return line\n\n\ndef consolidate(results):\n \"\"\"Makes a list of servers and replies, consolidates dupes.\n\n Args:\n results: the results dictionary from Bladerunner.run\n\n Returns:\n a results dictionary, with a names key instead of name, containing a\n lists of hosts with matching outputs\n \"\"\"\n\n finalresults = []\n for server in results:\n for tempserver in finalresults:\n if tempserver[\"results\"] == server[\"results\"]:\n tempserver[\"names\"].append(server[\"name\"])\n break\n else:\n server[\"names\"] = [server[\"name\"]]\n del server[\"name\"]\n finalresults.append(server)\n\n return finalresults\n\n\ndef csv_results(results, options=None):\n \"\"\"Prints the results consolidated and in a CSV-ish fashion.\n\n Args::\n\n results: the results dictionary from Bladerunner.run\n options: dictionary with optional keys:\n csv_char: a character or string to separate with\n \"\"\"\n\n if options is None:\n options = {}\n\n if \"csv_char\" in options:\n csv_char = options[\"csv_char\"]\n else:\n csv_char = \",\"\n\n write(\"server{csv}command{csv}result\\r\\n\".format(csv=csv_char), options)\n for server in results:\n for command, command_result in server[\"results\"]:\n server_name = server.get(\"name\")\n if not server_name: # catch for consolidated results\n server_name = \" \".join(server.get(\"names\"))\n\n command_result = \"\\n\".join(no_empties(command_result.split(\"\\n\")))\n write(\n (\n \"{name_quote}{name}{name_quote}{csv}{cmd_quote}{command}\"\n \"{cmd_quote}{csv}{res_quote}{result}{res_quote}\\r\\n\"\n ).format(\n name_quote='\"' * int(\" \" in server_name),\n name=server_name,\n csv=csv_char,\n cmd_quote='\"' * int(\" 
\" in command),\n command=command,\n res_quote='\"' * int(\" \" in command_result),\n result=command_result,\n ),\n options,\n )\n\n\ndef stacked_results(results, options=None):\n \"\"\"Display the results in a vertical stack without a frame.\n\n Args::\n\n results: the bladerunner result dictionary\n options: the bladerunner options dictionary\n \"\"\"\n\n results, options = prepare_results(results, options)\n spacer = False\n for result_set in results:\n if spacer:\n write(\"=\" * options[\"width\"], options, end=\"\\n\")\n\n server_lines = []\n line = []\n for name in result_set[\"names\"]:\n # get the current line length...\n currently = sum([len(x) for x in line]) + + len(line)\n # if the name and space for a comma afterwards fit, add to the line\n if currently + (len(name) * 2) + 1 < options[\"width\"]:\n line.append(name)\n else:\n server_lines.append(\", \".join(line + [\"\"]).strip())\n line = [name]\n\n server_lines.append(\", \".join(line))\n\n write(\"\\n\".join(server_lines), options, end=\"\\n\")\n write(\"-\" * options[\"width\"], options, end=\"\\n\")\n for _, result in result_set[\"results\"]:\n write(result, options, end=\"\\n\")\n\n spacer = True\n\n\ndef prepare_results(results, options=None):\n \"\"\"Prepare the results and options dictionary for pretty printing.\n\n Args::\n\n results: the bladerunner result dictionary\n options: the bladerunner options dictionary\n\n Returns:\n a tuple of (results, options) after modifying the keys for printing\n \"\"\"\n\n if options is None:\n options = {}\n\n left_len = 0\n already_consolidated = False\n for server in results:\n try:\n if len(str(server[\"name\"])) > left_len:\n left_len = len(str(server[\"name\"]))\n except KeyError:\n # catches passing already consolidated results in\n already_consolidated = True\n for server_name in server[\"names\"]:\n if len(server_name) > left_len:\n left_len = len(server_name)\n\n if left_len < 6:\n left_len = 6\n\n # print characters, defined by options[\"style\"]\n options[\"chars\"] = {\n \"top_left\": [\"┌\", \"*\", \"╔\", \"╭\"],\n \"top\": [\"─\", \"-\", \"═\", \"─\"],\n \"top_right\": [\"┐\", \"*\", \"╗\", \"╮\"],\n \"top_down\": [\"┬\", \"+\", \"╦\", \"┬\"],\n \"side_left\": [\"├\", \"*\", \"╠\", \"├\"],\n \"side\": [\"│\", \"|\", \"║\", \"│\"],\n \"middle\": [\"┼\", \"+\", \"╬\", \"┼\"],\n \"side_right\": [\"┤\", \"*\", \"╣\", \"┤\"],\n \"bot_left\": [\"└\", \"*\", \"╚\", \"╰\"],\n \"bot\": [\"─\", \"-\", \"═\", \"─\"],\n \"bot_right\": [\"┘\", \"*\", \"╝\", \"╯\"],\n \"bot_up\": [\"┴\", \"+\", \"╩\", \"┴\"],\n }\n\n if not \"style\" in options or not 3 >= options[\"style\"] >= 0:\n options[\"style\"] = 0\n\n options[\"left_len\"] = left_len\n\n try:\n width = options[\"width\"] or get_term_width()\n except KeyError:\n width = get_term_width()\n finally:\n options[\"width\"] = width\n\n if not already_consolidated:\n results = consolidate(results)\n\n return (results, options)\n\n\ndef pretty_results(results, options=None):\n \"\"\"Prints the results in a relatively pretty way.\n\n Args::\n\n results: the results dictionary from Bladerunner.run\n options: a dictionary with optional keys.\n style: integer style, from 0-3\n jump_host: the string jumpbox hostname\n width: integer fixed width for output\n \"\"\"\n\n results, options = prepare_results(results, options)\n\n pretty_header(options)\n\n for result in results:\n _pretty_result(result, options, results)\n\n write(\n \"{left_corner}{left}{up}{right}{right_corner}\\n\".format(\n 
left_corner=options[\"chars\"][\"bot_left\"][options[\"style\"]],\n left=options[\"chars\"][\"bot\"][options[\"style\"]] * (\n options[\"left_len\"] + 2),\n up=options[\"chars\"][\"bot_up\"][options[\"style\"]],\n right=options[\"chars\"][\"bot\"][options[\"style\"]] * (\n options[\"width\"] - options[\"left_len\"] - 5),\n right_corner=options[\"chars\"][\"bot_right\"][options[\"style\"]],\n ),\n options,\n )\n\n\ndef pretty_header(options):\n \"\"\"Internal function for printing the header of pretty_results.\n\n Args::\n\n options: a dictionary with the following keys:\n width: terminal width, already determined in pretty_results\n chars: the character dictionary map, defined in pretty_results\n left_len: the left side length, defined in pretty_results\n jump_host: a string hostname of the jumpbox (if any)\n \"\"\"\n\n jumphost = options.get(\"jump_host\")\n\n if jumphost:\n write(\n \"{l_corner}{left}{down}{right}{down}{jumpbox}{r_corner}\\n\".format(\n l_corner=options[\"chars\"][\"top_left\"][options[\"style\"]],\n left=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"left_len\"]\n + 2\n ),\n down=options[\"chars\"][\"top_down\"][options[\"style\"]],\n right=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"width\"]\n - options[\"left_len\"]\n - 17\n - len(jumphost)\n ),\n jumpbox=options[\"chars\"][\"top\"][options[\"style\"]] * (\n len(jumphost) + 11\n ),\n r_corner=options[\"chars\"][\"top_right\"][options[\"style\"]],\n ),\n options,\n )\n\n write(\n (\n \"{side} Server{l_gap} {side} Result{r_gap} {side} Jumpbox: \"\n \"{jumphost} {side}\\n\"\n ).format(\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n l_gap=\" \" * (options[\"left_len\"] - 6),\n r_gap=\" \" * (\n options[\"width\"]\n - options[\"left_len\"]\n - 25\n - len(jumphost)\n ),\n jumphost=jumphost,\n ),\n options,\n )\n else:\n write(\n \"{l_corner}{left}{down}{right}{r_corner}\\n\".format(\n l_corner=options[\"chars\"][\"top_left\"][options[\"style\"]],\n left=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"left_len\"]\n + 2\n ),\n down=options[\"chars\"][\"top_down\"][options[\"style\"]],\n right=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"width\"]\n - options[\"left_len\"]\n - 5\n ),\n r_corner=options[\"chars\"][\"top_right\"][options[\"style\"]],\n ),\n options,\n )\n\n write(\n \"{side} Server{l_gap} {side} Result{r_gap} {side}\\n\".format(\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n l_gap=\" \" * (options[\"left_len\"] - 6),\n r_gap=\" \" * (options[\"width\"] - options[\"left_len\"] - 13),\n ),\n options,\n )\n\n\ndef _pretty_result(result, options, consolidated_results):\n \"\"\"Internal function, ran inside of a loop to print super fancy results.\n\n Args::\n\n result: the object iterated over in consolidated_results\n options: the options dictionary from pretty_results\n consolidate_results: the output from consolidate\n \"\"\"\n\n result_lines = []\n for command, command_result in result[\"results\"]:\n command_split = no_empties(command_result.split(\"\\n\"))\n for command_line in command_split:\n result_lines.append(command_line)\n\n if len(result_lines or \"\") > len(result[\"names\"]):\n max_length = len(result_lines)\n else:\n max_length = len(result[\"names\"])\n\n if consolidated_results.index(result) == 0 and options.get(\"jump_host\"):\n # first split has a bottom up character when using a jumpbox\n write(\n \"{l_edge}{left}{middle}{right}{up}{jumpbox}{r_edge}\\n\".format(\n 
l_edge=options[\"chars\"][\"side_left\"][options[\"style\"]],\n left=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"left_len\"] + 2),\n middle=options[\"chars\"][\"middle\"][options[\"style\"]],\n right=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"width\"]\n - options[\"left_len\"]\n - 17\n - len(options[\"jump_host\"] or \"\")\n ),\n up=options[\"chars\"][\"bot_up\"][options[\"style\"]],\n jumpbox=options[\"chars\"][\"top\"][options[\"style\"]] * (\n len(options[\"jump_host\"] or \"\")\n + 11\n ),\n r_edge=options[\"chars\"][\"side_right\"][options[\"style\"]],\n ),\n options,\n )\n else:\n # typical horizontal split\n write(\n \"{l_side}{left}{middle}{right}{r_side}\\n\".format(\n l_side=options[\"chars\"][\"side_left\"][options[\"style\"]],\n left=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"left_len\"] + 2),\n middle=options[\"chars\"][\"middle\"][options[\"style\"]],\n right=options[\"chars\"][\"top\"][options[\"style\"]] * (\n options[\"width\"] - options[\"left_len\"] - 5),\n r_side=options[\"chars\"][\"side_right\"][options[\"style\"]],\n ),\n options,\n )\n\n for command in range(max_length):\n # print server name or whitespace, mid mark, and leading space\n try:\n write(\n \"{side} {server}{gap} {side} \".format(\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n server=result[\"names\"][command],\n gap=\" \" * (options[\"left_len\"] - len(\n str(result[\"names\"][command]))),\n ),\n options,\n )\n except IndexError:\n write(\n \"{side} {gap} {side} \".format(\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n gap=\" \" * options[\"left_len\"],\n ),\n options,\n )\n\n # print result line, or whitespace, and side mark\n try:\n write(\n \"{result}{gap} {side}\\n\".format(\n result=result_lines[command],\n gap=\" \" * (\n options[\"width\"]\n - options[\"left_len\"]\n - 7\n - len(result_lines[command])\n ),\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n ),\n options,\n )\n except IndexError:\n write(\n \"{gap} {side}\\n\".format(\n gap=\" \" * (options[\"width\"] - options[\"left_len\"] - 7),\n side=options[\"chars\"][\"side\"][options[\"style\"]],\n ),\n options,\n )\n\n\ndef write(string, options, end=\"\"):\n \"\"\"Writes a line of output to either the output file or stdout.\n\n Args::\n\n string: the string to write out\n options: the options dictionary, uses 'output_file' key only\n end: character or empty string to end the print statement with\n \"\"\"\n\n if options.get(\"output_file\"):\n for enc in DEFAULT_ENCODINGS:\n try:\n with io.open(options[\"output_file\"], \"a\", encoding=enc) as out:\n out.write(UNICODE_TYPE(\"{0}{1}\".format(string, end)))\n except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n break\n else:\n _retry_write(string, options, end)\n\n else:\n try:\n print(string, end=end)\n except (UnicodeEncodeError, UnicodeDecodeError):\n _retry_write(string, options, end)\n\n\ndef _retry_write(string, options, end):\n \"\"\"Retries the write function call if it encounters a UnicodeError.\n\n Args::\n\n string: the string desired to have written\n options: the Bladerunner options dictionary\n end: character or empty string to end the print statement with\n error: the Exception class to raise if the user cancels\n \"\"\"\n\n user_cancel = SystemExit(\n \"Could not write results. Cancelled on user request.\"\n )\n\n double_check = _prompt_for_input_on_error(\n \"Errored printing the results. Would you like to \"\n \"write them to a file somewhere instead? 
\",\n user_cancel,\n )\n\n if double_check.lower().startswith(\"y\"):\n options[\"output_file\"] = _prompt_for_input_on_error(\n \"File name: \",\n user_cancel,\n )\n return write(string, options, end)\n else:\n raise user_cancel\n\n\ndef _prompt_for_input_on_error(user_msg, error):\n \"\"\"Prompt the user with a message. If they try to quit, raise the error.\n\n Args::\n\n user_msg: string message to display to the user\n error: Exception class to raise if the user sends KeyboardInterrupt\n\n Returns:\n the user's reply to the string message\n \"\"\"\n\n try:\n if sys.version_info > (3,):\n return input(user_msg)\n else:\n return raw_input(user_msg)\n except KeyboardInterrupt:\n raise error\n","repo_name":"Demonware/bladerunner","sub_path":"bladerunner/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":19646,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"32"} +{"seq_id":"13957695062","text":"import sqlite3\nimport pandas as pd\n\n# Mit der Datenbank verbinden\nconnection = sqlite3.connect(\"CustomerSales.db\")\n\n# Cursor erstellen\ncursor = connection.cursor()\n\n# Query 1: Übersicht aller Verkäufe + Kunden-, Produkt-, und Regionsinformationen\nverkauf_kunde_produkt_region = (\n \"SELECT verkauf.*, kunde.*, produkt.*, region.* \"\n \"FROM verkaufsregion \"\n \"JOIN verkauf ON verkaufsregion.verkaufsID = verkauf.verkaufsID \"\n \"JOIN kunde ON verkaufsregion.kundenID = kunde.kundenID \"\n \"JOIN produkt ON verkaufsregion.produktID = produkt.produktID \"\n \"JOIN region ON verkaufsregion.regionID = region.regionID;\"\n)\n\nverkauf_kunde_produkt_region_ergebnis = pd.read_sql_query(verkauf_kunde_produkt_region, connection)\n\n# Query 3: Produkte, die ein Modell besitzen, aber keine Produktlinie\nprodukte_mit_Modell_ohne_Linie = (\n \"SELECT DISTINCT produkt.*, produktmodell.modellID, linienmodell.linienID \"\n \"FROM produktmodell \"\n \"JOIN produkt ON produktmodell.produktID = produkt.produktID \"\n \"LEFT JOIN modell ON produktmodell.modellID = modell.modellID \"\n \"LEFT JOIN linienmodell ON modell.modellID = linienmodell.modellID \"\n \"WHERE linienmodell.modellID IS NULL; \"\n)\n\nprodukte_mit_Modell_ohne_Linie_ergebnis = pd.read_sql_query(produkte_mit_Modell_ohne_Linie, connection)\n\n# Query 4: Der Kunde / die Kundin, der/die am häufigsten Bestellungen aufgegeben hat\nkunde_mit_häufigste_Bestellungen = (\n \"SELECT kunde.*, COUNT(verkauf.verkaufsID) AS anzahl_bestellungen \"\n \"FROM kunde \"\n \"JOIN verkauf ON kunde.kundenID = verkauf.kundenID \"\n \"GROUP BY kunde.kundenID \"\n \"ORDER BY anzahl_bestellungen DESC \"\n \"LIMIT 1; \"\n)\n\nkunde_mit_häufigste_Bestellungen_ergebnis = pd.read_sql_query(kunde_mit_häufigste_Bestellungen, connection)\n\n# Die Ergebnisse anzeigen\nprint(\"Query 1: Übersicht aller Verkäufe + Kunden-, Produkt-, und Regionsinformationen:\")\nprint(verkauf_kunde_produkt_region_ergebnis)\nprint(\"\\nQuery 3: Produkte, die ein Modell besitzen, aber keine Produktlinie:\")\nprint(produkte_mit_Modell_ohne_Linie_ergebnis)\nprint(\"\\nQuery 4: Der Kunde / die Kundin, der/die am häufigsten Bestellungen aufgegeben hat:\")\nprint(kunde_mit_häufigste_Bestellungen_ergebnis)\nprint(\"\\nQueries done.\")\n\n# Den Cursor und die Verbindung 
schließen\ncursor.close()\nconnection.close()","repo_name":"VakZok/Data_Engineering_Internship_Exercise","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30013842556","text":"def make_fibo(n):\n if (n==0):\n return 0\n elif (n==1):\n return 1\n else:\n return make_fibo(n-1) + make_fibo\n \n\n\ndef solution2(n):\n return make_fibo(n) % 1234567\n\ndef solution(n):\n fibos = [0,1]\n for i in range(n-1):\n fibos.append(fibos[-1] + fibos[-2])\n return fibos[-1] % 1234567\n\ncase = solution(3)\nprint(case) # 2\n\ncase = solution(5)\nprint(case) # 5","repo_name":"kangtae210/leetCode","sub_path":"p_lv2/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16147384745","text":"from flask import Flask, render_template\nfrom flask_mysqldb import MySQL\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport io\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\n\napp = Flask(__name__)\n\nurl_datos = 'https://raw.githubusercontent.com/YorelyDamian/DatosCSV/main/prueba.csv'\ndataset = pd.read_csv(url_datos)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/prediccion\", methods=['POST'])\ndef prediccion():\n\n X = dataset[['duration', 'count']]\n Y = dataset['class']\n\n correos = dataset.groupby('class').size()\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, Y, test_size=0.3, random_state=0)\n\n modelo_RL = linear_model.LogisticRegression()\n\n modelo_RL.fit(X_train, y_train)\n\n y_pred = modelo_RL.predict(X_test)\n\n p_s = round(metrics.accuracy_score(y_test, y_pred), 3)\n return render_template('index.html', prediccion=f'La predicción es: {p_s}', num_correos=f'{correos}')\n\n\n@app.route('/grafica')\ndef grafica():\n cur = conexion.connection.cursor()\n curs = conexion.connection.cursor()\n cur.execute(\n \"SELECT ROUND((((SELECT COUNT(*) FROM prueba WHERE class='anomaly')*100)/count(*)),2)FROM prueba ;\")\n curs.execute(\n \"SELECT ROUND((((SELECT COUNT(*) FROM prueba WHERE class='normal')*100)/count(*)),2)FROM prueba ;\")\n dataA = cur.fetchall()\n dataN = curs.fetchall()\n return render_template('index.html', spam=dataA, normal=dataN)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"YorelyDamian/Deteccion-de-Spam-con-Flask","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6291257025","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom compas.datastructures import Mesh\nfrom compas.geometry import centroid_points\nfrom compas.geometry import angle_points\n\n\nclass Mesh(Mesh):\n\n def __init__(self):\n super(Mesh, self).__init__()\n\n def to_vertices_and_faces(self, keep_keys=True):\n\n if keep_keys:\n vertices = {vkey: self.vertex_coordinates(vkey) for vkey in self.vertices()}\n faces = {fkey: self.face_vertices(fkey) for fkey in self.faces()}\n else:\n key_index = self.key_index()\n vertices = [self.vertex_coordinates(key) for key in self.vertices()]\n faces = [[key_index[key] for key 
in self.face_vertices(fkey)] for fkey in self.faces()]\n return vertices, faces\n\n def boundaries(self):\n \"\"\"Collect the mesh boundaries as lists of vertices.\n\n Parameters\n ----------\n mesh : Mesh\n Mesh.\n\n Returns\n -------\n boundaries : list\n List of boundaries as lists of vertex keys.\n\n \"\"\"\n\n boundary_edges = {}\n for u, v in self.edges():\n if self.halfedge[u][v] is None:\n boundary_edges[u] = v\n elif self.halfedge[v][u] is None:\n boundary_edges[v] = u\n\n boundaries = []\n boundary = list(boundary_edges.popitem())\n while len(boundary_edges) > 0:\n w = boundary_edges.pop(boundary[-1])\n if w == boundary[0]:\n boundaries.append(boundary)\n if len(boundary_edges) > 0:\n boundary = list(boundary_edges.popitem())\n else:\n boundary.append(w)\n\n return boundaries\n\n def is_boundary_vertex_kink(self, vkey, threshold_angle):\n \"\"\"Return whether there is a kink at a boundary vertex according to a threshold angle.\n\n Parameters\n ----------\n vkey : Key\n The boundary vertex key.\n threshold_angle : float\n Threshold angle in rad.\n\n Returns\n -------\n bool\n True if vertex is on the boundary and has an angle larger than the threshold angle. False otherwise.\n \"\"\"\n\n # check if vertex is on boundary\n if not self.is_vertex_on_boundary(vkey):\n return False\n\n # get the two adjacent boundary vertices (exactly two for manifold meshes)\n ukey, wkey = [nbr for nbr in self.vertex_neighbors(vkey) if self.is_edge_on_boundary(vkey, nbr)]\n\n # compare boundary angle with threshold angle\n return angle_points(self.vertex_coordinates(ukey), self.vertex_coordinates(vkey), self.vertex_coordinates(wkey)) > threshold_angle\n\n def boundary_kinks(self, threshold_angle):\n \"\"\"Return the boundary vertices with kinks.\n\n Parameters\n ----------\n threshold_angle : float\n Threshold angle in rad.\n\n Returns\n -------\n list\n The list of the boundary vertices at kink angles higher than the threshold value.\n\n \"\"\"\n\n return [vkey for bdry in self.vertices_on_boundaries() for vkey in bdry if self.is_boundary_vertex_kink(vkey, threshold_angle)]\n\n def vertex_centroid(self):\n \"\"\"Calculate the centroid of the mesh vertices.\n\n Parameters\n ----------\n\n Returns\n -------\n list\n The coordinates of the centroid of the mesh vertices.\n \"\"\"\n\n return centroid_points([self.vertex_coordinates(vkey) for vkey in self.vertices()])\n","repo_name":"BlockResearchGroup/compas-RV2","sub_path":"src/compas_rv2/singular/datastructures/mesh/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"32"} +{"seq_id":"30598379474","text":"import requests\nfrom random import choice\nfrom pyfiglet import figlet_format\nfrom termcolor import colored\n\nheader = figlet_format(\"JOKE LIBRARY\")\nheader = colored(header, color = 'cyan')\nprint(header)\n\nuser_input = input(\"What would you like to search for?\")\nurl = \"https://icanhazdadjoke.com/search\"\nres = requests.get(\n\turl, \n\theaders = {\"Accept\": \"application/json\"},\n\tparams = {\"term\" : user_input}\n).json()\nnum_jokes = res[\"total_jokes\"]\nif num_jokes > 1 :\n\tprint(f\"I found {num_jokes} jokes, here is one of them:\")\n\tpick = choice(res['results'])\n\tprint(pick['joke'])\nelif num_jokes == 1:\n\tprint(f'Here is one joke:')\n\tprint(res['results'][0]['joke'])\nelse:\n\tprint(\"There are no 
jokes\")","repo_name":"tsiyuki/Games","sub_path":"jokes.py","file_name":"jokes.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25777651987","text":"######################################\n# Kevin Krause\n# Surfsup JSON\n# June 12, 2023\n#\n# Import the dependencies.\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, text\n\nfrom flask import Flask, jsonify\n\n\n\n#################################################\n# Database Setup to Connect To the hawaii.sqlite DB\n#################################################\n# Create our session (link) from Python to the DB\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(autoload_with=engine)\n\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n\n#################################################\n# Flask Setup\n#################################################\n\n# Main Route\napp = Flask(__name__)\n@app.route(\"/\")\ndef home():\n    return (\n        f\"Welcome to the SurfsUp Weather and Weather Station Data<br/>\"\n        f\"Here are your available routes to choose from:<br/>\"\n        f\"Precipitation Information: /api/v1.0/precipitation<br/>\"\n        f\"Station Information: /api/v1.0/stations<br/>\"\n        f\"Temperature Information: /api/v1.0/tobs<br/>\"\n        f\"Add a start date /api/v1.0/precipenter/YYYY-MM-DD<br/>\"\n        f\"Add a start and end date /api/v1.0/precipenterse/YYYY-MM-DD/YYYY-MM-DD<br/>\"\n        
\"\n \"where YYYY-MM-DD = a valid start and end date\"\n )\n#\n#################################################\n# Flask Routes #\n#################################################\n#\n#\n#\n#######################################################\n### The precipitation route was entered #\n#######################################################\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n# Query all Measurement \n precipitation_data = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date > '2016-08-23').order_by(Measurement.date).all()\n \n #.order_by(Measurement.date)\n\n session.close()\n\n # Convert list of tuples into normal list\n top_one_year_precip = list(np.ravel(precipitation_data))\n\n return jsonify(top_one_year_precip)\n# return \"Hi - You are at the Precipitation Page\"\n# End of Precipitation\n#\n#\n#\n# I L E F T R I G H T H E R E\n\n#######################################################\n### The station route was entered #\n#######################################################\n@app.route(\"/api/v1.0/stations\")\ndef Stations():\n session = Session(engine)\n station_data = session.query(Station.id,Station.station,Station.name,Station.latitude,Station.longitude,Station.elevation).all()\n station_list = list(np.ravel(station_data))\n session.close()\n\n return jsonify(station_list)\n\n# return \"Hi - You are at the Stations Page\"\n \n\n# End of Stations\n#\n#######################################################\n### The temperature route was entered #\n#######################################################\n@app.route(\"/api/v1.0/tobs\")\ndef Temperature():\n session = Session(engine)\n\n\n# Query all Measurement \n tobs_data = session.query(Measurement.station,Measurement.date,Measurement.tobs).filter(Measurement.station == 'USC00519281').filter(Measurement.date > '2016-08-23').all()\n \n #.ORDER_by(Measurement.date)\n\n session.close()\n\n # Convert list of tuples into normal list\n tobs_list = list(np.ravel(tobs_data))\n\n return jsonify(tobs_list)\n# return \"Hi - You are at the Temperature Page\"\n# End of Stations\n\n###########################################################################################\n#\n# I N T E R A C T I V E S E C T I O N\n#\n###########################################################################################\n\n############################################################################\n### The precipitation route with date was entered by the user #\n###########################################################################\n@app.route(\"/api/v1.0/precipenter/\")\ndef precip_pass_dt(dtentry):\n \n session = Session(engine)\n query = \"SELECT MIN(tobs), MAX(tobs),AVG(tobs) FROM measurement where DATE(date) >= '%s'\" %dtentry\n mma_data = engine.execute(query)\n mma_data_df = pd.DataFrame(mma_data,columns =['min', 'max','avg'])\n mma_data_json = mma_data_df.to_json(orient = 'records')\n\n session.close()\n\n return mma_data_json\n\n#\n\n#######################################################\n### The precipitation route with start and end date was entered #\n#######################################################\n@app.route(\"/api/v1.0/precipenterse//\")\ndef precip_passse_dt(dtstart,dtend):\n\n session = Session(engine)\n btwncls = f\"'{dtstart}' AND '{dtend}'\"\n query = \"SELECT MIN(tobs), MAX(tobs),AVG(tobs) FROM measurement where DATE(date) BETWEEN %s\" %btwncls\n mma_data_2 = engine.execute(query)\n mma_data_2_df = pd.DataFrame(mma_data_2,columns =['min', 
'max','avg'])\n mma_data_2_json = mma_data_2_df.to_json(orient = 'records')\n session.close()\n\n\n return mma_data_2_json\n\n#\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"PapaKLK/sqlalchemy-challenge","sub_path":"SurfsUp/app_surfsup.py","file_name":"app_surfsup.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73235845851","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('book', '0008_introduction_cnt'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='introduction',\n name='publisher',\n field=models.CharField(max_length=3, choices=[(b'KY', '\\uad50\\ubcf4'), (b'YE', 'YES24'), (b'IN', '\\uc778\\ud130\\ud30c\\ud06c'), (b'BA', '\\ubc18\\ub514\\uc5d4\\ub8e8\\ub2c8\\uc2a4')]),\n preserve_default=True,\n ),\n ]\n","repo_name":"harry81/hoodpub","sub_path":"web/book/migrations/0009_auto_20151122_1359.py","file_name":"0009_auto_20151122_1359.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39189233413","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n for x in range(len(s)-1):\n y = s.pop(-1)\n s.insert(x,y)\n return s\n ","repo_name":"kerrblackhole/Leetcode-Code-Backup","sub_path":"reverse-string/reverse-string.py","file_name":"reverse-string.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31589219450","text":"import sys\nimport os\nimport yaml\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom cerberus import Validator, schema_registry\nfrom pkg_resources import parse_version\n\nimport awsorgs\nfrom awsorgs.utils import *\nfrom awsorgs.validator import file_validator, spec_validator\n\n# Spec parser defaults\nDEFAULT_CONFIG_FILE = '~/.awsorgs/config.yaml'\nDEFAULT_SPEC_DIR = '~/.awsorgs/spec.d'\n\n\n\ndef scan_config_file(log, args):\n if args['--config']:\n config_file = args['--config']\n else:\n config_file = DEFAULT_CONFIG_FILE\n config_file = os.path.expanduser(config_file)\n if not os.path.isfile(config_file):\n log.error(\"config_file not found: {}\".format(config_file))\n return None\n log.debug(\"loading config file: {}\".format(config_file))\n with open(config_file) as f:\n try:\n config = yaml.safe_load(f.read())\n except (yaml.scanner.ScannerError, UnicodeDecodeError):\n log.error(\"{} not a valid yaml file\".format(config_file))\n return None\n except Exception as e:\n log.error(\"cant load config_file '{}': {}\".format(config_file, e))\n return None\n log.debug(\"config: {}\".format(config))\n return config\n\n\ndef get_master_account_id(log, args, config):\n \"\"\"\n Determine the Org Master account id. 
Try in order:\n cli option, config file, client.describe_organization()\n \"\"\"\n if args['--master-account-id']:\n master_account_id = args['--master-account-id']\n else:\n master_account_id = config.get('master_account_id')\n if master_account_id:\n if not valid_account_id(log, master_account_id):\n log.critical(\"config option 'master_account_id' is not valid account Id\")\n sys.exit(1)\n else:\n log.debug(\"'master_account_id' not set in config_file or as cli option\")\n try:\n master_account_id = boto3.client('organizations'\n ).describe_organization()['Organization']['MasterAccountId']\n except ClientError as e:\n log.critical(\"can not determine master_account_id: {}\".format(e))\n sys.exit(1)\n log.debug(\"master_account_id: %s\" % master_account_id)\n return master_account_id\n\n\ndef get_spec_dir(log, args, config):\n \"\"\"\n Determine the spec directory. Try in order:\n cli option, config file, DEFAULT_SPEC_DIR.\n \"\"\"\n if '--spec-dir' in args and args['--spec-dir']:\n spec_dir = args['--spec-dir']\n elif config['spec_dir']:\n spec_dir = config['spec_dir']\n else:\n spec_dir = DEFAULT_SPEC_DIR\n spec_dir = os.path.expanduser(spec_dir)\n log.debug(\"spec_dir: %s\" % spec_dir)\n return spec_dir\n\n\ndef load_config(log, args):\n \"\"\"\n Assemble config options from various sources: cli options, config_file \n params, defaults, etc., and merge them into 'args' dict.\n When we are done we should have found all of the following:\n\n master_account_id\n org_access_role\n spec_dir (except when handling reports)\n auth_account_id (except when called by awsorgs)\n \"\"\"\n config = scan_config_file(log, args)\n args['--master-account-id'] = get_master_account_id(log, args, config)\n args['--spec-dir'] = get_spec_dir(log, args, config)\n if not args['--org-access-role']:\n args['--org-access-role'] = config.get('org_access_role')\n if not args['--auth-account-id']:\n args['--auth-account-id'] = config.get('auth_account_id')\n return args\n\n\ndef validate_spec_file(log, spec_file, validator, errors):\n with open(spec_file) as f:\n try:\n spec_from_file = yaml.safe_load(f.read())\n except (yaml.scanner.ScannerError, UnicodeDecodeError):\n log.warn(\"{} not a valid yaml file. skipping\".format(spec_file))\n return (None, errors)\n except Exception as e:\n log.error(\"cant load spec_file '{}': {}\".format(spec_file, e))\n return (None, errors)\n if validator.validate(spec_from_file):\n return (spec_from_file, errors)\n else:\n log.error(\"schema validation failed for spec_file: {}\".format(spec_file))\n log.debug(\"validator errors:\\n{}\".format(yamlfmt(validator.errors)))\n errors += 1\n return (None, errors)\n\n\ndef validate_package_version(log, spec_dir):\n common_file_name = next(\n (file for file in os.listdir(spec_dir) if file.startswith('common')),\n None,\n )\n if common_file_name is None:\n log.critical(\"cannot locate common spec file in spec_dir '{}'\".format(spec_dir))\n sys.exit(1)\n common_spec_file = os.path.join(spec_dir, common_file_name)\n log.debug('common spec file: {}'.format(common_spec_file))\n with open(common_spec_file) as f:\n try:\n common_spec = yaml.safe_load(f.read())\n except Exception as e:\n log.critical(\"cant load common spec file '{}': {}\".format(common_spec_file, e))\n sys.exit(1)\n log.debug('minimum_version: {}'.format(common_spec['minimum_version']))\n if not parse_version(awsorgs.__version__) >= parse_version(common_spec['minimum_version']):\n log.critical('Installed aws-orgs package does not meet minimum version requirement. 
'\n 'Please update your aws-orgs package to version \"{}\" or higher.'.format(\n common_spec['minimum_version']\n ))\n sys.exit(1)\n return\n\n\ndef validate_spec(log, args):\n \"\"\"\n Load all spec files in spec_dir and validate against spec schema\n \"\"\"\n\n # validate spec_files\n spec_dir = args['--spec-dir']\n if not os.path.isdir(spec_dir):\n log.error(\"spec_dir not found or not a directory: {}\".format(spec_dir))\n sys.exit(1)\n validate_package_version(log, spec_dir)\n validator = file_validator(log)\n spec_object = {}\n errors = 0\n for dirpath, dirnames, filenames in os.walk(spec_dir, topdown = True):\n dirnames[:] = [d for d in dirnames if not d.startswith('.')]\n for f in filenames:\n log.debug(\"considering file {}\".format(f))\n spec_from_file, errors = validate_spec_file(log,\n os.path.join(dirpath, f), validator, errors)\n if spec_from_file:\n spec_object.update(spec_from_file)\n if errors:\n log.critical(\"schema validation failed for {} spec files. run in debug mode for details\".format(errors))\n sys.exit(1)\n log.debug(\"spec_object:\\n{}\".format(yamlfmt(spec_object)))\n\n # validate aggregated spec_object\n validator = spec_validator(log)\n if not validator.validate(spec_object):\n log.critical(\"spec_object validation failed:\\n{}\".format(\n yamlfmt(validator.errors)))\n sys.exit(1)\n log.debug(\"spec_object validation succeeded\")\n return spec_object\n","repo_name":"ucopacme/aws-orgs","sub_path":"awsorgs/spec.py","file_name":"spec.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"72649338330","text":"#!/usr/bin/env python\n__author__ = 'jiri'\n\nimport os\nimport logging\nimport os.path\nimport numpy as np\nimport re\nimport time\nimport random\nimport math\nimport sys\nimport json\nimport common\n\n\n#load logger\nlog=logging.getLogger('anfoamRec.ellipsoidPacking')\n\nclass Sphere:\n def __init__(self,vr,pos):\n self.r=vr\n self.relpos=pos\n\nclass Ellipsoid:\n def __init__(self,vax,vby,vcz,vdir,a1,a2,n):\n self.pos=np.array([0.0,0.0,0.0])\n self.ax=vax\n self.by=vby #by=cz\n self.cz=vcz\n self.dir=vdir #direction vector\n self.ang1=a1\n self.ang2=a2\n self.spheres=[]\n if n%2==1:\n self.spheres.append(Sphere(self.by,self.pos))\n self.aproxBySpheres((int)(n//2))\n def aproxBySpheres(self,n):\n #aprox the ellipsoid by 2n+1 spheres\n if n>0:\n xi=0\n rn = self.by ** 2 / self.ax\n xn = self.ax - rn\n h = xn / n\n for j in range(n):\n xi = xi + h\n if j==n-1:\n ri=rn\n xi=xn\n else:\n ri = math.sqrt(self.by ** 2 * (1 - xi ** 2 / (self.ax ** 2 - self.by ** 2)))\n #rotated coor\n dirx=np.array([1.0,0.0,0.0])\n diry=np.array([0.0,1.0,0.0])\n dirz=np.array([0.0,0.0,1.0])\n rxi=xi*np.dot(self.dir,dirx)\n ryi=xi*np.dot(self.dir,diry)\n rzi=xi*np.dot(self.dir,dirz)\n s1=Sphere(ri, np.array([rxi,ryi,rzi]))\n s2=Sphere(ri, np.array([-rxi, -ryi,-rzi]))\n self.spheres.append(s1)\n self.spheres.append(s2)\n def testFastInter(self,el1):\n if (getDist(self.pos,el1.pos)<=(el1.ax+self.ax)):\n return True\n return False\n # cosa1=np.dot(v,self.rot)/np.linalg.norm(v)/np.linalg.norm(self.rot)\n # cosa2 = np.dot(v, el1.rot) / np.linalg.norm(v) / np.linalg.norm(el1.rot)\n # d1=self.getRectDist(cosa1)\n # d2=el1.getRectDist(cosa2)\n # if (maxdtimeout:\n break\n e0 = ge.getEl_proporcional() #consistent also for one sphere per ellipsoids= sphere packing\n for i in range(50):\n e0.pos = np.array([xmax * random.random(), ymax * random.random(), zmax * random.random()])\n 
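# Random sequential adsorption step: reject the trial position if the candidate overlaps any placed ellipsoid (cheap bounding test first, then the sphere-decomposition check)\n            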
inter=False\n for el in els:\n if (e0.testFastInter(el)):\n if (e0.testSphereInter(el)):\n inter = True\n break\n if not inter:\n els.append(e0)\n els_vol += 4 / 3 * np.pi * e0.ax * e0.by * e0.cz\n j+=1\n break\n #mayavi_visu.show_els(els)\n return els,els_vol\n\ndef randEllipsoidPack(MUX,MUYZ,MUA,SX,SA,sp_per_el,num_cell):\n els,els_vol=_randEllipsoidPack(MUX, MUYZ, MUYZ, MUA, SX, SX, SX, SA, sp_per_el, num_cell)\n log.info('Volume fraction: %.3f Number of ellipsoids: %d', els_vol, len(els))\n return els\n\ndef randEllipsoidPack(MUX,MUYZ,MUA,SX,SYZ,SA,sp_per_el,num_cell):\n els,els_vol=_randEllipsoidPack(MUX, MUYZ, MUYZ, MUA, SX, SYZ, SYZ, SA, sp_per_el, num_cell)\n log.info('Volume fraction: %.3f Number of ellipsoids: %d', els_vol, len(els))\n return els\n\n\"\"\"\nSphere packing algorithm - Random sequential adsorption (RSA)\n\"\"\"\ndef randSpherePack(MUR,SR,num_cells):\n '''\n\n :param MUR: average radius\n :param SR: deviation of radius\n :param xmax: x size of the box\n :param ymax: y size of the box\n :param zmax: z size of the box\n :param nSpheres: number of spheres\n :return:\n '''\n els, els_vol = _randEllipsoidPack(MUR, MUR, MUR, 0, SR, SR, SR, 0, 1, num_cells)\n log.info('Volume fraction: %.3f Number of spheres: %d', els_vol, len(els))\n return els\n\ndef sphereVol(sps):\n vol=0\n for sp in sps:\n vol += 4 / 3 * np.pi * sp.r ** 3\n return vol\n\ndef ellipsoidVol(els):\n vol=0\n for el in els:\n vol+=4 / 3 * np.pi * el.ax * el.by * el.cz\n return vol\n\ndef _ellipsoidGrow(els,gf):\n timeout = time.time() + 4.0+len(els)/50\n while True:\n if time.time()>timeout:\n break\n for i in range(len(els)):\n ax=els[i].ax*gf\n by=els[i].by*gf\n cz=els[i].cz*gf\n e0=Ellipsoid(ax,by,cz,els[i].dir,els[i].ang1,els[i].ang2,len(els[i].spheres))\n e0.pos=els[i].pos\n inter=False\n for j in range(len(els)):\n if ( i != j ):\n if (e0.testFastInter(els[j])):\n if (e0.testSphereInter(els[j])):\n inter = True\n break\n if not inter:\n els[i]=e0\n logging.info('Inceased volume fraction: %.3f',ellipsoidVol(els))\n #mayavi_visu.show_els(els)\n return els\n\ndef sphereGrow(els,gf):\n log.info('Increasing radius of spheres...')\n els=_ellipsoidGrow(els, gf)\n return els\n\ndef ellipsoidGrow(els,gf):\n log.info('Increasing radius of ellipsoids...')\n els = _ellipsoidGrow(els, gf)\n return els\n\n'''\nNot implemetned\n'''\n\ndef approxEllipsoidPack(MUX,MUY,MUZ,MUA,SX,SY,SZ,SA,xmax,ymax,zmax,nCirc):\n '''\n\n :param MUX:\n :param MUY:\n :param MUA:\n :param SX:\n :param SY:\n :param SA:\n :param xmax:\n :param ymax:\n :return:\n '''\n els=[]\n random.seed()\n ge=GeneratorEllipsoid(MUX,MUY,MUZ,MUA,SX,SY,SZ,SA,nCirc)\n minFound = False\n z=0\n while z0:\n dist=dist/weight\n if (dist= 0:\n if y >= 0:\n return 1\n else:\n return 4\n else:\n if y >= 0:\n return 2\n else:\n return 3\n\n if isinstance(ent, Arc):\n x_start, y_start = ent.start_point\n x_end, y_end = ent.end_point\n (x0, y0), (x1, y1) = get_max_circle_bbox(ent)\n # Get start and end relative quadrants\n start_quadrant = get_relative_quadrant(\n ent.start_point, ent.center_point)\n end_quadrant = get_relative_quadrant(ent.end_point, ent.center_point)\n if ent.clockwise:\n start_quadrant, end_quadrant = end_quadrant, start_quadrant\n x_start, x_end = x_end, x_start\n y_start, y_end = y_end, y_start\n # Get included quadrants\n if start_quadrant < end_quadrant:\n quadrants = list(range(start_quadrant, end_quadrant+1))\n elif start_quadrant > end_quadrant:\n quadrants = list(range(start_quadrant, 5)) + list(\n range(1, end_quadrant+1))\n else:\n # 
Handle case when start & end in same quadrant\n start_ahead = False\n if start_quadrant in [1, 2]:\n if x_start <= x_end:\n start_ahead = True\n if start_quadrant in [3, 4]:\n if x_start >= x_end:\n start_ahead = True\n if start_ahead:\n quadrants = list(range(start_quadrant, 5)) + list(\n range(1, end_quadrant+1)\n )\n else:\n quadrants = [start_quadrant]\n def has_ordered_quadrants(q1, q2):\n if not (q1 in quadrants and q2 in quadrants):\n return False\n idx1 = quadrants.index(q1)\n idx2 = quadrants.index(q2)\n if idx2 > idx1:\n return True\n return False\n # Replace each coordinate of max bbox as needed\n if not has_ordered_quadrants(4, 1):\n x1 = max(x_start, x_end)\n if not has_ordered_quadrants(1, 2):\n y1 = max(y_start, y_end)\n if not has_ordered_quadrants(2, 3):\n x0 = min(x_start, x_end)\n if not has_ordered_quadrants(3, 4):\n y0 = min(y_start, y_end)\n return np.array([[x0, y0], [x1, y1]])\n\n if isinstance(ent, Circle):\n return get_max_circle_bbox(ent)\n if isinstance(ent, Line):\n start_x, start_y = ent.start_point\n end_x, end_y = ent.end_point\n return np.array([[min(start_x, end_x), min(start_y, end_y)],\n [max(start_x, end_x), max(start_y, end_y)]])\n if isinstance(ent, Point):\n return np.array([[ent.x, ent.y],\n [ent.x, ent.y]])\n # Return None for unsupported entities\n return None\n\n\ndef get_sketch_bbox(sketch):\n \"\"\"Compute the bounding box for the sketch.\n\n The bounding box is only computed based on entities currently supported\n in `_get_entity_bbox` (`Arc`, `Circle`, `Line`, `Point`).\n TODO: raise error if sketch has unsupported entities\n\n Parameters\n ----------\n sketch : Sketch\n The sketch of interest for bbox computation\n\n Returns\n -------\n np.array\n Bounding box of the form `[[x0, y0], [x1, y1]]`\n \"\"\"\n # Get entity bboxes\n bboxes = [_get_entity_bbox(ent) for ent in sketch.entities.values()]\n # Remove any None bboxes\n bboxes = np.array([bbox for bbox in bboxes if bbox is not None])\n if bboxes.size == 0:\n # Return origin as bounding box for empty sketch\n return np.array([[0., 0.], [0., 0.]])\n # Compute overall bbox\n x0, y0 = np.min(bboxes[:,0,:], axis=0)\n x1, y1 = np.max(bboxes[:,1,:], axis=0)\n return np.array([[x0, y0], [x1, y1]])\n\n\ndef center_sketch(sketch):\n \"\"\"Center the sketch's bounding box over the origin.\n\n The entity parameters of the given sketch are modified in place.\n\n Parameters\n ----------\n sketch : Sketch\n The sketch to be centered\n\n Returns\n -------\n None\n \"\"\"\n # Get bounding box\n (x0, y0), (x1, y1) = get_sketch_bbox(sketch)\n # Compute current offset from origin\n x_offset = np.mean([x0, x1])\n y_offset = np.mean([y0, y1])\n # Modify the sketch's entities\n for ent in sketch.entities.values():\n pos_params = POS_PARAMS.get(ent.type)\n if pos_params is None:\n # Skip unsupported entities\n continue\n for param_id in pos_params:\n this_offset = x_offset if 'x' in param_id.lower() else y_offset\n curr_val = getattr(ent, param_id)\n setattr(ent, param_id, curr_val - this_offset)\n\n\ndef rescale_sketch(sketch):\n \"\"\"Rescale the sketch such that the long axis of the bounding box is one.\n\n The entity parameters of the given sketch are modified in place. This\n function should only be called on sketches that are centered; an error is\n raised it the given sketch is not.\n\n Parameters\n ----------\n sketch : Sketch\n The sketch to be rescaled\n\n Returns\n -------\n float\n The normalizing scale factor. 
If the sketch is zero-dim,\n -1 is returned instead.\n \"\"\"\n\n # Get bounding box\n (x0, y0), (x1, y1) = get_sketch_bbox(sketch)\n # Ensure sketch is already centered\n if not np.isclose(x0, -x1) or not np.isclose(y0, -y1):\n raise ValueError(\"sketch must be centered before rescaling\")\n # Calculate scale factor\n w = x1 - x0\n h = y1 - y0\n factor = max(w, h)\n if factor == 0:\n return -1\n # Modify the sketch's entities\n for ent in sketch.entities.values():\n pos_params = POS_PARAMS.get(ent.type)\n scale_params = SCALE_PARAMS.get(ent.type)\n if pos_params is None:\n # Skip unsupported entities\n continue\n params = pos_params + scale_params # rescale both types of params\n for param_id in params:\n curr_val = getattr(ent, param_id)\n setattr(ent, param_id, curr_val/factor)\n return factor\n\n\ndef normalize_sketch(sketch):\n \"\"\"Helper function that both centers and rescales the given sketch in place.\n \"\"\"\n center_sketch(sketch)\n scale_factor = rescale_sketch(sketch)\n return scale_factor\n\n\ndef _parameterize_arc(arc: datalib.Arc) -> np.ndarray:\n \"\"\"Extract parameterization for the given Arc instance.\"\"\"\n start_point, end_point = arc.start_point, arc.end_point\n if arc.clockwise:\n start_point, end_point = end_point, start_point\n return np.concatenate([start_point, arc.mid_point, end_point])\n\ndef _parameterize_circle(circle: datalib.Circle) -> np.ndarray:\n \"\"\"Extract parameterization for the given Circle instance.\"\"\"\n return np.append(circle.center_point, circle.radius)\n\ndef _parameterize_line(line: datalib.Line) -> np.ndarray:\n \"\"\"Extract parameterization for the given Line instance.\"\"\"\n return np.concatenate([line.start_point, line.end_point])\n\ndef _parameterize_point(point: datalib.Point) -> np.ndarray:\n \"\"\"Extract parameterization for the given Point instance.\"\"\"\n return np.array([point.x, point.y])\n\n\nNUM_PARAMS = {\n Arc: 6,\n Circle: 3,\n Line: 4,\n Point: 2\n}\n\n\ndef parameterize_entity(ent) -> np.array:\n \"\"\"Extract parameterization for the given entity.\n\n Only continuous parameters of the entity are included.\n\n Parameters\n ----------\n ent : Entity\n The entity of interest for parameterization\n\n Returns\n -------\n np.array\n The entity's parameterization\n \"\"\"\n param_by_type = {\n Arc: _parameterize_arc,\n Circle: _parameterize_circle,\n Line: _parameterize_line,\n Point: _parameterize_point\n }\n param_fn = param_by_type.get(type(ent))\n if param_fn is None:\n return None\n return param_fn(ent)\n\n\ndef _arc_from_params(params, entity_id=None):\n \"\"\"Instantiate an arc from the given parameterization.\n Implementation from:\n https://stackoverflow.com/questions/52990094/calculate-circle-given-\n 3-points-code-explanation\n \"\"\"\n b, c, d = params[:2], params[2:4], params[4:]\n\n temp = c[0]**2 + c[1]**2\n bc = (b[0]**2 + b[1]**2 - temp) / 2\n cd = (temp - d[0]**2 - d[1]**2) / 2\n det = (b[0] - c[0]) * (c[1] - d[1]) - (c[0] - d[0]) * (b[1] - c[1])\n\n if abs(det) < 1.0e-10:\n return None\n\n # Center of circle\n cx = (bc*(c[1] - d[1]) - cd*(b[1] - c[1])) / det\n cy = ((b[0] - c[0]) * cd - (c[0] - d[0]) * bc) / det\n\n radius = ((cx - b[0])**2 + (cy - b[1])**2)**.5\n\n info = {\n 'id': entity_id,\n 'center': [cx, cy],\n 'radius': radius,\n 'startPoint': params[:2],\n 'endPoint': params[-2:]\n }\n return Arc.from_info(info)\n\ndef _circle_from_params(params, entity_id=None):\n \"\"\"Instantiate a circle from the given parameterization.\"\"\"\n return Circle(entity_id, xCenter=params[0], 
yCenter=params[1], radius=params[2])\n\ndef _line_from_params(params, entity_id=None):\n \"\"\"Instantiate a line from the given parameterization.\"\"\"\n info = {\n 'id': entity_id,\n 'startPoint': params[:2],\n 'endPoint': params[2:]\n }\n return Line.from_info(info)\n\ndef _point_from_params(params, entity_id=None):\n \"\"\"Instantiate a point from the given parameterization\"\"\"\n return Point(entity_id, x=params[0], y=params[1])\n\n\ndef entity_from_params(params, entity_id: str=None):\n \"\"\"Instantiate an entity from the given parameterization.\n\n The length of params uniquely determines the target entity type.\n\n Parameters\n ----------\n params : np.array\n The entity's parameterization\n entity_id : str, optional\n Optional string specifying the id of the entity to create.\n\n Returns\n -------\n Entity\n The entity instance corresponding to the input parameterization\n \"\"\"\n entity_by_num_params = {\n NUM_PARAMS[Arc]: _arc_from_params,\n NUM_PARAMS[Circle]: _circle_from_params,\n NUM_PARAMS[Line]: _line_from_params,\n NUM_PARAMS[Point]: _point_from_params\n }\n\n ent_build_fn = entity_by_num_params.get(len(params))\n if ent_build_fn is None:\n raise ValueError(\"Unsupported number of parameters\")\n return ent_build_fn(params, entity_id)\n\n\n# Minimum and maximum parameter values following normalization\nMIN_VAL = -0.5\nMAX_VAL = 0.5\n\n\ndef quantize_params(params: np.ndarray, n_bins):\n \"\"\"Convert params in [MIN_VAL, MAX_VAL] to discrete values in [0, n_bins-1].\n\n Parameters\n ----------\n params : np.array\n The parameters to be quantized\n n_bins : int\n The number of bins\n\n Returns\n -------\n np.array\n The quantized parameter bins\n \"\"\"\n min_val, max_val = MIN_VAL, MAX_VAL\n params = np.around(params, decimals=10)\n if (params < min_val).any() or (params > max_val).any():\n raise ValueError(\"Parameters must be in [%f, %f]. 
Got [%f, %f].\"\n % (min_val, max_val, np.min(params), np.max(params)))\n params_quantized = (params - min_val) / (max_val - min_val) * n_bins\n params_quantized = params_quantized.astype('int32')\n # Handle max_val edge case\n params_quantized[params_quantized == n_bins] -= 1\n return params_quantized\n\n\ndef dequantize_params(params, n_bins):\n \"\"\"Convert quantized parameters to floats in range [MIN_VAL, MAX_VAL].\n\n Parameters\n ----------\n params : array-like\n The parameters to be dequantized\n n_bins : int\n The number of bins\n\n Returns\n -------\n np.array\n The dequantized parameter values\n \"\"\"\n if isinstance(params, list):\n params = np.array(params)\n min_val, max_val = MIN_VAL, MAX_VAL\n if ((params < 0).any() or (params > (n_bins-1)).any() \n or not np.issubdtype(params.dtype, np.integer)):\n raise ValueError(\"Invalid quantized params\")\n params = params.astype('float32') + 0.5 # center of each bin\n params = params / n_bins * (max_val - min_val) + min_val\n return params\n\n","repo_name":"PrincetonLIPS/vitruvion","sub_path":"img2cad/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":13505,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"} +{"seq_id":"38313361428","text":"\"\"\" TCP server class definition \"\"\"\r\n\r\nfrom tcp_common import *\r\n\r\n\r\n\r\n\"\"\" Class definition \"\"\"\r\nclass TCP_Server(TCP_Common):\r\n\tdef __init__(self):\r\n\t\tsuper(TCP_Server, self).__init__()\r\n\t\tself._ssocket = None\r\n\r\n\tdef initialize(self):\r\n\t\tself._ssocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\tself._ssocket.bind( ('', self._port) )\r\n\r\n\tdef wait_for_client(self):\r\n\t\tself._ssocket.listen(1)\r\n\t\t( self._csocket, (ip, port) ) = self._ssocket.accept()\r\n\t\tself._csocket.setblocking(1)\r\n\t\tself._csocket.settimeout(None)\r\n\t\tself._csocket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)\r\n\t\t#print( (ip, port) )\r\n\r\n\tdef close_client_connection(self):\r\n\t\tself._close_client_connection()\r\n\r\n\tdef close_server_connection(self):\r\n\t\tif self._ssocket:\r\n\t\t\tself._ssocket.close()\r\n\r\n\r\n\r\n\"\"\" Testing \"\"\"\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\tprint('initialize')\r\n\t\ttcp_server = TCP_Server()\r\n\t\ttcp_server.initialize()\r\n\t\t# Run server continuously...\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tprint('-----\\nwait_for_client')\r\n\t\t\t\ttcp_server.wait_for_client()\r\n\t\t\t\t# Communicate with client continuously...\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tmsg = tcp_server.receive_msg()\r\n\t\t\t\t\tprint(msg)\r\n\t\t\t\t\ttcp_server.send_msg(*msg) #< Send back received msg\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(e)\r\n\t\t\tfinally:\r\n\t\t\t\tprint('close_client_connection')\r\n\t\t\t\ttcp_server.close_client_connection()\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\tfinally:\r\n\t\tprint('close_server_connection')\r\n\t\ttcp_server.close_server_connection()\r\n\r\n\r\n","repo_name":"JulienP31/TCP","sub_path":"tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34374988733","text":"import plotly.express as px\nfrom datetime import datetime\nimport yfinance as yf\n\nif __name__ == '__main__':\n cryptocurrencies = ['BTC-USD', 'ETH-USD', 'DOGE-USD', 'BNB-USD']\n\n df = yf.download(cryptocurrencies, start='2020-01-01',\n 
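# daily price history for all four coins from Yahoo Finance, ending today\n                     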
end=datetime.today().strftime('%Y-%m-%d'))\n df.isnull().any()\n adj_close = df['Adj Close']\n fig = px.line(adj_close, y=\"BTC-USD\")\n fig.write_html(\"plot-btc.html\")\n fig = px.line(adj_close, y=\"ETH-USD\")\n fig.write_html(\"plot-eth.html\")\n fig = px.line(adj_close, y=\"DOGE-USD\")\n fig.write_html(\"plot-doge.html\")\n fig = px.line(adj_close, y=\"BNB-USD\")\n fig.write_html(\"plot-bnb.html\")","repo_name":"kdmaciejewski/Cryptocurrency-Cantor","sub_path":"website/wykresy/exportPlotly.py","file_name":"exportPlotly.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26713126480","text":"from django.core.management import BaseCommand\nfrom django.utils import timezone\n\nfrom main.market.models import Factory, Retail, Ip\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs):\n Factory.objects.all().delete()\n Retail.objects.all().delete()\n\n factory_1 = Factory.objects.create(\n name=\"Завод электроники\",\n contacts=\"7-700\",\n email=\"electro_omsk@mail.ru\",\n country=\"Россия\",\n city=\"Омск\",\n number_house=12,\n name_product=\"Телевизоры\",\n model_product=\"Электроника\",\n date_exit_product=\"2023-10-25 07:08:40.736919+00:00\",\n creation_time=\"2023-10-25 07:08:40.736919+00:00\",\n supplier=\"Завод электроники\",\n debt=0,\n )\n factory_2 = Factory.objects.create(\n name=\"Завод посуды\",\n contacts=\"7-900\",\n email=\"farfor_msk@mail.ru\",\n country=\"Россия\",\n city=\"Москва\",\n number_house=119,\n name_product=\"Тарелки\",\n model_product=\"Посуда\",\n date_exit_product=\"2023-12-25 07:08:40.736919+00:00\",\n creation_time=\"2023-11-25 07:08:40.736919+00:00\",\n supplier=\"Завод посуды\",\n debt=0,\n )\n Retail.objects.create(\n name=\"ооо МосПосуда\",\n contacts=\"7-600\",\n email=\"farfor_msk@mail.ru\",\n country=\"Россия\",\n city=\"Москва\",\n number_house=119,\n name_product=\"Тарелки\",\n model_product=\"Посуда\",\n date_exit_product=\"2023-12-25 07:08:40.736919+00:00\",\n creation_time=\"2023-11-25 07:08:40.736919+00:00\",\n supplier=factory_1,\n debt=200,\n )\n Ip.objects.create(\n name=\"Ип Орехов.О\",\n contacts=\"7-790\",\n email=\"ip_orex@mail.ru\",\n country=\"Россия\",\n city=\"Воронеж\",\n number_house=19,\n name_product=\"Телевизор\",\n model_product=\"Электроника\",\n date_exit_product=\"2023-12-25 07:08:40.736919+00:00\",\n creation_time=\"2023-11-25 07:08:40.736919+00:00\",\n supplier=factory_2,\n debt=100.5,\n )\n","repo_name":"Fl1up/Online_market-API-","sub_path":"main/market/management/commands/add_buy.py","file_name":"add_buy.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18394968418","text":"import numpy as np\nfrom scipy.signal import convolve # correlate\n\ndef conv_media(band, n):\n matrix_ones = np.ones((n, n))\n img_conv = convolve(band, matrix_ones, mode='same')\n\n image_ones = np.ones(band.shape) \n img_conv_ones = convolve(image_ones, matrix_ones, mode='same')\n\n result = img_conv / img_conv_ones\n return result\n\ndef conv_matrix(band, matrix):\n matrix = np.array(matrix)\n img_conv = convolve(band, matrix, mode='same')\n\n matrix_ones = np.ones(matrix.shape)\n image_ones = np.ones(band.shape)\n img_conv_ones = convolve(image_ones, matrix_ones, mode='same')\n\n result = img_conv / img_conv_ones\n return result\n\ndef conv_image(image, matrix_n):\n band_list = []\n if type(matrix_n) == list:\n for b in 
range(len(image)):\n img = conv_matrix(image[b], matrix_n)\n band_list.append(img)\n elif type(matrix_n) == int:\n for b in range(len(image)):\n img = conv_media(image[b], matrix_n)\n band_list.append(img)\n else:\n print(\"Posible matrix_n values types: list or int.-\")\n\n raster_stacked = np.stack(band_list, axis=0)\n return raster_stacked","repo_name":"DiegoMCastellari/Rasterian","sub_path":"rasterian/focal/global_convolution.py","file_name":"global_convolution.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17501382701","text":"# Finds artcle with max body size\n# in a DSL dictionary\n\nimport os, re, codecs, subprocess\nimport time, random, requests\nimport shutil, stat, errno, sys, http\n\nfrom modules.file_system_functions import *\nfrom modules.general_functions import *\n\n\ndef find_max_body_len():\n fp = 'data/EsEn_Vox_School.dsl'\n \n f = codecs.open(fp, 'r', 'utf16')\n \n i = 1\n maxLine = 0\n maxLen = 0\n \n limitLine = 3006\n \n chars_count = 0\n wordLine = i\n \n for line in f:\n if i == limitLine:\n break\n if line[0] != '\\t' and line[0] != ' ':\n if chars_count > maxLen:\n maxLen = chars_count\n maxLine = wordLine\n \n chars_count = 0\n wordLine = i\n else:\n chars_count += len(line)\n i += 1\n \n f.close()\n print(maxLine)\n\n\n# ---\n\nfind_max_body_len()\n","repo_name":"mortalis13/PythonScripts","sub_path":"dsl_find_largest_article.py","file_name":"dsl_find_largest_article.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5800184153","text":"#!/usr/bin/env python\n\nimport json\nfrom skimage.color import deltaE_ciede2000\n\n# For reducing the colourset\n# requires scikit-image\n\ndef rgb2one(r):\n return (float(r[0]/256),float(r[1]/256),float(r[2]/256))\n \n##with open(\"dmc.json\") as w:\n#with open(\"limited.json\") as w:\nwith open(\"raw/color.json\") as w:\n DMC = json.load(w)\n\n\ndef hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\n\nseen = []\nfor i in DMC:\n icol = i[\"Description\"]\n ihex = \"#\"+i[\"Hex\"]\n irgb = hex_to_rgb(ihex)\n\n for j in DMC:\n jcol = j[\"Description\"]\n jhex = \"#\"+j[\"Hex\"]\n jrgb = hex_to_rgb(jhex)\n\n if icol == jcol:\n continue\n\n #if \"{},{}\".format(jcol,icol) in seen:\n # continue\n\n seen.append(\"{},{}\".format(icol, jcol))\n\n io = rgb2one(irgb) \n jo = rgb2one(jrgb)\n\n d = deltaE_ciede2000(io, jo)\n\n print(\"{}\".format(d), end=\"\")\n print(\"{}{}       \".format(icol,ihex,ihex), end=\"\")\n print(\"        {}{}\".format(jhex,jhex,jcol), end=\"\")\n print(\"\")\n\n","repo_name":"VetoPlayer/626","sub_path":"data/reduce_colours.py","file_name":"reduce_colours.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26932969418","text":"from casestudies.find_and_repair_specifications.goals import w\nfrom core.cgg import Node\nfrom core.contract import Contract\nfrom core.library import Library\nfrom core.patterns.robotics.coremovement.coverage import Visit\nfrom core.patterns.robotics.coremovement.surveillance import (\n OrderedPatrolling,\n Patrolling,\n)\nfrom core.specification.lformula import LTL\n\ng_prime = Node(\n name=\"night_patrolling\",\n description=\"During the night patrol in order all the locations in 
a strict order\",\n context=w[\"nt\"],\n specification=Contract(\n assumptions=None,\n guarantees=LTL(OrderedPatrolling(\"l1\", \"l3\", \"l5\"), w.typeset),\n ),\n world=w,\n)\n\nset_of_goals = {\n Node(\n name=\"l1\",\n specification=LTL(Patrolling(\"l1\", \"l5\"), w.typeset),\n world=w,\n ),\n Node(\n name=\"l2\",\n specification=LTL(Patrolling(\"l3\"), w.typeset),\n world=w,\n ),\n Node(\n name=\"l3\",\n specification=LTL(Visit(\"l3\", \"l1\"), w.typeset),\n world=w,\n ),\n Node(\n name=\"4\",\n description=\"Keep visiting l3\",\n specification=LTL(Visit(\"l5\"), w.typeset),\n world=w,\n ),\n}\n\n\nlibrary = Library(set_of_goals)\n\ncandidate_composition = library.get_candidate_composition(goal_to_refine=g_prime)\n\nprint(candidate_composition)\n\n\n#\n#\n#\n# print(f\"The composition is: \\n'{candidate_composition}'\")\n#\n# if not candidate_composition <= g_prime:\n# print(\"The candidate_composition does not refine g_prime\")\n#\n# quotient = candidate_composition.quotient(g_prime)\n# print(f\"The quotient is: \\n'{quotient}'\")\n#\n# print(quotient.specification.assumptions.spot_formula._repr_latex_())\n# print(quotient.specification.guarantees.spot_formula._repr_latex_())\n#\n# l_prime_1 = Node(\n# name=\"strict_order_visit_locations\",\n# description=\"l1 -> l3 -> l5 -> l4 -> l2\",\n# specification=LTL(OrderedVisit(\"l1\", \"l3\", \"l5\", \"l4\", \"l2\"), w.typeset),\n# world=w,\n# )\n#\n# if l_prime_1 <= quotient:\n# print(\"The library goal l_prime_1 refines the quotient\")\n#\n# composition = Node.composition({l_prime_1, candidate_composition})\n# print(f\"The composition of l_prime_1 with candidate_composition is: \\n'{composition}'\")\n#\n# if composition <= quotient:\n# print(\"The composition now refines the initial goal g_prime\")\n","repo_name":"pierg/crome","sub_path":"casestudies/find_and_repair_specifications/candidate_composition.py","file_name":"candidate_composition.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24428818739","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\n\ndatos, metadatos = tfds.load('fashion_mnist', as_supervised=True, with_info=True)\n\nmetadatos\n\ndatos_entrenamiento, datos_pruebas = datos['train'], datos['test']\n\nnombres_clases = metadatos.features['label'].names\n\nnombres_clases\n\ndef normalizar(imagenes, etiquetas):\n imagenes = tf.cast(imagenes, tf.float32)\n imagenes /= 255 #From [0-255] to [0-1]\n return imagenes, etiquetas\n\n#normalizar los datos de entrenamiento y pruebas con la funcion de normalización\ndatos_entrenamiento = datos_entrenamiento.map(normalizar)\ndatps_pruebas = datos_pruebas.map(normalizar)\n\n#Agregar datos a caché (velocidad por uso de memoria)\ndatos_entrenamiento = datos_entrenamiento.cache()\ndatos_pruebas = datos_pruebas.cache()\n\n#Mostrar una imagen de los datos de pruebas, mostrar primera.\n\nfor imagen, etiqueta in datos_entrenamiento.take(1):\n break\nimagen = imagen.numpy().reshape((28,28)) # Redimensionar\n\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.imshow(imagen, cmap=plt.cm.binary)\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\nplt.figure(figsize=(10,10))\nfor i, (imagen, etiqueta) in enumerate(datos_entrenamiento.take(25)):\n imagen = imagen.numpy().reshape((28,28))\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(imagen, cmap=plt.cm.binary)\n plt.xlabel(nombres_clases[etiqueta])\nplt.show\n\nmodelo = 
tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=[28,28,1]), #1 - Blanco y negro\n tf.keras.layers.Dense(50, activation=tf.nn.relu),\n tf.keras.layers.Dense(50, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax), #Funcion de activacion de capa para red de clasificacion\n])\n\n#Compilar\n\nmodelo.compile(\n optimizer ='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=['accuracy']\n)\n\ncant_ej_entrenamiento = metadatos.splits[\"train\"].num_examples\ncant_ej_pruebas = metadatos.splits[\"test\"].num_examples\nprint(f\"La cantidad de ejemplos para el entrenamiento es: {cant_ej_entrenamiento}\")\nprint(f\"La cantidad de ejemplos para las pruebas es de: {cant_ej_pruebas}\")\n\ntamano_lote = 32\n\ndatos_entrenamiento = datos_entrenamiento.repeat().shuffle(cant_ej_entrenamiento).batch(tamano_lote)\ndatos_pruebas = datos_pruebas.batch(tamano_lote)\n\nimport math\n\nhistorial = modelo.fit(datos_entrenamiento, epochs=5, steps_per_epoch=math.ceil(cant_ej_entrenamiento/tamano_lote))\n\nplt.xlabel(\"# Epoch\")\nplt.ylabel(\"Escala de perdida\")\nplt.plot(historial.history['loss'])\n\nimport numpy as np\n\nfor imagenes_prueba, etiquetas_prueba in datos_pruebas.take(1):\n imagenes_prueba = imagenes_prueba.numpy()\n etiquetas_prueba = etiquetas_prueba.numpy()\n predicciones = modelo.predict(imagenes_prueba)\n\ndef graficar_imagen(i, arr_predicciones, etiquetas_reales, imagenes):\n arr_predicciones, etiqueta_real, img = arr_predicciones[i], etiquetas_reales[i], imagenes[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img[...,0], cmap=plt.cm.binary)\n\n etiqueta_prediccion = np.argmax(arr_predicciones)\n if etiqueta_prediccion == etiqueta_real:\n color = 'blue' #Prediccion correcta\n else:\n color = 'red' #Prediccion incorrecta\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(\n nombres_clases[etiqueta_prediccion],\n 100*np.max(arr_predicciones),\n nombres_clases[etiqueta_real],\n color=color\n))\n \ndef graficar_valor_arreglo(i, arr_predicciones, etiqueta_real):\n arr_predicciones, etiqueta_real = arr_predicciones[i], etiqueta_real[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n grafica = plt.bar(range(10), arr_predicciones, color=\"#777777\")\n plt.ylim([0,1])\n etiqueta_prediccion = np.argmax(arr_predicciones)\n\n grafica[etiqueta_prediccion].set_color('red')\n grafica[etiqueta_real].set_color('blue')\n\nfilas = 5\ncolumnas = 5\nnum_imagenes = filas * columnas\nplt.figure(figsize=(2*2*columnas, 2*filas))\nfor i in range (num_imagenes):\n plt.subplot(filas, 2*columnas, 2*i+1)\n graficar_imagen(i, predicciones, etiquetas_prueba, imagenes_prueba)\n plt.subplot(filas, 2*columnas, 2*i+2)\n graficar_valor_arreglo(i, predicciones, etiquetas_prueba)\n\n#Acceder a culaquier indice del set de pruebas para ver su prediccion\nimagen = imagenes_prueba[5]\nimagen = np.array([imagen])\nprediccion = modelo.predict(imagen)\n\nprint(\"Prediccion: \" + nombres_clases[np.argmax(prediccion[0])])\n\n","repo_name":"YummySalamy/Image_classifier","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37363441644","text":"import argparse\nimport torch\nfrom torchvision import transforms\nimport os\nimport opt\nfrom torch.utils import data\nimport torch.nn as nn\nimport h5py\nimport numpy as np\nfrom wind_dataset import WindDataset\nfrom net import PConvUNet\nfrom util.io import load_ckpt\nfrom util.image 
import UnMinMaxUnNormalization\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n\n\n\ndef test_net(model, test_dataloader, device, args,\n save_dir=None, vis=False, save_tensor=False, no_compute_loss=True):\n model.eval()\n unminmax_unnormalization = UnMinMaxUnNormalization(device)\n with torch.no_grad():\n output_list = []\n gt_list = []\n mask_list = []\n for i, (image, mask, transform_gt, gt) in enumerate(test_dataloader):\n output, _ = model(image.to(device), mask.to(device))\n\n # if vis:\n # output = output.to(torch.device('cpu'))\n # output_comp = mask * image + (1 - mask) * output\n # grid = make_grid(\n # torch.cat((unnormalize(image), mask, unnormalize(output),\n # unnormalize(output_comp), unnormalize(gt)), dim=0))\n # save_image(grid, os.path.join(save_dir, str(i) + '.png'))\n # gt = unnormalization(gt.to(device))\n output = unminmax_unnormalization(output)\n output_list.append(output.to('cpu'))\n gt_list.append(gt)\n\n mask_list.append(mask)\n if i % 100 == 0:\n print('processing {}'.format(i))\n\n output_tensor = torch.cat(output_list)\n gt_tensor = torch.cat(gt_list)\n mask_tensor = torch.cat(mask_list)\n output_np = output_tensor.numpy()\n gt_np = gt_tensor.numpy()\n mask_np = mask_tensor.numpy()\n\n if not no_compute_loss:\n # TODO: to modify your own evaluation metrics\n l1_loss_in_mask = compute_loss(output_tensor, gt_tensor, mask_tensor)\n print('l1_loss_in_mask', l1_loss_in_mask)\n correlation_all, rmse_all = compute_all(output_np, gt_np)\n print('Mean_series_correlation: {}; Mean_series_rmse: {}'.format(correlation_all, rmse_all))\n corr_grid, rmse_grid = compute_eachgrid(output_np, gt_np)\n print('Mean_grid_correlation: {}; Mean_grid_rmse: {}'.format(corr_grid, rmse_grid))\n if save_tensor:\n\n with h5py.File(os.path.join(save_dir, '{}_result.h5'.format(args.data_name.split('.')[0])), 'w') as f:\n f.create_dataset('output', data=output_np)\n f.create_dataset('gt', data=gt_np)\n f.create_dataset('mask', data=mask_np)\n\n model.train()\n\ndef compute_loss(output, gt, mask):\n l1 = nn.L1Loss(reduction='sum')\n # In mask, the value equal to 1 is retained and can be seen by the model\n # So, in evaluation phase, we should compute the unmask area.\n unmask = 1 - mask\n # Compute the loss of the unmask area.\n l1_loss = l1(output * unmask, gt * unmask)\n mask_count = mask.shape[0] * mask.shape[1] * mask.shape[2] * mask.shape[3] - mask.count_nonzero()\n return l1_loss / mask_count\n # sum_l1_loss += l1_loss\n # sum_mask_count += mask_count\n\n\n# Calculate the correlation and root mean square error of the total time series\ndef compute_all(output, gt):\n n = gt.shape[0] # image number\n x = gt.reshape([n, -1]).mean(axis=1)\n y = output.reshape([n, -1]).mean(axis=1)\n correlation_all = np.corrcoef(x, y)[0, 1]\n rmse_all = np.sqrt(np.mean(np.power((x - y), 2)))\n return correlation_all, rmse_all\n\n\n# Calculate the average value of correlation and root mean square error of time series on each grid\ndef compute_eachgrid(output, gt):\n output_t = output.transpose(2,3,0,1).reshape(output.shape[2]*output.shape[3], -1)\n gt_t = gt.transpose(2,3,0,1).reshape(gt.shape[2]*gt.shape[3], -1)\n x = gt_t.mean(axis=1)\n y = output_t.mean(axis=1)\n correlation_grid_m = np.corrcoef(x, y)[0,1]\n rmse_grid_m = np.sqrt(np.mean(np.power((x - y), 2)))\n return correlation_grid_m, rmse_grid_m\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # training options\n parser.add_argument('--root', type=str, default='/data/liuhaofeng/Dataset/win_speed/72x72')\n # 
parser.add_argument('--mask_root', type=str, default='/data/liuhaofeng/Dataset/win_speed/72x72/test')\n parser.add_argument('--result_dir', type=str, default='./result')\n parser.add_argument('--phrase', type=str, default='test')\n parser.add_argument('--data_name', type=str, default='test_data.h5')\n parser.add_argument('--mask_name', type=str, default='test_mask.h5')\n parser.add_argument('--model_name', type=str, default='win_speed2')\n parser.add_argument('--gpu_id', type=str, default='0')\n parser.add_argument('--iter', type=str, default='300000')\n parser.add_argument('--batch_size', type=int, default=16)\n parser.add_argument('--n_threads', type=int, default=4)\n parser.add_argument('--save_tensor', action='store_true')\n parser.add_argument('--no_compute_loss', action='store_true')\n parser.add_argument('--snapshot', type=str, default='300000')\n parser.add_argument('--image_size', type=int, default=72)\n parser.add_argument('--random_mask', action='store_true')\n parser.add_argument('--use_cpu', action='store_true')\n\n\n args = parser.parse_args()\n snapshot = os.path.join('snapshots', args.model_name, 'ckpt', args.iter + '.pth')\n save_dir = os.path.join(args.result_dir, args.model_name)\n # print(save_dir)\n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n\n if args.use_cpu:\n torch.backends.cudnn.benchmark = False\n device = torch.device('cpu')\n else:\n torch.backends.cudnn.benchmark = True\n device = torch.device('cuda:{}'.format(args.gpu_id))\n\n size = (args.image_size, args.image_size)\n img_transform = transforms.Compose(\n [transforms.Resize(size=size),\n transforms.Normalize(mean=opt.MEAN, std=opt.STD)])\n mask_transform = transforms.Compose(\n [transforms.Resize(size=size), transforms.ToTensor()])\n\n dataset = WindDataset(args.root, args.root, img_transform, mask_transform, args.phrase, args.data_name,\n args.mask_name, random_mask=args.random_mask)\n\n dataloader = data.DataLoader(\n dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.n_threads)\n model = PConvUNet().to(device)\n load_ckpt(snapshot, [('model', model)])\n\n test_net(model, dataloader, device, args, save_dir=save_dir,\n save_tensor=args.save_tensor, no_compute_loss=args.no_compute_loss)\n","repo_name":"HeverLaw/PCNN-for-wind-reconstruction","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"13486864537","text":"#!/usr/bin/python\n\n#\n#\tDeploys built artifacts to $SERVER_HOME/repository/usr\n#\n\nimport os, sys, json, distutils.core, shutil, glob\nfrom subprocess import call\n\nserverHome = os.environ['SERVER_HOME']\nconfig = json.loads(open(os.path.join(os.path.dirname(__file__), 'config.json')).read())\nsourcesdir = config['sourcesdir']\n\n\n\ndef copyext(targetdir, extension):\n\tfiles = glob.iglob(os.path.join(targetdir, '*.'+extension))\n\tfor file in files:\n\t if os.path.isfile(file):\n\t shutil.copy2(file, os.path.join(serverHome, 'repository', 'usr'))\n\ndef main(argv):\n serverHome = os.environ['SERVER_HOME']\n if len(sys.argv)>1 and sys.argv[1]=='eclipse':\n print('Updating the virgo server, Eclipse is used')\n eclipse=True\n else:\n print('Updating the virgo server, Eclipse is not used')\n eclipse=False\n for repo in config['repos']:\n print('Copying libraries for' , repo['name'])\n targetdir = os.path.join(sourcesdir, repo['name'], 'target')\n if not eclipse:\n copyext(targetdir, 'jar')\n copyext(targetdir, 'war')\n 
copyext(targetdir, 'libd')\n copyext(os.path.join(targetdir, 'classes', 'lib'), 'jar')\n copyext(os.path.join(targetdir, 'classes', 'lib'), 'war')\n copyext(os.path.join(targetdir, 'classes', 'lib'), 'libd')\n\n if not eclipse:\n\t shutil.copy2(os.path.join(sourcesdir, 'org.geppetto', 'geppetto.plan'), os.path.join(serverHome, 'pickup'))\n print('Geppetto build deployed to virgo')\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","repo_name":"openworm/org.geppetto","sub_path":"utilities/source_setup/update_server.py","file_name":"update_server.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"32"} +{"seq_id":"33008810460","text":"\"\"\"\nDefines the forward-facing API for the\n\"Trains 'R' Us\" Application\n\"\"\"\n\n\nfrom flask import Flask, request, make_response\nfrom flask_cors import CORS\nimport json\nfrom utils import ApplicationLogger\nfrom engine import (\n Engine,\n MissingInput,\n InputDomainError, \n NotAllowed, \n NotFound, \n HandlerNotImplemented\n)\n\n\"\"\"\nAll of the HTTP status codes we could ever need\n\"\"\"\nSUCCESS_OK = 200\nSUCCESS_CREATE = 201\nCLIENT_BAD_REQUEST = 400\nCLIENT_FORBIDDEN = 403\nCLIENT_NOT_FOUND = 404\nCLIENT_BAD_METHOD = 405\nSERVER_INTERNAL_ERROR = 500\nSERVER_NOT_IMPLEMENTED = 501\nSERVER_UNAVAILABLE = 503\n\n\n\"\"\"\nSet up application context\n\"\"\"\napp = Flask(\"Trains 'R' Us\")\nCORS(app)\nlogger = ApplicationLogger(debug_on=True)\nengine = Engine(logger)\n\n\n\"\"\"\nRequest and response helpers that enforce JSON\ncompliance\n\"\"\"\n\ndef json_response(o, status_code=SUCCESS_OK):\n \"\"\"\n Return a response object containing JSON data\n o is the python object (dict) to encode, must\n be JSON friendly\n \"\"\"\n data = json.dumps(o)\n res = make_response((data, status_code))\n res.headers['Content-Type'] = 'application/json'\n return res\n\ndef compute_request(k):\n try:\n return k()\n except (\n MissingInput, \n InputDomainError) as e:\n return json_response(\n {\n 'message': str(e)\n }, \n status_code=CLIENT_BAD_REQUEST)\n except NotAllowed as e:\n return json_response(\n {\n 'message': str(e)\n }, \n status_code=CLIENT_FORBIDDEN)\n except NotFound as e:\n return json_response(\n {\n 'message': str(e)\n }, \n status_code=CLIENT_NOT_FOUND)\n except HandlerNotImplemented as e:\n return json_response(\n {\n 'message': str(e)\n }, \n status_code=SERVER_NOT_IMPLEMENTED)\n except Exception as e:\n logger.error(\"Engine error: %s\" % e)\n return json_response(\n {\n 'message': \"An internal engine error occurred\"\n }, \n status_code=SERVER_INTERNAL_ERROR)\n\ndef obj_request(k):\n \"\"\"\n Attempts to read JSON body from the request context.\n If body is an improper mimetype, returns\n a bad request response. Otherwises passes the\n request to the engine computation k and produces the\n result. 
Should k result in some error, an appropriate\n response will be sent.\n \"\"\"\n req = None\n if request.method == \"GET\":\n try:\n data = request.args.get('body')\n req = json.loads(data)\n except Exception as e:\n logger.error(str(request.args.get('body')))\n logger.error(str(e))\n else: \n req = request.get_json()\n if req is None or not isinstance(req,dict):\n return json_response(\n {\n 'message': \"You gotta give me a JSON object\"\n }, \n status_code=CLIENT_BAD_REQUEST)\n else:\n logger.info(\"Request: %s\" % json.dumps(req))\n return compute_request(lambda: k(req))\n\n\n\"\"\"\nRoutes\n\"\"\"\n\nVERSION_1 = \"v1\"\n\n@app.route((\"/%s/\" % VERSION_1), methods=[\"GET\"])\ndef hello():\n return json_response(engine.sample())\n \n@app.route((\"/%s/execute\" % VERSION_1), methods=[\"POST\"])\ndef execute():\n return obj_request(\n lambda r: \n json_response(engine.handle_execute(r), status_code=SUCCESS_CREATE))\n\n@app.route((\"/%s/worker\" % VERSION_1), methods=[\"POST\", \"DELETE\", \"GET\", \"PUT\"])\ndef worker():\n if request.method == \"POST\":\n return obj_request(\n lambda r: \n json_response(engine.create_worker(r), status_code=SUCCESS_CREATE))\n elif request.method == \"DELETE\":\n return obj_request(\n lambda r: \n json_response(engine.remove_worker(r), status_code=SUCCESS_OK))\n elif request.method == \"GET\":\n return obj_request(\n lambda r: \n json_response(engine.get_single_worker(r), status_code=SUCCESS_OK))\n else:\n return obj_request(\n lambda r: \n json_response(engine.update_worker(r), status_code=SUCCESS_CREATE))\n\n@app.route((\"/%s/segment/status\" % VERSION_1), methods=[\"GET\", \"PUT\"])\ndef segment_status():\n if request.method == \"GET\":\n return obj_request(\n lambda r: \n json_response(engine.get_segments(r), status_code=SUCCESS_OK))\n elif request.method == \"PUT\":\n return obj_request(\n lambda r: \n json_response(engine.update_segment(r), status_code=SUCCESS_CREATE))\n\n@app.route((\"/%s/segment/status/count\" % VERSION_1), methods=[\"GET\"])\ndef segment_status_count():\n return obj_request(\n lambda r: \n json_response(engine.get_segment_status_count(r), status_code=SUCCESS_OK))\n\n@app.route((\"/%s/segment\" % VERSION_1), methods=[\"GET\", \"POST\"])\ndef segment():\n if request.method == \"GET\":\n return obj_request(\n lambda r: \n json_response(engine.get_segment_info(r), status_code=SUCCESS_OK))\n elif request.method == \"POST\":\n return obj_request(\n lambda r: \n json_response(engine.create_segment(r), status_code=SUCCESS_CREATE))\n \n@app.route((\"/%s/shift\" % VERSION_1), methods=[\"GET\", \"POST\", \"DELETE\"])\ndef shiftt():\n if request.method == \"GET\":\n return obj_request(\n lambda r: \n json_response(engine.get_worker_shifts(r), status_code=SUCCESS_OK))\n elif request.method == \"POST\":\n return obj_request(\n lambda r: \n json_response(engine.schedule_shift(r), status_code=SUCCESS_CREATE))\n elif request.method == \"DELETE\":\n return obj_request(\n lambda r: \n json_response(engine.remove_shift(r), status_code=SUCCESS_OK))\n\n@app.route((\"/%s/ticket/info\" % VERSION_1), methods=[\"GET\"])\ndef ticket_info():\n return obj_request(\n lambda r: \n json_response(engine.get_ticket_info(r), status_code=SUCCESS_OK))\n\n@app.route((\"/%s/station\" % VERSION_1), methods=[\"GET\", \"PUT\", \"POST\"])\ndef station():\n if request.method == \"GET\":\n return obj_request(\n lambda r: \n json_response(engine.get_station(r), status_code=SUCCESS_OK))\n elif request.method == \"PUT\":\n return obj_request(\n lambda r: \n 
json_response(engine.update_station(r), status_code=SUCCESS_CREATE))\n elif request.method == \"POST\":\n return obj_request(\n lambda r: \n json_response(engine.create_station(r), status_code=SUCCESS_CREATE))\n\n@app.route((\"/%s/stat/trip/length\" % VERSION_1), methods=[\"GET\"])\ndef avg_trip_length_stat():\n return compute_request(lambda: json_response(engine.get_avg_trip_length(), status_code=SUCCESS_OK))\n\n@app.route((\"/%s/worker/overworked\" % VERSION_1), methods=[\"GET\"])\ndef overworked():\n return compute_request(lambda: json_response(engine.get_overworked(), status_code=SUCCESS_OK))\n \n\n\"\"\"\nRuntime\n\"\"\"\nif __name__ == \"__main__\":\n try:\n app.run(host=\"0.0.0.0\", port=6000) # TODO change to env variable\n except Exception as e:\n logger.error(\"Could not set up database wrapper (see logs)\")\n","repo_name":"rlnsy/trainsRUs","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34373759588","text":"\"\"\" \nStop Conditions\n----------------\nDetermined when the simulation should stop.\n\"\"\"\n\nfrom typing import TYPE_CHECKING\n\nfrom dooders.sdk.core.core import Core\n\nif TYPE_CHECKING:\n from dooders.sdk import Simulation\n\n\n@Core.register('condition')\nclass StopConditions:\n\n _OPERATOR = 'any'\n\n @classmethod\n def max_cycle(cls, simulation: 'Simulation') -> bool:\n \"\"\" \n Check if the maximum number of cycles has been reached.\n\n The maximum number of cycles is defined in the simulation parameters.\n\n Parameters\n ----------\n simulation : Simulation\n The simulation to check\n\n Returns\n -------\n bool\n True if the maximum number of cycles has been reached, False otherwise\n \"\"\"\n if simulation.cycle_number >= simulation.settings.get('MaxCycles'):\n return True\n\n @classmethod\n def simulation_running(cls, simulation: 'Simulation') -> bool:\n \"\"\" \n Check if the simulation is still running.\n\n Parameters\n ----------\n simulation : Simulation\n The simulation to check\n\n Returns\n -------\n bool\n True if the simulation is not running, False otherwise\n \"\"\"\n if not simulation.running:\n return True\n\n @classmethod\n def dooder_count(cls, simulation: 'Simulation') -> bool:\n \"\"\" \n Check if the number of dooders has reached zero.\n\n Parameters\n ----------\n simulation : Simulation\n The simulation to check\n\n Returns\n -------\n bool\n True if the number of dooders has reached zero, False otherwise\n \"\"\"\n if len(list(simulation.environment.get_objects(\"Dooder\"))) == 0:\n return True\n","repo_name":"csmangum/Dooders","sub_path":"dooders/sdk/conditions/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9883415678","text":"# keys in dictionary must be immutable, we cant use a list [123] as key\n# value will override if keys are duplicated in Dictionary\n\nUser = {\n 'basket': [1, 2, 3],\n 'greet': 'hello',\n 'age': 20\n}\n\nuser2 = dict(name='Sumeet') # alternate way to create dictionary\n\nprint(user2)\n\nprint('age' in User.keys())\nprint('hello' in User.values())\nprint(User.items())\n","repo_name":"sapla07/PythonZerotoMastery","sub_path":"DictionaryDemo.py","file_name":"DictionaryDemo.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"71842992731","text":"# Variável\nsoma = 0\n# Repetição\nfor c in range(1, 7):\n num = int(input('Digite um número: '))\n # IF\n if num % 2 == 0:\n soma += num\n# Print\nprint(f'O valor somado é igual a {soma}')\n","repo_name":"catabimbas/Curso-Python","sub_path":"Curso Python/Mundo 2/Modulo2/Desafios/des050.py","file_name":"des050.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22010325906","text":"#!/usr/bin/python3\nimport io\nimport os\nimport platform\nimport shutil\nimport src.qubes_fwupd_heads as qf_heads\nimport src.qubes_fwupdmgr as qfwupd\nimport sys\nimport unittest\n\nfrom test.fwupd_logs import HEADS_XML\n\nCUSTOM_METADATA = \"https://fwupd.org/downloads/firmware-3c81bfdc9db5c8a42c09d38091944bc1a05b27b0.xml.gz\"\n\n\nclass TestQubesFwupdHeads(unittest.TestCase):\n def setUp(self):\n self.q = qf_heads.FwupdHeads()\n self.maxDiff = 2000\n self.captured_output = io.StringIO()\n sys.stdout = self.captured_output\n\n @unittest.skipUnless('qubes' in platform.release(), \"Requires Qubes OS\")\n def test_get_hwids(self):\n self.q._check_fwupdtool_version()\n self.q._get_hwids()\n self.assertNotEqual(self.q.dom0_hwids_info, \"\")\n\n def test_gather_firmware_version_empty(self):\n self.q.dom0_hwids_info = \"\"\n return_code = self.q._gather_firmware_version()\n self.assertEqual(return_code, 99)\n\n def test_gather_firmware_version(self):\n self.q.dom0_hwids_info = \"BiosVersion: CBET4000 0.2.2 heads\"\n self.q._gather_firmware_version()\n self.assertEqual(self.q.heads_version, \"0.2.2\")\n\n @unittest.skipUnless('qubes' in platform.release(), \"Requires Qubes OS\")\n def test_parse_metadata(self):\n qmgr = qfwupd.QubesFwupdmgr()\n qmgr.metadata_file = CUSTOM_METADATA.replace(\n \"https://fwupd.org/downloads\",\n qfwupd.FWUPD_DOM0_METADATA_DIR\n )\n qmgr._download_metadata(metadata_url=CUSTOM_METADATA)\n self.q._parse_metadata(qmgr.metadata_file)\n self.assertTrue(self.q.metadata_info)\n\n def test_check_heads_updates_default_heads(self):\n self.q.metadata_info = HEADS_XML\n self.q.heads_version = \"heads\"\n return_code = self.q._parse_heads_updates(\"x230\")\n self.assertEqual(return_code, 0)\n self.assertEqual(\n self.q.heads_update_url,\n \"https://fwupd.org/downloads/e747a435bf24fd6081b77b6704b39cec5fa2dcf62e0ca6b86d8a6460121a1d07-heads_coreboot_x230-v0_2_3.cab\"\n )\n self.assertEqual(\n self.q.heads_update_sha,\n \"1a54e69ca2b58d1218035115d481480eaf4c66e4\"\n )\n self.assertEqual(\n self.q.heads_update_version,\n \"0.2.3\"\n )\n\n def test_check_heads_updates_no_updates(self):\n self.q.metadata_info = HEADS_XML\n self.q.heads_version = \"0.2.3\"\n return_code = self.q._parse_heads_updates(\"x230\")\n self.assertEqual(return_code, 99)\n\n def test_check_heads_updates_lower_version(self):\n self.q.metadata_info = HEADS_XML\n self.q.heads_version = \"0.2.2\"\n return_code = self.q._parse_heads_updates(\"x230\")\n self.assertEqual(return_code, 0)\n self.assertEqual(\n self.q.heads_update_url,\n \"https://fwupd.org/downloads/e747a435bf24fd6081b77b6704b39cec5fa2dcf62e0ca6b86d8a6460121a1d07-heads_coreboot_x230-v0_2_3.cab\"\n )\n self.assertEqual(\n self.q.heads_update_sha,\n \"1a54e69ca2b58d1218035115d481480eaf4c66e4\"\n )\n self.assertEqual(\n self.q.heads_update_version,\n \"0.2.3\"\n )\n\n @unittest.skipUnless('qubes' in platform.release(), \"Requires Qubes OS\")\n def test_copy_heads_firmware(self):\n qmgr = qfwupd.QubesFwupdmgr()\n self.q.heads_update_url = 
\"https://fwupd.org/downloads/e747a435bf24fd6081b77b6704b39cec5fa2dcf62e0ca6b86d8a6460121a1d07-heads_coreboot_x230-v0_2_3.cab\"\n self.q.heads_update_sha = \"1a54e69ca2b58d1218035115d481480eaf4c66e4\"\n self.q.heads_update_version = \"0.2.3\"\n qmgr._download_firmware_updates(\n self.q.heads_update_url,\n self.q.heads_update_sha\n )\n heads_boot_path = os.path.join(\n qf_heads.HEADS_UPDATES_DIR,\n self.q.heads_update_version\n )\n if os.path.exists(heads_boot_path):\n shutil.rmtree(heads_boot_path)\n ret_code = self.q._copy_heads_firmware(qmgr.arch_path)\n self.assertNotEqual(ret_code, qfwupd.EXIT_CODES[\"NO_UPDATES\"])\n firmware_path = os.path.join(heads_boot_path, \"firmware.rom\")\n self.assertTrue(os.path.exists(firmware_path))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"3mdeb/qubes-fwupd","sub_path":"test/test_qubes_fwupd_heads.py","file_name":"test_qubes_fwupd_heads.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"39801724795","text":"from random import random\nimport random\nclass Ecosystem():\n def __init__(self):\n self.river = 3\n self.bear = 0\n self.fish = 0\n self.eco = [\"N\",'N','N']\n def setRiver(self, river):\n self.river = river\n def setBear(self,bear):\n self.bear = bear\n def setFish(self,fish):\n self.fish = fish\n def getEco(self):\n return self.eco\n def setEco(self):\n self.eco = []\n for k in range(self.river - self.bear - self.fish):\n self.eco.append(\"N\") \n k += 1\n for i in range(self.fish): #insert fish in the river randomly\n pos = random.randint(0,len(self.eco))\n self.eco.insert(pos,\"F\")\n i += 1\n for j in range(self.bear): #insert bears randomly\n pos = random.randint(0,len(self.eco))\n self.eco.insert(pos,\"B\")\n j += 1\n def simulation(self,N): \n born_b = 0\n born_f = 0\n n = 0\n empty = self.eco.count(\"N\")\n while n < N:\n for ele in self.eco: \n if ele == \"N\": #check whether this element can be move\n continue\n else:\n if self.eco.index(ele) == 0: #choose the direction of mmovement\n move = \"r\"\n elif self.eco.index(ele) == len(self.eco) - 1:\n move = \"l\"\n else:\n move = random.choice(\"lrn\") \n if move == \"l\": #start to move\n m = self.eco.index(ele)\n if self.eco[m - 1] == \"F\" and ele == \"B\": #the \"F\" would be removed\n self.eco.remove(self.eco[m-1])\n elif self.eco[m - 1] == \"B\" and ele == \"F\":\n self.eco.remove(self.eco[m])\n else:\n self.eco[m - 1],self.eco[m] = self.eco[m],self.eco[m - 1]\n if self.eco[m-1] == self.eco[m] and empty >= 1 and self.eco[m] == \"B\":\n empty -= 1 \n born_b += 1 #to see whether there is empty position to add one more\n if self.eco[m-1] == self.eco[m] and empty >= 1 and self.eco[m] == \"F\":\n born_f += 1\n empty -= 1\n \n \n elif move == \"r\":\n m = self.eco.index(ele)\n if self.eco[m + 1] == \"F\" and self.eco[m] == \"B\": #the \"F\" would be removed\n self.eco.remove(self.eco[m+1])\n elif self.eco[m + 1] == \"B\" and self.eco[m] == \"F\":\n self.eco.remove(self.eco[m])\n else:\n self.eco[m+ 1], self.eco[m] = self.eco[m],self.eco[m + 1]\n if self.eco[m-1] == self.eco[m] and empty >= 1 and self.eco[m] == \"B\":#to see whether there is empty position to add one more\n born_b += 1\n empty -= 1\n elif self.eco[m-1] == self.eco[m] and empty >= 1 and self.eco[m] == \"F\":\n born_f += 1\n empty -= 1\n\n if born_b > 0:\n for i in range (born_b):\n avai = [] \n for elem in self.eco:\n if elem == \"N\":\n avai.append(self.eco.index(elem))\n new_born = 
random.choice(avai)\n self.eco[new_born] = \"B\" #born a same element\n if born_f > 0:\n for i in range (born_f):\n avai = [] \n for elem in self.eco:\n if elem == \"N\":\n avai.append(self.eco.index(elem))\n new_born = random.choice(avai)\n self.eco[new_born] = \"F\" #born a same element\n\n # else:\n # continue\n n += 1\n print(\"step\" + str(n) +\":\" )\n print(self.eco)\n \ndef main():\n while True:\n river = input(\"please input the river length:\")\n if river.isdigit() == True:\n river = int(river)\n if river > 0:\n break\n else:\n print(\"should be positive integer\") \n else:\n print(\"should be a positive integer\")\n while True:\n fish = input(\"please input the number of fish:\")\n if fish.isdigit() == True:\n fish = int(fish)\n if fish >= 0 and fish <= river:\n break\n else:\n print(\"should be less than river length and positive integer\")\n else:\n print(\"should be integers\")\n while True:\n bear = input(\"please input the number of bear:\")\n if bear.isdigit() == True:\n bear = int(bear)\n if bear >= 0 and bear <= (river - fish):\n break\n else:\n print(\"should be positive and total amount should be less than the river length\")\n else:\n print(\"should be integers\")\n ecosys = Ecosystem()\n ecosys.setBear(bear)\n ecosys.setFish(fish)\n ecosys.setRiver(river)\n ecosys.setEco()\n while True:\n N = input(\"please input the number of steps:\")\n if N.isdigit() == True:\n N = int(N)\n if N >= 0:\n break\n else:\n print(\"should be positive integr\")\n print(ecosys.getEco())\n ecosys.simulation(N)\n \nmain()\n \n\n ","repo_name":"MaggieWensiLyu/csc1002","sub_path":"csc1001/Q3 A3.py","file_name":"Q3 A3.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20640206505","text":"from django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse_lazy\nimport urllib\n\n# This is initially from https://github.com/python-social-auth/social-core/blob/master/social_core/pipeline/user.py\ndef get_username(strategy, details, backend, user=None, *args, **kwargs):\n # Get the logged in user (if any)\n logged_in_user = strategy.storage.user.get_username(user)\n\n # Custom: check for email being provided\n if not details.get('email'):\n print('hola')\n error = \"Sorry, but your social network (Facebook or Google) needs to provide us your email address.\"\n return HttpResponseRedirect(reverse_lazy('tenant_login'))\n\n # Custom: if user is already logged in, double check his email matches the social network email\n if logged_in_user:\n if logged_in_user.lower() != details.get('email').lower():\n print('hola dos')\n error = \"Sorry, but you are already logged in with another account, and the email addresses do not match. 
Try logging out first, please.\"\n return HttpResponseRedirect(reverse_lazy('tenant_login'))\n\n return {\n 'username': details.get('email').lower(),\n }\n","repo_name":"dejuata/SCR","sub_path":"apps/users/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"16619232165","text":"\nimport math\nimport array as specializedarray\nimport vs\n\nfrom ladybug_vectorworks.togeometry import to_face3d\nfrom ladybug_geometry.geometry3d.plane import Plane\nfrom ladybug_geometry.geometry3d.face import Face3D\nfrom ladybug_geometry.geometry3d.mesh import Mesh3D\nfrom ladybug_geometry.geometry3d.ray import Ray3D\nfrom ladybug_geometry.geometry3d.pointvector import Vector3D, Point3D\n\n\nfrom ladybug_vectorworks.config import tolerance, angle_tolerance\n\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n\ndef join_geometry_to_mesh(geometry):\n\tobj = geometry\n\n\tfaces = []\n\tcobj = []\n\tfor ob in obj:\n\t\tf1 = vs.ConvertTo3DPolys(ob)\n\t\tcobj.append(f1)\n\t\th = vs.FInGroup(f1)\n\t\twhile h != None:\n\t\t\tfaces.append(h)\n\t\t\th = vs.NextObj(h)\n\n\n\tvs.BeginMesh()\n\tfor h in faces:\n\t\tvs.CreateDuplicateObject(h,vs.Handle(0))\n\tvs.EndMesh()\n\tmesh = vs.LNewObj()\n\n\tfor h in cobj:\n\t\tvs.Marionette_DisposeObj(h)\n\tfor h in obj:\n\t\tvs.Marionette_DisposeObj(h)\n\n\n\treturn mesh\n\n\t'''\n\tjoined_mesh = rg.Mesh()\n\tfor geo in geometry:\n\t\tif isinstance(geo, rg.Brep):\n\t\t\tmeshes = rg.Mesh.CreateFromBrep(geo, rg.MeshingParameters.Default)\n\t\t\tfor mesh in meshes:\n\t\t\t\tjoined_mesh.Append(mesh)\n\t\telif isinstance(geo, rg.Mesh):\n\t\t\tjoined_mesh.Append(geo)\n\t\telse:\n\t\t\traise TypeError('Geometry must be either a Brep or a Mesh. 
'\n\t\t\t\t\t\t\t'Not {}.'.format(type(geo)))\n\treturn joined_mesh\n\t'''\n\ndef join_geometry_to_brep(geometry):\n\t\"\"\"Convert an array of Rhino Breps and/or Meshes into a single Rhino Brep.\n\n\tThis is a typical pre-step before using the ray tracing functions.\n\n\tArgs:\n\t\tgeometry: An array of Rhino Breps or Rhino Meshes.\n\t\"\"\"\n\tjoined_mesh = join_geometry_to_mesh(geometry)\n\treturn joined_mesh\n\n\n\n\ndef vec3D_angle(va,vb):\n\t(ax,ay,az) = va\n\t(bx,by,bz) = vb\n\tvcos = (ax*bx+ay*by+az*bz)/((ax**2+ay**2+az**2)**0.5*(bx**2+by**2+bz**2)**0.5)\n\treturn math.acos(vcos)\n\ndef int_check(faces,ray):\n\n\trep = 1\n\tfor face in faces:\n\t\t\tre = face.intersect_line_ray(ray)\n\t\t\tif re != None:\n\t\t\t\trep = 0\n\t\t\t\tbreak\n\treturn rep\n\ndef intersect_mesh_rays(mesh, points, vectors, normals=None, cpu_count=0, parallel=True,max_dist=None):\n\tfaces = to_face3d(mesh)\n\tintersection_matrix = [0] * len(points)\t # matrix to be filled with results\n\tangle_matrix = [0] * len(normals) if normals is not None else None\n\tcutoff_angle = math.pi / 2\t# constant used in all normal checks\n\tif not parallel:\n\t\tcpu_count = 1\n\n\tdef intersect_point(i):\n\t\t\"\"\"Intersect all of the vectors of a given point without any normal check.\"\"\"\n\t\tpt = points[i]\n\t\tint_list = []\n\t\tfor vec in vectors:\n\t\t\t(px,py,pz) = pt\n\t\t\tp = Point3D(px,py,pz)\n\t\t\t(vx,vy,vz) = vec\n\t\t\tv = Vector3D(vx,vy,vz)\n\t\t\tray = Ray3D(p, v)\n\t\t\tif max_dist is not None:\n\t\t\t\tif (vx**2+vy**2+vz**2)**0.5 1:\n\t\t# group the points in order to meet the cpu_count\n\t\tpt_count = len(points)\n\t\tworker_count = min((cpu_count, pt_count))\n\t\ti_per_group = int(math.ceil(pt_count / worker_count))\n\t\tpt_groups = [[x, x + i_per_group] for x in range(0, pt_count, i_per_group)]\n\t\tpt_groups[-1][-1] = pt_count # ensure the last group ends with point count\n\n\tif normals is not None:\n\n\t\tif cpu_count is None: # use all available CPUs\n\t\t\twith ThreadPoolExecutor() as executor:\n\t\t\t\tfor i in range(len(points)):\n\t\t\t\t\texecutor.submit(intersect_point_normal_check,i)\n\t\telif cpu_count <= 1: # run everything on a single processor\n\t\t\tfor i in range(len(points)):\n\t\t\t\tintersect_point_normal_check(i)\n\t\telse: # run the groups in a manner that meets the CPU count\n\t\t\twith ThreadPoolExecutor() as executor:\n\t\t\t\tfor i in range(len(points)):\n\t\t\t\t\texecutor.submit(intersect_each_point_group_normal_check,i)\n\n\telse:\n\n\t\tif cpu_count is None: # use all available CPUs\n\t\t\twith ThreadPoolExecutor() as executor:\n\t\t\t\tfor i in range(len(points)):\n\t\t\t\t\texecutor.submit(intersect_point,i)\n\n\t\telif cpu_count <= 1: # run everything on a single processor\n\t\t\tfor i in range(len(points)):\n\t\t\t\tintersect_point(i)\n\t\telse: # run the groups in a manner that meets the CPU count\n\t\t\twith ThreadPoolExecutor() as executor:\n\t\t\t\tfor i in range(len(points)):\n\t\t\t\t\texecutor.submit(intersect_each_point_group,i)\n\n\treturn intersection_matrix, angle_matrix\n\ndef intersect_mesh_lines(\n\t\tmesh, start_points, end_points, max_dist=None, cpu_count=None, parallel=True):\n\tvectors =[]\n\tfor i ,ep in enumerate(end_points):\n\t\t(x1,y1,z1)=start_points[i]\n\t\t(x2,y2,z2)=ep\n\t\tvectors.append((x2-x1,y2-y1,z2-z1))\n\tintersection_matrix, angle_matrix = intersect_mesh_rays(mesh, start_points, vectors,\\\n\t\tnormals=None, cpu_count=cpu_count, parallel=parallel, max_dist=max_dist)\n\treturn intersection_matrix\n'''\n\tint_matrix = [0] * 
len(start_points) # matrix to be filled with results\n\tif not parallel:\n\t\tcpu_count = 1\n\n\tdef intersect_line(i):\n\t\t\"\"\"Intersect a line defined by a start and an end with the mesh.\"\"\"\n\t\tpt = start_points[i]\n\t\tint_list = []\n\t\tfor ept in end_points:\n\t\t\tlin = rg.Line(pt, ept)\n\t\t\tint_obj = rg.Intersect.Intersection.MeshLine(mesh, lin)\n\t\t\tis_clear = 1 if None in int_obj or len(int_obj) == 0 else 0\n\t\t\tint_list.append(is_clear)\n\t\tint_matrix[i] = int_list\n\n\tdef intersect_line_dist_check(i):\n\t\t\"\"\"Intersect a line with the mesh with a distance check.\"\"\"\n\t\tpt = start_points[i]\n\t\tint_list = []\n\t\tfor ept in end_points:\n\t\t\tlin = rg.Line(pt, ept)\n\t\t\tif lin.Length > max_dist:\n\t\t\t\tint_list.append(0)\n\t\t\telse:\n\t\t\t\tint_obj = rg.Intersect.Intersection.MeshLine(mesh, lin)\n\t\t\t\tis_clear = 1 if None in int_obj or len(int_obj) == 0 else 0\n\t\t\t\tint_list.append(is_clear)\n\t\tint_matrix[i] = int_list\n\n\tdef intersect_each_line_group(worker_i):\n\t\t\"\"\"Intersect groups of lines so that only the cpu_count is used.\"\"\"\n\t\tstart_i, stop_i = l_groups[worker_i]\n\t\tfor count in range(start_i, stop_i):\n\t\t\tintersect_line(count)\n\n\tdef intersect_each_line_group_dist_check(worker_i):\n\t\t\"\"\"Intersect groups of lines with distance check so only cpu_count is used.\"\"\"\n\t\tstart_i, stop_i = l_groups[worker_i]\n\t\tfor count in range(start_i, stop_i):\n\t\t\tintersect_line_dist_check(count)\n\n\tif cpu_count is not None and cpu_count > 1:\n\t\t# group the lines in order to meet the cpu_count\n\t\tl_count = len(start_points)\n\t\tworker_count = min((cpu_count, l_count))\n\t\ti_per_group = int(math.ceil(l_count / worker_count))\n\t\tl_groups = [[x, x + i_per_group] for x in range(0, l_count, i_per_group)]\n\t\tl_groups[-1][-1] = l_count\t# ensure the last group ends with line count\n\n\tif max_dist is not None:\n\t\tif cpu_count is None: # use all available CPUs\n\t\t\ttasks.Parallel.ForEach(range(len(start_points)), intersect_line_dist_check)\n\t\telif cpu_count <= 1: # run everything on a single processor\n\t\t\tfor i in range(len(start_points)):\n\t\t\t\tintersect_line_dist_check(i)\n\t\telse: # run the groups in a manner that meets the CPU count\n\t\t\ttasks.Parallel.ForEach(\n\t\t\t\trange(len(l_groups)), intersect_each_line_group_dist_check)\n\telse:\n\t\tif cpu_count is None: # use all available CPUs\n\t\t\ttasks.Parallel.ForEach(range(len(start_points)), intersect_line)\n\t\telif cpu_count <= 1: # run everything on a single processor\n\t\t\tfor i in range(len(start_points)):\n\t\t\t\tintersect_line(i)\n\t\telse: # run the groups in a manner that meets the CPU count\n\t\t\ttasks.Parallel.ForEach(\n\t\t\t\trange(len(l_groups)), intersect_each_line_group)\n\treturn int_matrix\n'''\n\n\ndef trace_ray(ray, breps, bounce_count=1):\n\t\"\"\"Get a list of Rhino points for the path a ray takes bouncing through breps.\n\n\tArgs:\n\t\tray: A Rhino Ray whose path will be traced through the geometry.\n\t\tbreps: An array of Rhino breps through with the ray will be traced.\n\t\tbounce_count: An positive integer for the number of ray bounces to trace\n\t\t\tthe sun rays forward. 
(Default: 1).\n\t\"\"\"\n\t### vectorworksに要置き換え\n\n\treturn rg.Intersect.Intersection.RayShoot(ray, breps, bounce_count)\n\n\n'''\ndef normal_at_point(brep, point):\n\t\"\"\"Get a Rhino vector for the normal at a specific point that lies on a brep.\n\n\tArgs:\n\t\tbreps: A Rhino brep on which the normal direction will be evaluated.\n\t\tpoint: A Rhino point on the input brep where the normal will be evaluated.\n\t\"\"\"\n\treturn brep.ClosestPoint(point, tolerance)[5]\n'''\ndef bounding_box(geometry, high_accuracy=False):\n\t\"\"\"Get a Rhino bounding box around an input Rhino Mesh or Brep.\n\n\tThis is a typical pre-step before using intersection functions.\n\n\tArgs:\n\t\tgeometry: A Rhino Brep or Mesh.\n\t\thigh_accuracy: If True, a physically accurate bounding box will be computed.\n\t\t\tIf not, a bounding box estimate will be computed. For some geometry\n\t\t\ttypes, there is no difference between the estimate and the accurate\n\t\t\tbounding box. Estimated bounding boxes can be computed much (much)\n\t\t\tfaster than accurate (or \"tight\") bounding boxes. Estimated bounding\n\t\t\tboxes are always similar to or larger than accurate bounding boxes.\n\t\"\"\"\n\toy,ox,oz = vs.Get3DInfo(geometry)\n\t(cx,cy),cz = vs.Get3DCntr(geometry)\n\n\tp1x = cx - ox/2\n\tp1y = cy - oy/2\n\tp1z = cz - oz/2\n\tp2x = cx + ox/2\n\tp2y = cy + oy/2\n\tp2z = cz + oz/2\n\treturn [(p1x,p1y,p1z),(p2x,p2y,p2z)]\n\t#return geometry.GetBoundingBox(high_accuracy)\n\ndef bounding_box_extents(geometry, high_accuracy=False):\n\t\"\"\"Get min and max points around an input Rhino Mesh or Brep\n\n\tArgs:\n\t\tgeometry: A Rhino Brep or Mesh.\n\t\thigh_accuracy: If True, a physically accurate bounding box will be computed.\n\t\t\tIf not, a bounding box estimate will be computed. For some geometry\n\t\t\ttypes, there is no difference between the estimate and the accurate\n\t\t\tbounding box. Estimated bounding boxes can be computed much (much)\n\t\t\tfaster than accurate (or \"tight\") bounding boxes. Estimated bounding\n\t\t\tboxes are always similar to or larger than accurate bounding boxes.\n\t\"\"\"\n\tb_box = bounding_box(geometry, high_accuracy)\n\treturn b_box[1], b_box[0]\n\n\ndef intersect_solids_parallel(solids, bound_boxes, cpu_count=None):\n\t\"\"\"Intersect the co-planar faces of an array of solids using parallel processing.\n\n\tArgs:\n\t\toriginal_solids: An array of closed Rhino breps (polysurfaces) that do\n\t\t\tnot have perfectly matching surfaces between adjacent Faces.\n\t\tbound_boxes: An array of Rhino bounding boxes that parallels the input\n\t\t\tsolids and will be used to check whether two Breps have any potential\n\t\t\tfor intersection before the actual intersection is performed.\n\t\tcpu_count: An integer for the number of CPUs to be used in the intersection\n\t\t\tcalculation. The ladybug_rhino.grasshopper.recommended_processor_count\n\t\t\tfunction can be used to get a recommendation. If None, all available\n\t\t\tprocessors will be used. 
(Default: None).\n\t\tparallel: Optional boolean to override the cpu_count and use a single CPU\n\t\t\tinstead of multiple processors.\n\n\tReturns:\n\t\tint_solids -- The input array of solids, which have all been intersected\n\t\twith one another.\n\t\"\"\"\n\treturn intersect_solids(solids, bound_boxes)\n\t'''\n\tint_solids = solids[:]\t# copy the input list to avoid editing it\n\n\tdef intersect_each_solid(i):\n\t\t\"\"\"Intersect a solid with all of the other solids of the list.\"\"\"\n\t\tbb_1 = bound_boxes[i]\n\t\t# intersect the solids that come after this one\n\t\tfor j, bb_2 in enumerate(bound_boxes[i + 1:]):\n\t\t\tif not overlapping_bounding_boxes(bb_1, bb_2):\n\t\t\t\tcontinue # no overlap in bounding box; intersection impossible\n\t\t\tsplit_brep1, int_exists = \\\n\t\t\t\tintersect_solid(int_solids[i], int_solids[i + j + 1])\n\t\t\tif int_exists:\n\t\t\t\tint_solids[i] = split_brep1\n\t\t# intersect the solids that come before this one\n\t\tfor j, bb_2 in enumerate(bound_boxes[:i]):\n\t\t\tif not overlapping_bounding_boxes(bb_1, bb_2):\n\t\t\t\tcontinue # no overlap in bounding box; intersection impossible\n\t\t\tsplit_brep2, int_exists = intersect_solid(int_solids[i], int_solids[j])\n\t\t\tif int_exists:\n\t\t\t\tint_solids[i] = split_brep2\n\n\tdef intersect_each_solid_group(worker_i):\n\t\t\"\"\"Intersect groups of solids so that only the cpu_count is used.\"\"\"\n\t\tstart_i, stop_i = s_groups[worker_i]\n\t\tfor count in range(start_i, stop_i):\n\t\t\tintersect_each_solid(count)\n\n\tif cpu_count is None or cpu_count <= 1:\t # use all available CPUs\n\t\ttasks.Parallel.ForEach(range(len(solids)), intersect_each_solid)\n\telse: # group the solids in order to meet the cpu_count\n\t\tsolid_count = len(int_solids)\n\t\tworker_count = min((cpu_count, solid_count))\n\t\ti_per_group = int(math.ceil(solid_count / worker_count))\n\t\ts_groups = [[x, x + i_per_group] for x in range(0, solid_count, i_per_group)]\n\t\ts_groups[-1][-1] = solid_count\t# ensure the last group ends with solid count\n\t\ttasks.Parallel.ForEach(range(len(s_groups)), intersect_each_solid_group)\n\n\treturn int_solids\n\t'''\n\ndef intersect_solids(solids, bound_boxes):\n\t\"\"\"Intersect the co-planar faces of an array of solids.\n\n\tArgs:\n\t\toriginal_solids: An array of closed Rhino breps (polysurfaces) that do\n\t\t\tnot have perfectly matching surfaces between adjacent Faces.\n\t\tbound_boxes: An array of Rhino bounding boxes that parallels the input\n\t\t\tsolids and will be used to check whether two Breps have any potential\n\t\t\tfor intersection before the actual intersection is performed.\n\n\tReturns:\n\t\tint_solids -- The input array of solids, which have all been intersected\n\t\twith one another.\n\t\"\"\"\n\tint_solids = solids[:]\t# copy the input list to avoid editing it\n\n\tfor i, bb_1 in enumerate(bound_boxes):\n\t\tfor j, bb_2 in enumerate(bound_boxes[i + 1:]):\n\t\t\tif not overlapping_bounding_boxes(bb_1, bb_2):\n\t\t\t\tcontinue # no overlap in bounding box; intersection impossible\n\n\t\t\t# split the first solid with the second one\n\t\t\tsplit_brep1, int_exists = intersect_solid(\n\t\t\t\tint_solids[i], int_solids[i + j + 1])\n\t\t\tint_solids[i] = split_brep1\n\n\t\t\t# split the second solid with the first one if an intersection was found\n\t\t\tif int_exists:\n\t\t\t\tsplit_brep2, int_exists = intersect_solid(\n\t\t\t\t\tint_solids[i + j + 1], int_solids[i])\n\t\t\t\tint_solids[i + j + 1] = split_brep2\n\n\treturn int_solids\n\n\n\n\n\ndef intersect_solid(solid, 
other_solid):\n\t\"\"\"Intersect the co-planar faces of one solid Brep using another.\n\n\tArgs:\n\t\tsolid: The solid Brep which will be split with intersections.\n\t\tother_solid: The other Brep, which will be used to split.\n\n\tReturns:\n\t\tA tuple with two elements\n\n\t\t-\tsolid -- The input solid, which has been split.\n\n\t\t-\tintersection_exists -- Boolean to note whether an intersection was found\n\t\t\tbetween the solid and the other_solid. If False, there's no need to\n\t\t\tsplit the other_solid with the input solid.\n\t\"\"\"\n\t# variables to track the splitting process\n\tre , solid = vs.IntersectSolid(solid, other_solid)\n\tif re ==0 and solid is not None:\n\t\tintersection_exists = True\n\telse:\n\t\tintersection_exists = False\n\treturn solid, intersection_exists\n\t'''intersection_exists = False\t # boolean to note whether an intersection exists\n\ttemp_brep = solid.Split(other_solid, tolerance)\n\tif len(temp_brep) != 0:\n\t\tsolid = rg.Brep.JoinBreps(temp_brep, tolerance)[0]\n\t\tsolid.Faces.ShrinkFaces()\n\t\tintersection_exists = True\n\treturn solid, intersection_exists\n\t'''\n\n\ndef overlapping_bounding_boxes(bound_box1, bound_box2):\n\t\"\"\"Check if two Rhino bounding boxes overlap within the tolerance.\n\n\tThis is particularly useful as a check before performing computationally\n\tintense intersection processes between two bounding boxes. Checking the\n\toverlap of the bounding boxes is extremely quick given this method's use\n\tof the Separating Axis Theorem. This method is built into the intersect_solids\n\tfunctions in order to improve its calculation time.\n\n\tArgs:\n\t\tbound_box1: The first bound_box to check.\n\t\tbound_box2: The second bound_box to check.\n\t\"\"\"\n\t# Bounding box check using the Separating Axis Theorem\n\t(p11x,p11y,p11z),(p12x,p12y,p12z) = bound_box1\n\t(p21x,p21y,p21z),(p22x,p22y,p22z) = bound_box1\n\n\tbb1_width = p12x-p11x #bound_box1.Max.X - bound_box1.Min.X\n\tbb2_width = p22x-p21x #bound_box2.Max.X - bound_box2.Min.X\n\tdist_btwn_x = abs((p11x+p12x)/2 - (p21x+p22x)/2) #abs(bound_box1.Center.X - bound_box2.Center.X)\n\tx_gap_btwn_box = dist_btwn_x - (0.5 * bb1_width) - (0.5 * bb2_width)\n\n\tbb1_depth = p12y-p11y #bound_box1.Max.Y - bound_box1.Min.Y\n\tbb2_depth = p22y-p21y #bound_box2.Max.Y - bound_box2.Min.Y\n\tdist_btwn_y = abs((p11y+p12y)/2 - (p21y+p22y)/2) #abs(bound_box1.Center.Y - bound_box2.Center.Y)\n\ty_gap_btwn_box = dist_btwn_y - (0.5 * bb1_depth) - (0.5 * bb2_depth)\n\n\tbb1_height = p12z-p11z #bound_box1.Max.Z - bound_box1.Min.Z\n\tbb2_height = p22z-p21z #bound_box2.Max.Z - bound_box2.Min.Z\n\tdist_btwn_z = abs((p11z+p12z)/2 - (p21z+p22z)/2) #abs(bound_box1.Center.Z - bound_box2.Center.Z)\n\tz_gap_btwn_box = dist_btwn_z - (0.5 * bb1_height) - (0.5 * bb2_height)\n\n\tif x_gap_btwn_box > tolerance or y_gap_btwn_box > tolerance or \\\n\t\t\tz_gap_btwn_box > tolerance:\n\t\treturn False # no overlap\n\treturn True\t # overlap exists\n\n\n'''\ndef split_solid_to_floors(building_solid, floor_heights):\n\t\"\"\"Extract a series of planar floor surfaces from solid building massing.\n\n\tArgs:\n\t\tbuilding_solid: A closed brep representing a building massing.\n\t\tfloor_heights: An array of float values for the floor heights, which\n\t\t\twill be used to generate planes that subdivide the building solid.\n\n\tReturns:\n\t\tfloor_breps -- A list of planar, horizontal breps representing the floors\n\t\tof the building.\n\t\"\"\"\n\t# get the floor brep at each of the floor heights.\n\tfloor_breps = []\n\tfor hgt in 
floor_heights:\n\t\tstory_breps = []\n\t\tfloor_base_pt = rg.Point3d(0, 0, hgt)\n\t\tsection_plane = rg.Plane(floor_base_pt, rg.Vector3d.ZAxis)\n\t\tfloor_crvs = rg.Brep.CreateContourCurves(building_solid, section_plane)\n\t\ttry: # Assume a single contour curve has been found\n\t\t\tfloor_brep = rg.Brep.CreatePlanarBreps(floor_crvs, tolerance)\n\t\texcept TypeError: # An array of contour curves has been found\n\t\t\tfloor_brep = rg.Brep.CreatePlanarBreps(floor_crvs)\n\t\tif floor_brep is not None:\n\t\t\tstory_breps.extend(floor_brep)\n\t\tfloor_breps.append(story_breps)\n\n\treturn floor_breps\n'''\n\n'''\ndef geo_min_max_height(geometry):\n\t\"\"\"Get the min and max Z values of any input object.\n\n\tThis is useful as a pre-step before the split_solid_to_floors method.\n\t\"\"\"\n\t# intersection functions changed in Rhino 7.15 such that we now need 2* tolerance\n\tadd_val = tolerance * 2 if (7, 15) <= rhino_version < (7, 17) else 0\n\tbound_box = geometry.GetBoundingBox(rg.Plane.WorldXY)\n\treturn bound_box.Min.Z + add_val, bound_box.Max.Z\n'''","repo_name":"onokennote/Ladybug-tool_for_Vectorworks","sub_path":"ladybug_vectorworks/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":19287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"70161223132","text":"import matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\n\nfrom src.one_v_all import OneVAllClassifier\n\n\ndef cv(X, y, lamb):\n print(\"Cross Validating Lambda:\", lamb)\n n = 5\n acc = 0\n kf = KFold(n_splits=n)\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n ova = OneVAllClassifier(lamb=lamb)\n ova.fit(X_train, y_train)\n\n # Add the accuracy score to the accumulator\n acc += ova.test(X_test, y_test)\n\n # Return average accuracy\n return acc / n\n\n\ndef plot_lambdas(cv_score, lambdas):\n plt.plot(lambdas, cv_score)\n plt.title(\"Cross Validated Avg. Accuracy vs. 
Lambda \\nfor One Vs All Pegasos\")\n plt.xlabel(\"Lambda for Pegasos (log2)\")\n plt.ylabel(\"CV Average Accuracy (k = 5)\")\n plt.show()\n\n\ndef find_best(X, y):\n lambas = [2 ** i for i in range(-5, 2)]\n cv_score = [cv(X, y, lamb=l) for l in lambas]\n\n # Plot the scores\n plot_lambdas(cv_score, lambas)\n\n return sorted(zip(cv_score, lambas), reverse=True)[0]\n","repo_name":"css459/intro-to-ml-hw5","sub_path":"src/cross_validate.py","file_name":"cross_validate.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9596470785","text":"import sys\nsys.stdin = open('input.txt')\n\n\nfrom collections import deque\nimport copy\n\n\nN, M = map(int, input().split())\nres = 0\nmat = []\nvirus = []\nfor r in range(N):\n tmp_lst = list(map(int, input().split()))\n mat.append(tmp_lst)\n for c in range(M):\n if tmp_lst[c] == 2:\n virus.append((r, c))\n\ndef count_zero():\n global res\n tmp_mat = copy.deepcopy(mat)\n q = deque(virus)\n while q:\n r, c = q.popleft()\n for dr, dc in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n nr, nc = r + dr, c + dc\n if 0<=nr', views.pedals_detail, name='details'),\n path('pedals/create/', views.PedalCreate.as_view(), name='pedals_create'),\n path('pedals//update/', views.PedalUpdate.as_view(), name='pedals_update'),\n path('pedals//delete/', views.PedalDelete.as_view(), name='pedals_delete'),\n path('pedals//add_show/', views.add_show, name='add_show'),\n path('pedals//assoc_instrument//', views.assoc_instrument, name='assoc_instrument'),\n\n\n # instrument urls\n path('instruments/', views.instruments_index, name='all_instruments'),\n path('instruments//', views.instrument_detail, name='instrument_detail'),\n path('instruments/create/', views.Create_instrument.as_view(), name='create_instrument'),\n path('instruments//update/', views.Update_instrument.as_view(), name='update_instrument'),\n path('instruments//delete/', views.Delete_instrument.as_view(), name='delete_instrument'),\n path('pedals//add_photo/', views.add_photo, name='add_photo'),\n path('accounts/signup/', views.signup, name='signup'),\n]\n","repo_name":"jcoles1155/pedalcollector","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15809625740","text":"class Cat:\r\n def __init__(self, name, age):\r\n self.name = name\r\n self.age = age\r\n def instance(self):\r\n print(\"I am \", self.name ,\" i have \" ,self.age ,\"years old\")\r\n\r\n\r\ncat1 = Cat(\"kodak\", \"3\")\r\ncat2 = Cat(\"Diesel\", \"9\")\r\ncat3 = Cat(\"rambo\", \"4\")\r\n\r\n#create a function that finds the oldest cat and return the funtion\r\n\r\ndef oldest():\r\n if cat1.age > cat2.age and cat1.age > cat3.age:\r\n print(f\"The oldest cat is: {cat1.name} and he has {cat1.age} years old\")\r\n elif cat2.age > cat1.age and cat2.age > cat3.age:\r\n print(f\"The oldest cat is: {cat2.name} and he has {cat2.age} years old\")\r\n elif cat3.age > cat1.age and cat3.age > cat2.age:\r\n print(f\"The oldest cat is: {cat3.name} and he has {cat3.age} years old\")\r\noldest()\r\n\r\n","repo_name":"Mengawanji/Dev-Ins","sub_path":"Week 5/day 1/xp.py","file_name":"xp.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2152038435","text":"from datetime import datetime\nimport forms.main_form as 
main_form\n\nclass ShowLogs(main_form.Ui_MainWindow):\n def __init__(self, parent):\n super().__init__()\n self.parent = parent\n \n \n \n def show_logs(self, text):\n print(\"\\n\" + text)\n today = datetime.today().strftime(\"%d-%m-%Y %H:%M:%S\")\n self.parent.plainTextEdit_logs.appendPlainText(str(today) + \" -> \" + text)\n f = open(\"logs/program/logs.txt\", \"a\")\n f.write(str(today) + \" -> \" + text + \"\\n\")\n f.close()","repo_name":"DanielMamaev/MonCenter","sub_path":"modules/show_logs.py","file_name":"show_logs.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"2812204787","text":"class DocumentTreeError(Exception):\n def __init__(self, title, message):\n super().__init__(title, message)\n self.title = title\n self.message = message\n\n @staticmethod\n def cycle_error(problem_uid, cycled_uids):\n return DocumentTreeError(\n (\n \"a cycle detected: requirements in the document tree must not \"\n \"reference each other.\"\n ),\n (\n f\"Problematic UID: {problem_uid}\\n\"\n f\"Cycle: {', '.join(cycled_uids)}\"\n ),\n )\n\n def to_print_message(self):\n message = f\"error: document tree: {self.title}\\n{self.message}\\n\"\n return message\n","repo_name":"strictdoc-project/strictdoc","sub_path":"strictdoc/backend/sdoc/errors/document_tree_error.py","file_name":"document_tree_error.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"32"} +{"seq_id":"27600630104","text":"#Defining a variable, assigning a value\nday_of_week = \"Monday\"\n\n#Assign a new value\nday_of_week = \"Tuesday\"\n\n#Assign the value of one variable to another variable\ntoday = day_of_week\n\n#Python is a language with **dynamic typing** it means\n#Python allows you to assign values of different types to the same variable.\nmonth = \"December\"\nprint(type(month)) # \n\nmonth = 12\nprint(type(month)) # \n\n\n\n\n\n\n","repo_name":"Sddilora/PythonByExample","sub_path":"variable_types/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"72631426650","text":"import sqlite3\nimport os\n\nDIRECTORY_PATH = os.path.abspath('.')\nCONNECT_STRING = os.path.join(\n DIRECTORY_PATH,\n 'data',\n 'hotel.db'\n)\n\nhotels = [\n {\n 'hotel_id': 'alpha',\n 'name': 'Alpha Hotel',\n 'star': 4.3,\n 'price': 420.34,\n 'city': 'Rio de Janeiro',\n },\n {\n 'hotel_id': 'bravo',\n 'name': 'Bravo Hotel',\n 'star': 4.4,\n 'price': 3800.90,\n 'city': 'Santa Catarina',\n },\n {\n 'hotel_id': 'charlie',\n 'name': 'Charlie Hotel',\n 'star': 3.9,\n 'price': 320.2,\n 'city': 'Santa Catarina',\n },\n]\nhotels = [tuple(hotel.values()) for hotel in hotels]\n\ndef create_hotel_table():\n try:\n connection = sqlite3.connect(CONNECT_STRING)\n cursor = connection.cursor()\n\n sql_create_hotel_table = \"\"\"\n CREATE TABLE IF NOT EXISTS hotels (\n hotel_id text PRIMARY KEY,\n name text,\n star real,\n price real,\n city text\n )\n \"\"\"\n cursor.execute(sql_create_hotel_table)\n \n except Exception as err:\n print(f'ERR: {err}')\n\n finally:\n cursor.close()\n connection.close()\n\ndef insert_hotel(hotels: list):\n # Só vai criar a tabela no caso que ela não exista.\n create_hotel_table()\n\n sql_insert = \"\"\"\n INSERT INTO HOTELS (\n hotel_id,\n name,\n star,\n price,\n city\n ) VALUES (\n ?,\n ?,\n ?,\n ?,\n ?\n )\n 
\"\"\"\n\n try:\n connection = sqlite3.connect(CONNECT_STRING)\n cursor = connection.cursor()\n\n cursor.executemany(\n sql_insert,\n hotels\n )\n\n connection.commit()\n\n finally:\n cursor.close()\n connection.close()\n\n","repo_name":"JoaoVictorSou/hotel-restful-api","sub_path":"util/database/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32440131098","text":"from time import sleep\n\n\ndef contador(i, f, p):\n if p < 0:\n p *= -1\n if p == 0:\n p = 1\n print('-='*20)\n print(f'Contagem de {i} até {f} de {p} em {p}')\n sleep(1)\n\n if i < f:\n c = i\n while c <= f:\n sleep(0.5)\n print(f'{c}', end=' ')\n c += p\n print('FIM!')\n else:\n c = i\n while c >= f:\n sleep(0.5)\n print(f'{c}', end=' ')\n c -= p\n print('FIM!')\n\n\ncontador(1, 10, 1)\ncontador(10, 0, 2)\nprint('Agora é sua vez de personalizar a contagem')\na = int(input('Inicio: '))\nb = int(input('Fim: '))\nc = int(input('Passo: '))\ncontador(a, b, c)","repo_name":"gustavofcosta/curso-python","sub_path":"Totos exercícios e desafios/ex098v01.py","file_name":"ex098v01.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3386054237","text":"altura = float (input ('Digite a altura em metros: '))\npeso = float (input ('Digite o peso em kg: '))\nimc = float (peso/ (altura*altura))\n\nif imc < 18.5:\n print ('VOCE ESTÁ ABAIXO DO PESO COM IMC DE {:.2f}' .format (imc))\n\nelif 18.5 <= imc < 25:\n print ('VOCE ESTÁ NO PESO IDEAL COM IMC DE {:.2f}' .format (imc))\n\nelif 25 <= imc < 30:\n print ('VOCE ESTÁ EM SOBREPESO COM IMC DE {:.2f}' .format (imc))\n\nelif 30 <= imc < 40:\n print ('VOCE ESTÁ EM OBESIDADE COM IMC DE {:.2f}' .format (imc))\n\nelif 40 <= imc:\n print ('VOCE POSSUI OBESIDADE MÓRBIDA COM IMC DE {:.2f}' .format (imc))\n","repo_name":"Erick080/pythonStuff","sub_path":"imc.py","file_name":"imc.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72784553371","text":"import pandas as pd\nimport numpy as np\nimport re\nfrom fuzzywuzzy import fuzz\nfrom cleanco import cleanco #Altered version of cleanco v1.3\n\n\nnumberOfEmployersToCluster = 100\ntestThresholds = [100, 99, 98, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50]\n\n\nprint(\"Loading data...\")\ndata = pd.read_csv('../../clean/all_clean_data.csv', encoding=\"utf-8\", dtype=str)\nprint(\"Data loading complete. 
Dataframe shape is \", data.shape)\n\ndata = data[data['CASE_STATUS'] == 'CERTIFIED']\ndata['SOURCE_FILE'] = data['SOURCE_FILE'].str.replace('.csv', ''\n\t).str.replace('yr', '').str.replace('efile_2006', '2006_efile').str.replace('efile_2007', '2007_efile'\n\t).str.replace('efile_2008', '2008_efile').str.replace('fax_2006', '2006_fax'\n\t).str.replace('2009_old', '2009_efile').str.replace('2009_new', '2009')\n\ndata['NAIC_CODE'] = data['NAIC_CODE'].apply(lambda x: np.nan if pd.isnull(x) else str(x).replace('.0',''))\ndata['EMPLOYER_AREA_CODE'] = data['EMPLOYER_AREA_CODE'].apply(lambda x: np.nan if pd.isnull(x) else str(x))\n\ndata = data.loc[:,['EMPLOYER_NAME', 'NAIC_CODE', \n\t'EMPLOYER_ADDRESS', 'EMPLOYER_AREA_CODE', 'EMPLOYER_CITY', 'EMPLOYER_STATE', 'EMPLOYER_POSTAL_CODE', \n\t'SOURCE_FILE']]\n\ndata = data.astype(str).applymap(lambda x: x.strip(' ').upper())\n\ne = data.groupby( ['EMPLOYER_NAME', 'NAIC_CODE', \n\t'EMPLOYER_ADDRESS', 'EMPLOYER_AREA_CODE', 'EMPLOYER_CITY', 'EMPLOYER_STATE', 'EMPLOYER_POSTAL_CODE']\n\t)['SOURCE_FILE'].value_counts().unstack(fill_value=0).reset_index()\n\ne = e.replace('NAN', np.nan)\n\ne['TOTAL'] = e.sum(axis=1)\n\ne = e.sort_values(['EMPLOYER_NAME', 'EMPLOYER_STATE', 'EMPLOYER_CITY', \n\t'EMPLOYER_POSTAL_CODE', 'EMPLOYER_ADDRESS', 'NAIC_CODE', 'EMPLOYER_AREA_CODE'])\n\nprint(\"Employers shape is \", e.shape)\n\n\ne['name'] = None\ne['co_type'] = None\ne['subsidiaryOf'] = None\ne['alias'] = None\ne['formerly'] = None\ne['other'] = None\n\ndef createIndustryMapping(x):\n if(pd.isnull(x)):\n industry = None\n else:\n naic = str(x)\n if(naic[0:4] == '6113'):\n industry = 'Colleges & Universities'\n elif(naic[0:2] in ['61', '92']):\n industry = 'Other Educational, Public Affairs'\n elif(naic[0:4] in ['5112', '5415']):\n industry = 'Software Publishers, Computer Services'\n elif(naic[0:4] in ['5413', '5417']):\n industry = 'Engineering & Scientific R&D Services'\n elif(naic[0:4] == '5416' or naic[0:2] == '55'):\n industry = 'Management, Consulting & Technical Services'\n elif(naic[0:4] in ['5411', '5412']):\n industry = 'Legal & Accounting Services'\n elif(naic[0:4] in ['5414', '5418'] or naic[0:2] == '51'):\n industry = 'Media, Advertising, Telecommunications'\n elif(naic[0:2] == '54' or naic[0:3] == '561'):\n industry = 'Other Professional & Administrative Services'\n elif(naic[0:3] == '335'):\n industry = 'Manufacturing - Computers & Electronics'\n elif(naic[0:2] in ['31', '32', '33']):\n industry = 'Manufacturing - Other'\n elif(naic[0:2] in ['52', '53']):\n industry = 'Finance, Insurance, Real Estate'\n elif(naic[0:2] in ['42', '44', '45', '48', '49']):\n industry = 'Trade, Transportation, Warehousing'\n elif(naic[0:2] in ['62']):\n industry = 'Healthcare'\n else:\n #Commodities, Energy, Utilities, Construction, Arts & Entertainment, Accomodation, Other Services, & Unknown\n industry = 'Other'\n return industry\n\ne['industry'] = e['NAIC_CODE'].apply(createIndustryMapping) \n\n#Begin employer name cleaning\nphraseLookup = {\n 'subsidiaryOf': [' A SUBSIDIARY OF ', ' SUBSIDIARY OF ', ' A SUB OF ', ' SUB OF ', \n ' A DIVISION OF ', ' DIVISION OF ', ' A PART OF ', ' PART OF '],\n 'alias': [' AKA ', ' A/K/A ', ' DBA ', ' D/B/A '],\n 'formerly': [' PREVIOUSLY KNOWN AS ', ' PREVIOUSLY KNOWN ', ' PREVIOUSLY KNOW AS ' , ' PREVIOUSLY ', \n ' FORMERLY KNOWN AS ', ' FORMERLY KNOW AS ', ' FORMERLY ', ' FKA ']\n }\n\nregex = {}\nfor k,v in phraseLookup.items():\n regex[k] = re.compile('|'.join(v))\n\ndef cleanEmployerName(x):\n name = x['EMPLOYER_NAME']\n \n if(not 
pd.isnull(name)): \n name = re.sub('[\"\\']' , '' , re.sub(r'[;:-=~]+', ',', name))\n \n #Get secondary names\n if(re.search(', A ', name)):\n main = re.sub(', A .*', '', name)\n secondary = re.sub('.*, ', '', name)\n elif(re.search('\\(', name)):\n main = re.sub('\\(.*', '', name)\n after = re.sub('.*\\(', ' ', name)\n secondary = re.sub('\\).*', '', after)\n if(re.sub('\\)', '', after) != after):\n main += re.sub('.*\\)', '', after)\n else:\n main = name.strip()\n secondary = None\n \n secondTypes = {'subsidiaryOf':None, 'alias':None, 'formerly':None, 'other':None}\n \n for k,v in regex.items():\n match = regex[k].search(main)\n if(match):\n secondTypes[k] = main[match.end():]\n main = main[:match.start()]\n elif(secondary):\n match = regex[k].search(secondary)\n if(match):\n secondTypes[k] = secondary[match.end():]\n secondary = secondary[:match.start()]\n\n if(secondary):\n secondTypes['other'] = secondary.strip()\n \n co = cleanco(main)\n cotype = co.type()\n if(cotype):\n main = co.clean_name()\n x['co_type'] = cotype[0]\n \n for k,v in secondTypes.items() :\n if(v):\n co = cleanco(v)\n secondTypes[k] = co.clean_name()\n if(not cotype):\n cotype = co.type()\n if(cotype):\n x['co_type'] = cotype[0]\n \n x['name'] = re.sub('[ ]+', ' ', re.sub('[^\\w ]', '', re.sub('([^\\w ][ ]|[ ][^\\w ])', ' ', ' ' + main + ' '))).strip()\n for k,v in secondTypes.items():\n if(v):\n x[k] = re.sub('[ ]+', ' ', re.sub('[^\\w ]', '', re.sub('([^\\w ][ ]|[ ][^\\w ])', ' ', ' ' + v + ' '))).strip()\n \n return x\n \ne = e.apply(cleanEmployerName, axis = 1)\nprint('Employer name cleaning complete.')\n\n#Create similarity score for any two employers x & y\ndef compareEmployers(x, y):\n #define weights\n w = {'employer_name': 25,\n 'name': 50,\n 'subsidiaryOf': 10,\n 'alias': 10,\n 'formerly': 10,\n 'other': 5,\n 'nameWithSubsidiaryOf': 25, #best of name, name with subsidiaryOf\n 'nameWithAlias': 25, #best of name, name with alias\n 'nameWithFormerly': 25, #best of name, name with formerly\n 'nameWithOther': 5, #best of name, name with other\n 'naic': 15,\n 'industry': 15, \n 'address': 30, \n 'city': 15, \n 'state': 5, \n 'zipcode': 15, \n 'areacode': 10}\n \n employer_name = max([fuzz.token_set_ratio(e['EMPLOYER_NAME'][x], e['EMPLOYER_NAME'][y]),\n fuzz.partial_ratio(e['EMPLOYER_NAME'][x], e['EMPLOYER_NAME'][y]) ])\n \n name = max([fuzz.token_set_ratio(e['name'][x], e['name'][y]),\n fuzz.partial_ratio(e['name'][x], e['name'][y]) ])\n \n if(pd.isnull(e['subsidiaryOf'][x]) or pd.isnull(e['subsidiaryOf'][y])):\n subsidiaryOf = 0\n w['subsidiaryOf'] = 0\n else:\n subsidiaryOf = max([fuzz.token_set_ratio(e['subsidiaryOf'][x], e['subsidiaryOf'][y]),\n fuzz.partial_ratio(e['subsidiaryOf'][x], e['subsidiaryOf'][y]) ])\n \n if(pd.isnull(e['alias'][x]) or pd.isnull(e['alias'][y])):\n alias = 0\n w['alias'] = 0\n else:\n alias = max([fuzz.token_set_ratio(e['alias'][x], e['alias'][y]),\n fuzz.partial_ratio(e['alias'][x], e['alias'][y]) ])\n\n if(pd.isnull(e['formerly'][x]) or pd.isnull(e['formerly'][y])):\n formerly = 0\n w['formerly'] = 0\n else:\n formerly = max([fuzz.token_set_ratio(e['formerly'][x], e['formerly'][y]),\n fuzz.partial_ratio(e['formerly'][x], e['formerly'][y]) ])\n\n if(pd.isnull(e['other'][x]) or pd.isnull(e['other'][y])):\n other = 0\n w['other'] = 0\n else:\n other = max([fuzz.token_set_ratio(e['other'][x], e['other'][y]),\n fuzz.partial_ratio(e['other'][x], e['other'][y]) ])\n \n \n nameWithSubsidiaryOf = max(\n name,\n 0 if pd.isnull(e['subsidiaryOf'][x]) else max(\n 
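# cleanEmployerName above peels "DBA"/"FKA"-style phrases off the main name
# with one alternation regex per category; a minimal sketch of that mechanic
# (the phrase list is a small subset of the original lookup table):
import re

alias_re = re.compile(" AKA | A/K/A | DBA | D/B/A ")

def split_alias(raw: str):
    match = alias_re.search(raw)
    if not match:
        return raw.strip(), None
    # Text before the phrase is the main name, text after it the alias.
    return raw[: match.start()].strip(), raw[match.end() :].strip()

print(split_alias("ACME HOLDINGS DBA ACME SOFTWARE"))
# -> ('ACME HOLDINGS', 'ACME SOFTWARE')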
fuzz.token_set_ratio(e['subsidiaryOf'][x], e['name'][y]),\n fuzz.partial_ratio(e['subsidiaryOf'][x], e['name'][y]) \n ),\n 0 if pd.isnull(e['subsidiaryOf'][y]) else max(\n fuzz.token_set_ratio(e['name'][x], e['subsidiaryOf'][y]),\n fuzz.partial_ratio(e['name'][x], e['subsidiaryOf'][y]) \n )\n )\n \n nameWithAlias = max(\n name,\n 0 if pd.isnull(e['alias'][x]) else max(\n fuzz.token_set_ratio(e['alias'][x], e['name'][y]),\n fuzz.partial_ratio(e['alias'][x], e['name'][y]) \n ),\n 0 if pd.isnull(e['alias'][y]) else max(\n fuzz.token_set_ratio(e['name'][x], e['alias'][y]),\n fuzz.partial_ratio(e['name'][x], e['alias'][y]) \n )\n )\n \n nameWithFormerly = max(\n name,\n 0 if pd.isnull(e['formerly'][x]) else max(\n fuzz.token_set_ratio(e['formerly'][x], e['name'][y]),\n fuzz.partial_ratio(e['formerly'][x], e['name'][y]) \n ),\n 0 if pd.isnull(e['formerly'][y]) else max(\n fuzz.token_set_ratio(e['name'][x], e['formerly'][y]),\n fuzz.partial_ratio(e['name'][x], e['formerly'][y]) \n )\n ) \n \n nameWithOther = max(\n name,\n 0 if pd.isnull(e['other'][x]) else max(\n fuzz.token_set_ratio(e['other'][x], e['name'][y]),\n fuzz.partial_ratio(e['other'][x], e['name'][y]) \n ),\n 0 if pd.isnull(e['other'][y]) else max(\n fuzz.token_set_ratio(e['name'][x], e['other'][y]),\n fuzz.partial_ratio(e['name'][x], e['other'][y]) \n )\n )\n \n \n if(pd.isnull(e['NAIC_CODE'][x]) or pd.isnull(e['NAIC_CODE'][y])):\n naic = 0\n w['naic'] = 0\n else:\n if(e['NAIC_CODE'][x] == e['NAIC_CODE'][y]):\n naic = 100\n elif(e['NAIC_CODE'][x][0:6] == e['NAIC_CODE'][y][0:6]):\n naic = 98\n elif(e['NAIC_CODE'][x][0:4] == e['NAIC_CODE'][y][0:4]):\n naic = 90\n elif(e['NAIC_CODE'][x][0:2] == e['NAIC_CODE'][y][0:2]):\n naic = 75\n else:\n naic = 0\n \n if(pd.isnull(e['industry'][x]) or pd.isnull(e['industry'][y])):\n industry = 0\n w['industry'] = 0\n else:\n if(e['industry'][x] == e['industry'][y]):\n industry = 100\n else:\n industry = 0\n \n if(pd.isnull(e['EMPLOYER_ADDRESS'][x]) or pd.isnull(e['EMPLOYER_ADDRESS'][y])):\n address = 0\n w['address'] = 0\n else:\n address = max([fuzz.token_set_ratio(e['EMPLOYER_ADDRESS'][x], e['EMPLOYER_ADDRESS'][y]),\n fuzz.partial_ratio(e['EMPLOYER_ADDRESS'][x], e['EMPLOYER_ADDRESS'][y]) ])\n \n if(pd.isnull(e['EMPLOYER_CITY'][x]) or pd.isnull(e['EMPLOYER_CITY'][y])):\n city = 0\n w['city'] = 0\n else:\n city = max([fuzz.token_set_ratio(e['EMPLOYER_CITY'][x], e['EMPLOYER_CITY'][y]),\n fuzz.partial_ratio(e['EMPLOYER_CITY'][x], e['EMPLOYER_CITY'][y]) ])\n \n if(pd.isnull(e['EMPLOYER_STATE'][x]) or pd.isnull(e['EMPLOYER_STATE'][y])):\n state = 0\n w['state'] = 0\n else:\n state = fuzz.ratio(e['EMPLOYER_STATE'][x], e['EMPLOYER_STATE'][y])\n \n if(pd.isnull(e['EMPLOYER_POSTAL_CODE'][x]) or pd.isnull(e['EMPLOYER_POSTAL_CODE'][y])):\n zipcode = 0\n w['zipcode'] = 0\n else:\n zipcode = fuzz.ratio(e['EMPLOYER_POSTAL_CODE'][x], e['EMPLOYER_POSTAL_CODE'][y])\n \n if(pd.isnull(e['EMPLOYER_AREA_CODE'][x]) or pd.isnull(e['EMPLOYER_AREA_CODE'][y])):\n areacode = 0\n w['areacode'] = 0\n elif(e['EMPLOYER_AREA_CODE'][x] == e['EMPLOYER_AREA_CODE'][y]):\n areacode = 1\n else:\n areacode = 0\n \n return (employer_name*w['employer_name'] + name*w['name'] + \n subsidiaryOf*w['subsidiaryOf'] + alias*w['alias'] + formerly*w['formerly'] + other*w['other'] + \n nameWithSubsidiaryOf*w['nameWithSubsidiaryOf'] + nameWithAlias*w['nameWithAlias'] + \n nameWithFormerly*w['nameWithFormerly'] + nameWithOther*w['nameWithOther'] +\n naic*w['naic'] + industry*w['industry'] + \n address*w['address'] + city*w['city'] + \n 
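# compareEmployers above zeroes a field's weight whenever either record lacks
# that field, so missing data neither helps nor hurts the match; a minimal
# sketch of that weighted-mean-with-dropout idea (field names and weights are
# illustrative, not the script's full table):
def weighted_score(scores, weights):
    # scores: field -> 0..100 similarity, or None when the field is missing.
    w = dict(weights)
    for field, value in scores.items():
        if value is None:
            w[field] = 0  # drop missing fields from numerator and denominator
    total = sum(w.values())
    if total == 0:
        return 0.0
    return sum((scores[f] or 0) * w[f] for f in w) / total

print(weighted_score({"name": 90, "city": 100, "zipcode": None},
                     {"name": 50, "city": 15, "zipcode": 15}))
# -> (90*50 + 100*15) / 65 = 92.3...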
state*w['state'] + zipcode*w['zipcode'] + areacode*w['areacode']) / sum(w.values())\n\n\n#Create Distance Matrix (ints to save space)\neDist = np.zeros((numberOfEmployersToCluster, numberOfEmployersToCluster), dtype=int)\nprint(\"Calculating Employer distance matrix for \", str(len(eDist)),\" employers.\")\n\nfor x in range(0, eDist.shape[0]):\n for y in range(0, x):\n eDist[x, y] = compareEmployers(x,y)\n\nprint(\"Distance matrix calculated.\")\n\n#np.savetxt(\"eDist.csv\", eDist, delimiter=',')\n\n#Create unique employers based on similarity score above given threshold\ndef clusterEmployers(threshold):\n indices = pd.DataFrame({'entityId': np.empty(len(eDist)), 'numLinksStart': np.zeros(len(eDist))})\n \n links = [] \n for i,j in zip(*np.where(eDist >= threshold)):\n links.append((i,j))\n indices.loc[i, 'numLinksStart'] += 1\n indices.loc[j, 'numLinksStart'] += 1\n \n indices['numLinksRemaining'] = indices['numLinksStart'] \n entityCount = 0\n \n while indices['numLinksRemaining'].sum() > 0:\n entityCount += 1\n entityIndices = [indices.sort_values(by = 'numLinksRemaining', ascending = False).iloc[0].name]\n newIndicesToCheckNext = entityIndices #prime loop\n linksToRemove = []\n \n while len(newIndicesToCheckNext) > 0:\n indicesToCheck = newIndicesToCheckNext\n newIndicesToCheckNext = []\n for link in links:\n if link[0] in indicesToCheck:\n if link[1] not in newIndicesToCheckNext:\n newIndicesToCheckNext.append(link[1])\n entityIndices.append(link[1])\n linksToRemove.append(link)\n if link[1] in indicesToCheck:\n if link[0] not in newIndicesToCheckNext:\n newIndicesToCheckNext.append(link[0])\n entityIndices.append(link[0])\n\n #Remove linksToRemove from links\n links = [link for link in links if link not in linksToRemove]\n \n indices.loc[entityIndices, 'entityId'] = entityCount\n \n indices['numLinksRemaining'] = 0\n for i,j in links:\n indices.loc[i, 'numLinksRemaining'] += 1\n indices.loc[j, 'numLinksRemaining'] += 1\n \n for i in indices.index[indices['numLinksStart'] == 0].tolist():\n entityCount +=1\n indices.loc[i, 'entityId'] = entityCount\n \n return indices['entityId']\n\n\n\nuniqueEntities = {}\n\neEntity = e.copy()\n\nfor threshold in testThresholds:\n eEntity[('entityThreshold' + str(threshold))] = clusterEmployers(threshold)\n uniqueEntities[threshold] = len(eEntity[('entityThreshold' + str(threshold))].unique())\n\nprint(\"Number of Unique Entities at various thresholds: \", uniqueEntities)\n\n\n\n\n\n\n\n","repo_name":"darylkang/capstone-goldman-sachs","sub_path":"src/macro_cleaning/create-unique-employers-at-various-thresholds.py","file_name":"create-unique-employers-at-various-thresholds.py","file_ext":"py","file_size_in_byte":15653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43810510843","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nfrom utils import config, logger, env\nfrom librarian.librarian import Librarian\n\nlog = logger.get_log('KodiLibrarian')\nkodi = Librarian(config.hosts, update_while_playing=config.update_while_playing)\n\nif env.event == 'download':\n if env.calledBy == 'radarr':\n log.info('Radarr has downloaded \"{}\" {}. Initiating update process.'.format(env.movieTitle, env.moviePath))\n kodi.updateMovie(env.movieTitle, env.movieDirectory, env.moviePath)\n if config.clean_after_update:\n kodi.cleanLibrary('movies')\n\n elif env.calledBy == 'sonarr':\n log.info('Sonarr has downloaded \"{}\" {}. 
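# clusterEmployers above grows one entity at a time by walking the links with
# score >= threshold, which is connected-component labelling; a minimal
# union-find sketch of the same grouping over a toy lower-triangular score
# matrix (the values are invented for illustration):
import numpy as np

def cluster(dist, threshold):
    parent = list(range(len(dist)))

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path halving
            i = parent[i]
        return i

    for i, j in zip(*np.where(dist >= threshold)):
        parent[find(i)] = find(j)  # union the two components
    return [find(i) for i in range(len(dist))]

toy = np.array([[0, 0, 0],
                [95, 0, 0],
                [0, 0, 0]])  # only pair (1, 0) scores above threshold
print(cluster(toy, 90))  # rows 0 and 1 share a label, row 2 stands alone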
Initiating update process.'.format(env.showTitle, env.episodePath))\n kodi.updateTVShow(env.episodePath, env.showDirectory)\n if config.clean_after_update:\n kodi.cleanLibrary('tvshows')\n\n elif env.calledBy == 'lidarr':\n log.info('Lidarr not supported yet!! Aborting.')\n\nelif env.event == 'test':\n log.debug('Called with test environment from {}'.format(env.calledBy))\n sys.exit(0)\n\nelse:\n log.critical('Could not find any recognizable environment variables. Aborting.')\n","repo_name":"jsaddiction/KodiLibrarian","sub_path":"KodiLibrarian.py","file_name":"KodiLibrarian.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71001787930","text":"#!/usr/bin/env python3\n\nimport argparse, sys, glob, os\nfrom intervaltree import Interval, IntervalTree\nfrom copy import deepcopy\nimport multiprocessing as mp\nfrom termcolor import colored\nfrom collections import defaultdict, namedtuple\n\nsample_names = {'Cydno_maternal':'CF', 'Cydno_paternal':'CM', 'Melpomene_maternal':'MF', 'Melpomene_paternal':'MM'}\npacbio_names = {'Cydno_females':'CF', 'Cydno_males':'CM', 'Melpomene_females':'MF', 'Melpomene_males':'MM'}\n\nchromosome_lengths = [0, 17206585, 9045316, 10541528, 9662098, 9908586, 14054175, 14308859, 9320449, 8708747, 17965481, 11759272, 16327298, 18127314, 9174305, 10235750, 10083215, 14773299, 16803890, 16399344, 14871695, 13359691]\n\nclass Inversion:\n def __init__(self, f):\n self.sample = f['Sample']\n self.species, self.sex = f['Sample'].split('_')\n self.species = self.species.title()\n self.chromosome = int(f['Chromosome1'])\n self.start = int(f['ChromPosition1'])\n self.end = int(f['ChromPosition2'])\n self.scaffold = f['Scaffold1']\n self.reads = int(f['Reads'])\n self.length = int(f['Length'])\n self.remainseq = int(f['RemainSeq'])\n self.initiallengths = f['InitialLengths'] if 'InitialLengths' in f else ''\n self.taillengths = f['TailLengths'] if 'TailLengths' in f else ''\n self.gaps = set()\n self.recs = set()\n self.hits = IntervalTree()\n self._status = \"\"\n \n def __repr__(self):\n return '{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(self.species, self.sex, self.chromosome, self.start, self.end, self.scaffold, self.reads)\n \n @property\n def status(self):\n if self._status:\n return self._status\n\n status = \"\"\n for rec in self.recs:\n if rec.data[1] == self.species and rec.begin >= self.start and rec.end <= self.end:\n status = \"Contains recombination\"\n \n if not status:\n same, other, output = get_spanning_hits(IntervalTree(hits[self.chromosome].search(self.start, self.end)), Interval(self.start, self.end, self))\n if same:\n status += \"Spanning hit\"\n \n if not status:\n if self.length < args.ldminsize:\n status += \"Shorter than LD threshold\"\n\n if not status:\n status = \"OK\"\n \n self._status = status\n return status\n\nclass Hit:\n def __init__(self, samplename, tstart, tend, hstart, hend, thitlen, hhitlen, pctid, tscflen, hscflen, tcov, hcov, tdir, hdir, tname, hname):\n self.sample = samplename\n self.species, self.sex = self.sample.split('_')\n self.sex = \"females\" if self.sex == \"maternal\" else \"males\"\n self.chromosome = int(hname[5:7])\n self.tstart = tstart\n self.tend = tend\n \n self.hstart, self.hend = sorted([hstart, hend])\n \n self.thitlen = thitlen\n self.hhitlen = hhitlen\n self.pctid = pctid\n self.tscflen = tscflen\n self.hscflen = hscflen\n self.tcov = tcov\n self.hcov = hcov\n \n self.tdir = tdir\n self.hdir = hdir\n self.dir = 
tdir * hdir\n \n self.tname = tname\n self.hname = hname\n\n\nclass OutputLine:\n def __init__(self, groupid, invid, groupinvid, hittype, species, sex, chromosome, start, end, length, reads, pctid, dir, scaffold):\n self.groupid = groupid\n self.invid = invid\n self.groupinvid = groupinvid\n self.hittype = hittype\n self.species = species\n self.sex = sex\n self.chromosome = chromosome\n self.start = start\n self.end = end\n self.length = length\n self.reads = reads\n self.pctid = pctid\n self.dir = dir\n self.label = scaffold\n \n def __eq__(self, other):\n return self.hittype == other.hittype and self.species == other.species and self.sex == other.sex and self.chromosome == other.chromosome and self.start == other.start and self.end == other.end\n\n def __repr__(self):\n return '{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\tNA'.format(self.groupid, self.invid, self.groupinvid, self.hittype, self.species, self.sex, self.chromosome, self.start, self.end, self.length, self.reads, self.pctid, self.dir, self.label)\n\ndef spanning(tree, begin, end):\n return tree.search(begin).intersection(tree.search(end))\n\ndef overlap_edges(tree, begin, end):\n return tree.search(begin, end) - tree.search(begin, end, strict=True)\n\ndef get_map_regions(mapfilename):\n map_trees = defaultdict(lambda: defaultdict(IntervalTree))\n\n with open(mapfilename,'r') as mapfile:\n for line in mapfile:\n if line.startswith(\"Species\"):\n continue\n \n species, chromosome, start, end, cmfraction, cm = line.rstrip().split('\\t')\n if species == \"Hybrid\":\n continue\n chromosome, start, end = int(chromosome), int(start), int(end)\n cmfraction, cm = float(cmfraction), float(cm)\n if cmfraction == 0:\n continue\n map_trees[species][chromosome][start:end] = (cm, species)\n \n return map_trees\n\ndef make_gap_trees(mapfilename):\n map_trees = get_map_regions(mapfilename)\n\n gap_trees = defaultdict(IntervalTree)\n for species in sorted(map_trees):\n for chromosome in sorted(map_trees[species]):\n cmlist = sorted(map_trees[species][chromosome])\n\n gap_trees[chromosome][1:cmlist[0].end] = species # First gap\n \n for i in range(len(cmlist)-1):\n gapstart = cmlist[i].begin\n gapend = cmlist[i+1].end\n gaplength = gapend - gapstart + 1\n gap_trees[chromosome][gapstart:gapend] = species\n\n gap_trees[chromosome][(cmlist[-1].begin-1):chromosome_lengths[chromosome]] = species # Last gap\n \n gap_trees[chromosome][(cmlist[-1][0]-1):chromosome_lengths[chromosome]] = species # Last gap\n\n return gap_trees, map_trees\n\ndef write_gaps(gap_trees, outputstub):\n with open(outputstub + \".gaps.tsv\", 'w') as outfile:\n print(\"Species\\tChromosome\\tGapStart\\tGapEnd\\tGapLength\", file=outfile)\n for chromosome in gap_trees:\n for iv in sorted(gap_trees[chromosome]):\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(iv.data, chromosome, iv.begin, iv.end, iv.end-iv.begin), file=outfile)\n\n\ndef make_inversion_trees(inversionsfilename):\n total_inversions = valid_inversions = 0\n inv_trees = defaultdict(IntervalTree)\n samples = defaultdict(int)\n for i, line in enumerate(open(inversionsfilename)):\n if i == 0:\n hf = line.rstrip().split('\\t')\n continue\n total_inversions += 1\n f = dict(zip(hf, line.rstrip().split('\\t')))\n chr1, chr2, cp1, cp2 = int(f['Chromosome1']), int(f['Chromosome2']), int(f['ChromPosition1']), int(f['ChromPosition2'])\n if chr1 != chr2 or chr1 == 0 or f['Scaffold1'] != f['Scaffold2'] or cp2 <= cp1:\n continue\n valid_inversions += 1\n samples[f['Sample']] += 1\n inv_trees[chr1][cp1:cp2] = 
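# spanning() above intersects two point queries, keeping only intervals that
# contain BOTH endpoints of the region; a minimal runnable sketch (this
# mirrors the script's intervaltree 2.x search() API; note that 3.x renamed
# point queries to at()):
from intervaltree import IntervalTree

tree = IntervalTree()
tree[0:50] = "spans the region"
tree[0:15] = "crosses only the left edge"

begin, end = 10, 40
spanning_hits = tree.search(begin) & tree.search(end)
print([iv.data for iv in spanning_hits])  # -> ['spans the region']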
Inversion(f)\n \n return inv_trees\n\ndef make_candidate_groups(inv_trees, gap_trees, tentative, map_trees, hits):\n\n cand_groups = defaultdict(IntervalTree)\n ok_trees = defaultdict(IntervalTree)\n reject_trees = defaultdict(IntervalTree)\n statuses = defaultdict(lambda: defaultdict(int))\n \n for chromosome in sorted(inv_trees):\n for candidate in sorted(inv_trees[chromosome]):\n candidate.data.gaps = gap_trees[chromosome].search(candidate.begin, candidate.end)\n\n map_overlaps = set()\n for species in map_trees:\n map_overlaps |= map_trees[species][chromosome].search(candidate.begin, candidate.end)\n candidate.data.recs = map_overlaps\n\n if candidate.data.status == \"OK\":\n ok_trees[chromosome].add(candidate)\n else:\n reject_trees[chromosome].add(candidate)\n statuses[candidate.data.status][candidate.data.sample] += 1\n\n ok_tentative = IntervalTree()\n for candidate in ok_trees[chromosome]:\n tenpclen = candidate.length() * 0.1\n for ct in tentative[chromosome].search(candidate.begin, candidate.end):\n reject = False\n for ctok in ok_trees[chromosome].search(candidate.begin, candidate.end):\n if ct.begin == ctok.begin and ct.end == ctok.end and ct.data.species == ctok.data.species and ct.data.sex == ctok.data.sex:\n reject = True\n continue\n if not reject and \\\n abs(ct.begin-candidate.begin) < tenpclen and abs(ct.end-candidate.end) < tenpclen and \\\n ct.data.status == \"OK\":\n if ct.data.status == \"OK\":\n ct.data._status = \"Tentative\"\n ok_tentative.add(ct)\n\n for okt in ok_tentative:\n ok_trees[chromosome].add(okt)\n \n for ok in ok_trees[chromosome]:\n statuses[ok.data.status][ok.data.sample] += 1\n\n cand_groups[chromosome] = ok_trees[chromosome].copy()\n cand_groups[chromosome].merge_overlaps()\n\n return cand_groups, ok_trees, reject_trees\n\ndef load_transitions(transitionfile):\n hmel2 = defaultdict(IntervalTree)\n hmel2o = defaultdict(int)\n chrom_to_hmel2 = defaultdict(IntervalTree)\n \n for line in open(transitionfile):\n if line.startswith(\"Chromosome\"):\n continue\n f = line.rstrip().split('\\t')\n chromosome, chromstart, chromend, pool, pooltype, poolid, hmel2scaffold, hmel2start, hmel2end, orientation, hmel2refined, length, hmel2ordered, hmel2ostart, hmel2oend = \\\n int(f[0]), int(f[1]), int(f[2]), int(f[3]), f[4], int(f[5]), f[6], int(f[7]), int(f[8]), f[9], f[10], int(f[11]), f[12], int(f[13]), int(f[14])\n\n if chromosome == 0:\n continue\n \n if hmel2ordered not in hmel2o:\n hmel2o[hmel2ordered] = chromstart\n \n hmel2[hmel2scaffold][hmel2start:hmel2end] = (chromosome, chromstart, chromend, orientation)\n chrom_to_hmel2[chromosome][chromstart:chromend] = (hmel2scaffold, hmel2start, hmel2end, orientation)\n\n return hmel2, hmel2o, chrom_to_hmel2\n\ndef load_trio_alignments(args, hmel2o):\n \n hits = defaultdict(IntervalTree)\n coordsnames = glob.glob(args.coordsglob)\n for coordsname in coordsnames:\n print(\"Loading \" + coordsname)\n samplename = coordsname.split(os.sep)[-1].split('.')[0]\n for line in open(coordsname):\n if line.endswith('o\\n'):\n f = line.rstrip().split('\\t')\n tstart, tend, hstart, hend = int(f[0]), int(f[1]), int(f[2]), int(f[3])\n thitlen, hhitlen, pctid = int(f[4]), int(f[5]), float(f[6])\n tscflen, hscflen, tcov, hcov = int(f[7]), int(f[8]), float(f[9]), float(f[10])\n tdir, hdir, tname, hname = int(f[11]), int(f[12]), f[13], f[14]\n\n if thitlen < args.minhitlength:\n continue\n \n tname = tname[1:] # Remove erroneous leading > from FASTA name\n locname = samplename + '_' + tname\n chromosome = int(hname[5:7])\n hstart = 
hstart + hmel2o[hname] - 1\n hend = hend + hmel2o[hname] - 1\n hstart, hend = sorted([hstart, hend])\n hits[chromosome].addi(hstart, hend, Hit(samplename, tstart, tend, hstart, hend, thitlen, hhitlen, pctid,\n tscflen, hscflen, tcov, hcov, tdir, hdir, tname, hname))\n\n return hits\n\n\n\ndef get_gff_chrom_position(scaffold, start, end, hmel2):\n hmel2_overlaps = hmel2[scaffold].search(start, end)\n if len(hmel2_overlaps) != 1:\n return None, None, None\n\n hmel2ov = list(hmel2_overlaps)[0]\n hmel2scfstart = hmel2ov.begin\n hmel2chrom, hmel2chromstart, hmel2chromend, hmel2orient = hmel2ov.data\n if hmel2orient == '+':\n chromstart = start - hmel2scfstart + hmel2chromstart - 1\n chromend = end - hmel2scfstart + hmel2chromstart - 1\n else:\n chromstart = hmel2chromend - (start - hmel2scfstart)\n chromend = hmel2chromend - (end - hmel2scfstart)\n chromstart, chromend = sorted([chromstart, chromend])\n \n return hmel2chrom, chromstart, chromend\n\ndef get_hmel2_position(chromosome, start, end, chrom_to_hmel2):\n\n hmel2_overlaps = chrom_to_hmel2[chromosome].search(start, end)\n if len(hmel2_overlaps) != 1:\n return None, None, None\n\n hmel2ov = list(hmel2_overlaps)[0]\n chromstart = hmel2ov.begin\n hmel2scaffold, hmel2scfstart, hmel2scfend, hmel2orient = hmel2ov.data\n if hmel2orient == '+':\n hmel2start = start - chromstart + hmel2scfstart - 1\n hmel2end = end - chromstart + hmel2scfstart - 1\n else:\n hmel2start = hmel2scfend - (start - chromstart)\n hmel2end = hmel2scfend - (end - chromstart)\n\n hmel2start, hmel2end = sorted([hmel2start, hmel2end])\n \n return hmel2scaffold, hmel2start, hmel2end\n\n\ndef load_gff(gfffile, hmel2=None):\n\n gff_trees = defaultdict(IntervalTree)\n for line in open(gfffile):\n if line.startswith('#'):\n continue\n scaffold, origin, featuretype, start, end, score, strand, frame, attributes = line.rstrip().split('\\t')\n chromosome = int(scaffold[5:7])\n if chromosome == 0:\n continue\n\n gffchrom, gffstart, gffend = chromosome, int(start), int(end)\n if hmel2 is not None:\n gffchrom, gffstart, gffend = get_gff_chrom_position(scaffold, gffstart, gffend, hmel2)\n \n if gffchrom is None:\n continue\n \n gff_trees[gffchrom][gffstart:gffend] = (featuretype, attributes)\n \n return gff_trees\n\n\ndef load_agp(agpfile):\n agp_trees = defaultdict(IntervalTree)\n for line in open(agpfile):\n f = line.rstrip().split('\\t')\n chromosome, start, end = int(f[0]), int(f[1]), int(f[2])\n data = f[5] if f[4] == 'W' else 'Gap'\n agp_trees[chromosome][start:end+1] = data\n \n return(agp_trees)\n\n\ndef get_spanning_hits(hits, inv_iv, tsv_hits=None, group_details=None):\n spanningtree = spanning(hits, inv_iv.begin, inv_iv.end)\n sample_hits = defaultdict(list)\n \n for hit in spanningtree:\n leftlen = inv_iv.begin - hit.data.hstart + 1\n rightlen = hit.data.hend - inv_iv.end + 1\n leftlenratio = leftlen / inv_iv.length()\n rightlenratio = rightlen / inv_iv.length()\n \n if leftlenratio < 0.5 or rightlenratio < 0.5:\n continue\n\n sample_hits[hit.data.sample].append((leftlen, rightlen, leftlenratio, rightlenratio))\n\n if tsv_hits and group_details:\n tsv_hits.append(OutputLine(group_details[0], group_details[1], group_details[2], \"Spanning\", hit.data.species, hit.data.sex, group_details[3], hit.data.hstart, hit.data.hend, hit.data.hhitlen, 'NA', hit.data.pctid, hit.data.dir, hit.data.tname))\n \n same = other = 0\n for s in sample_hits:\n if len(sample_hits[s]) == 0:\n continue\n if inv_iv.data.species in s:\n same += 1\n else:\n other += 1\n return same, other\n\ndef 
add_edge_hits(edgetree, sample_hits, point, length, inverted_scaffolds):\n for hit in edgetree:\n leftlen = point - hit.data.hstart + 1\n rightlen = hit.data.hend - point + 1\n leftlenratio = leftlen / length\n rightlenratio = rightlen / length\n if hit.data.tname not in inverted_scaffolds[hit.data.sample]:\n sample_hits[hit.data.sample].append((leftlen, rightlen, leftlenratio, rightlenratio))\n\ndef write_edge_tsv(edgetree, begin_tree, end_tree, tsv_hits, group_details, inverted_scaffolds):\n scaffolds = {}\n for iv in begin_tree:\n scaffolds[iv.data.tname] = 1\n for iv in end_tree:\n scaffolds[iv.data.tname] = 1\n\n\n for hit in edgetree:\n if hit.data.tname in scaffolds and hit.data.tname not in inverted_scaffolds[hit.data.sample]:\n tsv_hits.append(OutputLine(group_details[0], group_details[1], group_details[2], \"Edge\", hit.data.species, hit.data.sex, group_details[3], hit.data.hstart, hit.data.hend, hit.data.hhitlen, 'NA', hit.data.pctid, hit.data.dir, hit.data.tname))\n \n\ndef get_edge_hits(hits, inv_iv, tsv_hits, group_details, inverted_scaffolds):\n\n begintree = hits.search(inv_iv.begin)\n endtree = hits.search(inv_iv.end)\n begin_edgetree = begintree - endtree\n end_edgetree = endtree - begintree\n\n sample_hits = defaultdict(list)\n\n add_edge_hits(begin_edgetree, sample_hits, inv_iv.begin, inv_iv.length(), inverted_scaffolds)\n add_edge_hits(end_edgetree, sample_hits, inv_iv.end, inv_iv.length(), inverted_scaffolds)\n \n write_edge_tsv(hits.search(inv_iv.begin, inv_iv.end), begin_edgetree, end_edgetree, tsv_hits, group_details, inverted_scaffolds)\n \n return\n\n\ndef get_outer_hits(outer_hits, scaffold, sample, hitdir):\n scf_dir_tree = IntervalTree()\n for outer_hit in outer_hits:\n if scaffold == outer_hit.data.tname and sample == outer_hit.data.sample and hitdir != outer_hit.data.dir:\n scf_dir_tree.add(outer_hit)\n return scf_dir_tree\n\ndef get_inverted_hits(hits, inv_iv, tsv_hits, group_details):\n\n hits_by_scaffold=defaultdict(lambda: defaultdict(lambda: defaultdict(IntervalTree)))\n for hit in hits.search(inv_iv.begin, inv_iv.end):\n hits_by_scaffold[hit.data.tname][hit.data.sample][hit.data.dir].add(hit)\n \n left_outer_hits = hits.search(inv_iv.begin - args.extendregion, inv_iv.begin)\n right_outer_hits = hits.search(inv_iv.end, inv_iv.end + args.extendregion)\n\n sample_hits = defaultdict(list)\n\n inverted_scaffolds = defaultdict(lambda: defaultdict(int))\n for scaffold in sorted(hits_by_scaffold):\n for sample in sorted(hits_by_scaffold[scaffold]):\n for hitdir in sorted(hits_by_scaffold[scaffold][sample]):\n withinrange = hits_by_scaffold[scaffold][sample][hitdir].range()\n \n scf_dir_left = get_outer_hits(left_outer_hits, scaffold, sample, hitdir)\n scf_dir_right = get_outer_hits(right_outer_hits, scaffold, sample, hitdir)\n\n num_left, num_right = len(scf_dir_left), len(scf_dir_right)\n if num_left + num_right > 0:\n sample_hits[sample].append((withinrange.begin, withinrange.end, len(scf_dir_left), len(scf_dir_right)))\n \n for hit in [h for tree in [hits_by_scaffold[scaffold][sample][hitdir], scf_dir_left, scf_dir_right] for h in tree]:\n tsv_hits.append(OutputLine(group_details[0], group_details[1], group_details[2], \"Inverted\", hit.data.species, hit.data.sex, group_details[3], hit.data.hstart, hit.data.hend, hit.data.hhitlen, 'NA', hit.data.pctid, hit.data.dir, hit.data.tname))\n inverted_scaffolds[sample][hit.data.tname] = 1\n \n\n same = other = 0\n for s in sample_hits:\n if len(sample_hits[s]) == 0:\n continue\n if inv_iv.data.species in s:\n same 
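# get_edge_hits above subtracts the two point-query sets, leaving alignments
# that cross exactly one boundary of the candidate inversion; a minimal sketch
# with invented coordinates (same intervaltree 2.x search() API as the script):
from intervaltree import IntervalTree

hits = IntervalTree()
hits[0:120] = "spans both ends"
hits[0:60] = "crosses only the start"
hits[90:200] = "crosses only the end"

begin, end = 50, 100
begintree = hits.search(begin)
endtree = hits.search(end)
print(sorted(iv.data for iv in begintree - endtree))  # start-only edge hits
print(sorted(iv.data for iv in endtree - begintree))  # end-only edge hits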
+= 1\n else:\n other += 1\n return same, other, inverted_scaffolds\n\n\ndef summarise_candidate(inv_iv, statuses, spanning_same, spanning_other, inverted_same, inverted_other):\n status = 'None'\n if spanning_same and not inverted_same and not inverted_other:\n status = 'Reject'\n if not spanning_same and inverted_same and not inverted_other:\n status = 'Accept'\n if not spanning_same and not spanning_other and inverted_same and inverted_other:\n status = 'Misassembly'\n statuses[status][pacbio_names[inv_iv.data.species + '_' + inv_iv.data.sex]] += 1\n return status\n\n\ndef get_group_samples(samples, group_num, inv_num, in_group_num, chromosome, candi, inv_iv):\n\n for sn in sorted(sample_names):\n shortsn = sample_names[sn]\n if shortsn == pacbio_names[inv_iv.data.species + '_' + inv_iv.data.sex]:\n samples += shortsn + '_'\n return samples\n\ndef get_status(group_samples, trio_statuses):\n pbhoney_status = 'Single'\n if ('CF' in group_samples or 'CM' in group_samples) and ('MF' in group_samples or 'MM' in group_samples):\n pbhoney_status = 'Misassembly'\n elif 'CM' in group_samples and 'CF' in group_samples:\n pbhoney_status = 'Cydno'\n elif 'MM' in group_samples and 'MF' in group_samples:\n pbhoney_status = 'Melpomene'\n\n status = '????'\n if pbhoney_status == 'Single':\n trio_status = list(trio_statuses)[0]\n if trio_status == 'Misassembly':\n status = 'Misassembly_Trio'\n elif trio_status == 'None':\n if 'CF' in group_samples or 'CM' in group_samples:\n status = 'Cydno_Single'\n else:\n status = 'Melpomene_Single'\n elif trio_status == 'Accept':\n if 'CF' in group_samples or 'CM' in group_samples:\n status = 'Cydno_Both'\n elif 'MF' in group_samples or 'MM' in group_samples:\n status = 'Melpomene_Both'\n else:\n status = '????'\n else:\n status = '????'\n elif pbhoney_status == 'Misassembly':\n if 'Misassembly' in trio_statuses:\n status = 'Misassembly_Both'\n else:\n status = 'Misassembly_PBHoney'\n else: # pbhoney status is Cydno or Melpomene\n if 'Misassembly' in trio_statuses:\n status = 'Misassembly_Trio'\n elif 'Accept' in trio_statuses:\n status = pbhoney_status + '_Both'\n elif len(trio_statuses) == 1 and 'None' in trio_statuses:\n status = pbhoney_status + '_PBHoney'\n else:\n status = '????'\n return status\n\n\ndef write_tsv(group, group_num, group_status, chromosome, tsv_inversions, tsv_hits, plottsv, gff_trees, chrom_to_hmel2, agp_trees, repeats):\n \n print('{}\\t0\\t0\\tGroup\\tAll\\t\\t{}\\t{}\\t{}\\t{}\\tNA\\tNA\\tNA\\tAll\\t{}\\t{}'.format(group_num, chromosome, group.begin, group.end, group.length(), group_status, group_status), file=plottsv)\n\n for inv in tsv_inversions:\n print(inv + \"\\t\" + group_status, file=plottsv)\n \n prevhit = None\n for hit in sorted(tsv_hits, key = lambda x: (x.hittype, x.species, x.sex, x.chromosome, x.start)):\n if prevhit and hit != prevhit:\n print(repr(hit) + \"\\t\" + group_status, file=plottsv)\n prevhit = hit\n \n for gff_feature in sorted(gff_trees[chromosome].search(group.begin, group.end)):\n print('{}\\t0\\t0\\tFeature\\tAll\\t\\t{}\\t{}\\t{}\\t{}\\tNA\\tNA\\tNA\\t{}\\t{}\\t{}'.format(group_num, chromosome, gff_feature.begin, gff_feature.end, gff_feature.length(), gff_feature.data[0], gff_feature.data[1], group_status), file=plottsv)\n\n\n for agp_part in sorted(agp_trees[chromosome].search(group.begin-group.length()/2, group.end+group.length()/2)):\n print('{}\\t0\\t0\\tHmel2Part\\tAll\\t\\t{}\\t{}\\t{}\\t{}\\tNA\\tNA\\tNA\\t{}\\tNA\\t{}'.format(group_num, chromosome, agp_part.begin, agp_part.end, agp_part.length(), 
agp_part.data, group_status), file=plottsv)\n \n for repeat in sorted(repeats[chromosome].search(group.begin-group.length()/2, group.end+group.length()/2)):\n print('{}\\t0\\t0\\tRepeat\\tAll\\t\\t{}\\t{}\\t{}\\t{}\\tNA\\tNA\\tNA\\t{}\\tNA\\t{}'.format(group_num, chromosome, repeat.begin, repeat.end, repeat.length(), repeat.data, group_status), file=plottsv)\n\n\ndef get_args():\n parser=argparse.ArgumentParser(description='''Find and classify candidate inversions\n -m map\n -i inversions\n -c coordsglob\n -o output\n -s transitions\n -l minhitlength\n -t tentativeinversions\n -g gff\n -a agp\n -r repeats\n -d ldminsize\n ''')\n \n parser.add_argument('-m', '--map', type=str, required=True)\n parser.add_argument('-i', '--inversions', type=str)\n parser.add_argument('-o', '--outputstub', type=str, required=True)\n parser.add_argument('-c', '--coordsglob', type=str)\n parser.add_argument('-s', '--transitions', type=str)\n parser.add_argument('-l', '--minhitlength', type=int, default=1000)\n parser.add_argument('-e', '--extendregion', type=int, default=10000)\n parser.add_argument('-t', '--tentativeinversions', type=str)\n parser.add_argument('-g', '--gff', type=str)\n parser.add_argument('-a', '--agp', type=str)\n parser.add_argument('-r', '--repeats', type=str)\n parser.add_argument('-d', '--ldminsize', type=int, default=1000)\n \n return parser.parse_args()\n \nargs = get_args()\n\nprint(\"Making and writing gaps\")\n\ngap_trees, map_trees = make_gap_trees(args.map)\nwrite_gaps(gap_trees, args.outputstub)\n\nif not args.inversions:\n sys.exit()\n\nprint(\"Loading PBHoney inversions\")\ninv_trees = make_inversion_trees(args.inversions)\n\nif args.tentativeinversions:\n print(\"Loading tentative inversions\")\n tentative = make_inversion_trees(args.tentativeinversions)\nelse:\n tentative = defaultdict(IntervalTree)\n\nif not args.transitions and (args.gff or args.coordsglob):\n print(\"Need Hmel2 transition file (-s)\")\n sys.exit()\n\nhmel2, hmel2o, chrom_to_hmel2 = load_transitions(args.transitions)\n\nif args.gff:\n print(\"Loading transcriptome\")\n gff_trees = load_gff(args.gff, hmel2)\nelse:\n gff_trees = defaultdict(IntervalTree)\n\nif args.agp:\n print(\"Loading AGP file\")\n agp_trees = load_agp(args.agp)\nelse:\n agp_trees = defaultdict(IntervalTree)\n\n\nif args.repeats:\n print(\"Loading repeats\")\n repeats = load_gff(args.repeats)\nelse:\n repeats = defaultdict(IntervalTree)\n\nif not args.coordsglob:\n sys.exit()\n\nprint(\"Loading trio alignments\")\nhits = load_trio_alignments(args, hmel2o)\n\nprint(\"Making candidate groups\")\ncand_groups, ok_trees, reject_trees = make_candidate_groups(inv_trees, gap_trees, tentative, map_trees, hits)\n\ngroup_num = inv_num = 0\nsummary = defaultdict(lambda: defaultdict(int))\n\nplottsv = open(args.outputstub + \".details.tsv\", 'w')\nprint(\"GroupID\\tInvID\\tGroupInvID\\tHitType\\tSpecies\\tSex\\tChromosome\\tStart\\tEnd\\tLength\\tReads\\tPctId\\tDir\\tLabel\\tStatus\\tGroupStatus\", file=plottsv)\n\nfor chromosome in sorted(cand_groups):\n for group in sorted(cand_groups[chromosome]):\n group_num += 1\n in_group_num = 0\n group_samples = ''\n tsv_hits = []\n tsv_inversions = []\n trio_statuses = defaultdict(lambda: defaultdict(int))\n\n group_inversions = sorted(ok_trees[chromosome].search(group.begin, group.end, strict=True), key=lambda x:(x.data.species, x.data.sex, x.begin))\n\n for inv_iv in group_inversions:\n inv_num += 1\n in_group_num += 1\n\n group_samples = get_group_samples(group_samples, group_num, inv_num, in_group_num, 
chromosome, group, inv_iv)\n group_details = (group_num, inv_num, in_group_num, chromosome)\n\n spanning_same, spanning_other = get_spanning_hits(hits[chromosome], inv_iv, tsv_hits, group_details)\n inverted_same, inverted_other, inverted_scaffolds = get_inverted_hits(hits[chromosome], inv_iv, tsv_hits, group_details)\n get_edge_hits(hits[chromosome], inv_iv, tsv_hits, group_details, inverted_scaffolds)\n\n inv_status = summarise_candidate(inv_iv, trio_statuses, spanning_same, spanning_other, inverted_same, inverted_other)\n \n tsv_inversions.append('{0}\\t{1}\\t{2}\\tPBHoney\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\\t{10}\\t{11}\\t{3}_{4} Candidate\\t{12}'.format(group_num, inv_num, in_group_num, inv_iv.data.species, inv_iv.data.sex, chromosome, inv_iv.begin, inv_iv.end, inv_iv.length(), inv_iv.data.reads, inv_iv.data.initiallengths, inv_iv.data.taillengths, inv_status))\n\n group_status = get_status(group_samples, trio_statuses)\n write_tsv(group, group_num, group_status, chromosome, tsv_inversions, tsv_hits, plottsv, gff_trees, chrom_to_hmel2, agp_trees, repeats)\n\n summary[group_status][group_samples[:-1]] += 1\n\n for reject in sorted(reject_trees[chromosome]):\n print('0\\t0\\t0\\tReject\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\tNA\\t{}\\tReject'.format(reject.data.species, reject.data.sex, chromosome, reject.begin, reject.end, reject.length(), reject.data.reads, reject.data.initiallengths, reject.data.taillengths, reject.data.status), file=plottsv)\n\nplottsv.close()","repo_name":"johnomics/heliconius_speciation_inversions","sub_path":"process_inversions.py","file_name":"process_inversions.py","file_ext":"py","file_size_in_byte":28637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70566746331","text":"import os, datetime, json\nfrom flask import Flask, flash, request, redirect, url_for, render_template, make_response\n\n\n# UPLOAD_FOLDER = \"D:\\hempreport\\static\\photos\"\nUPLOAD_FOLDER = \"static/photos/\"\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n@app.route('/')\n@app.route('/home')\ndef index():\n try:\n\n file = open(\"plants.json\") # main dict\n plants = json.load(file)\n sorted_keys_list = sorted(plants, reverse=True)\n sorted_plants = {}\n file.close()\n for k in sorted_keys_list:\n sorted_plants[k] = plants[k]\n \n return render_template('index.html', plants=sorted_plants)\n \n except:\n return render_template('index.html', plants=None)\n\n\n@app.route(\"/new_plant\", methods=['POST', 'GET'])\ndef new_plant():\n if request.method == 'POST':\n date = datetime.datetime.now().strftime('%d/%m/%Y')\n name = request.form[\"name\"]\n description = request.form[\"description\"]\n photo = request.files[\"photo\"]\n photo.save(os.path.join(app.config['UPLOAD_FOLDER'], photo.filename))\n photo_name = photo.filename\n \n try:\n # dict with plants main info. for preview on main page\n file = open(\"plants.json\") # dict inside\n plants = json.load(file)\n counter_main = int(max(plants))\n plants[counter_main + 1] = [name, description, photo_name, date]\n file.close()\n # creates a file into which data will be written further\n bush_name = str(counter_main + 1) + \".json\"\n # create an empty dict with all plant data, which will be filled further\n plant_data = {}\n with open(bush_name, \"w\") as f:\n json.dump(plant_data, f)\n \n except FileNotFoundError:\n # create list with plants main info. 
for preview on main page\n plants = {}\n file = open(\"plants.json\", 'w') # create file\n plants[1] = [name, description, photo_name, date] # first writing\n file.close()\n # create a first file of the fisrt bush, into which data will be written further\n bush_name = \"1.json\"\n # create an empty dict with all first plant data, which will be filled further\n plant_data = {}\n #plant_data = {1:[\"03.09.2021\", \"start growing\", \"28\", \"55\"], 2:[\"04.09.2021\", \"normal mode\", \"30\", \"60\"]}\n with open(bush_name, \"w\") as f:\n json.dump(plant_data, f)\n\n file = open(\"plants.json\", 'w')\n json.dump(plants, file)\n file.close()\n\n return redirect('/')\n \n else:\n return render_template('new_plant.html')\n \n \n@app.route('/view/')\ndef view(id):\n try:\n file = open(\"plants.json\")\n plants = json.load(file) # dict\n current_bush = plants[str(id)]\n file.close()\n \n bush_name = str(id) + \".json\"\n sorted_plant_data = {}\n with open(bush_name) as f:\n plant_data = json.load(f) # dict\n sorted_keys_list = sorted(plant_data, reverse=True)\n for k in sorted_keys_list:\n sorted_plant_data[k] = plant_data[k]\n bush_id = id \n return render_template('view.html', bush=current_bush, report=sorted_plant_data, bush_id=bush_id)\n \n except:\n pass\n \n \n@app.route('/add_post/', methods=['POST', 'GET'])\ndef add_post(id):\n if request.method == 'POST':\n date = datetime.datetime.now().strftime('%d/%m/%Y')\n description = request.form[\"description\"]\n temp = request.form[\"temp\"]\n humidity = request.form[\"humidity\"]\n #photo = request.files[\"photo\"]\n #photo.save(os.path.join(app.config['UPLOAD_FOLDER'], photo.filename))\n #photo_name = photo.filename\n\n bush_name = str(id) + \".json\"\n \n with open(bush_name) as f:\n plant_data = json.load(f)\n \n try:\n counter = int(max(plant_data))\n except ValueError:\n counter = 0\n \n plant_data[counter + 1] = [date, description, temp, humidity]\n \n with open(bush_name, \"w\") as f:\n json.dump(plant_data, f)\n \n return redirect('/view/' + str(id))\n \n else:\n return render_template('add.html') \n\n\n \nif __name__ == '__main__':\n app.run(debug=True)\n\n \n \n","repo_name":"Uarsa/hempreport","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28265614248","text":"import json\r\nimport psycopg2\r\nfrom flask import Flask, jsonify, render_template, request, Response\r\n\r\napp = Flask(__name__)\r\n\r\ncon = psycopg2.connect(database='Test',\r\n user='postgres',\r\n host='localhost',\r\n password='basket',\r\n port='5432')\r\n\r\ncur = con.cursor()\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('test.html')\r\n\r\n#Envoie de donnés à la BDD (ici un nom à enregistrer)\r\n@app.route('/sendData', methods=['POST'])\r\ndef sendData():\r\n a = str(request.form['test']) \r\n cur.execute(\"\"\"INSERT INTO \"Test\"(\"Nom\") VALUES (%s);\"\"\", (a,))\r\n con.commit()\r\n return jsonify(a)\r\n\r\n#Récupération des noms enregistrés dans la BDD \r\n@app.route('/getData', methods=['POST'])\r\ndef getData():\r\n cur.execute(\"\"\"SELECT * FROM \"Test\"; \"\"\")\r\n data = cur.fetchall()\r\n liste = []\r\n #On transforme la liste de tuple en liste d'objet python, qu'on passera en Json pour le coté client \r\n for p in data:\r\n print(p)\r\n x = {\r\n \"id\" : p[0],\r\n \"name\" : p[1]\r\n }\r\n liste.append(x)\r\n return Response(json.dumps(liste))\r\n\r\n\r\nif __name__ == 
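# new_plant and add_post above derive the next id with int(max(plants)), but
# keys read back by json.load are strings, so max() compares lexicographically
# and "9" outranks "10" once ids reach two digits; a small repro plus a fix
# (the default=0 also covers the empty-dict case handled by try/except above):
import json

plants = json.loads(json.dumps({i: ["entry"] for i in range(1, 11)}))
print(max(plants))            # -> '9'  (lexicographic max of string keys)
print(max(map(int, plants)))  # -> 10  (compare as integers instead)

next_id = max(map(int, plants), default=0) + 1
print(next_id)  # -> 11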
\"__main__\":\r\n app.run(debug=True)","repo_name":"Bytou/projetFlask","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71388296410","text":"t = [1, 2, 3]\nu = [1, 2, 3]\nv = [1, 2, 3]\n\nt is v\ne = []\nt = [e]\nt[0] is e # True\nt[0].append(2) # Spremeni e\n\ne = [1, 2, 3] # T ostane [[1, 2]], e dobi nov objekt\n\nt = [\"Ana\"] + [\"Ana\"] + [\"Ana\"] # Ustvari 3 objekte \"Ana\"\nprint(t)\nt = [\"Ana\"] * 3 # Ustvari objekt, ki 3x kaže na en objekt\nprint(t)\n\ne = []\nt = [e] * 3\ne.append(1)\nprint(t)\n","repo_name":"ZigaStrgar/programiranje1","sub_path":"P9/predavanja/predavanja_9_1.py","file_name":"predavanja_9_1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"ilo","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30893539628","text":"from types import SimpleNamespace\nfrom typing import Dict\nfrom unittest import mock\n\nimport pytest\nimport urllib3\nfrom django.conf import settings\n\nfrom paas_wl.bk_app.applications.models.managers.app_configvar import AppConfigVarManager\nfrom paasng.platform.engine.deploy.bg_build.utils import (\n generate_builder_env_vars,\n generate_slug_path,\n get_envs_from_pypi_url,\n prepare_slugbuilder_template,\n update_env_vars_with_metadata,\n)\n\nurllib3.disable_warnings()\npytestmark = pytest.mark.django_db(databases=['default', 'workloads'])\n\n\nclass TestEnvVars:\n def test_generate_env_vars_without_metadata(self, build_proc, wl_app):\n env_vars = generate_builder_env_vars(build_proc, {})\n bucket = settings.BLOBSTORE_BUCKET_APP_SOURCE\n cache_path = f'{wl_app.region}/home/{wl_app.name}/cache'\n assert env_vars.pop(\"TAR_PATH\") == f\"{bucket}/{build_proc.source_tar_path}\", \"TAR_PATH 与预期不符\"\n assert env_vars.pop(\"PUT_PATH\") == f\"{bucket}/{generate_slug_path(build_proc)}\", \"PUT_PATH 与预期不符\"\n assert env_vars.pop(\"CACHE_PATH\") == f\"{bucket}/{cache_path}\", \"CACHE_PATH 与预期不符\"\n if settings.BUILD_EXTRA_ENV_VARS:\n for k, v in settings.BUILD_EXTRA_ENV_VARS.items():\n assert env_vars.pop(k) == v, f\"{k} 与预期不符\"\n if settings.PYTHON_BUILDPACK_PIP_INDEX_URL:\n for k, v in get_envs_from_pypi_url(settings.PYTHON_BUILDPACK_PIP_INDEX_URL).items():\n assert env_vars.pop(k) == v, f\"{k} 与预期不符\"\n app_config_var = AppConfigVarManager(app=wl_app).get_envs()\n for key in app_config_var.keys() & env_vars.keys():\n assert env_vars[key] == app_config_var[key], f\"{key} 与预期不符\"\n\n def test_update_env_vars_with_metadata(self, build_proc):\n env: Dict[str, str] = {}\n build_proc.buildpacks = [\n {\"type\": \"git\", \"url\": \"https://github.com/x.git\", \"name\": \"x\", \"version\": \"1.1\"},\n {\"type\": \"tar\", \"url\": \"https://rgw.com/x.tar\", \"name\": \"x\", \"version\": \"1.2\"},\n ]\n\n metadata = {\"extra_envs\": {\"a\": \"b\"}, \"buildpacks\": build_proc.buildpacks_as_build_env()}\n update_env_vars_with_metadata(env, metadata)\n\n assert metadata[\"extra_envs\"][\"a\"] == env[\"a\"]\n assert \"git x https://github.com/x.git 1.1;tar x https://rgw.com/x.tar 1.2\" == env[\"REQUIRED_BUILDPACKS\"]\n\n\nclass TestUtils:\n def test_generate_slug_path(self, wl_app, build_proc):\n slug_path = generate_slug_path(build_proc)\n assert f'{wl_app.region}/home/{wl_app.name}:{build_proc.branch}:{build_proc.revision}/push' == slug_path\n\n # `get_schedule_config` requires a valid cluster, mock it at this moment\n @mock.patch(\n 
'paasng.platform.engine.deploy.bg_build.utils.get_schedule_config',\n return_value=SimpleNamespace(\n cluster_name='foo-cluster',\n node_selector={},\n tolerations=[],\n ),\n )\n def test_prepare_slugbuilder_template_without_metadata(self, _, wl_app, build_proc):\n env_vars = generate_builder_env_vars(build_proc, {})\n slug_tmpl = prepare_slugbuilder_template(wl_app, env_vars, None)\n assert slug_tmpl.name == f\"slug-builder--{wl_app.module_name}\", \"slugbuilder_template 的 name 与app的 name 不一致\"\n assert slug_tmpl.namespace == wl_app.namespace, \"slugbuilder_template 的namespace 与 app 的 namespace 不一致\"\n assert slug_tmpl.runtime.image == settings.DEFAULT_SLUGBUILDER_IMAGE, \"slugbuilder_template 的镜像与默认镜像不一致\"\n assert slug_tmpl.runtime.envs == env_vars, \"slugbuilder_template 的 ConfigVars 与生成的环境变量不一致\"\n\n assert slug_tmpl.schedule.cluster_name == 'foo-cluster'\n assert slug_tmpl.schedule.tolerations == []\n assert slug_tmpl.schedule.node_selector == {}\n\n\ndef test_get_envs_from_pypi_url():\n ret = get_envs_from_pypi_url('http://pypi.douban.com')\n assert ret['PIP_INDEX_URL'] == 'http://pypi.douban.com'\n assert ret['PIP_INDEX_HOST'] == 'pypi.douban.com'\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/tests/paasng/platform/engine/deploy/bg_build/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"29912281427","text":"\ndef landscape_type(lst):\n status = \"unknown\"\n peak = max(lst)\n trough = min(lst)\n index1 = lst.index(peak)\n index2 = lst.index(trough)\n if lst.count(peak) == 1 and index1 > 0 and index1 < len(lst) - 1:\n lst1 = lst[:index1]\n lst2 = lst[index1::]\n ascend = sorted(lst1)\n descend = sorted(lst2,reverse = True)\n if lst1 == ascend and lst2 == descend:\n status = \"mountain\"\n if status == \"unknown\":\n if lst.count(trough) == 1 and index2 > 0 and index2 < len(lst) - 1:\n lst3 = lst[:index2]\n lst4 = lst[index2::]\n ascend2 = sorted(lst4)\n descend2 = sorted(lst3, reverse = True)\n if lst3 == descend2 and lst4 == ascend2:\n status = \"valley\"\n if status == \"unknown\":\n status = \"neither\"\n \n \n return status\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"5h5uAmaAWY3jSHA7k_3.py","file_name":"5h5uAmaAWY3jSHA7k_3.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70349194972","text":"import numpy as np\r\nimport math\r\nA = []\r\nB = []\r\nnum = int(input('Enter the number of functions: '))\r\nprint('Enter the functions: ')\r\nfor i in range(0,num):\r\n print('fx = a[0]x^n + a[1]x +....+ a[n]x^0')\r\n n=int(input('Enter the n number: '))\r\n for j in range(0,n+1):\r\n if j<3:\r\n item = int(input(f'a[{j}] = '))\r\n A.append(item)\r\n else:\r\n item = int(input(f'c{i+1} = '))\r\n B.append(item)\r\nitems = np.array(A).reshape(3,3)\r\nequales = np.array(B).reshape(3,1)\r\nprint(items)\r\nprint(equales)\r\nif num == 3:\r\n det = np.linalg.det(items)\r\n if det<0:\r\n det = math.floor(det)\r\n else:\r\n det=math.ceil(det)\r\n # det = items[0][0]*((item[1][1]*items[2][2])-(items[1][2]*items[2][1]))\r\nelse:\r\n print('the number of function should be 3')\r\nfor i in range(0,num):\r\n for j in range(0,num):\r\n items[j][i] = equales[j][0]\r\n det1 = np.linalg.det(items)\r\n x = det1/det\r\n print(f'x{i+1} = {x}')\r\n items = 
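# The Cramer loop above rebuilds the coefficient matrix and swaps one column
# for the constants on each pass, i.e. x_i = det(A_i) / det(A); a small check
# of that identity against numpy's direct solver (the 3x3 system is invented):
import numpy as np

A = np.array([[2.0, 1.0, -1.0],
              [-3.0, -1.0, 2.0],
              [-2.0, 1.0, 2.0]])
b = np.array([8.0, -11.0, -3.0])

det_A = np.linalg.det(A)
x = []
for i in range(3):
    A_i = A.copy()
    A_i[:, i] = b  # replace column i with the constants vector
    x.append(np.linalg.det(A_i) / det_A)

print(np.round(x, 6))         # -> [ 2.  3. -1.]
print(np.linalg.solve(A, b))  # same solution, computed directly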
np.array(A).reshape(3,3)\r\n","repo_name":"ghyathmoussa/nomerical_analysis","sub_path":"crammer.py","file_name":"crammer.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18493314999","text":"import datetime as dt\nimport os\nimport sys\n\n\n# Set variable so that todos are shown in local build\non_rtd = os.environ.get(\"READTHEDOCS\") == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"respy\"\ncopyright = f\"2015-{dt.datetime.now().year}, The respy Development Team\" # noqa: A001\nauthor = \"The respy Development Team\"\n\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.1.1\"\nversion = \".\".join(release.split(\".\")[:2])\n\n# -- General configuration ------------------------------------------------\n\nmaster_doc = \"index\"\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinxcontrib.bibtex\",\n \"nbsphinx\",\n \"numpydoc\",\n \"autoapi.extension\",\n \"sphinx_tabs.tabs\",\n]\n\nautodoc_mock_imports = [\n \"chaospy\",\n \"estimagic\",\n \"hypothesis\",\n \"joblib\",\n \"numba\",\n \"numpy\",\n \"pandas\",\n \"pytest\",\n \"scipy\",\n \"yaml\",\n]\n\nextlinks = {\n \"ghuser\": (\"https://github.com/%s\", \"@\"),\n \"gh\": (\"https://github.com/OpenSourceEconomics/respy/pull/%s\", \"#\"),\n}\n\nintersphinx_mapping = {\n \"numba\": (\"http://numba.pydata.org/numba-doc/latest\", None),\n \"numpy\": (\"https://numpy.org/doc/stable\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"python\": (\"https://docs.python.org/3.8\", None),\n}\n\nbibtex_bibfiles = [\"explanations/refs.bib\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\nhtml_static_path = [\"_static\"]\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\nif not on_rtd:\n todo_include_todos = True\n todo_emit_warnings = True\n\n# Configure Sphinx' linkcheck\nlinkcheck_ignore = [\n r\"http://cscubs\\.cs\\.uni-bonn\\.de/*.\",\n r\"https://(dx\\.)?doi\\.org/*.\",\n r\"https://jstor\\.org/*.\",\n r\"https://zenodo\\.org/*.\",\n]\n\n# Configuration for nbsphinx\nnbsphinx_execute = \"never\"\nnbsphinx_allow_errors = False\nnbsphinx_prolog = r\"\"\"\n{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}\n\n.. only:: html\n\n .. 
nbinfo::\n\n View and download the notebook `here `_!\n\n\"\"\"\n\n# Configuration for numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_xref_ignore = {\"type\", \"optional\", \"default\", \"of\"}\n\n# Configuration for autodoc\nautosummary_generate = True\n\n# Configuration for autoapi\nautoapi_type = \"python\"\nautoapi_dirs = [\"../respy\"]\nautoapi_ignore = [\"*/tests/*\"]\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pydata_sphinx_theme\"\n\nhtml_theme_options = {\n \"github_url\": \"https://github.com/OpenSourceEconomics/respy\",\n \"twitter_url\": \"https://twitter.com/open_econ\",\n}\n\nhtml_css_files = [\"css/custom.css\"]\n\nhtml_logo = \"_static/images/respy-logo.svg\"\n\n\nhtml_sidebars = {\n \"index\": [\"search-field\", \"custom-intro\"],\n \"about_us\": [\"search-field\", \"custom-about-us\"],\n}\n","repo_name":"OpenSourceEconomics/respy","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"12662479066","text":"from pydicom import Dataset, uid\r\n\r\nfrom .IOD import IOD, IODTypes\r\nfrom .modules.general_modules import FrameOfReferenceModule, ImagePlaneModule\r\nfrom .modules.specific_image_modules import CTImageModule\r\n\r\nclass CTImage(IOD):\r\n \"\"\"Implementation of the CT Image IOD\r\n ----------\r\n \"\"\"\r\n def __init__(self):\r\n super().__init__(IODTypes.CTImage)\r\n\r\n def create_empty_iod(self):\r\n \"\"\"Creates and empty IOD with the required DICOM tags but no values\r\n Parameters\r\n ----------\r\n \"\"\"\r\n super().create_empty_iod()\r\n\r\n self.copy_required_dicom_attributes(Dataset(), include_optional=True)\r\n\r\n def copy_required_dicom_attributes(self, dataset_to_copy_from,\r\n include_iod_specific=True,\r\n include_optional=False):\r\n \"\"\"Copies required DICOM attributes from provided dataset\r\n Parameters\r\n ----------\r\n dataset_to_copy_from : Dataset to copy DICOM attributes from\r\n include_iod_specific : Include IOD specific DICOM attributes in copy (True)\r\n include_optional : Include optional DICOM attributes in copy (False)\r\n \"\"\"\r\n super().copy_required_dicom_attributes(dataset_to_copy_from,\r\n include_optional)\r\n\r\n if include_iod_specific:\r\n ct_specific_image_modules = [FrameOfReferenceModule(),\r\n ImagePlaneModule(),\r\n CTImageModule()]\r\n for module in ct_specific_image_modules:\r\n module.copy_required_dicom_attributes(dataset_to_copy_from, \r\n self.dataset)\r\n if include_optional:\r\n module.copy_optional_dicom_attributes(dataset_to_copy_from, \r\n self.dataset)\r\n\r\n def initiate(self):\r\n \"\"\"Initiate the IOD by setting some dummy values for\r\n required attributes\r\n \"\"\"\r\n super().initiate()\r\n\r\n # Frame of reference module\r\n self.dataset.FrameOfReferenceUID = uid.generate_uid()\r\n # General image module\r\n # Image plane module\r\n self.dataset.PixelSpacing = [\"1.0\", \"1.0\"]\r\n self.dataset.ImageOrientationPatient = [\"1.0\", \"1.0\", \"1.0\", \"1.0\", \"1.0\", \"1.0\"]\r\n self.dataset.ImagePositionPatient = [\"0.0\", \"0.0\", \"0.0\"]\r\n # CT image module\r\n self.dataset.ImageType = [\"DERIVED\", \"SECONDARY\", \"AXIAL\"]\r\n self.dataset.SamplesPerPixel = 1\r\n self.dataset.PhotometricInterpretation = \"MONOCHROME2\"\r\n self.dataset.BitsAllocated = 16\r\n 
self.dataset.BitsStored = 12\r\n self.dataset.HighBit = 11\r\n self.dataset.PixelRepresentation = 0\r\n self.dataset.RescaleIntercept = \"-1024.0\"\r\n self.dataset.RescaleSlope = \"1.0\"\r\n self.dataset.RescaleType = \"HU\"\r\n\r\n def add_pixel_data(self, pixel_array,\r\n photometric_interpretation=\"MONOCHROME2\",\r\n pixel_spacing=None,\r\n bits_stored=12):\r\n \"\"\"[summary]\r\n \r\n Arguments:\r\n pixel_array {2D np.array} -- The pixel data to add to the CT object\r\n \r\n Keyword Arguments:\r\n photometric_interpretation {str} -- Photometric interpretation of the provided pixel_array (default: {\"MONOCHROME2\"})\r\n pixel_spacing {[str str]} -- Pixel spacing of the provided pixel_array (default: {None})\r\n \"\"\"\r\n if pixel_spacing is None:\r\n pixel_spacing = [str(1.0), str(1.0)]\r\n if len(pixel_array.shape) != 2:\r\n print(\"Unsupported number of samples per pixel\",pixel_array.shape[2],\r\n \"only samples per pixel is supported\")\r\n else:\r\n self.dataset.SamplesPerPixel = 1\r\n self.dataset.PhotometricInterpretation = photometric_interpretation\r\n self.dataset.Rows = pixel_array.shape[1]\r\n self.dataset.Columns = pixel_array.shape[0]\r\n if pixel_array.dtype == \"uint8\":\r\n self.dataset.BitsAllocated = 8\r\n self.dataset.BitsStored = 8\r\n self.dataset.HighBit = 7\r\n self.dataset.PixelRepresentation = 0\r\n elif pixel_array.dtype == \"uint16\":\r\n self.dataset.BitsAllocated = 16\r\n self.dataset.BitsStored = bits_stored\r\n self.dataset.HighBit = bits_stored - 1\r\n self.dataset.PixelRepresentation = 0\r\n else:\r\n print(\"Unsupported pixel type\",pixel_array.dtype,\r\n \"only uint8 and uint16 is supported\")\r\n self.dataset.PixelSpacing = pixel_spacing\r\n self.dataset.PixelData = pixel_array.tobytes()\r\n\r\n\r\n","repo_name":"sectra-medical/pydicomutils","sub_path":"src/pydicomutils/IODs/CTImage.py","file_name":"CTImage.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"19230700403","text":"from turtle import Screen\nfrom snakey import Snakey\nfrom food import Food\nfrom scoreboard import Scoreboard\nimport time\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.title('Snake Xenzia')\nscreen.bgcolor('black')\nscreen.tracer(0)\n\nsnakey = Snakey()\nfood = Food()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(snakey.up, 'Up')\nscreen.onkey(snakey.down, 'Down')\nscreen.onkey(snakey.left, 'Left')\nscreen.onkey(snakey.right, 'Right')\n\ngame_is_on = True\n\nwhile game_is_on:\n screen.update()\n time.sleep(0.2)\n snakey.move()\n\n # Detect collision with food, add to score, increase snakey and change food location\n if snakey.head.distance(food) < 10:\n food.refresh()\n snakey.extend()\n scoreboard.increase_score()\n\n # Detect collision with game wall\n if snakey.head.xcor() > 295 or snakey.head.xcor() < -305 or snakey.head.ycor() > 300 or snakey.head.ycor() < -290:\n game_is_on = False\n scoreboard.game_over()\n\n # Detect collision with tail\n for square in snakey.squares[1:]:\n if snakey.head.distance(square) < 8:\n game_is_on = False\n scoreboard.game_over()\n\nscreen.exitonclick()\n","repo_name":"Olawale-Alx/100_days_of_python","sub_path":"day20-21/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19021160159","text":"import sys\nimport os\nsys.path.append(os.getcwd())\nprint(sys.path)\nfrom tqdm 
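# initiate() above pins RescaleSlope/RescaleIntercept to 1 and -1024, i.e. the
# standard linear mapping HU = slope * stored + intercept for unsigned CT
# pixel data; a small numeric check of that mapping (values illustrative):
import numpy as np

slope, intercept = 1.0, -1024.0
stored = np.array([0, 1024, 2048], dtype=np.uint16)  # raw PixelData values

hu = stored.astype(np.float64) * slope + intercept
print(hu)  # -> [-1024.  0.  1024.]  (roughly air, water, dense bone)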
import tqdm\nfrom numpy import mod\nfrom pickle import NONE\nimport torch.nn as nn\nimport torch.utils.data.dataloader\nfrom train_cpm.utils import AverageMeter\nfrom preprocess.Transformers import Compose, RandomCrop, RandomResized, TestResized\nfrom preprocess.gen_data import LSP_DATA\nimport copy\nfrom cpm import cpm, cpm_condense\n\n\n\ndef train_model(training_dataset_path, val_data_path, model, criterion, optimizer, device=None, ts_mode=False, model_teacher=None, max_epoch=300, eps=1e-8, batch_size=1, save_name='default'):\n if ts_mode is True:\n save_name = save_name+'_ts'\n if device is None:\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n training_dataset_path = training_dataset_path\n val_data_path = val_data_path\n model_save_path = os.path.join(os.getcwd(), 'model\\\\'+save_name+'.pth')\n best_model_path = os.path.join(\n os.getcwd(), 'model\\\\best_'+save_name+'.pth')\n\n criterion = criterion.to(device)\n\n model = model.to(device)\n\n optimizer = optimizer\n\n train_losses = AverageMeter()\n val_losses = AverageMeter()\n min_losses = 999.0\n\n epoch = 0\n try:\n model = torch.load(best_model_path).to(device)\n print(\"continue trainning from the last checkpoint ...\")\n except:\n pass\n\n val_loss_last = 999.0\n train_loss_last = 999.0\n data_train = LSP_DATA('lsp', training_dataset_path, 8,\n Compose([RandomResized(), RandomCrop(368), TestResized(368)]))\n train_loader = torch.utils.data.dataloader.DataLoader(\n data_train, batch_size=batch_size)\n data_val = LSP_DATA('lsp', val_data_path, 8, Compose([TestResized(368)]))\n val_loader = torch.utils.data.dataloader.DataLoader(\n data_val, batch_size=batch_size)\n\n while epoch < max_epoch:\n print('epoch ', epoch)\n \"\"\"--------Train--------\"\"\"\n # Training data\n\n for j, data in tqdm(enumerate(train_loader)):\n inputs, heatmap, centermap, _ = data\n\n inputs = inputs.to(device)\n heatmap = heatmap.to(device)\n centermap = centermap.to(device)\n\n input_var = torch.autograd.Variable(inputs)\n heatmap_var = torch.autograd.Variable(heatmap)\n centermap_var = torch.autograd.Variable(centermap)\n\n heat1, heat2, heat3, heat4, heat5, heat6 = model(\n input_var, centermap_var)\n if ts_mode is True:\n with torch.no_grad():\n heat11, heat22, heat33, heat44, heat55, heat66 = model_teacher(\n input_var, centermap_var)\n loss1 = criterion(heat1, heat11)\n loss2 = criterion(heat2, heat22)\n loss3 = criterion(heat3, heat33)\n loss4 = criterion(heat4, heat44)\n loss5 = criterion(heat5, heat55)\n loss6 = criterion(heat6, heat66)\n\n else:\n loss1 = criterion(heat1, heatmap_var)\n loss2 = criterion(heat2, heatmap_var)\n loss3 = criterion(heat3, heatmap_var)\n loss4 = criterion(heat4, heatmap_var)\n loss5 = criterion(heat5, heatmap_var)\n loss6 = criterion(heat6, heatmap_var)\n\n loss = loss1 + loss2 + loss3 + loss4 + loss5 + loss6\n train_losses.update(loss.item(), inputs.size(0))\n if j % 100 == 0:\n print('Train Loss: ', train_losses.avg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print('Train Loss: ', train_losses.avg)\n torch.save(model, model_save_path)\n\n # --------Validation--------\n # Validation\n print('-----------Validation-----------')\n # Validation data\n model.eval()\n for j, data in enumerate(val_loader):\n inputs, heatmap, centermap, _ = data\n\n inputs = inputs.to(device)\n heatmap = heatmap.to(device)\n centermap = centermap.to(device)\n\n input_var = torch.autograd.Variable(inputs)\n heatmap_var = torch.autograd.Variable(heatmap)\n centermap_var = 
torch.autograd.Variable(centermap)\n\n heat1, heat2, heat3, heat4, heat5, heat6 = model(\n input_var, centermap_var)\n\n loss1 = criterion(heat1, heatmap_var)\n loss2 = criterion(heat2, heatmap_var)\n loss3 = criterion(heat3, heatmap_var)\n loss4 = criterion(heat4, heatmap_var)\n loss5 = criterion(heat5, heatmap_var)\n loss6 = criterion(heat6, heatmap_var)\n\n loss = loss1 + loss2 + loss3 + loss4 + loss5 + loss6\n val_losses.update(loss.item(), inputs.size(0))\n\n print('Validation Loss: ', val_losses.avg)\n if val_losses.avg < min_losses:\n # Save best cpm\n torch.save(model, best_model_path)\n min_losses = val_losses.avg\n if abs(val_losses.avg-val_loss_last) < eps and abs(train_losses.avg-train_loss_last) < eps:\n break\n val_loss_last = val_losses.avg\n train_loss_last = train_losses.avg\n model.train()\n\n epoch += 1\n\n\ndef transfer_state_dict(pretrained_dict, model_dict):\n '''\n According to model_dict, remove some unneeded parameters of pretrained_dict in order to migrate to the new network\n '''\n state_dict = {}\n for k, v in pretrained_dict.state_dict().items():\n if k in model_dict.keys() and pretrained_dict.state_dict()[k].shape == model_dict[k].shape:\n state_dict[k] = v\n else:\n print(\"Missing key(s) or dismatch shape in state_dict :{}\".format(k))\n return state_dict\n\n\ndef transfer_model(pretrained_file, model):\n '''\n Import only parameters of the same name and shape in pretrained_model\n '''\n pretrained_dict = torch.load(pretrained_file) # get pretrained dict\n model_dict = model.state_dict() # get model dict\n # Before update, you need to remove some unneeded parameters of pretrained_dict\n pretrained_dict = transfer_state_dict(pretrained_dict, model_dict)\n model_dict.update(pretrained_dict) # update parameter\n model.load_state_dict(model_dict)\n return model\n\n\ndef print_size_of_model(model):\n torch.save(model.state_dict(), \"temp.p\")\n print('Size (MB):', os.path.getsize(\"temp.p\")/1e6)\n os.remove('temp.p')\n\n\nif __name__ == \"__main__\":\n # set dataset path\n training_dataset_path = 'atrw_split\\\\trainset'\n val_data_path = 'atrw_split\\\\valset'\n\n\n # set criterion\n criterion = nn.MSELoss()\n\n # baseline\n model = cpm.CPM(k=15)\n '''\n optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\n train_model(training_dataset_path=training_dataset_path,\n val_data_path=val_data_path, model=model,\n criterion=criterion, optimizer=optimizer,\n max_epoch=300, batch_size=1, save_name='cpm_atrw')\n\n # transfer learning with lsp pretrained parameter\n model = transfer_model('model/best_cpm.pth', model)\n optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\n train_model(training_dataset_path=training_dataset_path,\n val_data_path=val_data_path, model=model,\n criterion=criterion, optimizer=optimizer,\n max_epoch=300, batch_size=1, save_name='cpm_atrw_transfer')\n\n # baseline of depthwise pointwise convolution\n model = cpm_condense.CPM_dpc(k=15)\n optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\n train_model(training_dataset_path=training_dataset_path,\n val_data_path=val_data_path, model=model,\n criterion=criterion, optimizer=optimizer,\n max_epoch=300, batch_size=1, save_name='cpm_atrw_dpc')\n\n # teacher-student mode to distillate knowledge (condense network)\n try:\n model_teacher = torch.load('model\\\\best_cpm_atrw_transfer.pth')\n except:\n print('Well trained teacher model is not ready yet.')\n model = cpm_condense.CPM_dpc(k=15)\n optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\n 
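The training loop in the train.py record above computes one MSE term per CPM stage; in teacher-student mode the six student heatmaps are regressed against the teacher's outputs obtained under torch.no_grad(). The loss aggregation, condensed into a hedged standalone sketch (names are illustrative):

import torch.nn as nn

def stagewise_loss(student_heats, target_heats, criterion=nn.MSELoss()):
    # One criterion term per stage; targets are either the ground-truth
    # heatmaps or the teacher's stage outputs, exactly as in the loop above.
    return sum(criterion(s, t) for s, t in zip(student_heats, target_heats))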
train_model(training_dataset_path=training_dataset_path,\n                val_data_path=val_data_path, model=model,\n                criterion=criterion, optimizer=optimizer,\n                max_epoch=300, batch_size=1, save_name='cpm_atrw_dpc_ts')\n    print('original model size:')\n    print_size_of_model(model_teacher)\n    print('condensed model size:')\n    print_size_of_model(model)\n    '''\n    # post-training Quantization model\n    qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n    best_model_path = os.path.join(\n        os.getcwd(), 'model\\\\best_cpm_atrw_dpc_ts.pth')\n    save_model_path = os.path.join(\n        os.getcwd(), 'model\\\\best_cpm_atrw_dpc_ts_quant.pth')\n    model = cpm_condense.CPM_dpc_quant(k=15)\n    model1 = transfer_model(best_model_path, model)\n    model1.qconfig = qconfig\n    model1_prepared = torch.quantization.prepare(model1)\n    model1_prepared.eval()\n    data = LSP_DATA('lsp', training_dataset_path, 8,\n                    Compose([RandomResized(), RandomCrop(368)]))\n    train_loader = torch.utils.data.dataloader.DataLoader(\n        data, batch_size=4)\n    print(len(train_loader))\n    for j, data in tqdm(enumerate(train_loader)):\n        inputs, heatmap, centermap, _ = data\n\n        inputs = inputs\n        heatmap = heatmap\n        centermap = centermap\n\n        input_var = torch.autograd.Variable(inputs)\n        heatmap_var = torch.autograd.Variable(heatmap)\n        centermap_var = torch.autograd.Variable(centermap)\n\n        heat1, heat2, heat3, heat4, heat5, heat6 = model1_prepared(\n            input_var, centermap_var)\n    model1_prepared_int8 = torch.quantization.convert(model1_prepared)\n    print('original model size:')\n    print_size_of_model(model1)\n    print('quantized model size:')\n    print_size_of_model(model1_prepared_int8)\n    torch.save(model1_prepared_int8, save_model_path)\n","repo_name":"jinchiniao/cpm_animal","sub_path":"train_cpm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17012564065","text":"#!/usr/bin/env python3\r\nimport pandas as pd\r\nfrom statistics import stdev\r\nfrom sklearn.ensemble import ExtraTreesRegressor\r\nfrom sklearn.linear_model import ElasticNetCV\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.svm import LinearSVR\r\nfrom tpot.builtins import StackingEstimator\r\nfrom xgboost import XGBRegressor\r\n\r\n\r\nclass RegressorLTC:\r\n\r\n    def __init__(self):\r\n\t\t#Read the CSV file with pandas; it holds the same data as the database\r\n        tpot_data = pd.read_csv('cryptodata.csv', sep=',')\r\n\t\t#For our X variable we only keep the LTC average price, volume, Google Trends score and Twitter sentiment\r\n        X = tpot_data[tpot_data[\"symbol\"] == \"LTC\"][[\"price_ave\", \"volume\", \"google_trend\", \"twitter_sent\"]].values\r\n\t\t#For the Y variable we only keep the price\r\n        y = tpot_data[tpot_data[\"symbol\"] == \"LTC\"][[\"price\"]].values\r\n        training_features, testing_features, training_target, testing_target = \\\r\n            train_test_split(X, y, random_state=42)\r\n        self.__std = stdev([item[0] for item in y])\r\n\r\n        # The score on the training set was: -0.5963858827457206\r\n        self.exported_pipeline = make_pipeline(\r\n            StackingEstimator(estimator=LinearSVR(C=1.0, dual=False, epsilon=0.001, loss=\"squared_epsilon_insensitive\", tol=0.1)),\r\n            StackingEstimator(estimator=LinearSVR(C=0.5, dual=True, epsilon=0.001, loss=\"epsilon_insensitive\", tol=0.1)),\r\n            StackingEstimator(estimator=ElasticNetCV(l1_ratio=1.0, tol=1e-05)),\r\n            StackingEstimator(estimator=LinearSVR(C=0.0001, dual=True, epsilon=1.0, loss=\"epsilon_insensitive\", tol=0.01)),\r\n            StackingEstimator(estimator=XGBRegressor(learning_rate=0.001, max_depth=3, min_child_weight=1, n_estimators=100, nthread=1, subsample=0.05)),\r\n            MinMaxScaler(),\r\n            ExtraTreesRegressor(bootstrap=False, max_features=0.9500000000000001, min_samples_leaf=1, min_samples_split=4, n_estimators=100)\r\n        )\r\n\r\n        self.exported_pipeline.fit(training_features, training_target.ravel())\r\n        self.y_predict = self.exported_pipeline.predict(testing_features)\r\n        self.y_real = testing_target.ravel()\r\n        self.score = self.exported_pipeline.score(testing_features, testing_target)\r\n\r\n\t#Predict function that will be used in our main\r\n    def predict(self, price_ave, volume, google_trend, twitter_sent):\r\n        return self.exported_pipeline.predict([[price_ave, volume, google_trend, twitter_sent]])[0]\r\n\t#Encapsulation\r\n    @property\r\n    def stdev(self):\r\n        return self.__std\r\n\r\n\r\nif __name__ == '__main__':\r\n\t#Try the model, just for fun, with made-up values\r\n    model = RegressorLTC()\r\n    print(\"Score:\", model.score)\r\n    print(\"Prediction:\", model.predict(9.09393709e+03, 9.63213800e+06, 7.00000000e+01, 1.47052201e-0))\r\n    print(\"Stdev:\", model.stdev)","repo_name":"MohamedBkhm/cryptoBot","sub_path":"modeles/regressor_LTC.py","file_name":"regressor_LTC.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27028617565","text":"\"\"\"\r\nThis code is an evolution of the classic Caesar Cipher:\r\nit can shift not only uppercase and lowercase letters but also digits and punctuation, while leaving spaces intact\r\n\"\"\"\r\n\r\n# Import the necessary module in order to create our alphabet\r\nimport string\r\n\r\n# This is the new alphabet that we will use in order to convert our texts\r\nalphabets = string.ascii_uppercase, string.ascii_lowercase, string.digits, string.punctuation\r\n\r\n\r\ndef encrypt_caesar(plain_text, shift):\r\n    def shift_alphabet_encrypt(alphabets):\r\n        return alphabets[shift:] + alphabets[:shift]\r\n\r\n    shifted_alphabets = tuple(map(shift_alphabet_encrypt, alphabets))\r\n    final_alphabet = ''.join(alphabets)\r\n    final_shifted_alphabet = ''.join(shifted_alphabets)\r\n    table = str.maketrans(final_alphabet, final_shifted_alphabet)\r\n    return plain_text.translate(table)\r\n\r\n\r\ndef decrypt_caesar(plain_text, shift):\r\n    def shift_alphabet_encrypt(alphabets):\r\n        return alphabets[-shift:] + alphabets[:-shift]\r\n\r\n    shifted_alphabets = tuple(map(shift_alphabet_encrypt, alphabets))\r\n    final_alphabet = ''.join(alphabets)\r\n    final_shifted_alphabet = ''.join(shifted_alphabets)\r\n    table = str.maketrans(final_alphabet, final_shifted_alphabet)\r\n    return plain_text.translate(table)\r\n\r\n\r\ndef Caesar():\r\n\r\n    # The user has three options\r\n    choice_of_user = int(input('Please choose one of the following options: \\n '\r\n                               'Enter 1. For Encryption:\\n '\r\n                               'Enter 2. For Decryption: \\n '\r\n                               'Enter 3. 
For Brute Force Attack: \\n '))\r\n\r\n if (choice_of_user == 1) | (choice_of_user == 2):\r\n\r\n \"\"\"\r\n Information that the user has to type each time\r\n \"\"\"\r\n\r\n plain_text = input('Please type your text that you want to Encrypt/Decrypt: \\n')\r\n shift = int(input('Please type your key number (0-25) so to encrypt/decrypt the message: \\n'))\r\n\r\n if choice_of_user == 1:\r\n print('You chose the number', choice_of_user, 'so you just want to encrypt your message: \\n'\r\n 'The encrypted message is: ', encrypt_caesar(plain_text, shift))\r\n\r\n elif choice_of_user == 2:\r\n print('You chose the number', choice_of_user, 'so you just want to decrypt your message: \\n' \r\n 'The decrypted message is: ', decrypt_caesar(plain_text, shift))\r\n\r\n elif choice_of_user == 3:\r\n\r\n \"\"\"\r\n BRUTE FORCE ATTACK\r\n \r\n You can break any message without knowing the key \r\n \"\"\"\r\n\r\n message_to_break = input('Type your message you want to apply Brute Force Attack: ')\r\n print('Your original message is somewhere below: \\n')\r\n\r\n for i in range(26):\r\n print(' shift{:2} : {}'.format(i, decrypt_caesar(message_to_break, shift=i)))\r\n\r\n else:\r\n print('Please type a valid input')\r\n\r\n\r\nCaesar()\r\n","repo_name":"Arkantos-13/Cryptographic_Algorithms","sub_path":"Caesar Cipher Improvement 2.0.py","file_name":"Caesar Cipher Improvement 2.0.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"29337178349","text":"from django.urls import path\n\nfrom .views import PostViewSet,UserAPIView\nurlpatterns = [\n path('post/', PostViewSet.as_view({\n 'get':'list',\n 'post':'create'\n })),\n \n path('post/',PostViewSet.as_view({\n 'put':'update',\n 'get':'retrieve',\n 'delete':'destroy'\n })),\n path('users/', UserAPIView.as_view({\n 'get':'list',\n }))\n]\n","repo_name":"Soham2402/microservices-blog","sub_path":"blog_admin/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28311540439","text":"import sqlite3\nfrom datetime import datetime\ntimestr = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ndb_file_name = f\"example_{timestr}.db\"\nconn = sqlite3.connect(db_file_name)\nc = conn.cursor()\nnow = datetime.now()\nformatted_date = now.strftime('%Y-%m-%d %H:%M:%S')\nfile_name = \"demo\"+timestr+\".db\"\nprint(file_name)\nf=open(file_name,\"w\")\nc.execute(\"INSERT INTO EMP(current, EMP_Name, City, Timestamp) VALUES(?, ?, ?, ?)\", (3, 'Bob', 'Sweden', formatted_date))\nconn.commit()\nconn.close()","repo_name":"aachal04/Python_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33510893664","text":"import sudo\n\nimport errno\nimport sys\nimport os\nimport pwd\nimport grp\nimport shutil\n\n\nVERSION = 1.0\n\n\nclass SudoPolicyPlugin(sudo.Plugin):\n \"\"\"Example sudo policy plugin\n\n Demonstrates how to use the sudo policy plugin API. All functions are added\n as an example on their syntax, but note that most of them are optional\n (except check_policy).\n\n On detailed description of the functions refer to sudo_plugin manual (man\n sudo_plugin).\n\n Most functions can express error or reject through their \"int\" return value\n as documented in the manual. 
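For orientation while reading the sudo example_policy_plugin.py record here, a stripped-down policy plugin that uses only calls appearing in that file (sudo.Plugin, sudo.RC, sudo.options_from_dict); the allow-list and the hard-coded path are illustrative assumptions:

import sudo

class MinimalPolicy(sudo.Plugin):
    def check_policy(self, argv: tuple, env_add: tuple):
        # Accept only 'id', run it as root, and pass the environment through.
        if argv and argv[0] == "id":
            command_info = sudo.options_from_dict({
                "command": "/usr/bin/id",   # absolute path of the command
                "runas_uid": 0,
                "runas_gid": 0,
            })
            return (sudo.RC.ACCEPT, command_info, argv, env_add)
        return sudo.RC.REJECT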
The sudo module also has constants for these:\n sudo.RC.ACCEPT / sudo.RC.OK 1\n sudo.RC.REJECT 0\n sudo.RC.ERROR -1\n sudo.RC.USAGE_ERROR -2\n\n If the plugin encounters an error, instead of just returning sudo.RC.ERROR\n result code it can also add a message describing the problem.\n This can be done by raising the special exception:\n raise sudo.PluginError(\"Message\")\n This added message will be used by the audit plugins.\n\n If the function returns \"None\" (for example does not call return), it will\n be considered sudo.RC.OK. If an exception other than sudo.PluginError is\n raised, its backtrace will be shown to the user and the plugin function\n returns sudo.RC.ERROR. If that is not acceptable, catch it.\n \"\"\"\n\n _allowed_commands = (\"id\", \"whoami\")\n _safe_password = \"12345\"\n\n # -- Plugin API functions --\n\n def __init__(self, user_env: tuple, settings: tuple,\n version: str, **kwargs):\n \"\"\"The constructor matches the C sudo plugin API open() call\n\n Other variables you can currently use as arguments are:\n user_info: tuple\n plugin_options: tuple\n\n For their detailed description, see the open() call of the C plugin API\n in the sudo manual (\"man sudo\").\n \"\"\"\n if not version.startswith(\"1.\"):\n raise sudo.PluginError(\n \"This plugin plugin is not compatible with python plugin\"\n \"API version {}\".format(version))\n\n self.user_env = sudo.options_as_dict(user_env)\n self.settings = sudo.options_as_dict(settings)\n\n def check_policy(self, argv: tuple, env_add: tuple):\n cmd = argv[0]\n # Example for a simple reject:\n if not self._is_command_allowed(cmd):\n sudo.log_error(\"You are not allowed to run this command!\")\n return sudo.RC.REJECT\n\n raise sudo.PluginError(\"You are not allowed to run this command!\")\n\n # The environment the command will be executed with (we allow any here)\n user_env_out = sudo.options_from_dict(self.user_env) + env_add\n\n command_info_out = sudo.options_from_dict({\n \"command\": self._find_on_path(cmd), # Absolute path of command\n \"runas_uid\": self._runas_uid(), # The user id\n \"runas_gid\": self._runas_gid(), # The group id\n })\n\n return (sudo.RC.ACCEPT, command_info_out, argv, user_env_out)\n\n def init_session(self, user_pwd: tuple, user_env: tuple):\n \"\"\"Perform session setup\n\n Beware that user_pwd can be None if user is not present in the password\n database. 
Otherwise it is a tuple convertible to pwd.struct_passwd.\n \"\"\"\n # conversion example:\n user_pwd = pwd.struct_passwd(user_pwd) if user_pwd else None\n\n # This is how you change the user_env:\n return (sudo.RC.OK, user_env + (\"PLUGIN_EXAMPLE_ENV=1\",))\n\n # If you do not want to change user_env, you can just return (or None):\n # return sudo.RC.OK\n\n def list(self, argv: tuple, is_verbose: int, user: str):\n cmd = argv[0] if argv else None\n as_user_text = \"as user '{}'\".format(user) if user else \"\"\n\n if cmd:\n allowed_text = \"\" if self._is_command_allowed(cmd) else \"NOT \"\n sudo.log_info(\"You are {}allowed to execute command '{}'{}\"\n .format(allowed_text, cmd, as_user_text))\n\n if not cmd or is_verbose:\n sudo.log_info(\"Only the following commands are allowed:\",\n \", \".join(self._allowed_commands), as_user_text)\n\n def validate(self):\n pass # we have no cache\n\n def invalidate(self, remove: int):\n pass # we have no cache\n\n def show_version(self, is_verbose: int):\n sudo.log_info(\"Python Example Policy Plugin \"\n \"version: {}\".format(VERSION))\n if is_verbose:\n sudo.log_info(\"Python interpreter version:\", sys.version)\n\n def close(self, exit_status: int, error: int) -> None:\n if error == 0:\n sudo.log_info(\"The command returned with exit_status {}\".format(\n exit_status))\n else:\n error_name = errno.errorcode.get(error, \"???\")\n sudo.log_error(\n \"Failed to execute command, execve syscall returned \"\n \"{} ({})\".format(error, error_name))\n\n # -- Helper functions --\n\n def _is_command_allowed(self, cmd):\n return os.path.basename(cmd) in self._allowed_commands\n\n def _find_on_path(self, cmd):\n if os.path.isabs(cmd):\n return cmd\n\n path = self.user_env.get(\"PATH\", \"/usr/bin:/bin\")\n absolute_cmd = shutil.which(cmd, path=path)\n if not absolute_cmd:\n raise sudo.PluginError(\"Can not find cmd '{}' on PATH\".format(cmd))\n return absolute_cmd\n\n def _runas_pwd(self):\n runas_user = self.settings.get(\"runas_user\") or \"root\"\n try:\n return pwd.getpwnam(runas_user)\n except KeyError:\n raise sudo.PluginError(\"Could not find user \"\n \"'{}'\".format(runas_user))\n\n def _runas_uid(self):\n return self._runas_pwd().pw_uid\n\n def _runas_gid(self):\n runas_group = self.settings.get(\"runas_group\")\n if runas_group is None:\n return self._runas_pwd().pw_gid\n\n try:\n return grp.getgrnam(runas_group).gr_gid\n except KeyError:\n raise sudo.PluginError(\n \"Could not find group '{}'\".format(runas_group))\n","repo_name":"sudo-project/sudo","sub_path":"plugins/python/example_policy_plugin.py","file_name":"example_policy_plugin.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":956,"dataset":"github-code","pt":"32"} +{"seq_id":"11712104577","text":"from django.db import models\nfrom categories.models import Category\n\n# Create your models here.\n\nclass Home(models.Model):\n name = 'ffghh'\n \n @property \n def some(self):\n string = 'some string testing'\n return string\n \n def __str__(self):\n return self.name","repo_name":"DollyGt/e-commerce-final-project","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16878034474","text":"from django.conf import settings\nfrom django.db.models import Q\nfrom rest_framework import viewsets, permissions\nfrom src.helper.response import MyResponse\nfrom .serializers import 
BookCategorySerializer, BookSerializer\nfrom .models import Book\n\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\n\nclass BookViewSet(viewsets.ViewSet):\n # permission_classes = [permissions.IsAuthenticated]\n # queryset = Book.objects.all()\n # serializer_class = BookSerializer\n\n def list(self, request):\n queryset = Book.objects.all()\n serializer = BookSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Book.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = BookSerializer(user)\n return Response(serializer.data)\n\n # def list(self, request):\n limit = request.GET.get(\"limit\", 10)\n sort = request.GET.get(\"sort\", None)\n q = request.GET.get(\"q\", None)\n\n # if limit:\n # # Dynamic page_size\n # self.pagination_class.page_size = limit\n\n \"\"\"\n -- Start --\n Dynamic filter parameter queryset\n \"\"\"\n args = Q()\n\n if q is not None:\n args = args | Q(title__icontains=q)\n\n \"\"\"\n Dynamic filter parameter queryset\n -- End --\n \"\"\"\n\n \"\"\"\n -- Start --\n Dynamic sort parameter queryset\n \"\"\"\n order_by = []\n\n if sort is not None:\n order_by = []\n\n # Get from filter\n split_sort = sort.split(\",\")\n\n for row in split_sort:\n split_row = row.split(\"~\")\n\n try:\n key = split_row[0]\n except IndexError:\n key = None\n\n try:\n value = split_row[1]\n except IndexError:\n value = \"DESC\"\n\n order_by.append(\"-\" + str(key) if value == \"DESC\" else str(key))\n else:\n order_by.append(\"-created_date\")\n\n order_by.append(\"-id\")\n \"\"\"\n -- End --\n Dynamic sort parameter queryset\n \"\"\"\n\n queryset = (\n self.filter_queryset(self.get_queryset())\n .filter(*(args,))\n .order_by(*order_by)\n )\n page = self.paginate_queryset(queryset)\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n data = self.get_paginated_response(serializer.data)\n return MyResponse(\n settings.RESPONSE_META[\"SUCCESS\"][\"CODE\"],\n \"\",\n data,\n )\n\n serializer = self.get_serializer(queryset, many=True)\n data = {\"content\": serializer.data}\n return MyResponse(\n settings.RESPONSE_META[\"SUCCESS\"][\"CODE\"],\n \"\",\n data,\n )\n","repo_name":"inspira-raina/test","sub_path":"src/book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15228973015","text":"import requests\nfrom time import sleep\nimport firebase_admin\nfrom firebase_admin import firestore\nfrom datetime import datetime, timedelta, timezone\n\n\n# ValueError: The default Firebase app already exists.対策\nif len(firebase_admin._apps) == 0:\n firebase_admin.initialize_app()\n\ndb = firestore.client()\n\n# 都道府県の単位を合わせる用 東京 => 東京都\nt = [\"東京\"]\nf = [\"大阪\", \"京都\"]\nk = [\n \"青森\", \"岩手\", \"宮城\", \"秋田\", \"山形\", \"福島\",\n \"茨城\", \"栃木\", \"群馬\", \"埼玉\", \"千葉\", \"神奈川\",\n \"新潟\", \"富山\", \"石川\", \"福井\", \"山梨\", \"長野\", \"岐阜\",\n \"静岡\", \"愛知\", \"三重\", \"滋賀\", \"兵庫\", \"奈良\", \"和歌山\",\n \"鳥取\", \"島根\", \"岡山\", \"広島\", \"山口\", \"徳島\", \"香川\",\n \"愛媛\", \"高知\", \"福岡\", \"佐賀\", \"長崎\", \"熊本\", \"大分\",\n \"宮崎\", \"鹿児島\", \"沖縄\"\n]\n\n# API\napi_url = \"https://covid19-japan-web-api.now.sh/api/v1/prefectures\"\n\n# firestore保存用dictionary\ndata_dic = {\n \"detail\": {\"update\": \"\"},\n \"prefectures\": {},\n \"total\": {\n 
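The list() handler in the Book views record above parses a sort string such as "price~DESC,name~ASC" into Django order_by() arguments, defaulting a missing direction to DESC. That parsing, isolated as a sketch (the function name is illustrative):

def parse_sort(sort):
    order_by = []
    for row in (sort or "").split(","):
        if not row:
            continue
        key, _, direction = row.partition("~")   # missing direction -> ""
        order_by.append("-" + key if direction in ("", "DESC") else key)
    return order_by or ["-created_date"]          # same fallback as above

# parse_sort("title~ASC,id~DESC") -> ['title', '-id']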
\"total_cases\": 0,\n \"total_deaths\": 0\n }\n}\n\n\ndef send_data_to_firestore(Request):\n\n # 初期化\n prefectures = {}\n json_dic = {}\n total_cases = 0\n total_deaths = 0\n total_pcr = 0\n cnt = 0\n\n # 5回リクエストしてステータスが200じゃなかったら例外を出す\n for i in range(5):\n r = requests.get(api_url)\n s = r.status_code\n if s == 200:\n json_dic = r.json()\n break\n else:\n cnt += 1\n sleep(10)\n if cnt >= 5:\n raise Exception(\"APIからデータを取得することができませんでした。\")\n\n for i in json_dic:\n\n # 都道府県名の単位の修正\n name_ja = i[\"name_ja\"]\n if name_ja in t:\n i[\"name_ja\"] = name_ja + \"都\"\n elif name_ja in f:\n i[\"name_ja\"] = name_ja + \"府\"\n elif name_ja in k:\n i[\"name_ja\"] = name_ja + \"県\"\n\n # 各都道府県の感染者数・死亡者数を加算\n cases = i[\"cases\"]\n deaths = i[\"deaths\"]\n pcr = i[\"pcr\"]\n total_cases += cases\n total_deaths += deaths\n total_pcr += pcr\n\n # 都道府県名、感染者数、死亡者数を格納\n prefectures.update({\n i[\"name_ja\"]: {\n \"cases\": cases,\n \"deaths\": deaths,\n \"pcr\": pcr,\n \"name_en\": i[\"name_en\"]\n }\n })\n\n # アップデート時間\n jst = timezone(timedelta(hours=+9), \"JST\")\n now = datetime.now(jst).strftime(\"%Y-%m-%d %H:%M\")\n\n # データを格納\n data_dic.update({\n \"detail\": {\"update\": now},\n \"prefectures\": prefectures,\n \"total\": {\n \"total_cases\": total_cases,\n \"total_deaths\": total_deaths,\n \"total_pcr\": total_pcr\n }\n })\n\n # firestoreに保存\n doc_num = str(datetime.now(jst).hour)\n if doc_num == \"22\":\n db.collection(\"data\").document(\"before\").set(data_dic)\n db.collection(\"data\").document(\"now\").set(data_dic)\n\n\n# debug\n# send_data_to_firestore(\"ok\")\n","repo_name":"miya/covid19-jp-linebot","sub_path":"functions/get_prefectures_data.py","file_name":"get_prefectures_data.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"27302282032","text":"import socket\n\nclass SmartSocket:\n def __init__(self, socket_obj):\n self.socket = socket_obj\n\n def extractSize(self, crudeData):\n sizeInStr = \"\"\n isStarted = False\n for char in crudeData:\n if(char != '0'):\n isStarted = True\n if(char != '0' or (char == '0' and isStarted)):\n sizeInStr += char\n return int(sizeInStr)\n\n def recvAll(self, dataSize):\n data = self.socket.recv(dataSize)\n if data == \"\":\n return -1\n\n while (len(data) < dataSize):\n data += self.socket.recv(dataSize-(len(data)))\n\n return bytes(data)\n\n def recv(self):\n data_size = self.recvAll(5)\n\n if data_size == -1:\n return \"\"\n data_size = self.extractSize(data_size)\n data = self.recvAll(data_size)\n\n return data\n\n def get_size_in_format(self, data_size):\n str_size = str(data_size)\n completion = \"\"\n for i in xrange(5 - len(str_size)):\n completion += \"0\"\n return completion + str_size\n\n def send(self, data):\n data_size = self.get_size_in_format(len(data))\n self.socket.sendall(data_size + data)\n\n def disconnect(self):\n for i in xrange(3):\n self.send(\"OUT-123\")\n approve = self.recv()\n if approve == \"555\":\n self.send(\"OUT-123\")\n self.socket.close()\n return\n\n raise Exception(\"Error 555: Disconnection failure\")\n\n\n","repo_name":"strmrider/Messenger-application","sub_path":"server/smartsocket.py","file_name":"smartsocket.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10813991587","text":"import random\nimport json\nimport pickle as pkl\nfrom collections import Counter, defaultdict\n\n\ndef get_dist(data, 
idx_list):\n labels = []\n for idx in idx_list:\n labels.append(data[idx].label)\n labels_dist = Counter(labels)\n return labels_dist\n\n\ndef check_dist(data_1, data_2, idx_list_1, idx_list_2, flag):\n label_dist_1 = get_dist(data_1, idx_list_1)\n label_dist_2 = get_dist(data_2, idx_list_2)\n for keys in label_dist_2:\n if not flag[keys] and label_dist_2[keys] != label_dist_1[keys]:\n print(keys)\n print(flag)\n print(label_dist_1[keys])\n print(label_dist_2[keys])\n raise\n\n\ndef build_vaild(data, data_partition, method):\n client_num = int(method.split(\"_\")[0].split(\"=\")[1])\n\n all_valid_labels_dist = get_dist(data[\"valid\"], [i for i in range(len(data[\"valid\"]))])\n all_test_labels_dist = get_dist(data[\"test\"], [i for i in range(len(data[\"test\"]))])\n print(all_valid_labels_dist, all_test_labels_dist)\n\n all_valid_labels_list = {key: [i for i, exapmle in enumerate(data[\"valid\"]) if exapmle.label == key] for key in\n all_valid_labels_dist}\n all_test_labels_list = {key: [i for i, exapmle in enumerate(data[\"test\"]) if exapmle.label == key] for key in\n all_test_labels_dist}\n\n for i in range(client_num):\n train_idx_list = data_partition[method][\"train\"][i]\n train_labels_dist = get_dist(data[\"train\"], train_idx_list)\n\n vaild_idx_list, test_idx_list = [], []\n valid_flag, test_flag = {}, {}\n for key, value in train_labels_dist.items():\n if key in all_valid_labels_dist.keys():\n if all_valid_labels_dist[key] < value:\n valid_value = all_valid_labels_dist[key]\n valid_flag[key] = True\n else:\n valid_value = value\n valid_flag[key] = False\n vaild_idx_list.extend(random.sample(all_valid_labels_list[key], valid_value))\n\n if key in all_test_labels_dist.keys():\n if all_test_labels_dist[key] < value:\n test_value = all_test_labels_dist[key]\n test_flag[key] = True\n else:\n test_value = value\n test_flag[key] = False\n test_idx_list.extend(random.sample(all_test_labels_list[key], test_value))\n\n check_dist(data[\"train\"], data[\"valid\"], train_idx_list, vaild_idx_list, valid_flag)\n check_dist(data[\"train\"], data[\"test\"], train_idx_list, test_idx_list, test_flag)\n\n data_partition[method][\"valid\"][i] = vaild_idx_list\n data_partition[method][\"test\"][i] = test_idx_list\n return data_partition","repo_name":"SMILELab-FL/FedPETuning","sub_path":"tools/glue_scripts/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"16322065275","text":"#!/usr/bin/env python3\n'''\nA submodule for loading a form from a JSON or YAML file\n\nAuthor : Michael Biselx\nDate : 11.2022\nProject : PyQtTest\n'''\n\n__all__ = [\n 'FormDisplay',\n 'ListDisplay'\n]\n\nimport json\nimport yaml\nimport typing\nimport logging\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass FormDisplay(QtWidgets.QFrame):\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n form: typing.Optional[str] = None) -> None:\n super().__init__(parent, flags)\n\n self.form = QtWidgets.QFormLayout()\n\n export_button = QtWidgets.QPushButton('export', self)\n export_button.setIcon(self.style().standardIcon(\n self.style().StandardPixmap.SP_ToolBarHorizontalExtensionButton))\n export_button.clicked.connect(self.export_callback)\n\n self.setLayout(QtWidgets.QVBoxLayout(self))\n self.layout().addLayout(self.form)\n self.layout().addWidget(export_button)\n\n if 
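build_vaild() in the local.py record above draws validation and test indices per label so that each client's label counts match its training split, capped by what the pool contains, then verifies the result with check_dist(). The core sampling step as a hedged standalone sketch (illustrative names):

import random
from collections import Counter

def match_label_distribution(train_labels, pool_labels, seed=0):
    rng = random.Random(seed)
    want = Counter(train_labels)                 # target count per label
    by_label = {}
    for idx, lab in enumerate(pool_labels):      # bucket pool indices by label
        by_label.setdefault(lab, []).append(idx)
    picked = []
    for lab, n in want.items():
        pool = by_label.get(lab, [])
        picked.extend(rng.sample(pool, min(n, len(pool))))  # cap at availability
    return picked

# match_label_distribution(['a', 'a', 'b'], ['a', 'b', 'b', 'a', 'c']) -> 3 indices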
form is not None:\n self.fromFile(form)\n\n def fromFile(self, filename: str):\n '''read the form structure from a dict'''\n with open(filename) as f:\n if filename.lower().endswith('json'):\n prototype: 'dict[str, dict]' = json.load(f)\n elif filename.lower().endswith(('yaml', 'yml')):\n prototype: 'dict[str, dict]' = yaml.safe_load(f)\n self.formFromPrototype(prototype)\n\n def formFromPrototype(self, prototype: 'dict[str, typing.Any]'):\n for key, value in prototype.items():\n if key.lower() == 'fields':\n for protofield in value:\n self.form.addRow(*self.formitemFromProtofield(protofield))\n\n def formitemFromProtofield(self, protofield: 'dict[str, dict]'):\n # get the obligatory fields :\n try:\n field_name = 'ERROR'\n field_name = str(protofield.get('name'))\n field_type = str(protofield.get('type')).lower()\n except KeyError as e:\n logging.error(f\"Failed to parse field. Missing key '{e!s}'\")\n w = QtWidgets.QLabel(f\"Missing '{e!s}'\")\n w.getValue = w.text\n return field_name, w\n\n # make the input widget according to the field\n if field_type == 'string':\n w = QtWidgets.QLineEdit(self)\n w.setText(str(protofield.get('value', '')))\n w.getValue = w.text\n\n elif field_type == 'integer':\n w = QtWidgets.QSpinBox(self)\n w.setMinimum(int(protofield.get('minimum', 0)))\n w.setMaximum(int(protofield.get('maximum', 99)))\n w.setValue(int(protofield.get('value', 0)))\n w.getValue = w.value\n\n elif field_type == 'decimal':\n w = QtWidgets.QDoubleSpinBox(self)\n w.setMinimum(float(protofield.get('minimum', 0)))\n w.setMaximum(float(protofield.get('maximum', 99)))\n w.setValue(float(protofield.get('value', 0)))\n w.setDecimals(int(protofield.get('precision', 2)))\n w.getValue = w.value\n\n elif field_type == 'date':\n w = QtWidgets.QDateEdit(self)\n w.setCalendarPopup(True)\n w.getValue = lambda: w.date().toString(QtCore.Qt.DateFormat.ISODate)\n value = str(protofield.get('value', 'today'))\n\n if value.lower() != 'today':\n try:\n w.setDate(QtCore.QDate.fromString(\n value,\n QtCore.Qt.DateFormat.ISODate))\n except Exception as e:\n logging.error(\n f\"could not parse {value} as a date\")\n logging.exception(e)\n else:\n w.setDate(QtCore.QDate.currentDate())\n\n elif field_type == 'choice':\n if protofield.get('exclusive', True):\n w = QtWidgets.QComboBox(self)\n w.setEditable(bool(protofield.get('open', False)))\n w.addItems(map(str, protofield.get('choices', [])))\n if 'value' in protofield:\n w.setCurrentText(str(protofield.get('value')))\n w.getValue = w.currentText\n else:\n w = QtWidgets.QWidget(self)\n w.setLayout(QtWidgets.QHBoxLayout(w))\n w.buttonGroup = QtWidgets.QButtonGroup(w)\n w.buttonGroup.setExclusive(False)\n values = protofield.get('value', [])\n if not isinstance(values, list):\n values = [values]\n values = map(str, values)\n for choice in map(str, protofield.get('choices', [])):\n cb = QtWidgets.QCheckBox(choice)\n w.layout().addWidget(cb)\n w.buttonGroup.addButton(cb)\n if choice in values:\n cb.setChecked(True)\n\n w.getValue = lambda: [\n b.text() for b in w.buttonGroup.buttons() if b.isChecked()]\n else:\n logging.error(\n f\"No widget has been implemented for type '{field_type}'\")\n w = QtWidgets.QLabel(f\"'{field_type.title()}' not implemented\")\n w.getValue = w.text\n\n return field_name, w\n\n def export_callback(self, *, filename: str = None):\n if filename is None:\n filename, _ = QtWidgets.QFileDialog.getSaveFileName(self)\n\n formitem_dict = self.toDict()\n\n if filename.endswith('yaml'):\n dumper = yaml.safe_dump\n elif 
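FormDisplay.fromFile() above and export_callback() just after it both branch on the file extension to pick a JSON or YAML codec. A table-driven equivalent, offered only as an illustrative sketch:

import json
import yaml

_LOADERS = {'.json': json.load, '.yaml': yaml.safe_load, '.yml': yaml.safe_load}

def load_prototype(filename):
    for ext, loader in _LOADERS.items():
        if filename.lower().endswith(ext):
            with open(filename) as f:
                return loader(f)
    raise ValueError('unsupported prototype file: ' + filename)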
filename.endswith('json'):\n dumper = json.dump\n else:\n print(\"dict contents:\")\n for item in formitem_dict.items():\n print(*item, sep=' : ')\n raise TypeError(f'Filetype `{filename}` not supported')\n\n with open(filename, 'w') as file:\n dumper(formitem_dict, file)\n\n def get_row(self, row: int) -> 'tuple[str, typing.Any]':\n row_label = self.form.itemAt(row, self.form.ItemRole.LabelRole)\n row_field = self.form.itemAt(row, self.form.ItemRole.FieldRole)\n return row_label.widget().text(), row_field.widget().getValue()\n\n def toDict(self) -> 'dict[str, typing.Any]':\n return dict(self.get_row(row) for row in range(self.form.rowCount()))\n\n\nclass ListItemPicker(QtWidgets.QFrame):\n addItemRequest: QtCore.pyqtSignal\n removeItemRequest: QtCore.pyqtSignal\n\n def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = None) -> None:\n super().__init__(parent)\n self._choice = QtWidgets.QComboBox(self)\n self._addItem = QtWidgets.QToolButton(self)\n self._addItem.setText('+')\n self.addItemRequest = self._addItem.clicked\n self._removeItem = QtWidgets.QToolButton(self)\n self._removeItem.setText('-')\n self.removeItemRequest = self._removeItem.clicked\n\n self.setLayout(QtWidgets.QHBoxLayout(self))\n self.layout().addWidget(self._choice)\n self.layout().addWidget(self._addItem)\n self.layout().addWidget(self._removeItem)\n self.layout().setContentsMargins(*4*[0])\n\n def setChoices(self, choices: list):\n for choice in choices:\n self._choice.addItem(\n f\"{choice['name']} ({choice['type']})\", choice)\n\n\nclass ListDisplay(QtWidgets.QFrame):\n def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = None,\n flags: typing.Union[QtCore.Qt.WindowFlags,\n QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n form: typing.Optional[str] = None) -> None:\n super().__init__(parent, flags)\n\n self.form = QtWidgets.QFormLayout(self)\n self.setLayout(self.form)\n\n if form is not None:\n self.fromFile(form)\n\n def fromFile(self, filename: str):\n '''read the form structure from a dict'''\n with open(filename) as f:\n if filename.lower().endswith('json'):\n prototype: 'dict[str, dict]' = json.load(f)\n elif filename.lower().endswith(('yaml', 'yml')):\n prototype: 'dict[str, dict]' = yaml.safe_load(f)\n self._fromPrototype(prototype)\n\n def _fromPrototype(self, prototype: 'dict[str, typing.Any]', *, layout=None) -> QtWidgets.QWidget:\n if layout is None:\n layout = self.form\n\n name = prototype.get('name', '')\n dtype = str(prototype['type']).lower()\n\n if dtype == 'list':\n w = self._fromPrototype(prototype.get('value', {'type': 'group'}),\n layout=layout)\n w.setTitle(prototype.get('name', w.title()))\n lip = ListItemPicker(w)\n lip.setChoices(prototype['choices'])\n w.layout().addRow(lip)\n # np = QtWidgets.QLineEdit(w)\n # w.layout().addRow(np, lip)\n\n def removeItem():\n if w.layout().rowCount() > 1:\n rr = w.layout().takeRow(w.layout().rowCount()-2)\n rr.labelItem.widget().deleteLater()\n rr.fieldItem.widget().deleteLater()\n lip.removeItemRequest.connect(removeItem)\n\n def insertNewItem():\n rr = w.layout().takeRow(w.layout().rowCount()-1)\n self._fromPrototype(lip._choice.currentData(),\n layout=w.layout())\n w.layout().addRow(rr.fieldItem.widget())\n # w.layout().addRow(rr.labelItem.widget(),\n # rr.fieldItem.widget())\n lip.addItemRequest.connect(insertNewItem)\n\n elif dtype == 'group':\n w = QtWidgets.QGroupBox()\n w.setTitle(prototype.get('name', ''))\n w.setLayout(QtWidgets.QFormLayout(w))\n for key, value in prototype.items():\n if key in ('name', 'type'):\n 
continue\n self._fromPrototype(value, layout=w.layout())\n layout.addRow(w)\n\n # make the input widget according to the field\n else:\n l, w = self._fieldFromPrototype(prototype)\n layout.addRow(l, w)\n\n return w\n\n def _fieldFromPrototype(self, prototype: dict) -> 'tuple[str, QtWidgets.QWidget]':\n name = prototype.get('name', '')\n dtype = str(prototype['type']).lower()\n\n if dtype == 'string':\n w = QtWidgets.QLineEdit()\n w.setText(str(prototype.get('value', '')))\n\n elif dtype == 'integer':\n w = QtWidgets.QSpinBox()\n w.setMinimum(int(prototype.get('minimum', 0)))\n w.setMaximum(int(prototype.get('maximum', 99)))\n w.setValue(int(prototype.get('value', 0)))\n\n else:\n raise NotImplementedError(f\"{dtype}\")\n\n return name, w\n","repo_name":"mbiselx/PyQtTest","sub_path":"src/PyQtTest/widgets/form_generation/form_generation.py","file_name":"form_generation.py","file_ext":"py","file_size_in_byte":10970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9594127793","text":"#!/usr/bin/env python\r\n\"\"\" \"\"\"\r\n\r\n# Script information for the file.\r\n__author__ = \"Hendrix Demers (hendrix.demers@mail.mcgill.ca)\"\r\n__version__ = \"\"\r\n__date__ = \"\"\r\n__copyright__ = \"Copyright (c) 2009 Hendrix Demers\"\r\n__license__ = \"\"\r\n\r\n# Standard library modules.\r\nimport logging\r\n\r\n# Third party modules.\r\n\r\n# Local modules.\r\nimport casinotools.fileformat.FileReaderWriterTools as FileReaderWriterTools\r\n#import casinotools.fileformat.casino3.SampleSubtrate as SampleSubtrate\r\nimport casinotools.fileformat.casino3.SampleObjectFactory as SampleObjectFactory\r\nimport casinotools.fileformat.casino3.SampleTree as SampleTree\r\nimport casinotools.fileformat.casino3.Region as Region\r\nimport casinotools.fileformat.casino3.Version as Version\r\n\r\n# Globals and constants variables.\r\nOFFSET_ROTATION_Y = \"offset_rotation_y\"\r\nOFFSET_ROTATION_Z = \"offset_rotation_z\"\r\n\r\nclass ShapeError(Exception): pass\r\n\r\nclass Sample(FileReaderWriterTools.FileReaderWriterTools):\r\n def __init__(self):\r\n self._file = None\r\n self._startPosition = 0\r\n self._endPosition = 0\r\n self._filePathname = \"\"\r\n self._fileDescriptor = 0\r\n\r\n self._sampleObjects = []\r\n self._regions = []\r\n\r\n self._offsets = {}\r\n\r\n def read(self, file):\r\n self._file = file\r\n self._startPosition = file.tell()\r\n self._filePathname = file.name\r\n self._fileDescriptor = file.fileno()\r\n\r\n logging.debug(\"File position at the start of %s.%s: %i\", self.__class__.__name__, \"read\", file.tell())\r\n\r\n tagID = b\"*CASINOSAMPLE%%\"\r\n if self.findTag(file, tagID):\r\n self._version = self.readInt(file)\r\n\r\n if self._version >= 3010301:\r\n return self._read_3131(file)\r\n else:\r\n raise \"version_not_supported\"\r\n\r\n def _read_3131(self, file):\r\n logging.debug(\"File position at the start of %s.%s: %i\", self.__class__.__name__, \"_read_3131\", file.tell())\r\n\r\n tagID = b\"*SUBSTRATE%%%%%\"\r\n if self.findTag(file, tagID):\r\n self._useSubstrate = self.readInt(file)\r\n\r\n self._substrate = SampleObjectFactory.CreateObjectFromType(SampleObjectFactory.SHAPE_SUBSTRATE)\r\n self._substrate.read(file)\r\n\r\n tagID = b\"*SAMPLEOBJECTS%\"\r\n if self.findTag(file, tagID):\r\n self._count = self.readInt(file)\r\n\r\n for dummy in range(self._count):\r\n type = self.readInt(file)\r\n\r\n sampleObject = SampleObjectFactory.CreateObjectFromType(type)\r\n\r\n sampleObject.read(file)\r\n\r\n if self._version >= 
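The read path in the casinotools Sample record above locates each binary section by scanning for an ASCII tag such as b"*CASINOSAMPLE%%" before reading the versioned fields that follow it. findTag() itself lives in FileReaderWriterTools, which is not shown; the sketch below is an assumption about its behavior, not the library's actual code:

def find_tag(fileobj, tag):
    # Scan forward from the current position; on success leave the file
    # positioned just past the tag and return True, otherwise restore it.
    start = fileobj.tell()
    data = fileobj.read()
    pos = data.find(tag)
    if pos < 0:
        fileobj.seek(start)
        return False
    fileobj.seek(start + pos + len(tag))
    return True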
30200002:\r\n objectId = self.readInt(file)\r\n self.addSampleObjectWithId(sampleObject, objectId)\r\n else:\r\n self.addSampleObject(sampleObject)\r\n\r\n if self._version < 30107001:\r\n tagID = b\"*MAC%%%%%%%%%%%\"\r\n if self.findTag(file, tagID):\r\n #float MAC[100][100][3]\r\n #file.read((char*)&MAC,sizeof(MAC[0][0][0]*100*100*3));\r\n numberElements = 100 * 100 * 3\r\n self._mac = self.readFloatList(file, numberElements)\r\n\r\n tagID = b\"*SAMPLEDATA%%%%\"\r\n if self.findTag(file, tagID):\r\n self._maxSampleTreeLevel = self.readInt(file)\r\n\r\n if self._version >= Version.SIM_OPTIONS_VERSION_3_1_8_2:\r\n self._offsets[OFFSET_ROTATION_Y] = file.tell()\r\n self._rotationAngleY_deg = self.readDouble(file)\r\n self._offsets[OFFSET_ROTATION_Z] = file.tell()\r\n self._rotationAngleZ_deg = self.readDouble(file)\r\n\r\n self._presence = self.readInt(file)\r\n if self._presence:\r\n self._sampleTree = SampleTree.SampleTree()\r\n self._sampleTree.read(file)\r\n\r\n tagID = b\"*REGIONDATA%%%%\"\r\n if self.findTag(file, tagID):\r\n self._numberRegions = self.readInt(file)\r\n\r\n #return\r\n for dummy in range(self._numberRegions):\r\n regionInfo = Region.Region()\r\n regionInfo.read(file)\r\n\r\n self.addRegion(regionInfo)\r\n\r\n # TODO calculate regions for the sample's triangles.\r\n\r\n def addSampleObject(self, sampleObject):\r\n self._sampleObjects.append(sampleObject)\r\n\r\n def addSampleObjectWithId(self, sampleObject, objectId):\r\n self._sampleObjects.append(sampleObject)\r\n\r\n def addRegion(self, region):\r\n self._regions.append(region)\r\n\r\n def getRegions(self):\r\n return self._regions\r\n\r\n def getShapes(self):\r\n return self._sampleObjects\r\n\r\n def getFirstSphereShape(self):\r\n for shape in self._sampleObjects:\r\n type = shape.getType()\r\n if type == SampleObjectFactory.SHAPE_SPHERE:\r\n return shape\r\n\r\n raise ShapeError(\"Shape not found.\")\r\n\r\n def getPlaneShapes(self):\r\n shapes = []\r\n for shape in self._sampleObjects:\r\n type = shape.getType()\r\n if type == SampleObjectFactory.SHAPE_PLANE:\r\n shapes.append(shape)\r\n\r\n return shapes\r\n\r\n def getVersion(self):\r\n return self._version\r\n\r\n def getRotationY_deg(self):\r\n return self._rotationAngleY_deg\r\n\r\n def setRotationY_deg(self, rotationAngle_deg):\r\n self._rotationAngleY_deg = rotationAngle_deg\r\n\r\n def modifyRotationY_deg(self, rotationAngle_deg):\r\n self._file.seek(self._offsets[OFFSET_ROTATION_Y])\r\n self.writeDouble(self._file, rotationAngle_deg)\r\n self._rotationAngleY_deg = rotationAngle_deg\r\n\r\n def getRotationZ_deg(self):\r\n return self._rotationAngleZ_deg\r\n\r\n def setRotationZ_deg(self, rotationAngle_deg):\r\n self._rotationAngleZ_deg = rotationAngle_deg\r\n\r\n def modifyRotationZ_deg(self, rotationAngle_deg):\r\n self._file.seek(self._offsets[OFFSET_ROTATION_Z])\r\n self.writeDouble(self._file, rotationAngle_deg)\r\n self._rotationAngleZ_deg = rotationAngle_deg\r\n\r\n def write(self, file):\r\n pass\r\n\r\n def export(self, exportFile):\r\n # todo: implement the export method.\r\n self._exportHeader(exportFile)\r\n self._exportVersion(exportFile)\r\n self._exportSubstrate(exportFile)\r\n self._exportSampleObjects(exportFile)\r\n self._exportSampleData(exportFile)\r\n self._exportRegionData(exportFile)\r\n\r\n def _exportHeader(self, exportFile):\r\n line = \"-\"*80\r\n self.writeLine(exportFile, line)\r\n\r\n line = \"%s\" % (\"Sample\")\r\n self.writeLine(exportFile, line)\r\n\r\n line = \"-\"*40\r\n self.writeLine(exportFile, line)\r\n\r\n def 
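modifyRotationY_deg() and modifyRotationZ_deg() above rewrite a single double in place by seeking to the offset remembered during read(). The same idea with only the stdlib, assuming the writer stores little-endian IEEE 754 doubles (that format is an assumption):

import struct

def patch_double(path, offset, value):
    with open(path, 'r+b') as f:           # read/write binary leaves the rest intact
        f.seek(offset)
        f.write(struct.pack('<d', value))  # '<d' = little-endian float64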
_exportVersion(self, exportFile):\r\n version = self.getVersion()\r\n versionString = self._extractVersionString(version)\r\n line = \"File version: %s (%i)\" % (versionString, version)\r\n self.writeLine(exportFile, line)\r\n\r\n def _exportSubstrate(self, exportFile):\r\n text = self._extractBooleanString(self._useSubstrate)\r\n line = \"Use substract: %s\" % (text)\r\n self.writeLine(exportFile, line)\r\n\r\n self._substrate.export(exportFile)\r\n\r\n def _exportSampleObjects(self, exportFile):\r\n line = \"number of sample objects: %i\" % (self._count)\r\n self.writeLine(exportFile, line)\r\n\r\n sampleObjectID = 0\r\n for sampleObject in self._sampleObjects:\r\n sampleObjectID += 1\r\n line = \"Sample object: %i\" % (sampleObjectID)\r\n self.writeLine(exportFile, line)\r\n\r\n sampleObject.export(exportFile)\r\n\r\n def _exportSampleData(self, exportFile):\r\n line = \"Maximum sample tree level: %i\" % (self._maxSampleTreeLevel)\r\n self.writeLine(exportFile, line)\r\n\r\n line = \"Sample rotation angle Y (deg): %g\" % (self._rotationAngleY_deg)\r\n self.writeLine(exportFile, line)\r\n\r\n line = \"Sample rotation angle Z (deg): %g\" % (self._rotationAngleZ_deg)\r\n self.writeLine(exportFile, line)\r\n\r\n text = self._extractBooleanString(self._presence)\r\n line = \"Presence: %s\" % (text)\r\n self.writeLine(exportFile, line)\r\n\r\n if self._presence:\r\n self._sampleTree.export(exportFile)\r\n\r\n def _exportRegionData(self, exportFile):\r\n line = \"number of regions: %i\" % (self._numberRegions)\r\n self.writeLine(exportFile, line)\r\n\r\n sampleRegionID = 0\r\n for region in self._regions:\r\n sampleRegionID += 1\r\n line = \"Sample region: %i\" % (sampleRegionID)\r\n self.writeLine(exportFile, line)\r\n\r\n region.export(exportFile)\r\n","repo_name":"drix00/pycasinotools","sub_path":"casinotools/fileformat/casino3/Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"37732671044","text":"import numpy as np\nimport pandas as pd\nfrom Our_code.Hashing import lst_gram\nfrom Our_code import args\n\n\n# build dictionary and assign index to each slice\ndef load_vocab():\n vocab = open(args.VOCAB_FILE, encoding='utf-8').readlines()\n slice2idx = {}\n idx2slice = {}\n cnt = 0\n for char in vocab:\n char = char.strip('\\n')\n slice2idx[char] = cnt\n idx2slice[cnt] = char\n cnt += 1\n return slice2idx, idx2slice\n\n\n# limit all output to be the same length\ndef padding(text, maxlen=args.SENTENCE_MAXLEN):\n pad_text = []\n for sentence in text:\n\n pad_sentence = np.zeros(maxlen).astype('int64') # build a all zero vector that matches the shape of text\n cnt = 0\n for index in sentence:\n pad_sentence[cnt] = index\n cnt += 1\n if cnt == maxlen:\n break # break loop if go over maxlen\n pad_text.append(pad_sentence.tolist())\n return pad_text\n\n\n# output list of indexes from dictionary given a line of text\ndef char_index(text_a, text_b):\n slice2idx, idx2slice = load_vocab()\n a_list, b_list = [], []\n\n # for each line (parsed into two sentences)in the file\n for a_sentence, b_sentence in zip(text_a, text_b):\n a, b = [], []\n\n # for each slice of the first sentence in each line\n for slice in lst_gram(a_sentence):\n\n if slice in slice2idx.keys(): # append index if slice exist in dictionary\n a.append(slice2idx[slice])\n else:\n a.append(1) # for those not in the txt remark it as “UNK”\n # for each slice of the second sentence in each line\n for slice 
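padding() in the Data_Load.py record above right-pads, or truncates, every index list to a fixed SENTENCE_MAXLEN. A vectorized equivalent as an illustrative sketch (it returns one array instead of a list of lists):

import numpy as np

def pad_sequences(seqs, maxlen):
    out = np.zeros((len(seqs), maxlen), dtype='int64')
    for i, seq in enumerate(seqs):
        trunc = seq[:maxlen]             # truncate, mirroring the break above
        out[i, :len(trunc)] = trunc      # right side stays zero-padded
    return out

# pad_sequences([[3, 1, 4], [1]], 2).tolist() -> [[3, 1], [1, 0]]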
in lst_gram(b_sentence):\n if slice in slice2idx.keys():\n b.append(slice2idx[slice])\n else:\n b.append(1)\n\n a_list.append(a)\n b_list.append(b)\n\n a_list = padding(a_list)\n b_list = padding(b_list)\n\n return a_list, b_list\n\n\n# read lines from file and return lists of indexes\ndef load_char_data(filename):\n df = pd.read_csv(filename, sep='\\t')\n text_a = df['#1 string'].values\n text_b = df['#2 string'].values\n label = df['quality'].values\n a_index, b_index = char_index(text_a, text_b)\n return np.array(a_index), np.array(b_index), np.array(label)\n","repo_name":"WeijianShi/DSSM_Research","sub_path":"Our_code/Data_Load.py","file_name":"Data_Load.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"16499193605","text":"class Guests:\n def __init__(self, name, sity, status):\n self.n = name\n self.s = sity\n self.status = status\n def print_guest(self):\n G = str(self.n) + \", \" + str(self.s) + ', статус \"' + str(self.status) + '\"'\n return G\n\nclass Bad_Guests (Guests):\n comment = \"посадить в угол и не кормить!!!\"\n\nG1 = Guests(\"Иванов Иван\",\"г.Москва\",\"Наставник\")\nG2 = Guests(\"Петров Василий\",\"г.Москва\",\"Ментор\")\nG3 = Bad_Guests(\"Тот-Кто-Придумал-Эти-Задачи\",\"д.Гадюкино\",\"Садист\")\nprint(G1.print_guest())\nprint(G2.print_guest())\nprint(G3.print_guest() + \" - \" + G3.comment)\n","repo_name":"isabanchin/isabanchin-skillfactory-B-6-10","sub_path":"B-6-10-4.py","file_name":"B-6-10-4.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6996892347","text":"import random\nfrom functools import wraps\nfrom flask import Blueprint, jsonify, request\n\nfrom Decider.db.db_setup import db_session\nfrom Decider.db.models import Question, QuestionEffect, Token, TokenValue, User\n\napi = Blueprint(\"api\", __name__)\n\n# decorator to protect a route, returning a 401 if anything \n # is not sent with a proper auth token \ndef auth_guard(func):\n @wraps(func)\n def protectedFunc(*args,**kwargs):\n authHeader = request.headers.get('Authorization')\n authToken = None\n if authHeader:\n authToken = authHeader.split()[1]\n if authToken:\n decoded = { \"error_message\": None} \n # ^^^ what would go here is a means of decoding a user's auth\n # token. error_message would tell us if the token was \n # expired, blacklisted, or otherwise invalid. \n if decoded['error_message']:\n return jsonResponse(decoded['error_message'], status=401)\n else:\n return func(*args,**kwargs)\n else:\n # return \"unauthorized request\", 401\n # ^^^ what we should return \n # since this is a prototype and authorization is not implemented, \n # we allow the function to run. \n return func(*args,**kwargs)\n return protectedFunc\n\n\n# CRUD (just create, and not even then) portraits\n@api.route(\"portraits\", methods=[\"POST\"])\n@auth_guard\ndef save_portrait(): \n portrait_data = request.get_json()[\"data\"]\n # here is where we would\n # save to S3 \n # save an item linked to the user, etc., at that S3 URL\n return jsonify({\n \"message\": \"Got a cool shaded portrait. 
Thanks!\"\n })\n\n# CRUD questions\n@api.route(\"questions\")\n@auth_guard\ndef read_questions():\n return jsonify({\n \"questions\": Question.serialize_query_result(Question.query.filter(Question.active == True))\n })\n\n@api.route(\"questions/random\")\ndef get_random_question():\n return jsonify(random.choice(Question.serialize_query_result(Question.query.filter(Question.active == True))))\n\n@api.route(\"questions\", methods=[\"POST\"])\ndef create_question():\n data = request.get_json()\n question = Question(name=data[\"name\"], active=1 )\n db_session.add(question)\n db_session.commit()\n return jsonify({\n \"saved_question\": question.to_dict()\n })\n\n@api.route(\"questions\", methods=[\"DELETE\"])\ndef delete_question():\n return jsonify({\n \"questions\": \"got deleted\"\n })\n\n# CRUD tokens\n@api.route(\"tokens\")\ndef read_tokens():\n return jsonify({\n \"tokens\": Token.serialize_query_result(Token.query.filter(Token.active == True))\n })\n\n@api.route(\"tokens\", methods=[\"POST\"])\ndef create_token():\n data = request.get_json()\n token = Token(name=data[\"name\"], active=1 )\n db_session.add(token)\n db_session.commit()\n return jsonify({\n \"saved_token\": token.to_dict()\n })\n\n@api.route(\"tokens\", methods=[\"DELETE\"])\ndef delete_token():\n return jsonify({\n \"tokens\": \"got deleted\"\n })\n\n\n# CRUD token-values\n@api.route(\"token_values\")\ndef read_token_values():\n user_id = request.args.get(\"user_id\")\n if not user_id:\n return \"you must provide a `user_id` query parameter\", 400\n return jsonify(TokenValue.serialize_query_result(TokenValue.query.filter(TokenValue.user_id == user_id)))\n\n@api.route(\"token_values\", methods=[\"POST\"])\ndef create_or_update_token_value():\n request_data = request.get_json()\n print(request_data)\n\n # interpret answer as sign for the token delta\n sign = 1 if request_data['answer'] == 'yes' else -1\n\n # find the tokens affected by this question\n question_effects = QuestionEffect.query.filter(QuestionEffect.question_id == request_data[\"question_id\"])\n \n # get all the token values corresponding to this user (excess results, but fewer queries)\n # organize into a lookup table by token id\n token_values = TokenValue.query.filter(TokenValue.user_id == request_data[\"user_id\"])\n token_values_by_token_id = {}\n for val in token_values: \n token_values_by_token_id[val.token_id] = val \n\n # for every relevant token...\n updated_tokens = []\n for effect in question_effects:\n token = effect.token \n # see if this user has such a token value\n token_value = token_values_by_token_id.get(token.id)\n if not token_value: \n # otherwise make a new one\n token_value = TokenValue(\n user_id=request_data[\"user_id\"],\n token_id=token.id,\n value=0\n )\n # now add to session, mark as dirty\n db_session.add(token_value)\n token_value.value = max(min(token_value.value + effect.delta * sign, 100),0)\n updated_tokens.append(token_value)\n\n # make all updates\n db_session.commit()\n return jsonify(TokenValue.serialize_query_result(updated_tokens))\n \n@api.route(\"token_values/reset\", methods=[\"POST\"])\ndef reset_token_values():\n user_id = request.args.get(\"user_id\")\n if not user_id:\n return \"you must provide a `user_id` query parameter\", 400\n # delete all existing token values for this user\n TokenValue.query.filter(TokenValue.user_id == user_id).delete()\n \n # get all tokens\n all_tokens = Token.query.filter(Token.active == True)\n\n # and create a new token value for the user for that token\n new_token_values = []\n 
for token in all_tokens:\n token_value = TokenValue(token_id=token.id, user_id=user_id, value=50)\n token_value.token = token\n db_session.add(token_value)\n new_token_values.append(token_value)\n\n db_session.commit()\n print(new_token_values)\n return jsonify(TokenValue.serialize_query_result(new_token_values))\n\n@api.route(\"token_values\", methods=[\"DELETE\"])\ndef delete_token_values():\n return jsonify({\n \"token_values\": \"got deleted\"\n })\n","repo_name":"magentanova/decider","sub_path":"Decider/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23938127773","text":"import os.path\nimport unittest\nimport warnings\n\nfrom debian import changelog\nfrom debian import debian_support\n\n\ntry:\n # pylint: disable=unused-import\n from typing import (\n Any,\n IO,\n Optional,\n Text,\n )\nexcept ImportError:\n # Missing types aren't important at runtime\n pass\n\n\ndef find_test_file(filename):\n # type: (str) -> str\n \"\"\" find a test file that is located within the test suite \"\"\"\n return os.path.join(os.path.dirname(__file__), filename)\n\n\ndef open_utf8(filename, mode='r'):\n # type: (str, str) -> IO[Text]\n \"\"\"Open a UTF-8 text file in text mode.\"\"\"\n return open(filename, mode=mode, encoding='UTF-8')\n\n\nclass ChangelogTests(unittest.TestCase):\n\n def test_create_changelog(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = f.read()\n cl = changelog.Changelog(c)\n cs = str(cl)\n clines = c.split('\\n')\n cslines = cs.split('\\n')\n for i in range(len(clines)):\n self.assertEqual(clines[i], cslines[i])\n self.assertEqual(len(clines), len(cslines), \"Different lengths\")\n\n def test_create_changelog_single_block(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = f.read()\n cl = changelog.Changelog(c, max_blocks=1)\n cs = str(cl)\n self.assertEqual(cs,\n \"\"\"gnutls13 (1:1.4.1-1) unstable; urgency=HIGH\n\n [ James Westby ]\n * New upstream release. Closes: #123, #456,\n #789. 
LP: #1234, #2345,\n #3456\n * Remove the following patches as they are now included upstream:\n - 10_certtoolmanpage.diff\n - 15_fixcompilewarning.diff\n - 30_man_hyphen_*.patch\n * Link the API reference in /usr/share/gtk-doc/html as gnutls rather than\n gnutls-api so that devhelp can find it.\n\n -- Andreas Metzler Sat, 15 Jul 2006 11:11:08 +0200\n\n\"\"\")\n\n def test_modify_changelog(self):\n # type: () -> None\n with open(find_test_file('test_modify_changelog1')) as f:\n c = f.read()\n cl = changelog.Changelog(c)\n cl.package = 'gnutls14'\n cl.version = '1:1.4.1-2'\n cl.distributions = 'experimental'\n cl.urgency = 'medium'\n cl.add_change(' * Add magic foo')\n cl.author = 'James Westby '\n cl.date = 'Sat, 16 Jul 2008 11:11:08 -0200'\n with open(find_test_file('test_modify_changelog2')) as f:\n c = f.read()\n clines = c.split('\\n')\n cslines = str(cl).split('\\n')\n for i in range(len(clines)):\n self.assertEqual(clines[i], cslines[i])\n self.assertEqual(len(clines), len(cslines), \"Different lengths\")\n\n def test_preserve_initial_lines(self):\n # type: () -> None\n cl_text = b\"\"\"\nTHIS IS A LINE THAT SHOULD BE PRESERVED BUT IGNORED\nhaskell-src-exts (1.8.2-3) unstable; urgency=low\n\n * control: Use versioned Replaces: and Conflicts:\n\n -- Somebody Wed, 05 May 2010 18:01:53 -0300\n\"\"\"\n with self.assertWarns(UserWarning):\n cl = changelog.Changelog(cl_text)\n self.assertEqual(cl_text, bytes(cl))\n\n def test_add_changelog_section(self):\n # type: () -> None\n with open(find_test_file('test_modify_changelog2')) as f:\n c = f.read()\n cl = changelog.Changelog(c)\n cl.new_block(package='gnutls14',\n version=debian_support.Version('1:1.4.1-3'),\n distributions='experimental',\n urgency='low',\n author='James Westby ')\n\n self.assertRaises(changelog.ChangelogCreateError, cl.__str__)\n\n cl.set_date('Sat, 16 Jul 2008 11:11:08 +0200')\n cl.add_change('')\n cl.add_change(' * Foo did not work, let us try bar')\n cl.add_change('')\n\n f = open(find_test_file('test_modify_changelog3'))\n c = f.read()\n f.close()\n clines = c.split('\\n')\n cslines = str(cl).split('\\n')\n for i in range(len(clines)):\n self.assertEqual(clines[i], cslines[i])\n self.assertEqual(len(clines), len(cslines), \"Different lengths\")\n\n def test_strange_changelogs(self):\n # type: () -> None\n \"\"\" Just opens and parses a strange changelog \"\"\"\n with open(find_test_file('test_strange_changelog')) as f:\n c = f.read()\n cl = changelog.Changelog(c)\n\n def test_set_version_with_string(self):\n # type: () -> None\n with open(find_test_file('test_modify_changelog1')) as f:\n c1 = changelog.Changelog(f.read())\n f.seek(0)\n c2 = changelog.Changelog(f.read())\n c1.version = '1:2.3.5-2'\n c2.version = debian_support.Version('1:2.3.5-2')\n self.assertEqual(c1.version, c2.version)\n self.assertEqual((c1.full_version, c1.epoch, c1.upstream_version,\n c1.debian_version),\n (c2.full_version, c2.epoch, c2.upstream_version,\n c2.debian_version))\n\n def test_changelog_no_author(self):\n # type: () -> None\n cl_no_author = \"\"\"gnutls13 (1:1.4.1-1) unstable; urgency=low\n\n * New upstream release.\n\n --\n\"\"\"\n c1 = changelog.Changelog()\n c1.parse_changelog(cl_no_author, allow_empty_author=True)\n self.assertEqual(c1.author, None)\n self.assertEqual(c1.date, None)\n self.assertEqual(c1.package, \"gnutls13\")\n self.assertRaises(changelog.ChangelogCreateError, str, c1)\n self.assertEqual(c1._format(allow_missing_author=True), cl_no_author)\n c2 = changelog.Changelog()\n 
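# By default (allow_empty_author=False) the same author-less entry must raise.\n        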
self.assertRaises(changelog.ChangelogParseError, c2.parse_changelog, cl_no_author)\n\n def test_magic_version_properties(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = changelog.Changelog(f)\n self.assertEqual(c.debian_version, '1')\n self.assertEqual(c.full_version, '1:1.4.1-1')\n self.assertEqual(c.upstream_version, '1.4.1')\n self.assertEqual(c.epoch, '1')\n self.assertEqual(str(c.version), c.full_version)\n\n def test_bugs_closed(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = iter(changelog.Changelog(f))\n # test bugs in a list\n block = next(c)\n self.assertEqual(block.bugs_closed, [123, 456, 789])\n self.assertEqual(block.lp_bugs_closed, [1234, 2345, 3456])\n # test bugs in parentheses\n block = next(c)\n self.assertEqual(block.bugs_closed, [375815])\n self.assertEqual(block.lp_bugs_closed, [])\n\n def test_allow_full_stops_in_distribution(self):\n # type: () -> None\n with open(find_test_file('test_changelog_full_stops')) as f:\n c = changelog.Changelog(f)\n self.assertEqual(c.debian_version, None)\n self.assertEqual(c.full_version, '1.2.3')\n self.assertEqual(str(c.version), c.full_version)\n\n def test_str_consistent(self):\n # type: () -> None\n # The parsing of the changelog (including the string representation)\n # should be consistent whether we give a single string, a list of\n # lines, or a file object to the Changelog initializer\n with open(find_test_file('test_changelog')) as f:\n cl_data = f.read()\n f.seek(0)\n c1 = changelog.Changelog(f)\n c2 = changelog.Changelog(cl_data)\n c3 = changelog.Changelog(cl_data.splitlines())\n for c in (c1, c2, c3):\n self.assertEqual(str(c), cl_data)\n\n def test_utf8_encoded_file_input(self):\n # type: () -> None\n f = open_utf8(find_test_file('test_changelog_unicode'))\n c = changelog.Changelog(f)\n f.close()\n u = str(c)\n expected_u = \"\"\"haskell-src-exts (1.8.2-3) unstable; urgency=low\n\n * control: Use versioned Replaces: and Conflicts:\n\n -- Marco T\\xfalio Gontijo e Silva Wed, 05 May 2010 18:01:53 -0300\n\nhaskell-src-exts (1.8.2-2) unstable; urgency=low\n\n * debian/control: Rename -doc package.\n\n -- Marco T\\xfalio Gontijo e Silva Tue, 16 Mar 2010 10:59:48 -0300\n\"\"\"\n self.assertEqual(u, expected_u)\n self.assertEqual(bytes(c), u.encode('utf-8'))\n\n def test_unicode_object_input(self):\n # type: () -> None\n with open(find_test_file('test_changelog_unicode'), 'rb') as f:\n c_bytes = f.read()\n c_unicode = c_bytes.decode('utf-8')\n c = changelog.Changelog(c_unicode)\n self.assertEqual(str(c), c_unicode)\n self.assertEqual(bytes(c), c_bytes)\n\n def test_non_utf8_encoding(self):\n # type: () -> None\n with open(find_test_file('test_changelog_unicode'), 'rb') as f:\n c_bytes = f.read()\n c_unicode = c_bytes.decode('utf-8')\n c_latin1_str = c_unicode.encode('latin1')\n c = changelog.Changelog(c_latin1_str, encoding='latin1')\n self.assertEqual(str(c), c_unicode)\n self.assertEqual(bytes(c), c_latin1_str)\n for block in c:\n self.assertEqual(bytes(block),\n str(block).encode('latin1'))\n\n def test_malformed_date(self):\n # type: () -> None\n c_text = \"\"\"package (1.0-1) codename; urgency=medium\n\n * minimal example reproducer of malformed date line\n\n -- John Smith Tue, 27 Sep 2016 14:08:04 -0600\n \"\"\"\n # In strict mode, exceptions should be raised by the malformed entry\n with self.assertRaises(changelog.ChangelogParseError):\n c = changelog.Changelog(c_text, strict=True)\n # In non-strict mode, warnings should be emitted by the 
malformed entry\n with self.assertWarns(Warning):\n c = changelog.Changelog(c_text, strict=False)\n self.assertEqual(len(c), 1)\n\n def test_block_iterator(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = changelog.Changelog(f)\n self.assertEqual([str(b) for b in c._blocks], [str(b) for b in c])\n\n def test_block_access(self):\n # type: () -> None\n \"\"\" test random access to changelog entries \"\"\"\n with open(find_test_file('test_changelog')) as f:\n c = changelog.Changelog(f)\n self.assertEqual(str(c[2].version), '1.4.0-2',\n 'access by sequence number')\n self.assertEqual(str(c['1.4.0-1'].version), '1.4.0-1',\n 'access by version string')\n self.assertEqual(str(c[debian_support.Version('1.3.5-1.1')].version),\n '1.3.5-1.1',\n 'access by Version object')\n\n def test_len(self):\n # type: () -> None\n with open(find_test_file('test_changelog')) as f:\n c = changelog.Changelog(f)\n self.assertEqual(len(c._blocks), len(c))\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"alvistack/python-debian-team-python-debian","sub_path":"lib/debian/tests/test_changelog.py","file_name":"test_changelog.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1433961586","text":"import torch\nfrom tqdm import tqdm\nfrom utils import AverageMeter\n\n\ndef train(dataloader, model, criterion, optimizer, logger=None, args=None):\n model.train()\n losses = AverageMeter()\n pbar = tqdm(total=len(dataloader))\n for i, (input_, target) in enumerate(dataloader):\n bs, ts, h, w = target.size()\n output, _target, loss = step(\n input_, target, model, criterion, args=args)\n n_elements = _get_n_elements(bs, ts, h, w, args)\n losses.update(loss.item(), n_elements)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n pbar.update(1)\n if args.debug: break # noqa\n pbar.close()\n\n if args.loss.lower().startswith('bce'):\n output = torch.sigmoid(output)\n logger.debug(\n 'output/min {} output/max {} target/min {} target/max {}'.format(\n output.min().item(), output.max().item(),\n _target.min().item(), _target.max().item()))\n\n return {args.loss: losses.avg, 'output': output.cpu(), 'target': _target.cpu()}\n\n\ndef validate(dataloader, model, criterion, logger=None, args=None):\n model.eval()\n losses = AverageMeter()\n pbar = tqdm(total=(len(dataloader)))\n for i, (input_, target) in enumerate(dataloader):\n bs, ts, h, w = target.size()\n with torch.no_grad():\n output, _target, loss = step(\n input_, target, model, criterion, args=args)\n n_elements = _get_n_elements(bs, ts, h, w, args)\n losses.update(loss.item(), n_elements)\n\n pbar.update(1)\n if args.debug: break # noqa\n pbar.close()\n\n if args.loss.lower().startswith('bce'):\n output = torch.sigmoid(output)\n\n return {args.loss: losses.avg, 'output': output.cpu(), 'target': _target.cpu()}\n\n\ndef step(input_, target, model, criterion, args):\n bs, ts, h, w = target.size()\n input_ = input_.float() / 255.\n target = target.float() / 255.\n input_, target = input_.to(args.device), target.to(args.device)\n output = model(input_.unsqueeze(2), target.unsqueeze(2))\n\n # (bs, ts, c, h, w) -> (bs, ts, h, w) -> (ts, bs, h, w)\n output = output.squeeze(2).permute(1, 0, 2, 3)\n # (bs, ts, h, w) -> (ts, bs, h, w)\n target = target.permute(1, 0, 2, 3)\n\n assert len(output) == len(target) == ts\n loss = 0.\n reduction = args.loss.split('/')[-1].lower()\n if reduction == 'image':\n loss = 
criterion(output, target) / bs / ts\n elif reduction == 'pixel':\n loss = criterion(output, target)\n else:\n raise NotImplementedError\n\n # output, target returned in batch_first shape\n return output.permute(1, 0, 2, 3), target.permute(1, 0, 2, 3), loss\n\n\ndef _get_n_elements(bs, ts, h, w, args):\n reduction = args.loss.split('/')[-1].lower()\n if reduction == 'image':\n n = bs * ts\n elif reduction == 'pixel':\n n = bs * ts * h * w\n else:\n raise NotImplementedError\n return n\n","repo_name":"fujiki-1emon/DeepLearningCodes","sub_path":"ConvLSTM/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74474825372","text":"import matplotlib.pyplot as plt\nfrom numpy import arctan,log,sin,cos,log10,sqrt\nimport numpy as np\n\n#color de fondo de los ejercicios\ncolor_ejemplo = '#F2FBF8' \ncolor_comment = '#F3F3F3'\n\n\"\"\"\n#################\nIntegral de línea\n#################\n\"\"\"\ndef f1(x):\n return arctan(x-3)+4\n\nx = np.linspace(1,6)\nplt.figure(figsize=(10,6))\nplt.plot(x,f1(x),c='k')\nplt.scatter([1,6],[f1(1),f1(6)],c='k')\nplt.annotate('A',[1,f1(1)+0.2],fontsize=18)\nplt.annotate('B',[6,f1(6)+0.2],fontsize=18)\nplt.vlines(x=1, ymin = 0, ymax= f1(1), color='k', linestyle='--')\nplt.vlines(x=6, ymin = 0, ymax= f1(6), color='k', linestyle='--')\nplt.annotate(\"\", [2.5,f1(2.5)], xytext=(2.49, f1(2.49)),arrowprops=dict(arrowstyle=\"->\"),fontsize=30)\nplt.annotate(\"\", [4.5,f1(4.5)], xytext=(4.49, f1(4.49)),arrowprops=dict(arrowstyle=\"->\"),fontsize=30)\nplt.xticks([1, 6], ['a', 'b'])\nplt.yticks([],[])\nplt.axis('off')\nplt.xlim(0,7)\nplt.ylim(2,6)\n#plt.savefig('latex/integraldelinea.png')\n\n\"\"\"\n#############\nEjemplo 4.1.3\n#############\n\"\"\"\ndef f2(x):\n return x\ndef f3(x):\n return x**2\n\nx = np.linspace(-0.1,1.1)\nxf= np.linspace(0,1)\nfig, ax = plt.subplots()\nfig.patch.set_facecolor(color_ejemplo)\nax.set_facecolor(color_ejemplo)\nax.plot(x,f3(x),c='k')\nax.plot(x,f2(x),c='k')\nax.fill_between(xf,f3(xf),f2(xf),color=\"none\",hatch=\"//\",edgecolor=\"k\")\nax.annotate('$\\mathcal{D}$', xy=(0.45, 0.3), xytext=(0.3, 0.8),\n arrowprops=dict(facecolor='black', shrink=0.05),fontsize=22)\nax.annotate('$y=x^2$',[0.7,f3(0.7)-0.2],fontsize=18)\nax.annotate('$y=x$',[0.7,f2(0.7)+0.3],fontsize=18)\nax.vlines(x=1, ymin = 0, ymax= f2(1), color='k', linestyle='--')\nax.spines['left'].set_position('zero')\nax.spines['right'].set_color('none')\nax.yaxis.tick_left()\nax.spines['bottom'].set_position('zero')\nax.spines['top'].set_color('none')\nax.xaxis.tick_bottom()\nax.set_xticks([1],['1'],fontsize=14)\nax.set_yticks([],[])\n#plt.savefig('latex/ejemplo4.1.3.png')\n\n\"\"\"\n#############\nEjemplo 4.1.4\n#############\n\"\"\"\ndef f4(x):\n return 2\nf4=np.vectorize(f4)\ndef f5(x):\n return np.e**x\n\nx = np.linspace(-0.05,np.log(2)+0.1)\nxf= np.linspace(0,np.log(2))\nfig, ax = plt.subplots()\nfig.patch.set_facecolor(color_ejemplo)\nax.set_facecolor(color_ejemplo)\nax.plot(x,f4(x),c='k')\nax.plot(x,f5(x),c='k')\nax.fill_between(xf,f4(xf),f5(xf),color=\"none\",hatch=\"//\",edgecolor=\"k\")\nax.annotate('$\\mathcal{D}$', xy=(0.25, 1.7), xytext=(0.1, 0.7),\n arrowprops=dict(facecolor='black', shrink=0.05),fontsize=22)\nax.annotate('$y=2$',[np.log(2)/2+0.1,2.1],fontsize=18)\nax.annotate('$y=e^x$',[np.log(2)/2+0.1,1.35],fontsize=18)\nax.vlines(x=np.log(2), ymin = 0, ymax= f4(1), color='k', 
linestyle='--')\nax.spines['left'].set_position('zero')\nax.spines['right'].set_color('none')\nax.yaxis.tick_left()\nax.spines['bottom'].set_position('zero')\nax.spines['top'].set_color('none')\nax.tick_params(axis=u'both', which=u'both',length=0)\nax.set_ylim(ymin=-0.4, ymax=3)\nax.set_xticks([np.log(2)],['$\\log\\left(2\\\\right)$'],fontsize=14)\nax.set_yticks([1.1,2.1],['1','2'],fontsize=14)\n#plt.savefig('latex/ejemplo4.1.4.png')\n\n\"\"\"\n#################\nIntegrales dobles\n#################\n\"\"\"\ndef f6(x):\n return 1\ndef f7(x):\n return 3\ndef f8(x):\n return 1.2\ndef f9(x):\n return 1.7\nf6,f7,f8,f9=np.vectorize(f6),np.vectorize(f7),np.vectorize(f8),np.vectorize(f9) \n\nx = np.linspace(1,4)\nxr = np.linspace(2.75,3.5)\nxf= np.linspace(0,np.log(2))\nfig, ax = plt.subplots()\n\nax.plot(x,f6(x),c='k')\nax.plot(x,f7(x),c='k')\nax.plot(xr,f8(xr),c='k')\nax.plot(xr,f9(xr),c='k')\nax.vlines(x=1, ymin = 1, ymax= 3, color='k', linestyle='-')\nax.vlines(x=1, ymin = 0, ymax= 1, color='k', linestyle='--')\nax.vlines(x=4, ymin = 1, ymax= 3, color='k', linestyle='-')\nax.vlines(x=4, ymin = 0, ymax= 1, color='k', linestyle='--')\nax.hlines(y=1, xmin = 0, xmax= 1, color='k', linestyle='--')\nax.hlines(y=3, xmin = 0, xmax= 1, color='k', linestyle='--')\n\nax.vlines(x=3.5, ymin = 1.2, ymax= 1.7, color='k', linestyle='-')\nax.vlines(x=2.75, ymin = 1.2, ymax= 1.7, color='k', linestyle='-')\nax.vlines(x=3.5, ymin = 0, ymax= 1.2, color='k', linestyle='--')\nax.vlines(x=2.75, ymin = 0, ymax= 1.2, color='k', linestyle='--')\nax.hlines(y=1.2, xmin = 0, xmax= 2.75, color='k', linestyle='--')\nax.hlines(y=1.7, xmin = 0, xmax= 2.75, color='k', linestyle='--')\n\nax.fill_between(xr,f8(xf),f9(xf),color=\"none\",hatch=\"//\",edgecolor=\"k\")\n\nax.spines['left'].set_position('zero')\nax.spines['right'].set_color('none')\nax.yaxis.tick_left()\nax.spines['bottom'].set_position('zero')\nax.spines['top'].set_color('none')\nax.tick_params(axis=u'both', which=u'both',length=0)\nax.set_xlim(xmin=-0.4, xmax=5)\nax.set_ylim(ymin=-0.4, ymax=4)\nax.set_xticks([1,4],['a','b'],fontsize=12)\nax.set_yticks([0.98,1.25,1.75,2.98],['c','$y_{j-1}$','$y_j$','d'],fontsize=12)\n#plt.savefig('latex/integralesdobles.png')\n\n\"\"\"\n################\nComentario 4.1.3\n################\n\"\"\"\ndef f6(x):\n return 1\ndef f7(x):\n return 3\ndef f8(x):\n return 1.2\ndef f9(x):\n return 1.7\ndef f10(x):\n if x < 3.88:\n return 1.028*sqrt(log10(x-0.804)+sin(x)**2)+2\n else: \n return 3\ndef f11(x):\n return -0.93*sqrt(log10(x-0.712)+cos(x-2))+2\n\nf6,f7,f8,f9,f10,f11=np.vectorize(f6),np.vectorize(f7),np.vectorize(f8),np.vectorize(f9),np.vectorize(f10),np.vectorize(f11)\n\nx = np.linspace(1,4,5000)\nxr = np.linspace(1,4,5000)\nxr1 = np.linspace(1,4,5000)\nfig, ax = plt.subplots()\nax.fill_between(x,f10(x),f11(x),color=\"none\",hatch=\"//\",edgecolor=\"r\")\n\nfig.patch.set_facecolor(color_comment)\nax.set_facecolor(color_comment)\n\nax.plot(x, f6( x ),c='k')\nax.plot(x, f7( x ),c='k')\nax.plot(xr,f10(xr),c='r')\nax.plot(xr1,f11(xr1),c='r')\n\nax.annotate('$\\mathcal{D}$', xy=(2, 1.7), xytext=(2.3, 1.8),fontsize=22)\n\nax.vlines(x=1, ymin = 1, ymax= 3, color='k', linestyle='-')\nax.vlines(x=1, ymin = 0, ymax= 1, color='k', linestyle='--')\nax.vlines(x=4, ymin = 1, ymax= 3, color='k', linestyle='-')\nax.vlines(x=4, ymin = 0, ymax= 1, color='k', linestyle='--')\nax.hlines(y=1, xmin = 0, xmax= 1, color='k', linestyle='--')\nax.hlines(y=3, xmin = 0, xmax= 1, color='k', 
linestyle='--')\n\nax.spines['left'].set_position('zero')\nax.spines['right'].set_color('none')\nax.yaxis.tick_left()\nax.spines['bottom'].set_position('zero')\nax.spines['top'].set_color('none')\nax.tick_params(axis=u'both', which=u'both',length=0)\nax.set_xlim(xmin=-0.4, xmax=5)\nax.set_ylim(ymin=-0.4, ymax=4)\nax.set_xticks([1,4],['a','b'],fontsize=12)\nax.set_yticks([0.98,2.98],['c','d'],fontsize=12)\n\nax.vlines(x=4, ymin = f11(4), ymax= 3, color='r', linestyle='-')\n\n#plt.savefig('latex/comentario4.1.3.png')\n\nplt.show()\n","repo_name":"vmr48-ua/fisicaua","sub_path":"tercero/cuantica1/Latex/graficas.py","file_name":"graficas.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24456466472","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"SpacePuma\",\n version=\"0.14\",\n author=\"Alex DelFranco\",\n author_email=\"adelfranco24@amherst.edu\",\n description=(\"Interact with your data.\"),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/alexdelfranco/SpacePuma\",\n packages=[\"spacepuma\"],\n license=\"MIT\",\n install_requires=[\n \"numpy\",\n \"scipy\",\n \"jupyterlab\",\n \"matplotlib\",\n \"seaborn\",\n \"ipympl\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"alexdelfranco/SpacePuma","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23477519156","text":"import sys\ninput = sys.stdin.readline\nN, K = map(int, input().split())\ncnt = 0\nA = []\nfor _ in range(N):\n X = int(input())\n A.append(X)\nA.sort(reverse = True)\nfor i in A:\n Y = K//i\n K -= Y*i\n cnt +=Y\n if K == 0:\n break\nprint(cnt)\n","repo_name":"sangmandu/SangSangPlus","sub_path":"Algorithm/SINGON/11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"15902441499","text":"#\n# UDP ping command\n# Model 2\n#\n\nimport time\n\nimport zmq\nfrom udplib import UDP\n\nPING_PORT_NUMBER = 9999\nPING_MSG_SIZE = 1\nPING_INTERVAL = 1 # Once per second\n\ndef main():\n\n udp = UDP(PING_PORT_NUMBER)\n\n poller = zmq.Poller()\n poller.register(udp.handle, zmq.POLLIN)\n\n # Send first ping right away\n ping_at = time.time()\n\n while True:\n timeout = ping_at - time.time()\n if timeout < 0:\n timeout = 0\n try:\n events = dict(poller.poll(1000* timeout))\n except KeyboardInterrupt:\n print(\"interrupted\")\n break\n\n # Someone answered our ping\n if udp.handle.fileno() in events:\n udp.recv(PING_MSG_SIZE)\n\n if time.time() >= ping_at:\n # Broadcast our beacon\n print (\"Pinging peers...\")\n udp.send(b'!')\n ping_at = time.time() + PING_INTERVAL\n\nif __name__ == '__main__':\n main()\n","repo_name":"booksbyus/zguide","sub_path":"examples/Python/udpping2.py","file_name":"udpping2.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":3371,"dataset":"github-code","pt":"32"} +{"seq_id":"27064553368","text":"import math\r\n\r\nimport torch\r\nfrom torch.nn import Module, BCELoss\r\nimport torch.nn.functional as F\r\n\r\nfrom 
navex.losses.quantizer import Quantizer\r\nfrom navex.losses.sampler import GuidedSampler\r\n\r\n\r\nclass DiscountedAPLoss(Module):\r\n def __init__(self, base=0.5, scale=0.1, nq=20, warmup_batches=6000, sampler_conf=None):\r\n super(DiscountedAPLoss, self).__init__()\r\n\r\n self.eps = 1e-5\r\n self.base = base\r\n self.scale = scale\r\n self.bias = self.scale * math.log(math.exp((1 - self.base) / self.scale) + 1)\r\n self.name = 'ap-loss'\r\n self.discount = False\r\n self.batch_count = torch.nn.Parameter(torch.Tensor([0]), requires_grad=False)\r\n self.warmup_batches = warmup_batches\r\n\r\n # TODO: update config\r\n c = sampler_conf\r\n self.sampler = GuidedSampler(pos_r=c['pos_d'], neg_min_r=c['neg_d'], neg_max_r=c['neg_d'] + c['ngh'],\r\n neg_step=c['subd'], cell_d=abs(c['subq']), border=c['border'],\r\n max_neg_b=c['max_neg_b'], random=float('inf'))\r\n\r\n self.calc_ap = DifferentiableAP(bins=nq, euclidean=False) # eucl perf worse, maybe due to lower mid ap res\r\n self.bce_loss = BCELoss(reduction='none')\r\n\r\n def batch_end_update(self, accs):\r\n self.batch_count += 1\r\n\r\n def forward(self, output1, output2, aflow):\r\n scores, labels, mask, qlt = self.sampler(output1, output2, aflow)\r\n\r\n n = qlt.numel()\r\n scores, labels, qlt = scores.view(n, -1), labels.view(n, -1), qlt.view(n, -1)\r\n ap = self.calc_ap(scores, labels).view(n, -1)\r\n\r\n a_loss, q_loss = self.losses(ap, qlt)\r\n\r\n a_loss = a_loss.view(mask.shape)[mask].mean()\r\n q_loss = q_loss.view(mask.shape)[mask].mean() if q_loss is not None else None\r\n return a_loss, q_loss\r\n\r\n def losses(self, ap, qlt):\r\n if 0:\r\n # reversed logistic function shaped derivative for loss (x = 1 - ap), arrived at by integration:\r\n # integrate(1 - 1/(1+exp(-(x - bias) / scale)), x) => -scale * log(1 + exp(-(x - bias) / scale))\r\n x = 1 - ap\r\n # a_loss = self.bias - self.scale * torch.log(1 + torch.exp(-(x - (1 - self.base)) / self.scale))\r\n a_loss = self.bias - F.softplus(-(x - (1 - self.base)), 1 / self.scale)\r\n elif self.discount:\r\n a_loss = (1 - ap) * (qlt.detach() if self.batch_count > self.warmup_batches else 1.0)\r\n elif 0:\r\n a_loss = -torch.log(ap + self.eps)\r\n else:\r\n a_loss = 1 - ap\r\n\r\n q_loss = self.bce_loss(qlt, ap.detach())\r\n\r\n return a_loss, q_loss\r\n\r\n\r\nclass WeightedAPLoss(DiscountedAPLoss):\r\n \"\"\"\r\n https://openaccess.thecvf.com/content_cvpr_2018/papers/Kendall_Multi-Task_Learning_Using_CVPR_2018_paper.pdf\r\n used as inspiration\r\n \"\"\"\r\n def losses(self, ap, qlt):\r\n # qlt ~ log(1/sigma**2), i.e. 
log precision\r\n # qlt_capped = qlt.clamp(-self.max_qlt, self.max_qlt)\r\n qlt = qlt + self.eps\r\n a_loss = - qlt * torch.log(ap + self.eps)\r\n q_loss = - 0.5 * torch.log(qlt)\r\n return a_loss, q_loss\r\n\r\n\r\nclass ThresholdedAPLoss(DiscountedAPLoss):\r\n def __init__(self, *args, warmup_batches=500, **kwargs):\r\n super(ThresholdedAPLoss, self).__init__(*args, **kwargs)\r\n self.batch_count = torch.nn.Parameter(torch.Tensor([0]), requires_grad=False)\r\n self.warmup_batches = warmup_batches\r\n\r\n def losses(self, ap, qlt):\r\n # q*(1-a) + (1-q)*(1-b) => q - q*a + 1 - b - q + q*b => 1 - (q*a +b -q*b) => 1 - (q*a + (1-q)*b)\r\n a_loss = 1 - (qlt * ap + (1 - qlt) * self.ap_base)\r\n return a_loss, None\r\n\r\n def batch_end_update(self, accs):\r\n self.batch_count += 1\r\n\r\n @property\r\n def ap_base(self):\r\n r = min(1, self.batch_count.item() / self.warmup_batches)\r\n return self.base * r # (1 - (1 - r)**4)\r\n\r\n\r\nclass LogThresholdedAPLoss(ThresholdedAPLoss):\r\n def losses(self, ap, qlt):\r\n eps = 1e-5\r\n # inspired by binary cross-entropy: -(y*log(p) + (1-y)*log(1-p)), however, if 1-ap=y, 1-y <> base_ap\r\n # - if qlt ~ 1, ap needs to be very close to 1\r\n # - never good idea for qlt ~ 0, ...\r\n if 0:\r\n a_loss = -(torch.log(1 - qlt + eps) * (1 - ap) + torch.log(qlt + eps) * (1 - self.ap_base))\r\n elif 1:\r\n a_loss = -(torch.log(ap + eps) * qlt + torch.log(self.ap_base + eps) * (1 - qlt))\r\n else:\r\n a_loss = torch.log(1 - qlt + eps) * torch.log(ap + eps) \\\r\n + torch.log(qlt + eps) * torch.log(self.ap_base + eps)\r\n\r\n # was first:\r\n # a_loss = -torch.log(qlt * ap + (1 - qlt) * self.ap_base + eps)\r\n return a_loss, None\r\n\r\n\r\nclass DifferentiableAP(Module):\r\n \"\"\"\r\n Based on \"Descriptors Optimized for Average Precision\" by He et al. 
2018\r\n \"\"\"\r\n def __init__(self, bins=25, euclidean=True):\r\n super(DifferentiableAP, self).__init__()\r\n self.quantizer = Quantizer(bins, min_v=0, max_v=1) # note that min_v=0 even though scores can go as low as -1\r\n self.euclidean = euclidean\r\n\r\n def forward(self, score, label):\r\n if self.euclidean: # use `1 - euclidean distance` instead of pure inner product\r\n score = 1 - torch.sqrt(2.0001 - 2 * score)\r\n\r\n # quantize matching scores, e\r\n binned_s = self.quantizer(score, insert_dim=1)\r\n\r\n # prepare for ap calculation\r\n samples_per_bin = binned_s.sum(dim=2)\r\n correct_per_bin = (binned_s * label[:, None, :].float()).sum(dim=2)\r\n cum_correct = correct_per_bin.cumsum(dim=1)\r\n cum_precision = cum_correct / (1e-16 + samples_per_bin.cumsum(dim=1))\r\n\r\n # average precision, per query\r\n ap = (correct_per_bin * cum_precision).sum(dim=1) / cum_correct[:, -1]\r\n\r\n return ap\r\n","repo_name":"oknuutti/navex","sub_path":"navex/losses/ap.py","file_name":"ap.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"17692647077","text":"# -*- coding: utf-8 -*-\n\"\"\"\nBKZ Tours.\n\"\"\"\nimport sys\nfrom .pump import pump\n#from .pump_cpu import pump\nfrom .workout import workout\nimport six\nimport psutil \nimport os\nfrom math import log,pi\nimport time\nfrom fpylll.util import gaussian_heuristic\n# from ...pro_pnjBKZ_simulator.codes.util\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\n\ndef get_current_slope(r, start_row=0, stop_row=-1):\n \"\"\"\n A Python re-implementation of ``MatGSO.get_current_slope``.\n\n >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL\n >>> FPLLL.set_random_seed(1337)\n >>> A = IntegerMatrix.random(100, \"qary\", bits=30, k=50)\n >>> _ = LLL.reduction(A)\n >>> M = GSO.Mat(A); _ = M.update_gso()\n >>> from fpylll.tools.quality import get_current_slope\n >>> M.get_current_slope(0, 100) # doctest: +ELLIPSIS\n -0.085500625...\n >>> get_current_slope(M.r(), 0, 100) # doctest: +ELLIPSIS\n -0.085500625...\n\n \"\"\"\n x = [log(r[i]) for i in range(start_row, stop_row)]\n n = stop_row - start_row\n i_mean = (n - 1) * 0.5 + start_row\n x_mean = sum(x)/n\n v1, v2 = 0.0, 0.0\n for i in range(stop_row - start_row):\n v1 += (i - i_mean) * (x[i] - x_mean)\n v2 += (i - i_mean) * (i - i_mean)\n return v1 / v2\n\n\ndef dim4free_wrapper(dim4free_fun, blocksize):\n \"\"\"\n Deals with correct dim4free choices for edge cases when non default\n function is chosen.\n\n :param dim4free_fun: the function for choosing the amount of dim4free\n :param blocksize: the BKZ blocksize\n\n \"\"\"\n if blocksize < 40:\n return 0\n dim4free = dim4free_fun(blocksize)\n return int(min((blocksize - 40)/2, dim4free))\n\n\ndef default_dim4free_fun(blocksize):\n \"\"\"\n Return expected number of dimensions for free, from exact-SVP experiments.\n\n :param blocksize: the BKZ blocksize\n\n \"\"\"\n return int(11.5 + 0.075*blocksize)\n\n# def theo_dim4free_fun(blocksize):\n# \"\"\"\n# Theoretical Dimension-for-free function in [Duc18]\n# \"\"\"\n\n# return int(blocksize*log(4/3.)/log(blocksize/2./pi)) \n\n\n\ndef naive_bkz_tour(g6k, tracer, blocksize, dim4free_fun=default_dim4free_fun,\n extra_dim4free=0, workout_params=None, pump_params=None):\n \"\"\"\n Run a naive BKZ-tour: call ``workout`` as an SVP oracle consecutively on\n each block.\n\n :param g6k: The g6k object to work with\n :param tracer: A tracer for g6k\n :param blocksize: dimension of the 
blocks\n :param dim4free_fun: number of dimension for free as a function of beta (function, or string e.g. `lambda x: 11.5+0.075*x`)\n :param extra_dim4free: increase the number of dims 4 free (blocksize is increased, but not sieve dimension)\n :param workout_params: parameters to pass to the workout\n :param pump_params: parameters to pass to the pump\n\n \"\"\"\n if workout_params is None:\n workout_params = {}\n\n if \"dim4free_min\" in workout_params:\n raise ValueError(\"In naive_bkz, you should choose dim4free via dim4free_fun.\")\n\n d = g6k.full_n\n\n if isinstance(dim4free_fun, basestring):\n dim4free_fun = eval(dim4free_fun)\n\n dim4free = dim4free_wrapper(dim4free_fun, blocksize) + extra_dim4free\n blocksize += extra_dim4free\n\n for kappa in range(d-3):\n beta = min(blocksize, d - kappa)\n lost_dim = blocksize - beta\n f = max(dim4free - lost_dim, 0)\n\n workout(g6k, tracer, kappa, beta, f, pump_params=pump_params, **workout_params)\n g6k.lll(0, d)\n\n\ndef pump_n_jump_bkz_tour(g6k, tracer, blocksize, jump=1,\n dim4free_fun=default_dim4free_fun, extra_dim4free=0,\n pump_params=None, goal_r0=0., verbose=False):\n \"\"\"\n Run a PumpNjump BKZ-tour: call Pump consecutively on every (jth) block.\n\n :param g6k: The g6k object to work with\n :param tracer: A tracer for g6k\n :param blocksize: dimension of the blocks\n :param jump: only call the pump every j blocks\n :param dim4free_fun: number of dimension for free as a function of beta (function, or string\n e.g. `lambda x: 11.5+0.075*x`)\n :param extra_dim4free: increase the number of dims 4 free (blocksize is increased, but not sieve\n dimension)\n :param pump_params: parameters to pass to the pump\n \"\"\"\n if pump_params is None:\n pump_params = {\"down_sieve\": False}\n\n if \"dim4free\" in pump_params:\n raise ValueError(\"In pump_n_jump_bkz, you should choose dim4free via dim4free_fun.\")\n\n d = g6k.full_n\n g6k.shrink_db(0)\n g6k.lll(0,d)\n g6k.update_gso(0,d)\n\n if isinstance(dim4free_fun, six.string_types):\n dim4free_fun = eval(dim4free_fun)\n \n\n # file_name = \"80-005-gpu-32-thread-gs-lengths-%d-%d.txt\" %(g6k.M.B.ncols,blocksize)\n\n\n dim4free = dim4free_wrapper(dim4free_fun, blocksize) + extra_dim4free\n blocksize += extra_dim4free\n\n indices = [(0, blocksize - dim4free + i, i) for i in range(0, dim4free, jump)]\n indices += [(i, blocksize, dim4free) for i in range(0, d - blocksize, jump)]\n indices += [(d - blocksize + i, blocksize - i, dim4free - i) for i in range(0, dim4free, jump)]\n\n pump_params[\"down_stop\"] = dim4free+3\n\n #pump_params[\"goal_r0\"] = goal_r0\n max_RAM_cost = 0\n # T_pumps = []\n # slopes = []\n # ghs = []\n\n # File = open(file_name,'w')\n # File.write(str([g6k.M.get_r(_,_) for _ in range(g6k.M.B.ncols)]))\n # File.write('\\n')\n for (kappa, beta, f) in indices:\n if verbose:\n print(\"\\r k:%d, b:%d, f:%d , RAM cost: %.4f GB\" % (kappa, beta, f, psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024), end=' ')\n sys.stdout.flush()\n # print()\n \n RAM_cost = psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024\n if max_RAM_cost < RAM_cost:\n max_RAM_cost = RAM_cost\n \n # T0 = time.time()\n pump(g6k, tracer, kappa, beta, f, **pump_params)\n # rr = [g6k.M.get_r(_,_) for _ in range(g6k.M.B.ncols)]\n # File.write(str(rr))\n # File.write('\\n')\n # T_pump = time.time()-T0\n # T_pumps.append(T_pump)\n \n # slopes.append(get_current_slope(rr,kappa+f,kappa+beta))\n # ghs.append(gaussian_heuristic(rr[kappa+f:kappa+beta]))\n # print(\"k:%d, b:%d, f:%d , RAM cost: 
%.4f GB, Pump Cost: %.4f s \" % (kappa, beta, f,psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024,T_pump))\n\n # raise RuntimeError(\"Debug\")\n g6k.lll(0, d)\n if g6k.M.get_r(0, 0) <= goal_r0:\n return max_RAM_cost\n\n if verbose:\n \n print(\"\\r k:%d, b:%d, f:%d , RAM cost: %.4f GB \" % (d-(blocksize-dim4free), blocksize-dim4free, 0,psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024), end=' ')\n # print(\"k:%d, b:%d, f:%d , RAM cost: %.4f GB, Pump Cost: %.4f s \" % (d-(blocksize-dim4free), blocksize-dim4free, 0,psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024,T_pump))\n sys.stdout.flush()\n\n RAM_cost = psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024\n if max_RAM_cost < RAM_cost:\n max_RAM_cost = RAM_cost\n \n\n # pump_params[\"down_stop\"] = blocksize - dim4free\n \n T_0 = time.time()\n pump(g6k, tracer, d-(blocksize-dim4free), blocksize-dim4free, 0, **pump_params)\n # rr = [g6k.M.get_r(_,_) for _ in range(g6k.M.B.ncols)]\n # File.write(str(rr))\n # File.write('\\n')\n # T_pump = time.time()-T0\n # T_pumps.append(T_pump)\n # slopes.append(get_current_slope(rr,d-(blocksize-dim4free),d))\n # # ghs.append(gaussian_heuristic(rr[d-(blocksize-dim4free):d]))\n if verbose:\n print('')\n sys.stdout.flush()\n\n # print(T_pumps)\n # File.close()\n return max_RAM_cost\n # return T_pumps,slopes,ghs #max_RAM_cost,\n","repo_name":"Summwer/pro-pnj-bkz","sub_path":"g6k/algorithms/bkz.py","file_name":"bkz.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21577819462","text":"#!/usr/bin/env python\n\nimport rospy #include cpp equivalent\nfrom geometry_msgs.msg import PoseStamped\nfrom particle_filter.msg import particles\n\n#import numpy as np\nimport matplotlib.pyplot as plt\n\n##################\n# Functions\n##################\n\n\n\n##################\n# Global variables\n##################\n\nglobal Robot1_pose\nglobal Robot1_particles\n\nRobot1_pose = PoseStamped()\nRobot1_particles = particles()\n\n##################\n# Callbacks\n##################\n\ndef Robot1_pose_cb(pose_cb_msg):\n global Robot1_pose\n Robot1_pose = pose_cb_msg\n\ndef Robot1_particles_cb(Robot1_particles_msg):\n global Robot1_particles\n Robot1_particles = Robot1_particles_msg\n\n##################\n# Main function\n##################\n\ndef main():\n\n # Name node\n rospy.init_node('plot_l3particles')\n rate = rospy.Rate(10)\n\n # Map parameters\n minLimX = 0.095 #m\n minLimY = 0.295 #m\n maxLimX = 4.1656 #m\n maxLimY = 2.4003 #m\n\n\n # Set up subscriptions\n rospy.Subscriber(\"/mocap_node/Robot_1/pose\", PoseStamped, Robot1_pose_cb)\n rospy.Subscriber(\"particles\", particles, Robot1_particles_cb)\n\n xPltRobot1 = []\n yPltRobot1 = []\n\n\n while not rospy.is_shutdown():\n\n Robot1_poseX = Robot1_pose.pose.position.x\n Robot1_poseY = Robot1_pose.pose.position.y\n\n if Robot1_poseX != 0.0 and Robot1_poseY != 0.0:\n xPltRobot1.append(Robot1_poseX)\n yPltRobot1.append(Robot1_poseY)\n\n\n # Plotting stuff\n plt.clf()\n\n # Plot Wifi stuff\n plt.plot(xPltRobot1, yPltRobot1,\"r\")\n plt.plot(Robot1_poseX, Robot1_poseY,'kD', markersize=6)\n plt.plot(Robot1_particles.X, Robot1_particles.Y, 'k.')\n #Transmitter_location = [0.5,0.5]\n plt.plot(1,1,'gx')\n\n\n plt.axis(\"tight\") # gets rid of white border\n plt.margins(x=0)\n plt.tight_layout()\n plt.xlim(minLimX, maxLimX)\n plt.ylim(minLimY, maxLimY)\n plt.grid(True)\n plt.pause(0.01)\n rate.sleep()\n\n # 
plt.xlabel(\"x [m]\")\n # plt.ylabel(\"y [m]\")\n # plt.grid()\n # plt.xlim(minLimX, maxLimX)\n # plt.ylim(minLimY, maxLimY)\n # plt.pause(0.01)\n # rate.sleep()\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Mgoodell97/catkinSrc","sub_path":"l3_particlefilter/scripts/plotParticleMocap.py","file_name":"plotParticleMocap.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73515322970","text":"import os\r\nimport re\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n# Define constants and paths\r\nBACKGROUND_IMG_PATH = 'certificate-background.png'\r\nFONT_PATH = \"arial.ttf\"\r\nOUTPUT_DIR_PATH = \"output\"\r\nTEXT_COLOR = (0, 116, 95)\r\nNAMES_FILE_PATH = \"names.txt\"\r\nFONT_SIZE = 130\r\n\r\n#==================================== read names from file ====================================#\r\ndef ReadNames(file_path):\r\n \"\"\"\r\n Read the names from the specified file and return them as a list.\r\n \"\"\"\r\n names = [] # names = [\"Ali Mohammadiyeh\", \"John Doe\", \"Jane Smith\", \"Iman Mirazimi\", \"Matin Mohammadi\"]\r\n\r\n with open(file_path, \"r\") as file:\r\n for line in file:\r\n line = line.strip()\r\n line = line.replace(\"\\t\", \" \")\r\n line = re.sub(r\"\\s+\", \" \", line)\r\n\r\n if line:\r\n names.append(line)\r\n return names\r\n\r\n#========================================= file name ===================================================#\r\ndef CertificateFileName(name):\r\n \"\"\"\r\n Make filename for the certificate image related on their names.\r\n \"\"\"\r\n return os.path.join(OUTPUT_DIR_PATH, f\"certificate-{name.replace(' ', '-').lower()}.png\")\r\n\r\n#==================================== Design and make the certificate ====================================#\r\ndef DesignCertificate(name):\r\n \"\"\"\r\n Design a certificate image for the each name.\r\n \"\"\"\r\n # Load the background image\r\n background_img = Image.open(BACKGROUND_IMG_PATH)\r\n\r\n # Get the drawing context\r\n draw = ImageDraw.Draw(background_img)\r\n\r\n # center the text based on its size and the image size\r\n font = ImageFont.truetype(FONT_PATH, FONT_SIZE)\r\n text_width, text_height = draw.textsize(name, font)\r\n x_position = (background_img.width - text_width) / 2\r\n y_position = 840\r\n \r\n # Draw the text on the image\r\n draw.text((x_position, y_position), name, font=font, fill=TEXT_COLOR)\r\n \r\n # Save the output image in the output directory with a unique filename \r\n output_filename = CertificateFileName(name)\r\n background_img.save(output_filename)\r\n print(f\"{output_filename} generated\")\r\n \r\n#======================================= create the certificates ==============================================#\r\ndef CreateCertificates(names):\r\n \"\"\"\r\n Create a certificate image for each name in the list.\r\n \"\"\"\r\n # Check output directory exists, if not create it\r\n if not os.path.exists(OUTPUT_DIR_PATH):\r\n os.makedirs(OUTPUT_DIR_PATH)\r\n \r\n for name in names:\r\n DesignCertificate(name)\r\n\r\n#======================================= init ==============================================#\r\nif __name__ == \"__main__\":\r\n # Read the names from the file\r\n names = ReadNames(NAMES_FILE_PATH)\r\n\r\n # Create a certificate image for each name\r\n 
CreateCertificates(names)\r\n","repo_name":"BaseMax/CertificateGenerator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"74402357211","text":"#\n# Proyecto Demanda Energética Tenerife\n\n\n# Jose Luis Quintero García (c) feb2023\n\n# Creación del dataset con histórico de demanda de energía cada 5m en Tenerife\n# Datos desde https:\\\\demanda.ree.es\n\n# Librerías\nfrom os import path, listdir\nfrom pathlib import PurePath\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom selenium import webdriver \nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.chrome.service import Service as ChromeService \nfrom selenium.common import exceptions\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\n\n\n# Inicialización de Variables\nurl_base = \"https://demanda.ree.es/visiona/canarias/tenerife5m/tablas/\"\nstart_date = datetime(2023, 2, 16)\nPLAZO = 15 # Finalización desde fecha de inicio en años\nend_date = start_date + relativedelta(days=PLAZO) # Uso de relativedelta en vez de timedelta, pues timedelta no admite years\nsw_corrige_csv = False # Ejecuta o no el código de corrección de CSV (eliminación de Líneas con NaN)\nsw_descarga = False # Ejecuta o no la parte de descarga de los datos\n\n# Diferencia entre fecha de inicio y fecha de fin en días \ndif_fechas = (end_date - start_date).days\n\n# Crea el driver de Selenium para cargar las páginas dinámicas de cada día\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--headless\") # No muestra el navegador que se abre\n\n# Recorre los días desde start_date a start_date + PLAZO\nif sw_descarga:\n for day in range(dif_fechas):\n date = start_date + timedelta(days=day)\n url = url_base + date.strftime('%Y-%m-%d') + \"/1\"\n print(url)\n\n # Define el nombre del archivo con el que vamos a grabar los datos\n file_name = PurePath(url.split('/')[-2]).name + \".csv\"\n file_path = path.join(\".\",\"web\", file_name)\n\n # Si el archivo no existe, lo descarga\n if not path.exists(file_path):\n \n # Hay que crear el driver cada vez que carguemos una página\n driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=options) \n \n print(f\"[INFO] Descargando archivo '{file_path}'\")\n driver.get(url) \n\n # Descargamos la tabla de datos del día elegido, metiendo una espera hasta que se haya cargado la tabla\n # Sin la espera, es fácil que aparezca un error al intentar obtener el objeto sin haberse terminado de generar la tabla\n tabla_dia = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, 'tabla_evolucion')))\n tabla_dia = tabla_dia.find_elements(By.XPATH, 'tbody/tr')\n lista_temporal = []\n\n # Ahora recogemos los datos de cada columna, registro por registro, hasta acabar la tabla\n # Solo descargo fecha (fecha y hora) y consumo real (el resto no me interesa).\n # El primer elemento son los títulos de la tabla, que debo obviar, así que recorro la lista por índices, \n # metiendo los datos que queremos en un diccionario\n for num_registro in range(1, len(tabla_dia)):\n registro = tabla_dia[num_registro]\n\n datos = registro.find_elements(By.TAG_NAME, \"td\")\n registro_dict = {\n \"fecha\": datos[0].text,\n \"consumo\": 
datos[1].text\n }\n # Añadimos cada registro a una tabla temporal, con las variables ya separadas en el diccionario\n lista_temporal.append(registro_dict)\n\n # Convertimos la lista en Dataframe para exportar a CSV (1 CSV por día)\n data = pd.DataFrame(lista_temporal)\n data.to_csv(file_path, index=False)\n driver.close\n else:\n print(f\"[INFO] Archivo '{file_path}' ya existe\")\n print(\"[INFO] Descargados archivos CSV\")\nelse:\n print(\"Saltando descargas\") \n\n\n# Borra los ejemplos con alguna columna vacía\n# Lo suyo es hacer esto en el momento de descargar los CSV al principio, pero no lo hice entonces \n# y no los voy a descargar de nuevo ;)\nruta = path.abspath(path.join(\".\",\"web\"))\nlist_csv = [path.join(ruta, file) for file in listdir(ruta) if '.csv' in file]\nif sw_corrige_csv:\n for file in list_csv:\n try:\n print(f\"Tratando archivo {file}\")\n df = pd.read_csv(file, delimiter=\",\")\n df = df.dropna()\n df.to_csv(file, index=False) \n except Exception as e:\n print(f\"Excepción en {file}: {e}\") \nelse:\n print(\"Saltando corrección\")\n\n# Combina todos los archivos de la lista\ncsv_unificado = pd.concat([pd.read_csv(f, delimiter=\",\") for f in list_csv])\ncsv_unificado.info()\n\n# Exporta a csv\ncsv_unificado.to_csv( \"demanda_energia_TF_5m.csv\", index=False, encoding='utf-8-sig')\nprint(\"Creado CSV unificado\")\n\n","repo_name":"joselquin/Demanda_Energia_TF","sub_path":"listado_paginas.py","file_name":"listado_paginas.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20032855800","text":"import numpy as np \nimport h5py \nimport os\nimport sys\n\n\n\nfilename = os.listdir(\"./image\")\n\nfileout = sys.argv[1]\n\nfo = open(fileout,\"w\")\nfilename = \"features\" + \"/\" + filename[0] + \".h5\"\n#print filename\nf = h5py.File(filename,\"r\")\n\na = f[u'feat'].value\n\n#np.savetxt(fileout,a,fmt='%0.4f')\n\nline = \"\"\n\nfor i in a:\n line = line + \"\\t\" + str(i)\n\nfo.write(line)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kiyotaka-tanaka/choi_extract_feature","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6335542401","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 30 13:38:44 2022\n\n@author: bokar\n\"\"\"\n\n# import cv2 as cv\nimport numpy as np\n\n# def reprojection_error_wrong():\n# mean_error = 0\n# for i in range(len(objpoints)):\n# imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\n# error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)\n# mean_error += error\n \n# return mean_error\n#%% Reprojection Error calculation\n\ndef reprojection_error(data, projection_mat):\n debug = False\n mean_error = 0\n \n pts_3d = np.ones((data.shape[0],4))\n pts_3d[:,:3] = data[:,:3]\n pts_2d = np.ones((data.shape[0],3))\n pts_2d[:,:2] = data[:,3:]\n pred_xy = []\n print('3D points:\\n', pts_3d) if debug else None\n print('2D points:\\n', pts_2d) if debug else None\n \n for i in range(data.shape[0]):\n print('Shape of 3d points:',pts_3d[i].shape) if debug else None\n print('Projection matrix shape:',projection_mat.shape) if debug else None\n img_pt = np.dot(projection_mat, pts_3d[i])\n print('Image points calculated:', img_pt) if debug else None\n img_pt = img_pt/img_pt[-1]\n print(img_pt) if debug else None\n pred_xy.append(img_pt)\n print('Normalized image 
point calculated:', img_pt) if debug else None\n print('Original Image Points:', pts_2d[i]) if debug else None\n error = np.sum(np.square(img_pt[:2] - pts_2d[i,:2]))\n print('Error sq is:', error) if debug else None\n mean_error+= error/data.shape[0]\n print() if debug else None\n mean_error = np.sqrt(mean_error)\n return mean_error, pred_xy\n\n#%% Run this to test the above code\n\ndata = np.random.rand(6,5)\nprojection_mat = np.random.rand(3,4)\n\nreprojection_error(data, projection_mat)\n#%%\n\n# mat1 = [[1,2,1],\n# [0,4,5]]\n# mat2 = [[3,4,5],\n# [4,5,6]]\n# mat2 = np.array(mat2)\n# print(np.dot(mat1, mat2[0].T))\n\n","repo_name":"abstruse020/ELL-793-ASS1","sub_path":"calc_reprojection_error.py","file_name":"calc_reprojection_error.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74323764250","text":"from typing import List\nimport logging\nimport numpy as np\nimport sys\nfrom unittest import mock\n\nfrom mlagents.envs.communicator_objects.agent_info_pb2 import AgentInfoProto\nfrom mlagents.envs.communicator_objects.observation_pb2 import (\n ObservationProto,\n NONE as COMPRESSION_TYPE_NONE,\n)\nfrom mlagents.envs.brain import BrainInfo, BrainParameters\n\ntest_brain = BrainParameters(\n brain_name=\"test_brain\",\n vector_observation_space_size=3,\n camera_resolutions=[],\n vector_action_space_size=[],\n vector_action_descriptions=[],\n vector_action_space_type=1,\n)\n\n\ndef _make_agent_info_proto(vector_obs: List[float]) -> AgentInfoProto:\n obs = ObservationProto(\n float_data=ObservationProto.FloatData(data=vector_obs),\n shape=[len(vector_obs)],\n compression_type=COMPRESSION_TYPE_NONE,\n )\n agent_info_proto = AgentInfoProto(observations=[obs])\n return agent_info_proto\n\n\n@mock.patch.object(np, \"nan_to_num\", wraps=np.nan_to_num)\n@mock.patch.object(logging.Logger, \"warning\")\ndef test_from_agent_proto_nan(mock_warning, mock_nan_to_num):\n agent_info_proto = _make_agent_info_proto([1.0, 2.0, float(\"nan\")])\n\n brain_info = BrainInfo.from_agent_proto(1, [agent_info_proto], test_brain)\n # nan gets set to 0.0\n expected = [1.0, 2.0, 0.0]\n assert (brain_info.vector_observations == expected).all()\n mock_nan_to_num.assert_called()\n mock_warning.assert_called()\n\n\n@mock.patch.object(np, \"nan_to_num\", wraps=np.nan_to_num)\n@mock.patch.object(logging.Logger, \"warning\")\ndef test_from_agent_proto_inf(mock_warning, mock_nan_to_num):\n agent_info_proto = _make_agent_info_proto([1.0, float(\"inf\"), 0.0])\n\n brain_info = BrainInfo.from_agent_proto(1, [agent_info_proto], test_brain)\n # inf should get set to float_max\n expected = [1.0, sys.float_info.max, 0.0]\n assert (brain_info.vector_observations == expected).all()\n mock_nan_to_num.assert_called()\n # We don't warn on inf, just NaN\n mock_warning.assert_not_called()\n\n\n@mock.patch.object(np, \"nan_to_num\", wraps=np.nan_to_num)\n@mock.patch.object(logging.Logger, \"warning\")\ndef test_from_agent_proto_fast_path(mock_warning, mock_nan_to_num):\n \"\"\"\n Check that all finite values skips the nan_to_num call\n \"\"\"\n agent_info_proto = _make_agent_info_proto([1.0, 2.0, 3.0])\n\n brain_info = BrainInfo.from_agent_proto(1, [agent_info_proto], test_brain)\n expected = [1.0, 2.0, 3.0]\n assert (brain_info.vector_observations == expected).all()\n mock_nan_to_num.assert_not_called()\n 
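# All-finite input skips the np.nan_to_num sanitizer entirely, so no warning fires.\n    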
mock_warning.assert_not_called()\n","repo_name":"DdATM/ML-FlappyBird","sub_path":"ml-agents-envs/mlagents/envs/tests/test_brain.py","file_name":"test_brain.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6384175002","text":"with open('../input/day9.txt', 'r') as f:\n numbers = [int(n.strip('\\n')) for n in f.readlines()]\n\n\ndef find_number(numlist:list, preamble: int) -> int:\n preamble_list = numlist[0: preamble]\n for i in range(preamble, len(numlist) + 1):\n found_sum = False\n for j in range(0, preamble):\n for k in range(j, preamble):\n if preamble_list[j] + preamble_list[k] == numlist[i]:\n found_sum = True\n break\n if found_sum:\n break\n\n if not found_sum:\n return numlist[i]\n break\n\n preamble_list.pop(0)\n preamble_list.append(numlist[i])\n\n\ndef find_range(number:int, lst: list):\n startpos = 0\n\n while True:\n sum = 0\n for i in range(startpos, len(lst)):\n sum += lst[i]\n if sum == number and i != startpos:\n minn = min(numbers[startpos:i+1])\n maxn = max(numbers[startpos:i+1])\n print(minn + maxn)\n break\n elif sum > number:\n startpos += 1\n break\n if sum == number:\n break\n\n\nif __name__ == '__main__':\n errnum = find_number(numbers, 25)\n print(errnum)\n find_range(errnum, numbers)\n","repo_name":"martijnhielema/aoc2020","sub_path":"Day 9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32594084998","text":"import torch\nimport torch.nn as nn\n\n\nclass LSTM(nn.Module):\n def __init__(self):\n super().__init__()\n\n mean, std = torch.tensor(0.0), torch.tensor(1.0)\n\n # LTM % to remember | fg = forget_gate | inp = input | hs = hidden_state\n self.fg_hs_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.fg_inp_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.fg_b = nn.Parameter(torch.tensor(0.0), requires_grad=True)\n\n # Potential LTM % to remember | ig = input_gate | lb = left_block\n self.ig_lb_hs_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.ig_lb_in_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.ig_lb_b = nn.Parameter(torch.tensor(0.0), requires_grad=True)\n\n # Potential LTM\n self.ig_rb_hs_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.ig_rb_in_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.ig_rb_b = nn.Parameter(torch.tensor(0.0), requires_grad=True)\n\n # Potential STM\n # here we have only tanh activation function which will be implemented in forward method\n # and output of this finction will be multiplied by Potential LTM % from left_block and result is the updated STM.\n\n # Potential STM % to remember | og = output_gate\n self.og_lb_hs_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.og_lb_in_w = nn.Parameter(\n torch.normal(mean=mean, std=std), requires_grad=True\n )\n self.og_lb_b = nn.Parameter(torch.tensor(0.0), requires_grad=True)\n\n def lstm_unit(self, input_value, ltm, stm):\n # cell state component to restrict ltm to remember persentage\n ltm_to_remember_persentage = torch.sigmoid(\n (stm * self.fg_hs_w + input_value * self.fg_inp_w) + self.fg_b\n )\n\n # ptl = potential | ltm = long term memory | stm = sort term memory\n ptl_ltm_to_remember_persentage = 
torch.sigmoid(\n (stm * self.ig_lb_hs_w + input_value * self.ig_lb_in_w) + self.ig_lb_b\n )\n potential_ltm_to_remember = torch.tanh(\n (stm * self.ig_rb_hs_w + input_value * self.ig_rb_in_w) + self.ig_rb_b\n )\n\n # cell state component to update scaled ltm\n updated_ltm = (ltm * ltm_to_remember_persentage) + (\n ptl_ltm_to_remember_persentage * potential_ltm_to_remember\n )\n\n stm_to_remember_persentage = torch.sigmoid(\n (stm * self.og_lb_hs_w + input_value * self.og_lb_in_w) + self.og_lb_b\n )\n updated_stm = torch.tanh(updated_ltm) * stm_to_remember_persentage\n\n return [updated_ltm, updated_stm]\n\n def forward(self, input):\n ltm, stm = 0, 0\n\n day1 = input[0]\n day2 = input[1]\n day3 = input[2]\n day4 = input[3]\n\n ltm, stm = self.lstm_unit(day1, ltm, stm)\n ltm, stm = self.lstm_unit(day2, ltm, stm)\n ltm, stm = self.lstm_unit(day3, ltm, stm)\n ltm, stm = self.lstm_unit(day4, ltm, stm)\n\n return stm\n\n\nif __name__ == \"__main__\":\n device = (\n \"cuda\"\n if torch.cuda.is_available()\n else \"mps\"\n if torch.backends.mps.is_available()\n else \"cpu\"\n )\n model = LSTM().to(device)\n print(model)\n","repo_name":"Glebmaksimov/scratch_implemented","sub_path":"deep_learning/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71402934810","text":"from importing_modules import *\n\n\ndef download_file(url, verbose=False):\n # # local_filename = os.path.join('.', f\"{url.split('/')[-1]}.xml\")\n # if url.find(r'C:\\Users\\vladi') != -1:\n local_filename = os.path.join(f'{url.split(\"/\")[-1]}.xml')\n # else:\n # local_filename = os.path.join(rf\"D:\\{url.split('/')[-1]}.xml\")\n\n # First version\n code = requests.get(url, stream=True)\n file_size = code.headers.get('content-length')\n\n chunk_size = 2 ** 14\n\n # Second version\n # user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n # headers = {'User-Agent': user_agent}\n # request = Request(url, headers=headers)\n # code = urlopen(request)\n # file_size = code.headers.get('content-length')\n # print(code.text)\n\n if file_size:\n file_size = int(file_size)\n num_bars = int(file_size / chunk_size)\n\n if verbose:\n print(f'File size: {file_size}')\n # print(dict(file_size=file_size))\n print(f'Number of bars: {num_bars}')\n # print(dict(num_bars=num_bars))\n\n with open(local_filename, 'wb') as open_file:\n for chunk in tqdm.tqdm(code.iter_content(chunk_size=chunk_size), total=num_bars, unit='MB',\n desc=local_filename, leave=True, ncols=100, ascii=True):\n open_file.write(chunk)\n\n return\n\n code_1 = requests.head(url, headers={'Accept-Encoding': None})\n file_size = code_1.headers.get('content-length')\n\n if file_size:\n file_size = int(file_size)\n num_bars = int(file_size / chunk_size)\n\n if verbose:\n print(f'File size: {file_size}')\n # print(dict(file_size=file_size))\n print(f'Number of bars: {num_bars}')\n # print(dict(num_bars=num_bars))\n\n with open(local_filename, 'wb') as open_file:\n for chunk in tqdm.tqdm(code.iter_content(chunk_size=chunk_size), total=num_bars, unit='MB',\n desc=local_filename, leave=True, ncols=100, ascii=True):\n open_file.write(chunk)\n\n return\n\n else:\n file_size = len(code_1.text.split('\\n'))\n num_bars = int(file_size / 2 ** 14)\n with open(local_filename, 'wb') as open_file:\n for chunk in tqdm.tqdm(code.iter_content(chunk_size=8_192), desc=local_filename, total=num_bars,\n leave=True, 
ascii=True, ncols=100):\n open_file.write(chunk)\n\n return\n\n\ndef compile_task():\n url_main = sys.argv[1]\n # url_main = input('Input url of the page: ')\n download_file(url_main, verbose=True)\n # file_name = f'{url_main.split(\"/\")[-1]}.xml'\n file_name = f'{url_main.split(\"/\")[-1]}.xml'\n # file_size = os.stat(file_name).st_size\n\n tree = et.parse(file_name)\n root = tree.getroot()\n\n offers_errors = [] # 0\n pictures_errors = [] # 1\n prices_errors = [] # 2\n barcode_errors = [] # 3\n params_errors = [] # 4\n old_prices_errors = [] # 5\n retail_prices_errors = [] # 6\n description_errors = [] # 7\n rec_errors = [] # 8\n vat_errors = [] # 9\n badge_errors = [] # 10\n video_errors = [] # 11\n file_errors = [] # 12\n\n errors_list = []\n warnings_list = []\n infos_list = []\n\n errors_file_dir = 'feed_errors.txt'\n warnings_file_dir = 'feed_warnings.txt'\n infos_file_dir = 'feed_infos.txt'\n\n with open(file_name, 'r', encoding='utf-8') as open_file:\n code = open_file.readlines()\n\n def find_string(code, offer_id):\n for index in range(len(code)):\n if code[index].find(f' 1:\n# if len(soup_code.find_all('category', {'id': found[1].text})) == 0:\n# warnings_list.append(f'No category found [{offers_found[index][\"id\"]}: {found[1].text}]\\n')\n#\n# with open(rf'C:\\Users\\Vlad04\\Downloads\\{new_warnings_filename}.txt',\n# 'w', encoding='utf-8') as open_file_1:\n# open_file_1.writelines(warnings_list)\n#\n# print()\n# print('Tags are done!')\n#\n# c = 0\n# for index in range(len(offers_found)):\n# if len(offers_found[index].find_all('picture')) < 1: # Чтобы найти price, просто меняешь picture :D\n# # print(offers_found[index].find('categoryid').text)\n# # print('Error', index)\n# # print(offers_found[index])\n# warnings_list.append(f'No image found in offer id: {offers_found[index][\"id\"]}\\n')\n# with open(f'{new_warnings_filename}.txt', 'w',\n# encoding='utf-8') as open_file_1:\n# open_file_1.writelines(warnings_list)\n#\n# for index in range(len(offers_found)):\n# if len(offers_found[index].find_all('price')) < 1: # Чтобы найти price, просто меняешь picture :D\n# # print(offers_found[index].find('categoryid').text)\n# # print('Error', index)\n# # print(offers_found[index])\n# warnings_list.append(f'No price found in offer id: {offers_found[index][\"id\"]}\\n')\n# with open(f'{new_warnings_filename}.txt', 'w',\n# encoding='utf-8') as open_file_1:\n# open_file_1.writelines(warnings_list)\n#\n# # with open(rf'C:\\Users\\Vlad04\\Downloads\\{new_warnings_filename}.txt', 'w', encoding='utf-8') as open_file_1:\n# # open_file_1.writelines(warnings_list)\n# os.startfile(f'{new_warnings_filename}.txt')\n\n\n# if __name__ == '__main__':\n# download_file('https://kiehls.ru/media/feed/Yandex%20Kiehls.xml', verbose=True)\n# xml_feeds()\n# xml_feeds_1()\n# No pictures found\n# 4945\n# python main.py https://www.forward-sport.ru/bitrix/catalog_export/forward_stocks_250650.php\n\n\n# def compile_task():\n# url_main = 'https://xn--80ae2aeeogi5fxc.xn--p1ai/feed/yml/imshop_catalog'\n# warnings_list = []\n#\n# # code = requests.get(url_main)\n# with open(r'C:\\Users\\vladi\\Downloads\\imshop_catalog.xml', 'r', encoding='utf-8') as open_file:\n# code = ''.join(open_file.readlines())\n# print('Downloaded')\n# soup_code = bs(code, 'lxml')\n# print('Parsed')\n#\n# offers_found = soup_code.find_all('offer')\n#\n# # count_0 = 0\n# # count_1 = 0\n# # for index in range(len(offers_found)):\n# # if offers_found[index]['available'] == 'true':\n# # count_0 += 1\n# # if 
int(offers_found[index].find('quantity').text) > 0:\n# #             count_1 += 1\n# #\n# # print(count_0)\n# # print(count_1)\n#\n# for index in range(len(offers_found)):\n#     print(index, end=' ')\n#     found = offers_found[index].find_all('categoryid')\n#     if len(soup_code.find_all('category', {'id': found[0].text})) == 0:\n#         warnings_list.append(f'No category found [{offers_found[index][\"id\"]}: {found[0].text}]\\n')\n#     if len(found) > 1:\n#         if len(soup_code.find_all('category', {'id': found[1].text})) == 0:\n#             warnings_list.append(f'No category found [{offers_found[index][\"id\"]}: {found[1].text}]\\n')\n#     with open(f'forward_errors_1.txt', 'w', encoding='utf-8') as open_file_1:\n#         open_file_1.writelines(warnings_list)\n#\n# print()\n# print('Tags are done!')\n#\n# for index in range(len(offers_found)):\n#     if len(offers_found[index].find_all('picture')) < 1:  # To find price, you simply change picture :D\n#         # print(offers_found[index].find('categoryid').text)\n#         # print('Error', index)\n#         # print(offers_found[index])\n#         warnings_list.append(f'No image found in offer id: {offers_found[index][\"id\"]}\\n')\n#         with open(f'forward_errors_1.txt', 'w',\n#                   encoding='utf-8') as open_file_1:\n#             open_file_1.writelines(warnings_list)\n#\n# for index in range(len(offers_found)):\n#     if len(offers_found[index].find_all('price')) < 1:  # To find price, you simply change picture :D\n#         # print(offers_found[index].find('categoryid').text)\n#         # print('Error', index)\n#         # print(offers_found[index])\n#         warnings_list.append(f'No price found in offer id: {offers_found[index][\"id\"]}\\n')\n#         with open(f'forward_errors_1.txt', 'w',\n#                   encoding='utf-8') as open_file_1:\n#             open_file_1.writelines(warnings_list)\n#\n# os.startfile('forward_errors_1.txt')\n\n\nif __name__ == '__main__':\n    compile_task()\n    print('First task completed!', end='\\n')\n    print('-=' * 20, end='\\n')","repo_name":"vladcelona/Imshop_main","sub_path":"project_files/sixth_task.py","file_name":"sixth_task.py","file_ext":"py","file_size_in_byte":15970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42005867855","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport io  # needed for io.SEEK_SET below\nimport pycdio\nimport iso9660\n\narg = sys.argv[1]\n\niso = iso9660.ISO9660.IFS(source=arg)\nfd = os.open(arg, os.O_RDONLY)\n\nif not iso.is_open() or fd is None:\n    raise Exception(\"Could not open %s as an ISO-9660 image.\" % arg)\n\n# On Tumbleweed, there is no '/suse' prefix\nfor path in ['/repodata']:\n    file_stats = iso.readdir(path)\n    if file_stats is None:\n        continue\n\n    for stat in file_stats:\n        filename = stat[0]\n        LSN = stat[1]\n        size = stat[2]\n        sec_size = stat[3]\n        is_dir = stat[4] == 2\n        print(\"[LSN %6d] %8d %s%s\" % (LSN, size, path,\n                                      iso9660.name_translate(filename)))\n\n        if (filename.endswith('-filelists.xml.gz')):\n            os.lseek(fd, LSN * pycdio.ISO_BLOCKSIZE, io.SEEK_SET)\n\n        #if (filename.endswith('.rpm')):\n        #    os.lseek(fd, LSN * pycdio.ISO_BLOCKSIZE, io.SEEK_SET)\n        #    h = self.ts.hdrFromFdno(fd)\n        #    _getdata(h)\n\nos.close(fd)\n","repo_name":"cwh42/microos-webman","sub_path":"isoextract.py","file_name":"isoextract.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30121845877","text":"\"\"\"\r\n\n\nCreate a function that takes a string and returns the reversed string. 
However\nthere's a few rules to follow in order to make the challenge interesting:\n\n * The UPPERCASE/lowercase positions must be kept in the same order as the original string (see example #1 and #2).\n * Spaces must be kept in the same order as the original string (see example #3).\n\n### Examples\n\n special_reverse_string(\"Edabit\") ➞ \"Tibade\"\n \n special_reverse_string(\"UPPER lower\") ➞ \"REWOL reppu\"\n \n special_reverse_string(\"1 23 456\") ➞ \"6 54 321\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef special_reverse_string(txt):\n l = [i.casefold() for i in txt if i != ' '][::-1]\n for i, val in enumerate(txt):\n if val == ' ':\n l.insert(i, val)\n return ''.join(\n k.swapcase() if not i.islower() and i.isalpha() \n else k\n for i, k in zip(txt, l))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"hZ4HzhboCJ5dDiNve_12.py","file_name":"hZ4HzhboCJ5dDiNve_12.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6480170247","text":"import requests\nimport os\nimport sys\nimport json\nimport uuid\nimport time\nfrom datetime import datetime, timedelta\nimport urllib.parse\nimport urllib\nimport pagerduty_json_csv\nimport system_config_reader\n\ndef pull_env_var(key):\n env_value = os.environ.get(key, None)\n if env_value is None:\n print(\"%s environment variable is not set\" % key)\n sys.exit(1)\n\n return env_value\n\ndef get_services_by_escalation_policy_id(escalation_policy_id, pagerduty_user_token):\n response = requests.get(\"https://api.pagerduty.com/escalation_policies/%s\" % escalation_policy_id,\n headers=get_headers(pagerduty_user_token))\n\n status_code = response.status_code\n if status_code != 200:\n raise Exception(\"Failed to invoke escalation_policies api: \" + status_code + \" content: \" + response.content)\n\n data = json.loads(response.content)\n\n services = data['escalation_policy']['services']\n service_ids = [service['id'] for service in services]\n print(\"Serviceids for escalation policy: %s are: %s\" % (escalation_policy_id, str(service_ids)))\n return service_ids\n\ndef getTimeRange():\n endTime = datetime.utcnow()\n # startTime = datetime.utcnow() - timedelta(hours=12)\n startTime = datetime.utcnow() - timedelta(hours=168)\n return (startTime, endTime)\n\ndef get_headers(pagerduty_user_token):\n return {\n 'Accept': 'application/vnd.pagerduty+json;version=2',\n 'Authorization': 'Token token=%s' % pagerduty_user_token\n }\n\ndef get_pagerduty_timeformat(targetDateTime):\n return targetDateTime.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n# def print_curl_version(response):\n# req = response.request\n# command = \"curl -X {method} -H {headers} -d '{data}' '{uri}'\"\n# method = req.method\n# uri = req.url\n# data = req.body\n# headers = ['\"{0}: {1}\"'.format(k, v) for k, v in req.headers.items()]\n# headers = \" -H \".join(headers)\n# print(command.format(method=method, headers=headers, data=data, uri=uri))\n\ndef request_pagerduty_events(startTime, endTime, service_ids_list, offset, limit, pagerduty_user_token):\n\n query_parameters = {\n \"limit\": limit,\n # \"service_ids[]\": service_id,\n # \"service_ids[]\": \",\".join(service_ids_list),\n \"time_zone\": \"UTC\",\n \"total\": True,\n \"since\": get_pagerduty_timeformat(startTime),\n \"until\": get_pagerduty_timeformat(endTime),\n \"offset\": offset\n }\n\n query_buf = []\n for service_id in service_ids_list:\n query_buf.append(\"service_ids%5B%5D=\" + service_id)\n\n payload_str = 
urllib.parse.urlencode(query_parameters)\n service_id_query = \"&\".join(query_buf)\n print(service_id_query)\n\n response = requests.get(\"https://api.pagerduty.com/incidents\",\n params=payload_str + \"&\" + service_id_query,\n headers=get_headers(pagerduty_user_token))\n\n if response.status_code == 200:\n # TODO: remove the sleep from here\n time.sleep(0.5)\n # print(response.content)\n return response.content\n\n raise Exception(\"Got non-200 response from pagerduty api\")\n\n\ndef write_to_file(file_name, content):\n with open(file_name, \"w\") as file_pointer:\n file_pointer.write(content)\n\n print(\"File write complete: \" + file_name)\n\ndef pull_pagerduty_events(startTime, endTime, service_ids_list, pagerduty_user_token, json_drop_directory):\n hasMoreEvents = True\n offset = 0\n limit = 50\n while hasMoreEvents:\n print(\"Requesting with offset: \" + str(offset))\n raw_response = request_pagerduty_events(startTime=startTime, endTime=endTime,\n service_ids_list=service_ids_list, offset=offset,\n limit=limit, pagerduty_user_token=pagerduty_user_token)\n\n response = json.loads(raw_response)\n offset = offset + limit\n hasMoreEvents = response['more']\n file_name = os.path.join(json_drop_directory, \"%s-%d.json\" % (\"events\", offset))\n write_to_file(file_name=file_name, content=json.dumps(response, indent=4))\n\nif __name__ == \"__main__\":\n pagerduty_user_token = system_config_reader.get_config(\"PD_USER_TOKEN\")\n escalation_policies = system_config_reader.get_config(\"PD_ESC_POLICIES_CSV\")\n\n service_ids_list = list()\n\n escalation_policies_list = escalation_policies.split(\",\")\n\n for escalation_policy_id in escalation_policies_list:\n service_ids = get_services_by_escalation_policy_id(escalation_policy_id, pagerduty_user_token)\n service_ids_list.extend(service_ids)\n\n # time format: 2021-03-01T14:04:35\n\n drop_directory = os.path.join(os.getcwd(), str(uuid.uuid4()))\n print(\"Drop directory is: \" + drop_directory)\n os.mkdir(drop_directory)\n\n (startTime, endTime) = getTimeRange()\n\n total_services = len(service_ids_list)\n current = 0\n\n pull_pagerduty_events(startTime=startTime,endTime=endTime,\n service_ids_list=service_ids_list,\n pagerduty_user_token=pagerduty_user_token,\n json_drop_directory=drop_directory)\n\n pagerduty_json_csv.process_directory(drop_directory)","repo_name":"mohammed-ibrahim/rcfiles","sub_path":"pagerduty_recent_events.py","file_name":"pagerduty_recent_events.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20103384907","text":"import socket\nimport json\nimport cv2 \nimport numpy as np\n# IP = '127.0.0.1'\n# IP = '118.67.132.167'\n# PORT = 30012\n# IP = '101.101.208.43'\n# PORT = 30010\n# ADDR = (IP,PORT)\ndef connect(ADDR):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:\n total_msg = \"\"\n client.connect(ADDR)\n \n client.send(json.dumps({'a':1}).encode())\n while True:\n msg = client.recv(1024)\n if not msg:\n break\n total_msg += msg.decode() \n print(total_msg)\n \n\ndef get_client_socket():\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n return client\n\ndef len_packet(data):\n len_data = f'len:{len(data):<12}'.encode()\n return len_data\n\ndef img_encode_func(img_data):\n retval, encode_data = cv2.imencode('.webp', img_data, [cv2.IMWRITE_WEBP_QUALITY,100])\n encode_data = encode_data.tobytes()\n\n # print(retval,type(encode_data))\n return encode_data\n\ndef com_packet(cmd):\n return 
f'com {cmd:<12}'.encode()\n\ndef connect_with_server(ADDR, data,data_encode_func=img_encode_func):\n    client = get_client_socket()\n    client.settimeout(10)\n    client.connect(ADDR)\n    if data_encode_func:\n        data = data_encode_func(data)\n\n    data = len_packet(data)+com_packet('image')+data\n\n    # data = data\n\n    client.sendall(data)\n    total_msg = ''\n\n    ## case: the server sends a length packet first\n    total_len = int(client.recv(16).decode()[4:])\n    client.recv(16)\n    print('total_len:',total_len)\n    while total_len>0:\n        msg = client.recv(1024)\n        total_len -= len(msg)\n        total_msg += msg.decode()\n\n    ## case: no length packet is sent\n    # while True:\n    #     print(client,'f')\n    #     msg = client.recv(1024)\n    #     total_msg += msg.decode()\n    #     print(len(msg))\n    \n    #     if not msg:\n    #         print('break')\n    #         break\n\n    print('receive_len:',len(total_msg))\n    print(total_msg)\n    client.close()\n    return total_msg\n\n\n\n\n\n# len_packet(img_encode_func(cv2.imread('./sample_img.jpg')))\n# connect(ADDR)\nif __name__ == '__main__':\n    connect_with_server(cv2.imread('./sample_img.jpg',cv2.IMREAD_COLOR),img_encode_func)\n\n","repo_name":"boostcampaitech5/level3_cv_finalproject-cv-01","sub_path":"serving/frontend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10216757963","text":"import random\n\n\"\"\"\n    Spencer Palmeter - Monster Project\n    additional changes:\n        - added monster naming and default values\n        - added basic battle functionality between two monsters. Round based\n            fighting, Monster A attacks Monster B and vice versa until one\n            Monster's health reaches 0\n        - added RNG based attacks for battling\n        - added variable argument printout for displaying \n            current health and armor for x monsters that displays \n            after each round\n\"\"\"\n\n\nclass Monster:\n    # Default Monster constructor\n    def __init__(self, name=\"Evil Spirit\", health=30):\n        self.health = health\n        self.name = name\n\n    def getHealth(self):\n        return self.health\n\n    def getName(self):\n        return self.name\n\n    def takeDamage(self, damage):\n        self.health = self.health - damage\n        if self.health < 0:\n            self.health = 0\n\n    # Health check\n    def isAlive(self):\n        if self.health > 0:\n            return True\n        return False\n\n    # RNG based damage output\n    def attack(self):\n        damage = random.random() * 10 // 1\n        return damage\n\n\nclass ArmoredMonster(Monster):\n    # Default ArmoredMonster constructor\n    def __init__(self, name=\"Spectral Knight\", health=30, armor=30):\n        super().__init__(name, health)\n        self.armor = armor\n\n    def getArmor(self):\n        return self.armor\n\n    # ArmoredMonster takes damage from its armor value first, once that is reduced to 0\n    # the remaining damage is subtracted from health\n    def takeDamage(self, damage):\n        if self.armor > 0:\n            if damage > self.armor:\n                damageLeft = damage - self.armor\n                self.armor = 0\n                self.health = self.health - damageLeft\n                if self.health < 0:\n                    self.health = 0\n            else:\n                self.armor = self.armor - damage\n        else:\n            self.health = self.health - damage\n            if self.health < 0:\n                self.health = 0\n\n\n# Simulates a battle between two monsters. 
Each monster attacks until one's health reaches 0\ndef battle(Monster1, Monster2):\n round = 1\n while Monster1.isAlive() and Monster2.isAlive():\n print(\"------- Round: \" + str(round) + \" -------\")\n printStats(Monster1, Monster2)\n # each monster attacks\n mon1damage = Monster1.attack()\n mon2damage = Monster2.attack()\n Monster2.takeDamage(mon1damage)\n Monster1.takeDamage(mon2damage)\n # attack message\n print(Monster2.getName() + \" attacks and deals \" + str(mon2damage) + \" damage!\")\n print(Monster1.getName() + \" attacks and deals \" + str(mon1damage) + \" damage!\\n\")\n round += 1\n # end of round health check\n if not Monster1.isAlive() and not Monster2.isAlive():\n print(\"It's a draw!\")\n elif not Monster1.isAlive():\n print(Monster2.getName() + \" Wins!\")\n else:\n print(Monster1.getName() + \" Wins!\")\n\n\n# prints health and armor(if applicable) stats for x amount of Monsters\ndef printStats(*monsters):\n for monster in monsters:\n statstring = \"\"\n statstring += monster.getName() + \" ---\"\n if isinstance(monster, ArmoredMonster):\n statstring += \" Armor: \" + str(monster.getArmor()) + \"\\n\" + \" \" * len(statstring)\n statstring += \" Health: \" + str(monster.getHealth()) + \"\\n--------------------------\"\n print(statstring)\n\n\ndef main():\n malphas = ArmoredMonster(\"Malphas\", 35, 40)\n azazel = ArmoredMonster(\"Azazel\", 40, 25)\n battle(malphas, azazel)\n\n\nmain()\n","repo_name":"SpencerP22/MonsterProject","sub_path":"prg3.py","file_name":"prg3.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11153859735","text":"class Coord:\n def __init__(self,x,y):\n self.x=x\n self.y=y\nvertex=[]\nfile=input('input name file: ')\nwith open(file) as f:\n for line in f:\n x = line.split()\n vertex.append(Coord(int(x[0]),int(x[1])))\n\nxp=int(input('coord point x='))\nyp=int(input('coord point y='))\n\nd=[]\n\n# D = (x2 - x1) * (yp - y1) - (xp - x1) * (y2 - y1)\n\nd.append((vertex[1].x - vertex[0].x) * (yp - vertex[0].y) - (xp - vertex[0].x) * (vertex[1].y - vertex[0].y))\nd.append((vertex[2].x - vertex[1].x) * (yp - vertex[1].y) - (xp - vertex[1].x) * (vertex[2].y - vertex[1].y))\nd.append((vertex[3].x - vertex[2].x) * (yp - vertex[2].y) - (xp - vertex[2].x) * (vertex[3].y - vertex[2].y))\nd.append((vertex[0].x - vertex[3].x) * (yp - vertex[3].y) - (xp - vertex[3].x) * (vertex[0].y - vertex[3].y))\n\ncount=0\nindex=0\nfor i in range(len(d)):\n if d[i]==0:\n count+=1\n elif d[i]<0:\n index+=1\n\nif index==4:\n print('point in quadrilateral')\n\nelif count==1:\n print('point in edge')\n\nelif count==2:\n print('point in vertex')\n\nelse :\n print('point outside ')\n\ninput()\n","repo_name":"TimurSharibov/Perfomance-Lab","sub_path":"quadrilateral/quadrilateral.py","file_name":"quadrilateral.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72263919131","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as sps\n\n_width = 0\n_ramp_coef = 0\n_t_max = 0\n_t_min = 0\n_amp = 0\n_wd = 0\n_drag_coef = 1\n_last_t = 0\n_last_v = 0\n\ndef pulse_func(t, args=None):\n global _last_t, _last_v\n if t > _t_max:\n v = 0\n elif t < _t_min:\n v = 0\n elif t > _t_max-_width:\n # ramping down\n v = _amp * 0.5 * (1 + np.cos(np.pi * (-2.0/_ramp_coef + 1 + 2.0*t/_ramp_coef/(_t_max))))\n elif t > _width:\n # middle flat top\n v = _amp * 1\n else:\n # 
ramping up\n v = _amp * 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*t/_ramp_coef/(_t_max))))\n\n # add drag term\n if((t - _last_t) == 0):\n two_point_derivative = 0\n else:\n two_point_derivative = (v - _last_v)/(t - _last_t)\n _last_t = t\n _last_v = v\n\n return (v+_drag_coef*two_point_derivative) * np.cos(t*_wd)\n\n\ndef setup(amplitude, drive_frequency, ramp_coef, drag_coef, tlist):\n global _ramp_coef, _amp, _wd, _t_max, _width, _drag_coef\n _ramp_coef = ramp_coef\n _drag_coef = drag_coef\n _amp = amplitude\n _wd = drive_frequency\n _t_max = tlist[-1]\n _t_min = tlist[0]\n _width = (_ramp_coef*(_t_max)/2.0)\n\ndef get_pulse(tlist):\n # pulse = []\n # for t in tlist:\n # pulse.append(pulse_func(t))\n # return pulse\n return np.vectorize(pulse_func)(tlist)\n","repo_name":"david-gorski/subharmonic_superconducting_qubits","sub_path":"pulse/tukey_with_numerical_drag.py","file_name":"tukey_with_numerical_drag.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42519735948","text":"\"\"\"\nContinuously monitor by polling at an interval.\n\"\"\"\nfrom datetime import timedelta\nimport time\nfrom .controller_state import ControllerState\nfrom .pi_fan_controller import PiFanController\n\n\nclass Monitor:\n \"\"\"\n Continuously monitor by polling at an interval.\n \"\"\"\n controller: PiFanController\n\n interval: timedelta\n\n count: int\n\n def __init__(self, controller: PiFanController, interval: timedelta,\n count: int):\n self.controller = controller\n self.interval = interval\n self.count = count\n\n def launch(self, state: ControllerState) -> None:\n \"\"\"\n Continuously poll fan and CPU sensors and adjust fan speed according\n to easing algorithm.\n \"\"\"\n counter: int = 0\n\n while True:\n self.controller.poll(state)\n\n # Stop after a defined poll limit.\n counter += 1\n if self.count > 0 and counter >= self.count:\n break\n\n # Wait for next polling interval.\n poll_start_time = self.controller.poll_start_time\n poll_end_time = self.controller.poll_end_time\n next_poll_time = poll_start_time + self.interval\n if next_poll_time > poll_end_time:\n delay = (next_poll_time - poll_end_time).total_seconds()\n time.sleep(delay)\n","repo_name":"spoulson/pifan","sub_path":"src/mylib/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28758893000","text":"import os\nimport numpy as np\nfrom math import sin, cos\nfrom multiprocessing import Pool\n\n\ndef extract_3d_label(label_path):\n label_dict = {}\n with open(label_path, 'r') as f:\n for line in f.readlines():\n if len(line) > 3:\n key, value = line.split(' ', 1)\n if key in label_dict.keys():\n label_dict[key].append([float(x) for x in value.split()])\n else:\n label_dict[key] = [[float(x) for x in value.split()]]\n for key in label_dict.keys():\n label_dict[key] = np.array(label_dict[key])\n return label_dict\n\n\ndef add_points(velo, corners): # visualize xyz in 3d label\n labels = np.zeros((velo.shape[0],1), dtype=np.int64)\n velo = np.concatenate((velo, labels), axis=-1)\n labels = np.ones((corners.shape[0], 2), dtype=np.int64)\n print(labels.shape)\n print(corners.shape)\n corners = np.concatenate((corners, labels), axis=-1)\n velo = np.concatenate((velo, corners), axis=0)\n\n return velo\n\n\ndef corner_3d(label_dict):\n corner_dict = {}\n for key in label_dict.keys():\n if key != 'DontCare':\n labels 
= label_dict[key]\n\n for i in range(labels.shape[0]):\n one_label = labels[i]\n if key == 'Car':\n w = one_label[7]\n h = one_label[8]\n l = one_label[9]\n w += 0.4\n x = one_label[10]\n y = one_label[11]\n z = one_label[12]\n ry = one_label[13]\n else:\n w = one_label[7]\n h = one_label[8]\n l = one_label[9]\n x = one_label[10]\n y = one_label[11]\n z = one_label[12]\n ry = one_label[13]\n\n R = np.array([[+cos(ry), 0, +sin(ry)],\n [0, 1, 0],\n [-sin(ry), 0, +cos(ry)]])\n\n # 3D bounding box corners\n\n x_corners = [0, l, l, l, l, 0, 0, 0] # -l/2\n y_corners = [0, 0, h, h, 0, 0, h, h] # -h\n z_corners = [0, 0, 0, w, w, w, w, 0] # --w/2\n\n x_corners += -l / 2\n y_corners += -h\n z_corners += -w / 2\n\n # bounding box in object co-ordinate\n corners_3D = np.array([x_corners, y_corners, z_corners])\n # print ( 'corners_3d', corners_3D.shape, corners_3D)\n\n # rotate\n corners_3D = R.dot(corners_3D)\n corners_3D = np.array([corners_3D[2,:],-corners_3D[0,:],-corners_3D[1,:]])\n # print ( 'corners_3d', corners_3D.shape, corners_3D)\n\n # translate\n corners_3D += np.array([z, -x, -y]).reshape((3, 1))\n\n corners_3d = np.transpose(corners_3D)\n\n if key in corner_dict.keys():\n corner_dict[key].append(corners_3d)\n else:\n corner_dict[key] = [corners_3d]\n\n for key in corner_dict.keys():\n corner_dict[key] = np.array(corner_dict[key])\n\n return corner_dict\n\ndef label_one_box(velo, box, label_index):\n x_sort_list = np.sort(box[:,0])\n xt1 = x_sort_list[0]\n xt2 = x_sort_list[2]\n xt3 = x_sort_list[4]\n xt4 = x_sort_list[6]\n # print(int(np.argwhere(box[:,0] == xt1)[0]))\n yt1 = box[int(np.argwhere(box[:,0] == xt1)[0]), 1]\n yt2 = box[int(np.argwhere(box[:, 0] == xt2)[0]), 1]\n yt3 = box[int(np.argwhere(box[:, 0] == xt3)[0]), 1]\n yt4 = box[int(np.argwhere(box[:, 0] == xt4)[0]), 1]\n if yt1 > yt2:\n x1 = xt2\n y1 = yt2\n x2 = xt1\n y2 = yt1\n else:\n x1 = xt1\n y1 = yt1\n x2 = xt2\n y2 = yt2\n\n if yt3 > yt4:\n x3 = xt3\n y3 = yt3\n x4 = xt4\n y4 = yt4\n else:\n x3 = xt4\n y3 = yt4\n x4 = xt3\n y4 = yt3\n\n v12 = np.array([x2 - x1, y2 - y1], dtype=np.float)\n v23 = np.array([x3 - x2, y3 - y2], dtype=np.float)\n v34 = np.array([x4 - x3, y4 - y3], dtype=np.float)\n v41 = np.array([x1 - x4, y1 - y4], dtype=np.float)\n v1p = np.array(velo[:, 0:2] - [x1, y1], dtype=np.float)\n v2p = np.array(velo[:, 0:2] - [x2, y2], dtype=np.float)\n v3p = np.array(velo[:, 0:2] - [x3, y3], dtype=np.float)\n v4p = np.array(velo[:, 0:2] - [x4, y4], dtype=np.float)\n\n cond1 = np.sum(v12 * v1p, axis=-1) >= 0\n cond2 = np.sum(v23 * v2p, axis=-1) >= 0\n cond3 = np.sum(v34 * v3p, axis=-1) >= 0\n cond4 = np.sum(v41 * v4p, axis=-1) >= 0\n cond = cond1 & cond2\n cond = cond & cond3\n cond = cond & cond4\n\n z_min = np.min(box[:, 2])\n z_cond = velo[:, 2] >= z_min\n cond = cond & z_cond\n velo[cond, -1] = int(label_index)\n\n return velo\n\ndef add_labels(velo, corner_dict):\n class2label_dict = {'Car': 0, 'Van': 1, 'Truck': 2,\n 'Pedestrian': 3, 'Person_sitting': 4, 'Cyclist': 5,\n 'Tram': 6, 'Misc': 7, 'DontCare': 8}\n # initialize labels\n labels = 8*np.ones((velo.shape[0], 1), dtype=np.int64)\n velo = np.concatenate((velo, labels), axis=-1)\n\n # add labels\n for key in corner_dict.keys():\n for i_box in range(corner_dict[key].shape[0]):\n box = corner_dict[key][i_box]\n label_index = class2label_dict[key]\n velo = label_one_box(velo, box, label_index)\n\n return velo\n\n\ndef add_points(velo, corners): # visualize xyz in 3d label\n corners = corners.reshape(corners.shape[0]*corners.shape[1], corners.shape[2])\n 
labels = 9*np.ones((corners.shape[0], 2), dtype=np.int64)\n corners = np.concatenate((corners, labels), axis=-1)\n velo = np.concatenate((velo, corners), axis=0)\n return velo\n\ndef label_one_file(lidar_path, label_path, out_path):\n velo = np.fromfile(lidar_path, dtype=np.float32).reshape(-1, 4)\n label_dict = extract_3d_label(label_path)\n corner_dict = corner_3d(label_dict)\n velo = add_labels(velo, corner_dict)\n np.savetxt(out_path, velo)\n\n\n# *****************************main**************************\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nlidar_folder = BASE_DIR + '/data/kitti/data_object_velodyne/training/velodyne'\nlabel_folder = BASE_DIR + '/data/kitti/label_2'\nout_folder_path = BASE_DIR + '/data/labeled_point_cloud' # output path of the labeled point cloud\n# out_folder_path = '/media/jp/disk/temp/kitti/labeled_point_cloud'\nif not os.path.exists(out_folder_path):\n os.makedirs(out_folder_path)\nlidar_file_list = os.listdir(lidar_folder)\np = Pool(12)\n\nfor lidar_name in lidar_file_list:\n print(lidar_name)\n out_file_path = out_folder_path + '/' + lidar_name.split('.')[0] + '.txt'\n if not os.path.exists(out_file_path):\n lidar_file_path = lidar_folder + '/' + lidar_name\n label_path = label_folder + '/' + lidar_name.split('.')[0] + '.txt'\n p.apply_async(label_one_file, args=(lidar_file_path, label_path, out_file_path,))\nprint('Waiting for all subprocessed done...')\np.close()\np.join()\nprint('All subprecessed done.')\n","repo_name":"Prominem/Power-Line-Corridor-LiDAR-Point-Cloud-Segmentation","sub_path":"calculate_3d_bbox_corners.py","file_name":"calculate_3d_bbox_corners.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"38508085065","text":"import unittest\n\n\nfrom helixbilling.test.logic.actor_logic_test import ActorLogicTestCase\nfrom helixcore.error import RequestProcessingError\n\n\nclass ReceiptTestCase(ActorLogicTestCase):\n def test_balance_not_found(self):\n sess = self.login_actor()\n\n # checking balance not found\n req = {'session_id': sess.session_id, 'balance_id': 9999,\n 'amount': '11.12'}\n self.assertRaises(RequestProcessingError, self.add_receipt, **req)\n\n def test_add_receipt(self):\n sess = self.login_actor()\n subj_user_id = 4242\n\n # creating balance\n self.set_used_currencies(sess, ['RUB'])\n req = {'session_id': sess.session_id, 'user_id': subj_user_id,\n 'currency_code': 'RUB'}\n resp = self.add_balance(**req)\n self.check_response_ok(resp)\n balance_id = resp['id']\n\n # adding receipt\n req = {'session_id': sess.session_id, 'balance_id': balance_id,\n 'amount': '11.12', 'info': {'payment_system': 'YandexMoney'}}\n resp = self.add_receipt(**req)\n self.check_response_ok(resp)\n\n balance_info = self.get_balance(sess, balance_id)\n self.assertEquals(balance_id, balance_info['id'])\n self.assertEquals(subj_user_id, balance_info['user_id'])\n self.assertEquals('11.12', balance_info['real_amount'])\n self.assertEquals('0.00', balance_info['virtual_amount'])\n self.assertEquals('0.00', balance_info['locked_amount'])\n\n def test_disabled_balance_failure(self):\n sess = self.login_actor()\n subj_user_id = 4242\n\n # creating balance\n self.set_used_currencies(sess, ['RUB'])\n req = {'session_id': sess.session_id, 'user_id': subj_user_id,\n 'currency_code': 'RUB', 'is_active': False}\n resp = self.add_balance(**req)\n self.check_response_ok(resp)\n balance_id = resp['id']\n\n req = {'session_id': sess.session_id, 
'balance_id': balance_id,\n 'amount': '11.12'}\n self.assertRaises(RequestProcessingError, self.add_receipt, **req)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"sand8080/helixbilling","sub_path":"src/helixbilling/test/logic/test_receipt.py","file_name":"test_receipt.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11032164042","text":"from datetime import datetime\n\nfrom app.common import exceptions\nfrom app.models.schedule import ScheduleModel\nfrom app.schema import schedule as schedule_schema\n\nfrom .base import BaseController\n\n\nclass ScheduleController(BaseController):\n @classmethod\n def create_schedule(cls, schedule: schedule_schema.ScheduleIn):\n allowed_timeframes = cls.check_valid_timeframes(schedule.timeframes)\n\n if not allowed_timeframes: # this shouldn't happen in theory\n err = \"No valid schedule was provided\"\n raise exceptions.BadTimeFrameProvided(err)\n\n return ScheduleModel.create_schedule(\n name=schedule.name,\n description=schedule.description,\n duration=schedule.duration,\n timeframes=allowed_timeframes,\n )\n\n @staticmethod\n def list_schedules(offset, limit):\n return ScheduleModel.list_schedules(offset=offset, limit=limit)\n\n @staticmethod\n def retrieve_schedule(schedule_id):\n return ScheduleModel.retrieve_schedule(schedule_id)\n\n @staticmethod\n def delete_schedule(schedule_id):\n return ScheduleModel.hard_delete_schedule(schedule_id)\n\n @classmethod\n def update_schedule(cls, schedule_id, schedule: schedule_schema.ScheduleUpdateIn):\n allowed_timeframes = cls.check_valid_timeframes(schedule.timeframes)\n return ScheduleModel.update_schedule(\n schedule_id,\n name=schedule.name,\n description=schedule.description,\n duration=schedule.duration,\n timeframes=allowed_timeframes,\n )\n\n @staticmethod\n def check_valid_timeframes(timeframes: schedule_schema.ScheduleDay):\n allowed_timeframes = []\n for timeframe in timeframes:\n if not (timeframe.end_ts > timeframe.start_ts):\n err = f\"start_ts should be lower than end_ts: {timeframe}\"\n raise exceptions.BadTimeFrameProvided(err)\n\n start_dt = datetime.combine(timeframe.date, timeframe.start_ts)\n if start_dt > datetime.utcnow():\n allowed_timeframes.append(timeframe)\n else:\n err = f\"Only future schedules are accepted: {timeframe}\"\n raise exceptions.BadTimeFrameProvided(err)\n\n return allowed_timeframes\n","repo_name":"meysam81/MyCalendly","sub_path":"app/controllers/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16250282531","text":"import os\nimport struct\nimport bpy\nfrom bpy import ops\nfrom bpy.props import (BoolProperty,\n FloatProperty,\n StringProperty,\n EnumProperty,\n )\n\nfrom .tools import *\n\nclass NGnode(object):\n def __init__(self, i, nodeName, x, y, z):\n self.i = i\n self.nodeName = nodeName\n self.x = x\n self.y = y\n self.z = z\n \n\n\nclass ExportJbeam(bpy.types.Operator):\n \"\"\"Export Nodes and Beams to .jbeam file for BeamNG\"\"\"\n bl_idname = 'export_mesh.jbeam'\n bl_description = 'Export for use in BeamNG (.jbeam)'\n #bl_space_type = \"PROPERTIES\"\n #bl_region_type = \"WINDOW\"\n bl_label = 'Export Jbeam' + ' v.' + PrintVer()\n \n # From ExportHelper. 
Filter filenames.\n filename_ext = \".jbeam\"\n filter_glob = StringProperty(default=\"*.jbeam\", options={'HIDDEN'})\n \n filepath = bpy.props.StringProperty(\n name=\"File Path\", \n description=\"File path used for exporting the jbeam file\", \n maxlen= 1024, default= \"\")\n \n listbn = bpy.props.BoolProperty(\n name = \"Export has a list of beam and nodes\",\n description=\"\",\n default = False)\n \n exp_ef = bpy.props.BoolProperty(\n name = \"Export edge from face\",\n description=\"\",\n default = True)\n \n exp_tricol = bpy.props.BoolProperty(\n name = \"Export Faces to colision triangle\",\n description=\"\",\n default = True)\n \n exp_diag = bpy.props.BoolProperty(\n name = \"Edge on quad face\",\n description=\"\",\n default = True)\n \n export_scene = bpy.props.BoolProperty(\n name=\"scene_export\",\n description=\"exporter_prop_scene_tip\",\n default=False,\n options={'HIDDEN'})\n \n def invoke(self, context, event):\n #context.window_manager.fileselect_add(self)\n #return {'RUNNING_MODAL'}\n ops.wm.call_menu(name=\"IO_mesh_jbeam_ExporterChoice\")\n return {'PASS_THROUGH'}\n \n def execute(self, context, ):\n import sys\n file = None\n \n scene = context.scene\n \n # Save currently active object\n active = context.active_object\n \n exportObjects = []\n if(self.export_scene):\n for obj in bpy.context.selectable_objects:\n if (obj.type == 'MESH'):\n if '.jbeam' in obj.name:\n exportObjects.append(obj)\n \n else:\n for o in context.selected_objects:\n if (o.type == 'MESH'):\n #o.select = False\n exportObjects.append(o)\n if len(exportObjects) == 0:\n '''self.report({'WARNING'}, 'WARNING : Must be select objects to export')\n print('CANCELLLED: Must be select objects to export')'''\n return {'CANCELLED'}\n \n tempMesh = None\n try:\n for objsel in exportObjects:\n # Make the active object be the selected one\n scene.objects.active = objsel\n print(objsel.data.jbeam)\n # Want to be in Object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n \n #-------------------------------------\n # Create a copy of the selected object\n #-------------------------------------\n \n tempName = objsel.name + '.JBEAM_TEMP'\n \n # Create new mesh\n tempMesh = bpy.data.meshes.new(tempName)\n \n # Create new object associated with the mesh\n ob_new = bpy.data.objects.new(tempName, tempMesh)\n \n # Copy data block from the old object into the new object\n ob_new.data = objsel.data.copy()\n ob_new.scale = objsel.scale\n ob_new.location = objsel.location\n ob_new.rotation_axis_angle = objsel.rotation_axis_angle\n ob_new.rotation_euler = objsel.rotation_euler\n ob_new.rotation_mode = objsel.rotation_mode\n ob_new.rotation_quaternion = objsel.rotation_quaternion\n \n # Link new object to the given scene, select it, and\n # make it active\n scene.objects.link(ob_new)\n ob_new.select = True\n scene.objects.active = ob_new\n \n # Apply transforms\n bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)\n \n # TODO: Can we copy modifiers from original object and then do this?\n #mesh = ob_new.to_mesh(scene, True, 'PREVIEW')\n \n # Sort vertices\n mesh = ob_new.data\n nodes = []\n for v in mesh.vertices:\n node = NGnode(v.index,\n objsel.data.jbeam.nodename,\n round(v.co[0] + objsel.delta_location[0], 3),\n round(v.co[1] + objsel.delta_location[1], 3),\n round(v.co[2] + objsel.delta_location[2], 3))\n nodes.append(node)\n \n sortedz = sorted(nodes, key=lambda NGnode: NGnode.z)\n #sortedz is nodes sorted by Z axis\n sortedx = sorted(sortedz, key=lambda NGnode: NGnode.x, reverse=True)\n #sortedx 
is sortedz sorted by -X axis \n sortedNodes = sorted(sortedx, key=lambda NGnode: NGnode.y)\n #sortedNodes is sortedx sorted by Yaxis\n #sortedNodes is nodes sorted by Z axis then -X axis then Y axis?\n \n \n \n # Export\n anewline = '\\n'\n #filename = objsel.name + '.jbeam'\n if '.jbeam' in objsel.name:\n filename = objsel.name\n else:\n filename = objsel.name + '.jbeam'\n print(\"File = \" + str(self.filepath) + filename )\n \n if self.filepath == \"\":\n if context.scene.jbeam.export_path ==\"\":\n self.report({'WARNING'}, 'WARNING : No export folder set. Go to Scene > JBeam Exporter. Export cancelled!')\n return {'CANCELLED'}\n if context.scene.jbeam.export_path.startswith(\"//\") and not context.blend_data.filepath:\n self.report({'ERROR'},\"Save the .blend file first\")\n return {'CANCELLED'}\n self.filepath = bpy.path.abspath(context.scene.jbeam.export_path)\n \n if not context.scene.jbeam.export_path.startswith(\"//\"):\n if not(os.path.isdir(self.filepath)):\n bpy.data.meshes.remove(tempMesh)\n self.report({'WARNING'}, 'WARNING : Must be exported in a directory. Export cancelled!')\n print('CANCELLLED: Must be exported in a directory. drectory = \"'+self.filepath+'\"')\n return {'CANCELLED'}\n file = open(self.filepath+filename, 'wt')\n #file = open(self.filepath + '/' + filename, 'wt')\n \n if not(context.scene.jbeam.listbn):\n if(bpy.context.user_preferences.system.author == \"\"):\n author = 'Blender Jbeam' + ' v' + PrintVer()\n else:\n author = bpy.context.user_preferences.system.author + \",\" + 'Blender Jbeam' + ' v' + PrintVer()\n if '.jbeam' in objsel.name:\n name = objsel.name[0:len(objsel.name)-6]\n else:\n name = objsel.name\n file.write('{\\n\\t\"%s\":{\\n\\t\\t\"information\":{\\n\\t\\t\\t\"name\":\"%s\",\\n\\t\\t\\t\"authors\":\"%s\"},\\n\\t\\t\"slotType\":\"%s\",\\n' % (name,objsel.data.jbeam.name,author,objsel.data.jbeam.slot))\n mesh.update(True, True) #http://www.blender.org/documentation/blender_python_api_2_69_7/bpy.types.Mesh.html?highlight=update#bpy.types.Mesh.update\n \n i = 0\n file.write('//--Nodes--')\n file.write(anewline)\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\"nodes\":[\\n\\t\\t\\t[\"id\", \"posX\", \"posY\", \"posZ\"],\\n')\n for v in sortedNodes:\n if context.scene.jbeam.listbn:\n file.write('[\\\"')\n else:\n file.write('\\t\\t\\t[\\\"')\n if v.x > 0:\n v.nodeName = v.nodeName + 'l' + ('%s' % (i))\n elif v.x < 0:\n v.nodeName = v.nodeName + 'r' + ('%s' % (i))\n else:\n v.nodeName = v.nodeName + ('%s' % (i))\n file.write(v.nodeName)\n file.write('\\\",')\n file.write('%s' % (round(v.x + objsel.delta_location[0], 3))) \n file.write(',') \n file.write('%s' % (round(v.y + objsel.delta_location[1], 3)))\n file.write(',')\n file.write('%s' % (round(v.z + objsel.delta_location[2], 3)))\n file.write('],')\n file.write(anewline)\n i += 1\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\\t],\\n')\n \n \n file.write('//--Beams--')\n file.write(anewline)\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\"beams\":[\\n\\t\\t\\t[\"id1:\", \"id2:\"],\\n')\n for e in mesh.edges:\n if context.scene.jbeam.listbn:\n file.write('[\\\"')\n else:\n file.write('\\t\\t\\t[\\\"')\n nodeIndex1 = ([n.i for n in sortedNodes].index(e.vertices[0]))\n file.write('%s\\\"' % (sortedNodes[nodeIndex1].nodeName)) \n file.write(',') \n nodeIndex2 = ([n.i for n in sortedNodes].index(e.vertices[1]))\n file.write('\\\"%s\\\"' % (sortedNodes[nodeIndex2].nodeName))\n file.write('],')\n file.write(anewline)\n \n if context.scene.jbeam.exp_ef:\n for f in 
mesh.tessfaces:\n vs = f.vertices\n #if len(vs) == 3:\n # nodeIndex1 = ([n.i for n in sortedNodes].index(vs[0]))\n # nodeIndex2 = ([n.i for n in sortedNodes].index(vs[1]))\n # nodeIndex3 = ([n.i for n in sortedNodes].index(vs[2]))\n # if context.scene.jbeam.listbn:\n # file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex2].nodeName))\n # file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex3].nodeName))\n # file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex3].nodeName, sortedNodes[nodeIndex1].nodeName))\n # else:\n # file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex2].nodeName))\n # file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex3].nodeName))\n # file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex3].nodeName, sortedNodes[nodeIndex1].nodeName))\n if len(vs) == 4:\n nodeIndex1 = ([n.i for n in sortedNodes].index(vs[0]))\n nodeIndex2 = ([n.i for n in sortedNodes].index(vs[1]))\n nodeIndex3 = ([n.i for n in sortedNodes].index(vs[2]))\n nodeIndex4 = ([n.i for n in sortedNodes].index(vs[3]))\n if context.scene.jbeam.listbn:\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex2].nodeName))\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex3].nodeName))\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex3].nodeName, sortedNodes[nodeIndex4].nodeName))\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex4].nodeName, sortedNodes[nodeIndex1].nodeName))\n else:\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex2].nodeName))\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex3].nodeName))\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex3].nodeName, sortedNodes[nodeIndex4].nodeName))\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex4].nodeName, sortedNodes[nodeIndex1].nodeName))\n if context.scene.jbeam.exp_diag:\n if self.listbn:\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex3].nodeName))\n file.write('[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex4].nodeName))\n else:\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex3].nodeName))\n file.write('\\t\\t\\t[\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex4].nodeName))\n #else:\n # report({'ERROR'}, 'ERROR: Face %i isn\\'t tri or quad.' 
% vs.index)\n # return {'CANCELLED'}\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\\t],\\n')\n \n if context.scene.jbeam.exp_tricol:\n file.write('//--tri col--')\n file.write(anewline)\n ob_new.modifiers.new(\"tricol\",\"TRIANGULATE\")\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=\"tricol\")\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\"triangles\":[\\n\\t\\t\\t[\"id1:\", \"id2:\", \"id3:\"],\\n')\n mesh = ob_new.data\n mesh.update(False, True)\n for f in mesh.tessfaces:\n vs = f.vertices\n if len(vs) == 3:\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\\t')\n nodeIndex1 = ([n.i for n in sortedNodes].index(vs[0]))\n nodeIndex2 = ([n.i for n in sortedNodes].index(vs[1]))\n nodeIndex3 = ([n.i for n in sortedNodes].index(vs[2]))\n file.write('[\"%s\",\"%s\",\"%s\"],\\n' % (sortedNodes[nodeIndex1].nodeName, sortedNodes[nodeIndex2].nodeName, sortedNodes[nodeIndex3].nodeName))\n else:\n self.report({'ERROR'}, 'ERROR: TriCol %i isn\\'t tri' % vs.index)\n return {'CANCELLED'}\n if not(context.scene.jbeam.listbn):\n file.write('\\t\\t\\t],\\n')\n if not(context.scene.jbeam.listbn):\n file.write('\\t}\\n}')\n file.flush()\n file.close()\n \n # Deselect our new object\n ob_new.select = False\n \n # Remove the new temp object\n scene.objects.unlink(ob_new)\n bpy.data.objects.remove(ob_new)\n \n if (mesh.users == 0):\n mesh.user_clear()\n \n bpy.data.meshes.remove(mesh)\n \n if (tempMesh.users == 0):\n tempMesh.user_clear()\n \n bpy.data.meshes.remove(tempMesh)\n \n # Restore selection status\n '''for o in selectedObjects:\n o.select = True'''\n \n # Restore active object\n scene.objects.active = active\n \n return {'FINISHED'}\n \n except Exception as e:\n self.report({'ERROR'}, 'ERROR: ' + str(e))\n print('ERROR: ' + str(e))\n if file: file.close()\n if tempMesh: bpy.data.meshes.remove(tempMesh)\n return {'CANCELLED'}","repo_name":"hborlik/BeamngCar","sub_path":"BlenderScripts/io_mesh_jbeam/export_jbeam.py","file_name":"export_jbeam.py","file_ext":"py","file_size_in_byte":17156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4826342289","text":"import sys\nimport time\nimport numpy as np\n\nfrom contextlib import contextmanager\nfrom os.path import join\n\n\ndef strtime_short(span):\n \"\"\"\n Return a 5-character string identifying the timespan\n \"\"\"\n if span < 1:\n return \"{:3d}ms\".format(int(span * 1000))\n if span < 60:\n return \"{:4.1f}s\".format(span)\n if span < 120:\n return \"{:4d}s\".format(int(span))\n if span < 3600:\n return \"{:4.1f}m\".format(span / 60)\n if span < 86400:\n return \"{:4.1f}h\".format(span / 3600)\n else:\n return \"{:4.1f}d\".format(span / 3600 / 24)\n\n\ndef strtime(span, colour=False):\n \"\"\"\n Return a moderately long string, providing a human-interpretable\n representation of the provided timespan\n \"\"\"\n if colour:\n cms = \"\\033[0m\"\n cs = \"\\033[38;5;246m\"\n cm = \"\\033[38;5;239m\"\n ch = \"\\033[38;5;250m\"\n cd = \"\\033[38;5;242m\"\n cr = \"\\033[0m\"\n else:\n cr = cd = ch = cm = cs = cms = \"\"\n\n if span < 1:\n return cms + \"{:6.3f}ms\".format(span * 1000) + cr\n if span < 120:\n full = int(span)\n return (cs + \"{:3d}s \".format(full) + cms\n + \"{:3d}ms\".format(int((span - full) * 1000)) + cr)\n if span < 3600:\n full = int(span / 60)\n return (cm + \"{:2d}m \".format(full) + cs\n + \"{:2d}s\".format(int(span - full * 60)) + cr)\n if span < 86400:\n full = int(span / 3600)\n return (ch + \"{:2d}h \".format(full) + cm\n + 
\"{:2d}m\".format(int(span / 60 - full * 60)) + cr)\n else:\n full = int(span / 86400)\n return (cd + \"{:3d}d \".format(full) + ch\n + \"{:2d}h\".format(int(span / 3600 - full * 24)) + cr)\n\n\nclass Timer:\n # TODO More flexible: Time on a subtree\n # TODO Describe function to print a nice table\n def __init__(self):\n self.time_construction = time.perf_counter()\n self.raw_data = {} # Raw data of time intervals [start, end]\n self.start_times = {}\n\n def attach(self, other, subtree=\"\"):\n \"\"\"\n Attach the timing results from another timer,\n i.e. merge both timers together.\n \"\"\"\n for k, v in other.raw_data.items():\n kfull = join(subtree, k)\n if kfull not in self.raw_data:\n self.raw_data[kfull] = []\n self.raw_data[kfull].extend(v)\n\n for k, v in other.start_times.items():\n kfull = join(subtree, k)\n if kfull not in self.start_times:\n self.start_times[kfull] = []\n self.start_times[kfull].extend(v)\n\n def stop(self, task, now=None):\n \"\"\"Stop a task and return runtime of it.\"\"\"\n if now is None:\n now = time.perf_counter()\n if task in self.start_times:\n start = self.start_times.pop(task)\n if task in self.raw_data:\n self.raw_data[task].append((start, now))\n else:\n self.raw_data[task] = [(start, now)]\n return now - start\n return 0\n\n def restart(self, task):\n \"\"\"\n Start a task if it is currently not running\n or stop and restart otherwise.\n \"\"\"\n now = time.perf_counter()\n if self.is_running(task):\n self.stop(task, now)\n self.start_times[task] = now\n\n @contextmanager\n def record(self, task):\n \"\"\"\n Context manager to automatically start and stop a time\n recording as long as context is active.\n\n Parameters\n ----------\n task : str\n The string describing the task\n \"\"\"\n self.restart(task)\n try:\n yield self\n finally:\n self.stop(task)\n\n def is_running(self, task):\n return task in self.start_times\n\n @property\n def tasks(self):\n \"\"\"The list of all tasks known to this object\"\"\"\n all_tasks = set(self.start_times.keys())\n all_tasks.update(set(self.raw_data.keys()))\n return sorted(list(all_tasks))\n\n @property\n def lifetime(self):\n \"\"\"Get total time since this class has been constructed\"\"\"\n return time.perf_counter() - self.time_construction\n\n def intervals(self, task):\n \"\"\"Get all time intervals recorded for a particular task\"\"\"\n if task not in self.raw_data:\n if task not in self.start_times:\n raise ValueError(\"Unknown task: \" + task)\n return self.current(task)\n else:\n intervals = [end - start for start, end in self.raw_data[task]]\n if task in self.start_times:\n intervals.append(time.perf_counter() - self.start_times[task])\n return np.array(intervals)\n\n def total(self, task):\n \"\"\"Get total runtime on a task in seconds\"\"\"\n return np.sum(self.intervals(task))\n\n def best(self, task):\n \"\"\"Get the best time of all the intervals recorded for a task\"\"\"\n return np.min(self.intervals(task))\n\n def median(self, task):\n return np.median(self.intervals(task))\n\n def average(self, task):\n return np.average(self.intervals(task))\n\n def current(self, task):\n \"\"\"Get current time on a task without stopping it\"\"\"\n if self.is_running(task):\n return time.perf_counter() - self.start_times[task]\n else:\n raise ValueError(\"Task not currently running: \" + task)\n\n def describe(self, colour=sys.stdout.isatty()):\n maxlen = max(len(key) for key in self.tasks)\n\n # This is very dummy ... 
in the future we would like to have\n # a nice little table, which also respects the key hierachy\n # and displays cumulative sums for each level and which\n # tasks care of duplicated time intervals (e.g. if two tasks\n # run at the same time)\n # Colour: Use one for each level\n text = \"Timer \" + strtime_short(self.lifetime) + \" lifetime:\\n\"\n for key in self.tasks:\n fmt = \" {:<\" + str(maxlen) + \"s} {:>20s}\\n\"\n text += fmt.format(key, strtime(self.total(key), colour=colour))\n return text\n\n def _repr_pretty_(self, pp, cycle):\n if cycle:\n pp.text(\"Timer()\")\n else:\n pp.text(self.describe())\n\n\nTimer.start = Timer.restart\n\n\ndef timed_call(f):\n \"\"\"\n Decorator to automatically time function calls.\n The timer object is available under the function attribute _timer\n \"\"\"\n def decorated(*args, **kwargs):\n if not hasattr(decorated, \"_timer\"):\n setattr(decorated, \"_timer\", Timer())\n with getattr(decorated, \"_timer\").record(f.__name__):\n return f(*args, **kwargs)\n decorated.__doc__ = f.__doc__\n return decorated\n\n\ndef timed_member_call(timer=\"timer\"):\n \"\"\"\n Decorator to automatically time calls to instance member functions.\n The name of the instance attribute where timings are stored is the\n timer argument to this function.\n \"\"\"\n def decorator(f):\n def wrapped(self, *args, **kwargs):\n if not hasattr(self, timer):\n setattr(self, timer, Timer())\n with getattr(self, timer).record(f.__name__):\n return f(self, *args, **kwargs)\n wrapped.__doc__ = f.__doc__\n return wrapped\n return decorator\n","repo_name":"adc-connect/adcc","sub_path":"adcc/timings.py","file_name":"timings.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"17814794689","text":"import pandas as pd\nimport numpy as np\nimport cv2\nimport os\nfrom skimage.io import imread\nfrom tqdm import tqdm\n\n\ndef rle_encode(img):\n \"\"\"\n img: numpy array, 1 - mask, 0 - background\n Returns run length as string formatted\n \"\"\"\n pixels = img.T.flatten()\n pixels = np.concatenate([[0], pixels, [0]])\n runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n runs[1::2] -= runs[::2]\n return ' '.join(str(x) for x in runs)\n\n\ndef rle_decode(mask_rle, shape=(768, 768)):\n \"\"\"\n mask_rle: run-length as string formatted (start length)\n shape: (height,width) of array to return\n Returns numpy array, 1 - mask, 0 - background\n \"\"\"\n s = mask_rle.split()\n starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]\n starts -= 1\n ends = starts + lengths\n img = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n for lo, hi in zip(starts, ends):\n img[lo:hi] = 1\n return img.reshape(shape).T # Needed to align to RLE direction\n\n\ndef masks_as_image(row, all_masks=None):\n \"\"\"\n\n :param row: takes DataFrame row\n :param all_masks:\n :return: np.array with shape (768,768,3)\n \"\"\"\n # Take the individual ship masks and create a single mask array for all ships\n if all_masks is None:\n all_masks = np.zeros((768, 768), dtype=np.uint8)\n\n if isinstance(row['EncodedPixels'], str):\n all_masks += rle_decode(row['EncodedPixels'])\n return np.expand_dims(all_masks, -1)\n\n\ndef process_dataframe(data, print_log=True):\n \"\"\"\n :param data: DataFrame with RLE and Image Path\n :param print_log: if you want to print memory usage of created np.arrays\n :return: dict {masks: np.array(num_rows, 768,768,1), \"images_rgb\": (num_rows, 768,768,3)}\n \"\"\"\n num_rows = 
len(data)\n masks = np.zeros((num_rows, 768, 768, 1), dtype=np.uint8)\n images_rgb = np.zeros((num_rows, 768, 768, 3), dtype=np.uint8)\n\n for i in range(num_rows):\n mask = masks_as_image(data.iloc[i])\n rgb = imread(os.path.join('data/train_v2', data.iloc[i]['ImageId']))\n\n masks[i, :, :, :] = mask\n images_rgb[i, :, :, :] = rgb\n result_dict = {'masks': masks, 'images_rgb': images_rgb}\n if print_log:\n get_memory_usage(result_dict, f'Original size {(768, 768)}')\n\n return result_dict\n\n\ndef resize_image(image, size=(256, 256)):\n \"\"\"\n\n :param image: np.array representation of image\n :param size: size you want to compress your original image\n :return: resized np.array\n \"\"\"\n return cv2.resize(image, size)\n\n\ndef resize_mask(mask, size=(256, 256)):\n \"\"\"\n :param mask: np.array representation of mask\n :param size: size you want to compress your original mask\n :return: resized np.array\n \"\"\"\n return cv2.resize(mask, size)\n\n\ndef rle_decode_resized(mask_rle, original_shape=(768, 768), resized_shape=(256, 256)):\n \"\"\"\n\n :param mask_rle: RLE code\n :param original_shape:\n :param resized_shape:\n :return: np.array with resized shape\n \"\"\"\n img = rle_decode(mask_rle, original_shape)\n resized_img = resize_mask(img, resized_shape)\n return resized_img\n\n\ndef masks_as_image_resized(row, all_masks=None, resized_shape=(256, 256)):\n \"\"\"\n\n :param row:\n :param all_masks:\n :param resized_shape:\n :return:\n \"\"\"\n if all_masks is None:\n all_masks = np.zeros(resized_shape, dtype=np.uint8)\n\n if isinstance(row['EncodedPixels'], str):\n mask = rle_decode_resized(row['EncodedPixels'])\n all_masks += mask\n\n return np.expand_dims(all_masks, -1)\n\n\ndef process_dataframe_resized(data, target_size=(256, 256), print_log=True):\n num_rows = len(data)\n masks = np.zeros((num_rows, *target_size, 1), dtype=np.uint8)\n images_rgb = np.zeros((num_rows, *target_size, 3), dtype=np.uint8)\n\n for i in range(num_rows):\n mask = masks_as_image_resized(data.iloc[i], resized_shape=target_size)\n rgb = imread(os.path.join('data/train_v2', data.iloc[i]['ImageId']))\n rgb_resized = resize_image(rgb, target_size)\n\n masks[i, :, :, :] = mask\n images_rgb[i, :, :, :] = rgb_resized\n\n result_dict = {'masks': masks, 'images_rgb': images_rgb}\n if print_log:\n get_memory_usage(result_dict, f'Resized to {target_size}')\n\n return result_dict\n\n\ndef get_data():\n df = pd.read_csv('data/train_ship_segmentations_v2.csv')\n df = df[~df['ImageId'].isin(['6384c3e78.jpg'])]\n print('Total rows:', len(df))\n df_with_ships = df[~df['EncodedPixels'].isna()]\n print('Total ships on images:', len(df_with_ships))\n df_without_ships = df[df['EncodedPixels'].isna()]\n print('Total images without ships:', len(df_without_ships))\n df_with_ships = df_with_ships.groupby(\"ImageId\")[['EncodedPixels']].agg(\n lambda rle_codes: ' '.join(rle_codes)).reset_index()\n print('Total images with ships:', len(df_with_ships))\n df_without_ships = df_without_ships.sample(n=20000, random_state=42)\n\n data = pd.concat([df_with_ships, df_without_ships], ignore_index=True)\n\n print('Under sampled data with 20k non ships images + images with ships has:', len(data))\n return data\n\n\ndef get_memory_usage(preprocessed_data: dict, type_data=''):\n print(f\"{type_data} : Masks memory usage: {preprocessed_data['masks'].nbytes // (1024 ** 2)} Mb\")\n print(f\"{type_data} : Image memory usage: {preprocessed_data['images_rgb'].nbytes // (1024 ** 2)} Mb\")\n\n\ndef get_preprocessed_batches(data, batch_size=64, 
print_log=False, resized=True):\n all_indices = data.index.tolist()\n\n np.random.shuffle(all_indices)\n batches = [all_indices[i:i + batch_size] for i in range(0, len(all_indices), batch_size)]\n\n batches_data = {}\n\n i = 0\n print(f'Process all data as batches with size {batch_size}')\n for batch_indices in tqdm(batches):\n batch_data = data.loc[batch_indices]\n if resized:\n temp_dict = process_dataframe_resized(batch_data, print_log=print_log)\n else:\n temp_dict = process_dataframe(batch_data, print_log=print_log)\n batches_data[i] = temp_dict\n i += 1\n\n return batches_data\n\n\ndef get_X_y_data(data, resized=False):\n if resized:\n temp_dict = get_preprocessed_batches(data, resized=True)\n masks_combined = np.empty((0, 256, 256, 1), dtype=np.float32)\n images_rgb_combined = np.empty((0, 256, 256, 3), dtype=np.float32)\n else:\n temp_dict = get_preprocessed_batches(data, resized=False)\n masks_combined = np.empty((0, 768, 768, 1), dtype=np.float32)\n images_rgb_combined = np.empty((0, 768, 768, 3), dtype=np.float32)\n\n print('Merge all batches into one big preprocessed DataSet')\n for key in tqdm(temp_dict):\n masks_combined = np.concatenate((masks_combined, temp_dict[key]['masks']), axis=0)\n images_rgb_combined = np.concatenate((images_rgb_combined, temp_dict[key]['images_rgb']), axis=0)\n\n print('X shape:', images_rgb_combined.shape)\n print('y shape:', masks_combined.shape)\n\n return images_rgb_combined, masks_combined\n","repo_name":"RidzIn/tech_task","sub_path":"utils/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12290043011","text":"#\n# author: vongkh\n# created: Wed Nov 25 2020\n#\n\nfrom sys import stdin, stdout # only need for big input\n\ndef solve():\n n = int(input())\n mp = dict()\n a = [-1] * n\n xor_res = [0] * n\n identical_pair = []\n for i in range(1,n):\n print(f\"XOR {1} {i+1}\")\n xor_res[i] = int(input())\n if xor_res[i] in mp:\n identical_pair = [i, mp[xor_res[i]]]\n else:\n mp[xor_res[i]] = i \n\n if 0 in mp:\n ind = mp[0]\n print(f\"AND {1} {ind+1}\")\n a[0] = int(input())\n\n elif len(identical_pair) > 0:\n ind1 = identical_pair[0]\n ind2 = identical_pair[1]\n print(f\"AND {ind1 + 1} {ind2+1}\")\n a[ind1] = int(input())\n a[0] = a[ind1] ^ xor_res[ind1]\n else:\n #all number are distinct and between 0 and n - 1\n #which means there is aj whose has only 1 bit different from a[0]\n \n #a[ind] whose only the last bit is different ==> a[ind] ^ a[0] == 1\n ind = mp[1]\n print(f\"AND {ind+1} {1}\")\n a[0] = int(input())\n\n #the result is not correct yet because the last bit is 0 but a[0] might be \n #an odd number\n #we need do another query on a number whose only the second last bit is different \n # ==> a[ind] ^ a[0] = 2\n ind = mp[2]\n print(f\"AND {ind+1} {1}\")\n tmp = int(input())\n #the last bit of tmp is the last bit of a[0]\n a[0] += tmp % 2\n\n stdout.write(f\"! 
{a[0]} \")\n for i in range(1, n):\n a[i] = a[0] ^ xor_res[i]\n stdout.write(f\"{a[i]} \")\n print(\" \")\n\ndef main():\n t = 1\n # t = int(input())\n for _ in range(t):\n solve()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vongkhmer/competitive_programming","sub_path":"CodeForce/CF685Div2/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38385827250","text":"from docx import Document\r\n# def get_para_data(out_doc_name, paragraph):\r\n# out_para = out_doc_name.add_paragraph()\r\n# for run in paragraph.runs:\r\n# out_run = out_para.add_run(run.text)\r\n# # Styling\r\n# out_run.bold = run.bold\r\n# out_run.italic = run.italic\r\n# out_run.underline = run.underline\r\n# out_run.font.color.rgb = run.font.color.rgb\r\n# out_run.style.name = run.style.name\r\n# # Paragraph alignment\r\n# out_para.paragraph_format.alignment = paragraph.paragraph_format.alignment\r\n\r\n# Judul\r\n\r\n\r\ndef get_title(input_doc):\r\n title = input_doc.paragraphs[0].text\r\n return title\r\n\r\n# Author e.g. : Namikaze Minato, Senju Hashirama, Ichinose Mizuhara\r\n\r\n\r\ndef get_author(input_doc):\r\n i = 0\r\n for run in input_doc.paragraphs[1].text:\r\n i += 1\r\n # print(i)\r\n if(i != 0):\r\n return input_doc.paragraphs[1].text\r\n else:\r\n return input_doc.paragraphs[2].text\r\n\r\n# Keywords e.g. : KNN, Machine Learning, CNN\r\n\r\n\r\ndef get_keywords(input_doc):\r\n i = 0\r\n for para in input_doc.paragraphs:\r\n keywords = input_doc.paragraphs[i].text\r\n i += 1\r\n if(\"Keywords\" in para.text):\r\n keywords = keywords.replace('Keywords: ', '')\r\n keywords = keywords.replace('Keywords : ', '')\r\n return keywords\r\n\r\n\r\ndef delete_paragraph(paragraph):\r\n p = paragraph._element\r\n p.getparent().remove(p)\r\n p._p = p._element = None\r\n\r\n\r\n# def getText(doc):\r\n# allText = []\r\n\r\n# for paragraph in doc.paragraphs:\r\n# allText.append(paragraph.text)\r\n# return \"\".join(allText).encode(\"utf-8\")\r\n\r\n# Mengambil seluruh data docx\r\n\r\n# def getTitle():\r\n# # nPrg = 0\r\n# # title = []\r\n# i = 0\r\n# while i <= 999:\r\n# prg = doc.paragraphs[i].text\r\n# if(\"Abstract\" in prg):\r\n# # print(\"Total Paragraf = \", nPrg)\r\n# break\r\n# print(prg)\r\n# # nPrg += 1\r\n# i += 1\r\n\r\n\r\n# def getAbstract():\r\n# i = 0\r\n# while i <= 999:\r\n# prg = doc.paragraphs[i].text\r\n# if(\"Introduction\" in prg):\r\n# break\r\n# print(prg)\r\n# i += 1\r\n\r\n# getTitle()\r\n# getAbstract()\r\n\r\n\r\n# print(getText(doc))\r\n","repo_name":"EvanSinaga/Journal-Formatter","sub_path":"readDocx.py","file_name":"readDocx.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19472493743","text":"import matplotlib.pyplot as plt\nfrom seleniumwire.undetected_chromedriver.v2 import Chrome, ChromeOptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\nfrom PIL import Image, ImageFilter, ImageOps\n\nimport numpy as np\nimport serial\n\nfrom image_difference_processing import get_people_pixels, get_truck_prediction\n\nfrom passwords import GMAIL_USERNAME, GMAIL_PASSWORD\n\n# https://forum.arduino.cc/t/serial-input-basics/278284/2\n# http://www.gammon.com.au/serial\n\nif __name__ == \"__main__\":\n\n # options = {'ca_cert': 
'ca.crt'}\n options = {}\n chrome_options = ChromeOptions()\n chrome_options.add_argument('--user-data-dir=hash')\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--ignore-certificate-errors\")\n\n browser = Chrome(seleniumwire_options=options, options=chrome_options)\n\n # TO RUN THIS PART\n # Duplicate the file `passwords_template.py` and rename it to `passwords.py`.\n # Type in your Gmail username and password where indicated. passwords.py should be in the .gitignore\n # and therefore should not be committed to Github, but\n # DOUBLE CHECK THAT YOU ARE NOT COMMITTING YOUR GMAIL PASSWORD BEFORE YOU PUSH!!!!\n\n # wait = WebDriverWait(browser, 10)\n # browser.get('https://gmail.com')\n # wait.until(ec.presence_of_element_located(\n # (By.XPATH, '//*[@id=\"identifierId\"]'))).send_keys(GMAIL_USERNAME)\n # wait.until(ec.presence_of_element_located(\n # (By.XPATH, '//*[@id=\"identifierNext\"]/div/button'))).click()\n # time.sleep(5)\n # wait.until(ec.presence_of_element_located(\n # (By.XPATH, '//*[@id=\"password\"]/div[1]/div/div[1]/input'))).send_keys(GMAIL_PASSWORD)\n # wait.until(ec.presence_of_element_located(\n # (By.XPATH, '//*[@id=\"passwordNext\"]/div/button'))).click()\n # time.sleep(30)\n\n # It will ask you for your 2 factor authentication on your phone here.\n\n # Unlisted youtube video\n browser.get('https://www.youtube.com/watch?v=QBKk4TQi9KU')\n time.sleep(5)\n wait = WebDriverWait(browser, 10)\n wait.until(ec.presence_of_element_located(\n (By.CLASS_NAME, \"html5-video-player\")\n ))\n video_player = browser.find_element(By.CLASS_NAME, \"html5-video-player\")\n video_player.send_keys(\"f\")\n\n # Private youtube video\n # browser.get('https://www.youtube.com/watch?v=JIb4EGf5uFA')\n\n time.sleep(12)\n # Do initial truck processing\n initial_image_path = f'images/science_center{time.time()}.png'\n browser.save_screenshot(initial_image_path)\n truck_image = Image.open(initial_image_path)\n\n truck_1_location = [[1000, 1600], [1500, 2600]]\n truck_2_location = [[800, 1200], [2200, 3000]]\n truck_3_location = [[600, 900], [2800, 3200]]\n\n img = np.asarray(truck_image.convert('RGB'))\n\n truck1 = Image.fromarray(img[truck_1_location[0][0]:truck_1_location[0][1],\n truck_1_location[1][0]:truck_1_location[1][1], :])\n truck2 = Image.fromarray(img[truck_2_location[0][0]:truck_2_location[0][1],\n truck_2_location[1][0]:truck_2_location[1][1], :])\n truck3 = Image.fromarray(img[truck_3_location[0][0]:truck_3_location[0][1],\n truck_3_location[1][0]:truck_3_location[1][1], :])\n\n truck1_color = get_truck_prediction(truck1, 1)\n time.sleep(1)\n truck2_color = get_truck_prediction(truck2, 2)\n time.sleep(1)\n truck3_color = get_truck_prediction(truck3, 3)\n print(truck1_color, truck2_color, truck3_color)\n\n last_image_path = initial_image_path\n this_image_path = None\n image_array = []\n\n while True:\n this_time = time.time()\n this_image_path = f'images/science_center_{time.time()}.png'\n browser.save_screenshot(this_image_path)\n img1 = Image.open(this_image_path)\n image_array.append(img1)\n print('this image size', img1.size)\n img2 = Image.open(last_image_path)\n print('this image size', img2.size)\n print(this_time)\n people_pixels = get_people_pixels(img1, img2)\n plt.imshow(people_pixels)\n plt.savefig(f'pixel_images/{this_time}_pixels.png')\n\n # time.sleep(5)\n last_image_path = this_image_path\n\n 
browser.close()\n","repo_name":"aprilmusic/es50_final_proj","sub_path":"youtube_private.py","file_name":"youtube_private.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37857236871","text":"from os import system, name\n\ndef clear():\n \n if name == 'nt':\n _ = system('cls')\n \ndef divisible_deux_trois(n):\n #n = int(input(\"Entrer un entier positif: \"))\n if n % 2 == 0 and n % 3 == 0:\n print(f\"Bravo {n} est divisible par 2 et par 3\")\n return True\n elif n % 2 == 0:\n print(f\"Bravoo {n} est divisible par 2 mais pas par 3\")\n return True\n elif n % 3 == 0:\n print(f\"Bravoo {n} est divisible par 3 mais pas par 2\")\n return True\n else:\n print(f\"{n} n'est pas divisible ni par 3 ni par 2\")\n return False\n\ndef affiche_menu_saisie():\n print(\"Enter un entier positif: \", end = \"\")\n nb = int(input())\n return nb\n\ndef divisibilite23():\n ok = False\n while not ok:\n clear()\n nb = affiche_menu_saisie()\n ok = divisible_deux_trois(nb)\n if(not ok):\n input(\"Pour essayer une autre fois tapez sur 2 fois sur Enter \")\n input()\n\ndivisibilite23()\n\n\n","repo_name":"ablakkis/exercices","sub_path":"exercice4sem5.py","file_name":"exercice4sem5.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20334663035","text":"from setuptools import setup\n\nwith open('README.md', 'r') as readme:\n long_description = readme.read()\n\nsetup(\n name='IChicken',\n version='0.0',\n packages=['ichicken'],\n url='https://github.com/venuur/IChicken',\n license='MIT',\n author='Carl Morris',\n author_email='carl.morris.world@outlook.com',\n description='Chicken Scheme kernel for Jupyter.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'\n ]\n)\n","repo_name":"venuur/IChicken","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1573508028","text":"import sys\nfrom .driver import assemble\n\n\ndef main():\n\tif len(sys.argv) < 3:\n\t\tprint(\"Usage: %s input1.ear [ input2.ear ... 
] output.peg\" % sys.argv[0])\n\t\tsys.exit(1)\n\t\n\tasm_strs = []\n\t\n\tfor arg in sys.argv[1:-1]:\n\t\t# Read input assembly source from stdin or file\n\t\tif arg == \"-\":\n\t\t\tasmstr = sys.stdin.read()\n\t\telse:\n\t\t\twith open(arg, \"r\") as asm_fp:\n\t\t\t\tasmstr = asm_fp.read()\n\t\t\n\t\t# Collect assembly pieces into their segments\n\t\tasm_strs.append(asmstr)\n\t\n\tdata = assemble(asm_strs)\n\t\n\t# Write the assembled binary output file\n\tout_fname = sys.argv[-1]\n\tif out_fname == \"-\":\n\t\tsys.stdout.buffer.write(data)\n\telse:\n\t\twith open(out_fname, \"wb\") as out_fp:\n\t\t\tout_fp.write(data)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"kjcolley7/PEGASUS","sub_path":"earasm/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"34648776939","text":"import time\n\nimport typer\n\napp = typer.Typer()\ndef main(delete: bool = typer.Option(..., help=\"Supprime les fichiers trouvées\"), extension: str = typer.Argument(..., help=\"Extension à chercher\")):\n \"\"\"\n Affiche les fichiers trouvés avec l'extension donnée.\n :return:\n \"\"\"\n prenom_ = \"Patrick\"\n prenom = typer.style(prenom_, fg=typer.colors.RED, bold=True)\n print(prenom)\n print(delete)\n typer.secho(prenom, fg=typer.colors.BLUE)\n typer.secho(f\"Recherche des fichiers avec l'extension {extension}.\", fg=typer.colors.BLUE)\n if delete:\n typer.confirm(\"Souhaitez vous vraiment supprimer les fichiers ?\", abort=True)\n\n print(\"Suppression des fichiers.\")\n\n liste = range(1000)\n with typer.progressbar(liste) as progress:\n for _ in progress:\n time.sleep(0.001)\n\n\n@app.command(\"search\")\ndef search_py():\n main(delete=False, extension=\"py\")\n\n\n@app.command(\"delete\")\ndef delete_py():\n main(delete=True, extension=\"py\")\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"jeremybh44/apprentissage_python","sub_path":"test_typer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10285221160","text":"from fltk import *\r\n\r\n\r\ndef menu(var=0):\r\n\r\n choix = [0,1,2,3,4]\r\n move = choix.index(var) # Position dans choix de la variante\r\n maps =['maps/map_test.txt','maps/map1.txt','maps/map2.txt','maps/map3.txt','maps/map4.txt']\r\n\r\n efface_tout()\r\n image(-25,-20,'media/background2.png',largeur=800, hauteur=800,ancrage='nw',tag='bg')\r\n image(30,15,'images/wallisyou.png',largeur=600, hauteur=200,ancrage='nw',tag='menu')\r\n \r\n # Choix variante (flèches)\r\n polygone([(200,470),(225,450),(225,490)],remplissage='lightgray',couleur='white',tag='menu')\r\n polygone([(400,470),(375,450),(375,490)],remplissage='lightgray',couleur='white',tag='menu')\r\n texte(305,400,'Level :',ancrage='center',couleur='lightgray',tag='menu')\r\n texte(302,470,str(var),ancrage='center',couleur='lightgray',taille='40',tag='var')\r\n\r\n # Bouton jouer\r\n image(305,620,'images/play.png',largeur=350, hauteur=300,ancrage='center',tag='menu')\r\n\r\n \r\n\r\n\r\n while True:\r\n ev = donne_ev()\r\n tev = type_ev(ev)\r\n x = abscisse_souris()\r\n y = ordonnee_souris()\r\n # Action dépendante du type d'événement reçu:\r\n\r\n # Flèche Gauche\r\n if (tev == 'Touche' and touche(ev) == 'Left')\\\r\n or (tev == 'ClicGauche' and (200 <= x <= 225 and 450 <= y <= 490)):\r\n if move > 0:\r\n var = False\r\n move -= 1\r\n\r\n # 
Flèche Droite\r\n if (tev == 'Touche' and touche(ev) == 'Right')\\\r\n or (tev == 'ClicGauche' and (375 <= x <= 400 and 450 <= y <= 490)):\r\n if move < len(choix)-1:\r\n var = False\r\n move += 1\r\n \r\n # Bouton jouer\r\n if (tev == 'Touche' and touche(ev) == 'Return')\\\r\n or (tev == 'ClicGauche' and (200 <= x <= 400 and 525 <= y <= 625)):\r\n efface('var')\r\n efface('menu')\r\n efface('bg')\r\n\r\n return maps[var]\r\n\r\n if tev == 'Quitte': # on sort de la boucle\r\n exit()\r\n\r\n \r\n if not var:\r\n var = choix[move]\r\n efface('var')\r\n texte(302,470,var,ancrage='center',couleur='lightgrey',taille='40',tag='var')\r\n\r\n mise_a_jour()\r\n\r\n\r\n\r\n ","repo_name":"antonynavarro/WallisYou","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39037530942","text":"#!/bin/env python2.7\n\nimport psycopg2\nfrom datetime import datetime\n\ndbname = \"news\"\n\n\ndef logs_analysis():\n # Connect to the database\n db = psycopg2.connect(database=dbname)\n\n # Open a cursor to execute PostgreSQL queries in the database session\n c = db.cursor()\n\n # Question 1. What are the most popular three articles of all time?\n # This PostgreSQL query joins the articles table with the log table\n # on the slug by deriving an equivalent slug from the article path\n # and fetches the top 3 rows of popular articles\n query = \"\"\"\n select A.title, count(L.slug) as count\n from articles A\n inner join (select replace(path, '/article/', '') as slug from log) L\n on A.slug = L.slug\n group by A.title\n order by count desc\n fetch first 3 rows only;\n \"\"\"\n c.execute(query)\n rows = c.fetchall()\n\n print('\\n' + \"Three most popular three articles of all time:\")\n\n for (title, views) in rows:\n print(\" \\\"{}\\\" - {} views\".format(title, views))\n\n # Question 2. Who are the most popular article authors of all time?\n # This PostgreSQL query joins the articles and authors tables\n # to list authors by order of total article count\n query = \"\"\"\n select W.name, count(L.slug) as count\n from articles A\n inner join ( select replace(path, '/article/', '') as slug from log ) L\n on A.slug = L.slug\n inner join authors W\n on A.author = W.id\n group by W.name\n order by count desc;\n \"\"\"\n c.execute(query)\n rows = c.fetchall()\n\n print('\\n' + \"Most popular authors of all time:\")\n\n for (name, count) in rows:\n print(\" {} - {} views\".format(name, count))\n\n # Question 3. 
On which days did more than 1% of requests lead to errors?\n # This PostgreSQL query groups log entries by http status code\n # and caculates a percentage if daily errpr rate exceeds 1%\n query = \"\"\"\n select date(time),\n round(100.0*sum(case log.status when '200 OK' then 0 else 1 end)\n /count(log.status),2) as error_rate\n from log\n group by date(time)\n having round(100.0*sum(case log.status when '200 OK' then 0 else 1 end)\n /count(log.status),2) > 1;\n \"\"\"\n # Use the datetime module to format the sql output of the date\n fmt = \"%B %d, %Y\"\n c.execute(query)\n rows = c.fetchall()\n\n print('\\n' + \"Day(s) on which more than 1% of requests led to errors:\")\n\n for (date, rate) in rows:\n print(\" {} - {}% errors\".format(datetime.strftime(date, fmt), rate))\n\n # Close the cursor and the database session\n c.close()\n db.close()\n\n\nlogs_analysis()\n","repo_name":"cloverhub/Logs-Analysis","sub_path":"logsanalysis.py","file_name":"logsanalysis.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5377864833","text":"from CCNode import *\nimport numpy as np\nfrom utils_gen import get_words\nfrom copy import deepcopy\nimport treelib as tr\nfrom words_tags_ilabels_translation import *\nfrom itertools import product\nfrom utiils_span import *\nfrom CCTagsLine import *\nfrom utils_tree import *\n\n\nclass CCTree:\n \"\"\"\n This class builds a polytree (see utils_tree.py for def of polytree)\n from `ll_ilabel`, which is either an output of a run with task \"cc\",\n or can be derived from a cctags file.\n\n Think of a CCNode by its __str__. For example, a __str__ for a CCNode\n might be (2, 5)6(7, 23). (2, 5) is its left span `span_pair[0]`,\n 6 is its `ccloc` (cc location) and (7, 23) is its right span `span_pair[ \n 1]`. The CCNode's ccloc is always located between but outside the range \n of its left and right spans.\n\n The tree nodes are of type CCNode or int (the int corresponding to \n CCNode.ccloc). Each ccloc can only have one CCNode assigned to it.\n \n \n Attributes\n ----------\n ccnodes: list[CCNode]\n a list of the CCNodes of the polytree `self`.\n ccsents: list[str]\n This class derives from the polytree `self`, a list of cc sentences\n ( i.e, split sentences, obtained by splitting a compound sentence at\n a coordinating conjunction (CC)).\n child_ccloc_to_par_cclocs: dict[int, list[int]]\n dictionary mapping each child's ccloc to a list of the cclocs of the\n parents.\n forced_polytree: bool\n Sometimes ll_ilabel yields more than one parent for a given node,\n but in a polytree every node must have 0 or 1 parent. Iff this is\n True, this tells SentenceAx to discard all but one parent.\n ll_ilabel: list[list[int]]\n a matrix of ints for one sample. 
The outer dim ranges over depths\n and the inner one over word locations in osent_words.\n orig_sent: str\n the original sentence, before splitting (done first) and extraction (\n done second).\n osent_words: list[str]\n list of words in the original sentence.\n par_ccloc_to_child_cclocs: list[int,list[int]]\n dictionary mapping each parent's ccloc to a list of the cclocs of the\n children.\n root_cclocs: list[int]\n list of the cclocs of the roots of the polytree `self`.\n verbose: str\n True iff want verbose output to be printed out in console.\n \n \"\"\"\n\n def __init__(self, orig_sent, ll_ilabel, forced_polytree=True,\n calc_tree_struc=True, verbose=False):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n orig_sent: str\n ll_ilabel: list[list[int]]\n forced_polytree: bool\n calc_tree_struc: bool\n setting this to False makes this class basically a structureless\n lightweight list of ccnodes. Things like the tree dictionary and\n the ccsents are not calculated.\n verbose: bool\n \"\"\"\n self.orig_sent = orig_sent\n self.osent_words = get_words(orig_sent)\n if verbose:\n print(\"New cctree:\")\n print(orig_sent)\n self.ll_ilabel = ll_ilabel\n self.forced_polytree = forced_polytree\n self.verbose = verbose\n\n # self.osent_locs = range(len(self.osent_words))\n\n self.ccnodes = None\n self.l_cctags_line = None\n # This must be called before calling self.set_tree_structure()\n self.set_ccnodes()\n\n self.root_cclocs = None\n self.par_ccloc_to_child_cclocs = None\n self.child_ccloc_to_par_cclocs = None\n # this fill the 3 previous None's\n if calc_tree_struc:\n self.set_tree_structure()\n\n self.ccsents = None\n # self.level0_to_ccnodes = None\n # self.level0_to_fat_ll_spanned_loc = None\n #\n # self.ll_spanned_loc = None\n # self.l_spanned_word = None\n\n # this fills the previous unfilled None's\n if calc_tree_struc:\n # self.set_spanned_attributes()\n self.set_ccsents()\n\n @staticmethod\n def get_ccnode_from_ccloc(ccloc, ccnodes):\n \"\"\"\n This static method returns the unique CCNode out of `ccnodes` that\n owns the ccloc `ccloc`.\n\n Parameters\n ----------\n ccloc: int\n ccnodes: list[CCNode]\n\n Returns\n -------\n CCNode|None, list[CCNode]\n\n \"\"\"\n unique_k = -1\n l_hot_k = []\n for k, ccnode in enumerate(ccnodes):\n if ccnode.ccloc == ccloc:\n l_hot_k.append(k)\n unique_k = k\n if not l_hot_k:\n return None\n elif len(l_hot_k) > 1:\n # this normally doesn't happen with training extractions\n # but it can happen with predicted extractions\n print(\"more than one ccnode with cc at \" + str(ccloc))\n print(\"culprit sent:\\n\" + str(ccnodes[0].osent_words))\n print(\"ccnodes[k].spanned_locs:\")\n for k in l_hot_k:\n print(\"k=\" + str(k) + \", \" + str(ccnodes[k].spanned_locs))\n assert False\n else:\n return ccnodes[unique_k]\n\n def get_ccnode(self, ccloc):\n \"\"\"\n This non-static method returns the unique CCNode out of\n `self.ccnodes` that owns the ccloc `ccloc`.\n\n Parameters\n ----------\n ccloc: int\n\n Returns\n -------\n CCNode\n\n \"\"\"\n\n return CCTree.get_ccnode_from_ccloc(ccloc, self.ccnodes)\n\n def remove_bad_ccnodes(self):\n \"\"\"\n similar to Openie6.data.coords_to_sentences\n\n This method removes unacceptable ccnodes from the list self.ccnodes\n\n Returns\n -------\n None\n\n \"\"\"\n if self.verbose:\n print(\"nodes before removals: \", [str(ccnode) for ccnode in\n self.ccnodes])\n # enforce one to one mapping between ccnodes and cclocs\n ccloc_to_l_ccnode = {}\n for ccnode in self.ccnodes:\n if ccnode.ccloc not in ccloc_to_l_ccnode.keys():\n 
ccloc_to_l_ccnode[ccnode.ccloc] = []\n ccloc_to_l_ccnode[ccnode.ccloc].append(ccnode)\n for ccloc in ccloc_to_l_ccnode.keys():\n if len(ccloc_to_l_ccnode[ccloc]) > 1:\n for k, ccnode in enumerate(ccloc_to_l_ccnode[ccloc]):\n if k >= 1:\n if self.verbose:\n print(f\"node {ccnode} removed because there is \"\n \"more than one ccnode with this ccloc\")\n self.ccnodes.remove(ccnode)\n\n for ccnode in self.ccnodes:\n ccloc = ccnode.ccloc\n num_osent_words = len(self.osent_words)\n if ccloc >= num_osent_words:\n if self.verbose:\n print(f\"node {ccnode} removed because \"\n f\"ccloc={ccloc} is >= to len(osent)={num_osent_words}\")\n self.ccnodes.remove(ccnode)\n continue\n ccword = self.osent_words[ccloc]\n loc_to_word = ccnode.get_spanned_unbreakable_loc_to_word()\n if ccword in ['nor', '&']:\n if self.verbose:\n print(f\"node {ccnode} removed because \"\n f\"{ccword} is not allowed as a CC.\")\n self.ccnodes.remove(ccnode)\n elif len(loc_to_word):\n if self.verbose:\n print(f\"node {ccnode} removed because \"\n \"its span contains unbreakable words.\")\n print(\"unbreakable_loc_to_word=\", loc_to_word)\n self.ccnodes.remove(ccnode)\n\n def set_ccnodes(self):\n \"\"\"\n similar to Openie6.metric.get_coords()\n\n This method creates the list of ccnodes `self.ccnodes`.\n\n The method also stores in `l_cctags_line` the info it gleans from\n each line of a sample.\n\n The method also removes bad nodes by calling remove_bad_ccnodes(),\n and it performs a sanity check of the newly created ccnodes by\n calling ccnode.check_self() for each ccnode.\n\n Returns\n -------\n None\n\n \"\"\"\n self.ccnodes = []\n self.l_cctags_line = []\n\n num_depths = len(self.ll_ilabel)\n for depth in range(num_depths):\n l_ilabel = self.ll_ilabel[depth]\n cctags_line = CCTagsLine(depth, l_ilabel)\n self.l_cctags_line.append(cctags_line)\n\n for depth in range(num_depths):\n cctags_line = self.l_cctags_line[depth]\n for ccloc in cctags_line.cclocs:\n spans = cctags_line.spans\n span_pair = CCTagsLine.get_span_pair(spans,\n ccloc,\n throw_if_None=False)\n if span_pair:\n ccnode = CCNode(ccloc,\n depth,\n self.osent_words,\n span_pair)\n self.ccnodes.append(ccnode)\n self.remove_bad_ccnodes()\n # print(\"llm\", [str(ccnode) for ccnode in self.ccnodes])\n for ccnode in self.ccnodes:\n ccnode.check_self()\n\n def set_tree_structure(self):\n \"\"\"\n similar to Openie6.data.get_tree(conj) where conj=coords=ccnodes.\n Openie6 uses conj=ccloc almost everywhere else!\n\n This method creates the dictionaries self.child_ccloc_to_par_cclocs\n and self.par_ccloc_to_child_cclocs that define the polytree.\n\n This method also finds the root nodes of the polytree and stores\n them in the list self.root_nodes.\n\n\n Returns\n -------\n None\n\n \"\"\"\n # par = parent\n\n # same with child and par interchanged\n self.child_ccloc_to_par_cclocs = {}\n for child_ccnode in self.ccnodes:\n child_ccloc = child_ccnode.ccloc\n par_cclocs = []\n for par_ccnode in self.ccnodes:\n par_ccloc = par_ccnode.ccloc\n if child_ccnode.is_child(par_ccnode) and \\\n par_ccloc not in par_cclocs:\n par_cclocs.append(par_ccloc)\n self.child_ccloc_to_par_cclocs[child_ccloc] = par_cclocs\n\n if self.forced_polytree:\n for child_ccnode in self.ccnodes:\n child_ccloc = child_ccnode.ccloc\n mapa = self.child_ccloc_to_par_cclocs\n # force every node to have 0 or 1 parent\n mapa[child_ccloc] = mapa[child_ccloc][:1]\n\n self.par_ccloc_to_child_cclocs = {}\n for par_ccnode in self.ccnodes:\n par_ccloc = par_ccnode.ccloc\n child_cclocs = []\n for child_ccnode in 
self.ccnodes:\n child_ccloc = child_ccnode.ccloc\n is_parent = (par_ccloc in\n self.child_ccloc_to_par_cclocs[child_ccloc])\n if is_parent and \\\n child_ccloc not in child_cclocs:\n child_cclocs.append(child_ccloc)\n self.par_ccloc_to_child_cclocs[par_ccloc] = child_cclocs\n\n self.root_cclocs = []\n for ccnode in self.ccnodes:\n ccloc = ccnode.ccloc\n if not self.child_ccloc_to_par_cclocs[ccloc]:\n self.root_cclocs.append(ccloc)\n\n # @staticmethod\n # def get_essential_locs(num_osent_words, ccnodes):\n # \"\"\"\n # This method returns the list of all locations in osent that are not\n # spanned, or a cc, or a sep_loc, for ANY ccnode.\n #\n # Parameters\n # ----------\n # num_osent_words: int\n # ccnodes: list[CCNodes]\n #\n # Returns\n # -------\n # list[int]\n #\n # \"\"\"\n # essential_locs = list(range(num_osent_words))\n # for ccnode in ccnodes:\n # spans = ccnode.spans\n # # print(\"essential_locs, ccloc\", essential_locs, ccnode.ccloc)\n # try:\n # essential_locs.remove(ccnode.ccloc)\n # except:\n # pass\n # for i in range(*spans[0]):\n # try:\n # essential_locs.remove(i)\n # except:\n # pass\n # for i in range(*spans[1]):\n # try:\n # essential_locs.remove(i)\n # except:\n # pass\n # for i in ccnode.sep_locs:\n # try:\n # essential_locs.remove(i)\n # except:\n # pass\n # return essential_locs\n\n # @staticmethod\n # def get_ccsents(osent, ccnodes):\n # \"\"\"\n # This method returns a list of the ccsents (conjugate coordination\n # sentences).\n #\n # Parameters\n # ----------\n # osent: str\n # original sentence\n # ccnodes: list[CCNode]\n #\n # Returns\n # -------\n # list[str]\n #\n # \"\"\"\n # osent_words = get_words(osent)\n # essential_locs = CCTree.get_essential_locs(len(osent_words), ccnodes)\n # cclocs = [ccnode.ccloc for ccnode in ccnodes]\n # osent_words = get_words(osent)\n # ccsents = []\n # print_list(\"essential_locs\", essential_locs)\n # for bool_vec in product([0, 1], repeat=len(cclocs)):\n # ccsent_locs = copy(essential_locs)\n # for k in range(len(bool_vec)):\n # ccloc = cclocs[k]\n # spans = CCTree.get_ccnode_from_ccloc(ccloc, ccnodes).spans\n # ccsent_locs += list(range(*spans[bool_vec[k]]))\n # print(\"llojk\", ccsent_locs)\n # ccsent_locs.sort()\n # ccsent = \" \".join([osent_words[loc] for loc in ccsent_locs])\n # ccsents.append(ccsent)\n # return ccsents\n\n def get_all_ccnode_paths(self):\n \"\"\"\n This method calls the global function\n utils_tree.get_all_paths_from_any_root() for the polytree `self`.\n\n This method considers subtrees of the polytree `self` that have as a\n root node one of the root nodes of the polytree, and vary in depth\n from a zero to the depth of the polytree `self`\n\n The method returns a list of all paths of CCNodes for all such\n subtrees. 
Each path starts at the root_node of its subtree and ends\n at a (nonempty) leaf node of its subtree.\n\n Returns\n -------\n list[list[CCNode]]\n\n \"\"\"\n if self.verbose:\n print(\"child_ccloc_to_par_cclocs\",\n self.child_ccloc_to_par_cclocs)\n print(\"par_ccloc_to_child_cclocs\",\n self.par_ccloc_to_child_cclocs)\n l_ccloc_path = get_all_paths_from_any_root(\n self.par_ccloc_to_child_cclocs,\n self.root_cclocs,\n self.verbose)\n l_ccnode_path = []\n for ccloc_path in l_ccloc_path:\n ccnode_path = [self.get_ccnode(ccloc) for ccloc in ccloc_path]\n l_ccnode_path.append(ccnode_path)\n return l_ccnode_path\n\n @staticmethod\n def get_inc_exc_span_path(ccnode_path,\n l_bit,\n all_span,\n verbose=False):\n \"\"\"\n This method starts with a list of N ccnodes `ccnode_path`\n\n N= len(ccnode_path) = len(l_bit)\n\n The method then uses that ccnode path to generate the list of 2^N\n span paths obtained by choosing either the left span or the right\n span of each ccnode of that ccnode path. Those 2^N span paths are\n labelled by N-bit vectors (0 if left span is chosen or 1 if right\n span is chosen).\n\n The method then chooses, from those 2^N span paths, the single one\n `inc_span_path` that is labelled by the input bit vector `l_bit`. At\n the same time, the method stores another span path `exc_span_path`.\n\n `exc_span_path` is an anti-twin, contrarian twin, to `inc_span_path`\n which chooses the right span every time `inc_span_path` chooses the\n left span (and vice versa).\n\n inc=included, exc=excluded\n\n The method then asks if `inc_span_path` satisfies the\n span_path_is_decreasing() condition. If it does, the method returns\n the pair of span paths `inc_span_path, exc_span_path`. If it doesn't\n satisfy that condition, the method returns `None, None`.\n\n Parameters\n ----------\n ccnode_path: list[CCNode]\n a path (i.e., list) of CCNodes\n l_bit: list[int]\n a list of ints [0, 1]. 
N=len(l_bit)=len(ccnode_path)\n all_span: tuple[int, int]\n The span (0, number of words in osent)\n verbose: bool\n\n Returns\n -------\n list[tuple[int, int]], list[tuple[int, int]]\n inc_span_path, exc_span_path\n\n \"\"\"\n\n num_depths = len(l_bit)\n inc_span_path = []\n exc_span_path = []\n for depth in range(num_depths):\n ccnode = ccnode_path[depth]\n bit = l_bit[depth]\n inc_span_path.append(ccnode.span_pair[bit])\n exc_span_path.append(ccnode.span_pair[flip(bit)])\n if span_path_is_decreasing(inc_span_path):\n if verbose:\n print(\"included_span_path\", inc_span_path)\n print(\"excluded_span_path\", exc_span_path)\n draw_inc_exc_span_paths(all_span,\n inc_span_path,\n exc_span_path)\n return inc_span_path, exc_span_path\n else:\n return None, None\n\n #\n # @staticmethod\n # def get_donut(span,\n # sub_spans,\n # kept_sub_span):\n # \"\"\"\n #\n # Parameters\n # ----------\n # span: tuple[int]\n # sub_spans: list[list[int]]\n # kept_sub_span: int\n # either 0 or 1\n #\n # Returns\n # -------\n # list[int] | None\n #\n # \"\"\"\n # # print(\"subspan0, span\", sub_spans[0], span)\n # # print(\"subspan1, span\", sub_spans[1], span)\n # if not is_sub_span(sub_spans[0], span) or\\\n # not is_sub_span(sub_spans[1], span):\n # return None\n #\n # span_set = set(range(*span))\n # subset0 = set(range(*sub_spans[0]))\n # subset1 = set(range(*sub_spans[1]))\n #\n # diff_set = (span_set - subset0) - subset1\n # if kept_sub_span == 0:\n # return list(diff_set | subset0)\n # elif kept_sub_span == 1:\n # return list(diff_set | subset1)\n # else:\n # return False\n\n # @staticmethod\n # def get_donut_path(ccnode_path,\n # l_bit,\n # len_osent_words,\n # verbose=False):\n # \"\"\"\n #\n # Parameters\n # ----------\n # ccnode_path: list[CCNode]\n # l_bit: list[int]\n # list of 0's or 1's\n # len_osent_words: int\n #\n # Returns\n # -------\n # list[list[int]]\n #\n # \"\"\"\n # span_path = CCTree.get_span_path(ccnode_path, l_bit, verbose)\n # num_depths = len(l_bit)\n # for depth in range(num_depths):\n # if depth < num_depths - 1:\n # donut = CCTree.get_donut(span_all,\n # ccnode_path[0].spans,\n # kept_sub_span=l_bit[depth])\n # else:\n # donut = CCTree.get_donut(span_path[depth],\n # ccnode_path[depth + 1].spans,\n # kept_sub_span=l_bit[depth])\n # # elif depth == num_depths-1:\n # # donut = CCTree.get_donut(span_path[depth],\n # # [[0,0], [1,1]],\n # # kept_sub_span=l_bit[depth])\n # if not donut:\n # return None\n # else:\n # donut_path.append(donut)\n # if verbose:\n # print(\"donut path: \", donut_path)\n # return donut_path\n\n # @staticmethod\n # def get_donut_path(ccnode_path,\n # l_bit,\n # len_osent_words,\n # verbose=False):\n # \"\"\"\n #\n # Parameters\n # ----------\n # ccnode_path: list[CCNode]\n # l_bit: list[int]\n # list of 0's or 1's\n # len_osent_words: int\n #\n # Returns\n # -------\n # list[list[int]]\n #\n # \"\"\"\n # span_path = CCTree.get_span_path(ccnode_path, l_bit, verbose)\n # # add first dummy item to ccnode_path. 
Won't be used\n # if not span_path:\n # return None\n # donut_path = []\n # span_all = (0, len_osent_words)\n # num_depths = len(l_bit)\n # for depth in range(-1, num_depths-1):\n # if depth == -1:\n # donut = CCTree.get_donut(span_all,\n # ccnode_path[0].spans,\n # kept_sub_span=l_bit[depth])\n # else:\n # donut = CCTree.get_donut(span_path[depth],\n # ccnode_path[depth+1].spans,\n # kept_sub_span=l_bit[depth])\n # # elif depth == num_depths-1:\n # # donut = CCTree.get_donut(span_path[depth],\n # # [[0,0], [1,1]],\n # # kept_sub_span=l_bit[depth])\n # if not donut:\n # return None\n # else:\n # donut_path.append(donut)\n # if verbose:\n # print(\"donut path: \", donut_path)\n # return donut_path\n\n def get_ccsent(self, exc_span_path):\n \"\"\"\n This method returns the ccsent (cc sentence) corresponding to the\n span path `exc_span_path`. It calculates that ccsent by removing\n from all_span = (0, length of osent_words), all locs included in\n `exc_span_path` (that is why we call it an excluded span path,\n because it dictates what locs to remove/exclude) from all_span. The\n method also removes from all_span, the cclocs (conjunction\n locations), sep_locs (separator locations) and other_locs.\n\n Parameters\n ----------\n exc_span_path: list[tuple[int, int]]\n\n Returns\n -------\n str\n\n \"\"\"\n assert exc_span_path\n all_span = (0, len(self.osent_words))\n fin_set = set(range(*all_span))\n for exc_span in exc_span_path:\n fin_set -= set(range(*exc_span))\n fin_locs = sorted(list(fin_set))\n # remove also cclocs, sep_locs and other_locs\n new_fin_locs = copy(fin_locs)\n for i in fin_locs:\n for cctags_line in self.l_cctags_line:\n if i in cctags_line.cclocs or \\\n i in cctags_line.sep_locs or \\\n i in cctags_line.other_locs:\n if i in new_fin_locs:\n new_fin_locs.remove(i)\n ccsent = \" \".join([self.osent_words[loc] for loc in new_fin_locs])\n return ccsent\n\n def set_ccsents(self):\n \"\"\"\n similar to Openie6.data.get_sentences()\n\n This method sets self.ccsents. 
It does this by calling\n get_inc_exc_span_path() to get an `exc_span_path`, and then calling\n `get_ccsent(exc_span_path)` to get a ccsent.\n\n Returns\n -------\n None\n\n \"\"\"\n ccnode_paths = self.get_all_ccnode_paths()\n all_span = (0, len(self.osent_words))\n l_ccsent = []\n for path in ccnode_paths:\n if self.verbose:\n print(\"node path: \", [str(ccnode) for ccnode in path])\n for ccnode_path in ccnode_paths:\n path_len = len(ccnode_path)\n for l_bit in product([0, 1], repeat=path_len):\n l_bit = list(l_bit)\n _, exc_span_path = CCTree.get_inc_exc_span_path(\n ccnode_path,\n l_bit,\n all_span,\n self.verbose)\n if exc_span_path:\n ccsent = self.get_ccsent(exc_span_path)\n l_ccsent.append(ccsent)\n\n self.ccsents = l_ccsent\n\n # this mimics Openie6.get_sentences()\n # ccsents = []\n # for level0, ccnodes in self.level0_to_ccnodes.items():\n # fat_ll_spanned_loc = self.level0_to_fat_ll_spanned_loc[level0]\n # for k, node in enumerate(ccnodes):\n # fat_spanned_locs = sorted(fat_ll_spanned_loc[k])\n # spanned_locs = sorted(node.spanned_locs)\n # left_words = []\n # right_words = []\n # for i in fat_spanned_locs:\n # if i >= node.ccloc and i in spanned_locs:\n # pass\n # else:\n # left_words.append(self.osent_words[i])\n # if i <= node.ccloc and i in spanned_locs:\n # pass\n # else:\n # right_words.append(self.osent_words[i])\n # ccsents.append(' '.join(left_words))\n # ccsents.append(' '.join(right_words))\n # self.ccsents = ccsents\n\n # not used anymore\n # @staticmethod\n # def fatten_ll_spanned_loc(ll_spanned_loc,\n # level0_ccnodes,\n # level0,\n # verbose):\n # \"\"\"\n # similar to Openie6.data.get_sentences(sentences,\n # conj_same_level0,\n # conj_coords,\n # sentence_indices)\n # doesn't return anything but changes sentences\n #\n # conj = ccloc, conjunct = spans, coord = ccnode\n # sentences = ll_spanned_loc\n # sentence = level0_spanned_locs\n # conj_same_level0 = level0_cclocs\n # conj_coords = swaps\n # sentence_indices = osent_locs\n #\n # level0 = same/equal level0\n # li = list\n #\n #\n # Parameters\n # ----------\n # ll_spanned_loc: list[list[int]]\n # level0_ccnodes: list[CCNode]\n # level0: int\n #\n # Returns\n # -------\n # list[list[int]]\n #\n # \"\"\"\n # # print(\"level0=\", level0)\n # # print(\"ll_spanned_loc\", ll_spanned_loc)\n # k = 0\n # # print(\"num_nodes\", len(self.ccnodes))\n # for ccnode in level0_ccnodes:\n # # print(\"nnml\", \"node_id\", k)\n # k += 1\n # fat_spanned_locs = ccnode.get_spanned_locs(fat=True)\n # if not ll_spanned_loc:\n # ll_spanned_loc.append(fat_spanned_locs)\n # else:\n # # to_be_added_ll_loc = []\n # # to_be_removed_ll_loc = []\n # for spanned_locs in ll_spanned_loc:\n # # print(\"bhk\", spanned_locs)\n # # only ccnodes that satisfy this have fat-spanned_locs\n # if ccnode.spans[0][0] in spanned_locs:\n # # to_be_added_ll_loc.append(fat_spanned_locs)\n # # to_be_removed_ll_loc.append(spanned_locs)\n # if spanned_locs in ll_spanned_loc:\n # ll_spanned_loc.remove(spanned_locs)\n # ll_spanned_loc.append(fat_spanned_locs)\n #\n # # for l_loc in to_be_removed_ll_loc:\n # # ll_spanned_loc.remove(l_loc)\n # # for l_loc in to_be_added_ll_loc:\n # # ll_spanned_loc.append(l_loc)\n # if verbose:\n # print(\"new_ll_spanned_loc\", ll_spanned_loc)\n # return ll_spanned_loc\n\n # def set_spanned_attributes(self):\n # \"\"\"\n # similar to Openie6.data.coords_to_sentences()\n # ccsents ~ Openie6.split_sentences\n #\n # Returns\n # -------\n # None\n #\n # \"\"\"\n # # self.remove_bad_ccnodes() was called at the end of get_ccnodes()\n #\n 
# l_spanned_word = []\n # for ccnode in self.ccnodes:\n # for span in ccnode.spans:\n # l_spanned_word.append(\n # ' '.join(self.osent_words[span[0]:span[1]]))\n #\n # level0_nd_count = len(self.root_cclocs)\n # rooty_cclocs = copy(self.root_cclocs)\n # next_level0_nd_count = 0\n #\n # level0_ccnodes = []\n # ll_spanned_loc = [] # node,num_locs\n # self.level0_to_ccnodes = {}\n # self.level0_to_fat_ll_spanned_loc = {}\n #\n # # self.root_cclocs was filled by __init__\n # level0 = 0\n # while len(rooty_cclocs) > 0:\n # if self.verbose:\n # print(\"****************************beginning of while loop\")\n # print(\"rooty_cclocs\", rooty_cclocs)\n # print(\"level0, nd_count, next_nd_count\", level0, \"/\",\n # level0_nd_count,\n # next_level0_nd_count)\n # print(\"ll_spanned_loc\", ll_spanned_loc)\n # print(\"level0_ccnodes\", [str(x) for x in level0_ccnodes])\n #\n # rooty_ccloc = rooty_cclocs.pop(0)\n # rooty_ccnode = self.get_ccnode(rooty_ccloc)\n #\n # # nd=node\n # level0_nd_count -= 1\n # level0_ccnodes.append(rooty_ccnode)\n # ll_spanned_loc = [ccnode.spanned_locs\n # for ccnode in level0_ccnodes if ccnode]\n # if self.verbose:\n # print(\"level0_ccnodes\", [str(x) for x in level0_ccnodes])\n # print(\"ll_spanned_loc\", ll_spanned_loc)\n #\n # for child_ccloc in \\\n # self.par_ccloc_to_child_cclocs[rooty_ccloc]:\n # # child becomes new root as tree is pared down\n # rooty_cclocs.append(child_ccloc)\n # next_level0_nd_count += 1\n # if self.verbose:\n # print(\"level0, nd_count, next_nd_count\", level0, \"/\",\n # level0_nd_count,\n # next_level0_nd_count)\n #\n # if level0_nd_count == 0:\n # ll_spanned_loc = \\\n # CCTree.fatten_ll_spanned_loc(\n # ll_spanned_loc,\n # level0_ccnodes,\n # level0,\n # self.verbose)\n # if level0 not in self.level0_to_fat_ll_spanned_loc.keys():\n # self.level0_to_fat_ll_spanned_loc[level0] = []\n # self.level0_to_ccnodes[level0] = level0_ccnodes\n # self.level0_to_fat_ll_spanned_loc[level0] += ll_spanned_loc\n # level0 += 1\n # level0_nd_count = next_level0_nd_count\n # next_level0_nd_count = 0\n # level0_ccnodes = []\n # ll_spanned_loc = []\n # if self.verbose:\n # print(\"level0, nd_count, next_nd_count\", level0, \"/\",\n # level0_nd_count,\n # next_level0_nd_count)\n #\n # if self.verbose:\n # print(\"level0_to_ll_spanned_loc\", self.level0_to_fat_ll_spanned_loc)\n #\n # # setting value of self.ccsents done here in Openie6\n #\n # # self.ll_spanned_loc = ll_spanned_loc\n # # self.l_spanned_word = l_spanned_word\n #\n # # ccsents, l_spanned_word, ll_spanned_loc\n # # these 3 variables similar to:\n # # word_sentences, conj_words, sentences\n # # split_sentences, conj_words, sentence_indices_list\n\n def draw_self(self):\n \"\"\"\n This method draws self by calling the global function\n utils_tree.draw_polytree().\n\n Returns\n -------\n None\n\n \"\"\"\n\n def fun(x):\n return str(self.get_ccnode(x))\n\n polytree = get_mapped_polytree(self.par_ccloc_to_child_cclocs, fun)\n draw_polytree(polytree)\n\n\nif __name__ == \"__main__\":\n def main1():\n in_fp = \"tests/small_cctags.txt\"\n out_fp = \"tests/cc_ilabels.txt\"\n file_translate_tags_to_ilabels(\"cc\", in_fp, out_fp)\n\n\n def main2(forced_polytree=True):\n in_fp = \"tests/one_sample_cc_ilabels.txt\"\n # out_fp = \"tests/cc_trees.txt\"\n with open(in_fp, \"r\", encoding=\"utf-8\") as f:\n in_lines = get_ascii(f.readlines())\n\n l_osent = []\n lll_ilabel = []\n ll_ilabel = []\n for in_line in in_lines:\n if in_line:\n if in_line[0].isalpha():\n l_osent.append(in_line.strip())\n if ll_ilabel:\n 
lll_ilabel.append(ll_ilabel)\n ll_ilabel = []\n elif in_line[0].isdigit():\n words = get_words(in_line)\n # print(\"lkll\", words)\n ll_ilabel.append([int(x) for x in words])\n # last one\n if ll_ilabel:\n lll_ilabel.append(ll_ilabel)\n #\n # print(\"lklo\", l_osent)\n # print(\"lklo\", lll_ilabel)\n for k in range(len(l_osent)):\n osent = l_osent[k]\n tree = CCTree(osent,\n lll_ilabel[k],\n forced_polytree,\n verbose=True)\n tree.draw_self()\n for i, sent in enumerate(tree.ccsents):\n print(str(i + 1) + \". \" + sent)\n print()\n\n\n main1()\n main2()\n","repo_name":"rrtucci/SentenceAx","sub_path":"CCTree.py","file_name":"CCTree.py","file_ext":"py","file_size_in_byte":33799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16028731619","text":"import random\r\nname=\"sample\"\r\n\r\ndef check_Db(reply):\r\n file=open(\"history.txt\",\"r\")\r\n line=file.read().splitlines()\r\n file.close()\r\n for x in range(len(line)):\r\n if(reply == line[x]):\r\n return False\r\n return True\r\n\r\ndef get_sentence():\r\n file=open(\"sentences.txt\",\"r\")\r\n line=file.read().splitlines()\r\n file.close()\r\n return random.choice(line)\r\n\r\ndef parseReply(sentence,reply):\r\n file=open(\"history.txt\",\"r\")\r\n line=file.read().splitlines()\r\n file.close()\r\n sample=reply\r\n if(sentence in [\"Please tell me more...\"]):\r\n return ''\r\n elif(sentence in [\"As I have recalled you had said that\",\"You have said earlier that\"] and len(line)>5):\r\n # just pick random from history that is not the past reply\r\n sample=line[random.randrange(0,len(line)-2)]\r\n print(line)\r\n li=list(sample.split(' '))\r\n rule={'I':' you ',\"I'm\":\" you're \",'my':\" you're \", \"i\":\" you \",\"me\":\" you \", \"am\":\" are \",\"are\":\" am \",\"My\":\" your \",\"you\":\" i \",\"You\":\" I \",\"Your\":\" My \",\"your\":\" my \"}\r\n for x in range(len(li)):\r\n for key, value in rule.items():\r\n if(li[x]==key):\r\n li[x]=value\r\n str1=\" \"\r\n return str1.join(li)\r\n else:\r\n #normal conversion of string\r\n #Dictionary\r\n li=list(sample.split(' '))\r\n rule={'I':' you ',\"I'm\":\" you're \",'my':\" you're \", \"i\":\" you \",\"me\":\" you \", \"am\":\" are \",\"are\":\" am \",\"My\":\" your \",\"you\":\" i \",\"You\":\" I \",\"Your\":\" My \",\"your\":\" my \",\"yes\":\"\",\"and\":\"\",\"also\":\"\"}\r\n for x in range(len(li)):\r\n for key, value in rule.items():\r\n if(li[x]==key):\r\n li[x]=value\r\n str1=\" \"\r\n return str1.join(li)\r\n\r\n\r\ndef parse(reply):\r\n #implement the robots reply structure\r\n sen=get_sentence()\r\n return sen+\" \"+parseReply(sen,reply)\r\n\r\n\r\n\"\"\"\" responses\"\"\"\r\ndef robot(reply):\r\n \"\"\"response\"\"\"\r\n # robot does all the computing\r\n # first get the reply of user\r\n if(reply ==\"quit\"):\r\n print(\"bye bye\")\r\n return 0\r\n #second look if same reply happend previously then reply \"Yeah I Know\" and dont store the reply in history return boolean\r\n if(not(check_Db(reply))):\r\n print(\"Please Reply Something Else...\")\r\n else:\r\n # third if succesful parse the string as a reply\r\n response=parse(reply)\r\n #put reply in db\r\n file=open(\"history.txt\",\"a\")\r\n file.write(\"{}\\n\".format(reply))\r\n file.close()\r\n #ROBOT TALK\r\n print(\"Robot: \",response)\r\n converse()\r\n return 0\r\n\r\n\r\ndef user():\r\n \"\"\"reply\"\"\"\r\n #just reply anything and send to robot\r\n reply = input(\"{} : \".format(name))\r\n return reply\r\n\r\ndef converse():\r\n robot(user())\r\n 
return 0\r\n\r\ndef main():\r\n print(\"You're Name:\",end=\"\\n\")\r\n Name=input()\r\n global name\r\n name=Name\r\n \"\"\" first greet\"\"\"\r\n print(\"Good Day \",Name,\" How are you today?\")\r\n converse()\r\n\r\n\r\n# The entry point for program execution\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"nikkithelegendarypokemonster/Simple-Python-Chatbot","sub_path":"quiz psychotherapyy.py","file_name":"quiz psychotherapyy.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40026555091","text":"import geoip2.database\nfrom pathlib import Path\n\ndef getlocation(ip_address):\n # The directory containing this file\n thispath = Path(__file__).parent\n results = {}\n\n try:\n with geoip2.database.Reader(str(thispath) + '/maxmind/GeoLite2-Country.mmdb') as reader:\n response = reader.country(ip_address)\n ip_isocode = response.country.iso_code\n ip_country = response.country.name\n\n with geoip2.database.Reader(str(thispath) + '/maxmind/GeoLite2-ASN.mmdb') as reader:\n response = reader.asn(ip_address)\n ip_asn = response.autonomous_system_number\n ip_org = response.autonomous_system_organization\n\n\n results.update({'iso_code': ip_isocode})\n results.update({'ip_country': ip_country})\n results.update({'ip_asn': ip_asn})\n results.update({'ip_org': ip_org})\n\n except:\n results.update({'error': 'Unable to lookup geolocation information. Possible missing Maxmind database files.'})\n \n return results\n","repo_name":"F5-Labs/cryptonice","sub_path":"cryptonice/getgeo.py","file_name":"getgeo.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"} +{"seq_id":"3489431035","text":"import argparse\nimport pickle\n\n\ndef calculate_perplexity(lda_model_name, corpus_name):\n \"\"\"\n Calculates the perplexity of a previously saved LDA model.\n :param lda_model_name: The name of the pickled LDA model file.\n :param corpus_name: The name of the pickled corpus file used to build the LDA model.\n :return:\n \"\"\"\n with open(corpus_name, \"rb\") as file_in:\n corpus = pickle.load(file_in)\n with open(lda_model_name, \"rb\") as file_in:\n lda_model = pickle.load(file_in)\n perplexity = lda_model.log_perplexity(corpus)\n return perplexity\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"lda_model\", type=str,\n help=\"The pkl filename containing the lda model you want to calculate perplexity of.\")\n parser.add_argument(\"corpus\", type=str,\n help=\"The pkl filename of the corpus.\")\n args = parser.parse_args()\n perplexity = calculate_perplexity(args.lda_model, args.corpus)\n print(f\"Perplexity: {perplexity}\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"georgetown-cset/unicorn-topics","sub_path":"calculate_perplexity.py","file_name":"calculate_perplexity.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32514025220","text":"# -*- coding:utf-8 -*-\n\nfrom fake_useragent import UserAgent\nimport requests\nfrom requests.exceptions import ReadTimeout, ConnectionError, RequestException\n\n\nclass BoxOfficeSpider:\n def __init__(self):\n self.url = \"http://www.endata.com.cn/API/GetData.ashx\"\n\n def get_items(self):\n try:\n ua = UserAgent()\n headers = {'User-Agent': ua.random}\n data = {'MethodName': 'BoxOffice_GetPcHomeList'}\n response = 
requests.post(self.url, data=data, headers=headers, timeout=5)\n\n return response.json()\n except ReadTimeout:\n # 超时异常\n print('Timeout')\n except ConnectionError:\n # 连接异常\n print('Connection error')\n except RequestException:\n # 请求异常\n print('Request Error')\n except Exception:\n print(\"Something error\")\n\n def start(self):\n result = self.get_items()\n if result and result['Status'] == 1:\n data = result['Data']['Table1']\n for item in data:\n print(\"No {}. {} 票房:{} 万,排片占比:{}%,上映天数:{}\".format(item['tid'], item['MovieName'], item['boxoffice'], item['paipian'], item['releasedate']))\n\n\ndef main():\n spider = BoxOfficeSpider()\n spider.start()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"chenyuanqi/note","sub_path":"backend/python/spider/example/boxoffice.py","file_name":"boxoffice.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"1503188829","text":"class Solution:\r\n def __init__(self):\r\n self.path = []\r\n self.z = []\r\n self.time = -1.0\r\n self.profit = -1.0\r\n self.weight = -1.0\r\n self.single_objective = -1.0\r\n self.objectives = []\r\n\r\n def get_relation(self, other):\r\n val = 0\r\n for i in range(len(self.objectives)):\r\n if self.objectives[i] < other.objectives[i]:\r\n if val == -1:\r\n return 0\r\n val = 1\r\n elif self.objectives[i] > other.objectives[i]:\r\n if val == 1:\r\n return 0\r\n val = -1\r\n return val\r\n\r\n def equals_in_design_space(self, other):\r\n return self.path == other.path and self.z == other.z\r\n\r\n\r\n\r\n","repo_name":"najeeb-yusuf/traveling-thief-problem","sub_path":"TravelingThiefProblem/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43599434516","text":"import array\nfrom micropython import const\n\n\nclass LSM6DSOX:\n _CTRL3_C = const(0x12)\n _CTRL1_XL = const(0x10)\n _CTRL8_XL = const(0x17)\n _CTRL9_XL = const(0x18)\n\n _CTRL2_G = const(0x11)\n _CTRL7_G = const(0x16)\n\n _OUTX_L_G = const(0x22)\n _OUTX_L_XL = const(0x28)\n _MLC_STATUS = const(0x38)\n\n _DEFAULT_ADDR = const(0x6A)\n _WHO_AM_I_REG = const(0x0F)\n\n _FUNC_CFG_ACCESS = const(0x01)\n _FUNC_CFG_BANK_USER = const(0)\n _FUNC_CFG_BANK_HUB = const(1)\n _FUNC_CFG_BANK_EMBED = const(2)\n\n _MLC0_SRC = const(0x70)\n _MLC_INT1 = const(0x0D)\n _TAP_CFG0 = const(0x56)\n\n _EMB_FUNC_EN_A = const(0x04)\n _EMB_FUNC_EN_B = const(0x05)\n\n def __init__(\n self,\n bus,\n cs_pin=None,\n address=_DEFAULT_ADDR,\n gyro_odr=104,\n accel_odr=104,\n gyro_scale=2000,\n accel_scale=4,\n ucf=None,\n ):\n \"\"\"Initalizes Gyro and Accelerator.\n accel_odr: (0, 1.6Hz, 3.33Hz, 6.66Hz, 12.5Hz, 26Hz, 52Hz, 104Hz, 208Hz, 416Hz, 888Hz)\n gyro_odr: (0, 1.6Hz, 3.33Hz, 6.66Hz, 12.5Hz, 26Hz, 52Hz, 104Hz, 208Hz, 416Hz, 888Hz)\n gyro_scale: (245dps, 500dps, 1000dps, 2000dps)\n accel_scale: (+/-2g, +/-4g, +/-8g, +-16g)\n ucf: MLC program to load.\n \"\"\"\n self.bus = bus\n self.cs_pin = cs_pin\n self.address = address\n self._use_i2c = hasattr(self.bus, \"readfrom_mem\")\n\n if not self._use_i2c and cs_pin is None:\n raise ValueError(\"A CS pin must be provided in SPI mode\")\n\n # check the id of the Accelerometer/Gyro\n if self.__read_reg(_WHO_AM_I_REG) != 108:\n raise OSError(\"No LSM6DS device was found at address 0x%x\" % (self.address))\n\n # allocate scratch buffer for efficient conversions and memread op's\n self.scratch_int = 
array.array(\"h\", [0, 0, 0])\n\n SCALE_GYRO = {250: 0, 500: 1, 1000: 2, 2000: 3}\n SCALE_ACCEL = {2: 0, 4: 2, 8: 3, 16: 1}\n # XL_HM_MODE = 0 by default. G_HM_MODE = 0 by default.\n ODR = {\n 0: 0x00,\n 1.6: 0x08,\n 3.33: 0x09,\n 6.66: 0x0A,\n 12.5: 0x01,\n 26: 0x02,\n 52: 0x03,\n 104: 0x04,\n 208: 0x05,\n 416: 0x06,\n 888: 0x07,\n }\n\n gyro_odr = round(gyro_odr, 2)\n accel_odr = round(accel_odr, 2)\n\n # Sanity checks\n if not gyro_odr in ODR:\n raise ValueError(\"Invalid sampling rate: %d\" % accel_odr)\n if not gyro_scale in SCALE_GYRO:\n raise ValueError(\"invalid gyro scaling: %d\" % gyro_scale)\n if not accel_odr in ODR:\n raise ValueError(\"Invalid sampling rate: %d\" % accel_odr)\n if not accel_scale in SCALE_ACCEL:\n raise ValueError(\"invalid accelerometer scaling: %d\" % accel_scale)\n\n # Soft-reset the device.\n self.reset()\n\n # Load and configure MLC if UCF file is provided\n if ucf != None:\n self.load_mlc(ucf)\n\n # Set Gyroscope datarate and scale.\n # Note output from LPF2 second filtering stage is selected. See Figure 18.\n self.__write_reg(_CTRL1_XL, (ODR[accel_odr] << 4) | (SCALE_ACCEL[accel_scale] << 2) | 2)\n\n # Enable LPF2 and HPF fast-settling mode, ODR/4\n self.__write_reg(_CTRL8_XL, 0x09)\n\n # Set Gyroscope datarate and scale.\n self.__write_reg(_CTRL2_G, (ODR[gyro_odr] << 4) | (SCALE_GYRO[gyro_scale] << 2) | 0)\n\n self.gyro_scale = 32768 / gyro_scale\n self.accel_scale = 32768 / accel_scale\n\n def __read_reg(self, reg, size=1):\n if self._use_i2c:\n buf = self.bus.readfrom_mem(self.address, reg, size)\n else:\n try:\n self.cs_pin(0)\n self.bus.write(bytes([reg | 0x80]))\n buf = self.bus.read(size)\n finally:\n self.cs_pin(1)\n if size == 1:\n return int(buf[0])\n return [int(x) for x in buf]\n\n def __write_reg(self, reg, val):\n if self._use_i2c:\n self.bus.writeto_mem(self.address, reg, bytes([val]))\n else:\n try:\n self.cs_pin(0)\n self.bus.write(bytes([reg, val]))\n finally:\n self.cs_pin(1)\n\n def __read_reg_into(self, reg, buf):\n if self._use_i2c:\n self.bus.readfrom_mem_into(self.address, reg, buf)\n else:\n try:\n self.cs_pin(0)\n self.bus.write(bytes([reg | 0x80]))\n self.bus.readinto(buf)\n finally:\n self.cs_pin(1)\n\n def reset(self):\n self.__write_reg(_CTRL3_C, self.__read_reg(_CTRL3_C) | 0x1)\n for i in range(0, 10):\n if (self.__read_reg(_CTRL3_C) & 0x01) == 0:\n return\n time.sleep_ms(10)\n raise OSError(\"Failed to reset LSM6DS device.\")\n\n def set_mem_bank(self, bank):\n cfg = self.__read_reg(_FUNC_CFG_ACCESS) & 0x3F\n self.__write_reg(_FUNC_CFG_ACCESS, cfg | (bank << 6))\n\n def set_embedded_functions(self, enable, emb_ab=None):\n self.set_mem_bank(_FUNC_CFG_BANK_EMBED)\n if enable:\n self.__write_reg(_EMB_FUNC_EN_A, emb_ab[0])\n self.__write_reg(_EMB_FUNC_EN_B, emb_ab[1])\n else:\n emb_a = self.__read_reg(_EMB_FUNC_EN_A)\n emb_b = self.__read_reg(_EMB_FUNC_EN_B)\n self.__write_reg(_EMB_FUNC_EN_A, (emb_a & 0xC7))\n self.__write_reg(_EMB_FUNC_EN_B, (emb_b & 0xE6))\n emb_ab = (emb_a, emb_b)\n\n self.set_mem_bank(_FUNC_CFG_BANK_USER)\n return emb_ab\n\n def load_mlc(self, ucf):\n # Load MLC config from file\n with open(ucf, \"r\") as ucf_file:\n for l in ucf_file:\n if l.startswith(\"Ac\"):\n v = [int(v, 16) for v in l.strip().split(\" \")[1:3]]\n self.__write_reg(v[0], v[1])\n\n emb_ab = self.set_embedded_functions(False)\n\n # Disable I3C interface\n self.__write_reg(_CTRL9_XL, self.__read_reg(_CTRL9_XL) | 0x01)\n\n # Enable Block Data Update\n self.__write_reg(_CTRL3_C, self.__read_reg(_CTRL3_C) | 0x40)\n\n # Route signals on 
interrupt pin 1\n self.set_mem_bank(_FUNC_CFG_BANK_EMBED)\n self.__write_reg(_MLC_INT1, self.__read_reg(_MLC_INT1) & 0x01)\n self.set_mem_bank(_FUNC_CFG_BANK_USER)\n\n # Configure interrupt pin mode\n self.__write_reg(_TAP_CFG0, self.__read_reg(_TAP_CFG0) | 0x41)\n\n self.set_embedded_functions(True, emb_ab)\n\n def read_mlc_output(self):\n buf = None\n if self.__read_reg(_MLC_STATUS) & 0x1:\n self.__read_reg(0x1A, size=12)\n self.set_mem_bank(_FUNC_CFG_BANK_EMBED)\n buf = self.__read_reg(_MLC0_SRC, 8)\n self.set_mem_bank(_FUNC_CFG_BANK_USER)\n return buf\n\n def read_gyro(self):\n \"\"\"Returns gyroscope vector in degrees/sec.\"\"\"\n mv = memoryview(self.scratch_int)\n f = self.gyro_scale\n self.__read_reg_into(_OUTX_L_G, mv)\n return (mv[0] / f, mv[1] / f, mv[2] / f)\n\n def read_accel(self):\n \"\"\"Returns acceleration vector in gravity units (9.81m/s^2).\"\"\"\n mv = memoryview(self.scratch_int)\n f = self.accel_scale\n self.__read_reg_into(_OUTX_L_XL, mv)\n return (mv[0] / f, mv[1] / f, mv[2] / f)\n","repo_name":"OpenAtomFoundation/TencentOS-tiny","sub_path":"components/language/micropython/drivers/lsm6dsox/lsm6dsox.py","file_name":"lsm6dsox.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","stars":5900,"dataset":"github-code","pt":"32"} +{"seq_id":"20352747817","text":"def get_pairs(input):\n pairs = {}\n for i in range (2, len(input)):\n pairs[input[i].split(\" -> \")[0]] = input[i].split(\" -> \")[1]\n\n return pairs\n\ndef perform_step(template, pairs):\n result = [None] * (len(template) * 2 - 1)\n result_idx = 1\n result[0] = template[0]\n for i in range(1, len(template)):\n result[result_idx] = pairs[\"\" + template[i-1] + template[i]]\n result[result_idx+1] = template[i]\n result_idx += 2\n return result\n \n\ninputFile = open(\"input14\", \"r\")\ninput = inputFile.read().splitlines()\ntemplate = input[0]\npairs = get_pairs(input)\nresult = template\nfor i in range(0, 10):\n result = perform_step(result, pairs)\n\ncount = {}\nfor element in result:\n if element in count:\n count[element] += 1\n else:\n count[element] = 1\n\nlargest = 0\nsmallest = 1000000\nfor element in count.keys():\n if count[element] > largest:\n largest = count[element]\n if count[element] < smallest:\n smallest = count[element]\n\n\nprint(\"The answer is: \", largest-smallest)\n\n\n\n","repo_name":"matsrosbach/adventofcode21","sub_path":"day14/polymerization1.py","file_name":"polymerization1.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41200379533","text":"import scanpy as sc\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n'''Plot the resulting cluster annotations'''\n\n# Read AnnData object\ninput_path = '../qc_data/integrated.h5ad'\nadata = sc.read_h5ad(input_path)\n\n# Read markers genes\nmarkers = pd.read_csv('../data/markers.csv')\nmsk = np.array([marker in adata.var.index for marker in markers.Gene])\nmarkers = markers[msk]\nmarker_genes_dict = dict()\nfor ctype in np.unique(markers.Type):\n genes = list(markers.Gene[markers.Type == ctype])\n marker_genes_dict[ctype] = genes\n\n# List of resolutions\nress = [name for name in adata.obs.columns if name.startswith('leiden_res_')]\n\nfor res in ress:\n print(res)\n # Plot projection and dotplot\n fig = plt.figure(figsize=(12,14), dpi=150)\n gs = fig.add_gridspec(3,2)\n\n ax = fig.add_subplot(gs[0:2,:])\n sc.pl.umap(adata, 
color=res, ax=ax, show=False, \n return_fig=False, frameon=False, legend_loc='on data', size=10)\n\n ax = fig.add_subplot(gs[2,:])\n sc.pl.dotplot(adata, marker_genes_dict, res, dendrogram=True, ax=ax, show=False, return_fig=False)\n\n fig.tight_layout()\n fig.set_facecolor('white')\n \n # Save\n fig.savefig('../plots/{0}.png'.format(res))\n \n","repo_name":"saezlab/scHF_rna","sub_path":"scripts/run_clus_plots.py","file_name":"run_clus_plots.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32993603976","text":"\"\"\"empty message\n\nRevision ID: 245a2ea2ba\nRevises: 4ab6f712577\nCreate Date: 2016-02-19 17:09:48.885139\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '245a2ea2ba'\ndown_revision = '4ab6f712577'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table('picture',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('point_id', sa.Integer(), nullable=False),\n sa.Column('submission_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('filepath', sa.String, nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_picture_point_id'), 'picture', ['point_id'])\n op.execute('INSERT INTO picture (point_id, submission_id, user_id, filepath) SELECT point.id AS point_id, point.submission_id AS submission_id, \"user\".id as user_id, point.image AS filepath FROM point JOIN submission on point.submission_id = submission.id JOIN \"user\" on submission.user_id = \"user\".id where point.image IS NOT NULL AND point.submission_id IS NOT NULL')\n op.drop_column('point', 'image')\n\n\ndef downgrade():\n op.add_column('point', sa.Column('image', sa.String))\n op.execute('UPDATE point SET image = picture.filepath FROM picture WHERE picture.point_id = point.id')\n op.drop_table('picture')","repo_name":"OpenGridMap/pgis","sub_path":"migrations/versions/245a2ea2ba_.py","file_name":"245a2ea2ba_.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"40930709724","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\nfrom collections import defaultdict\nfrom sortedcontainers import SortedSet\n\nimport logging\nlog = logging.getLogger(\"mandoline\")\n\nfrom .graph import load_graph\n\n\n\ndef find_matches_adh(LG, piece, adhesion):\n for iu in LG:\n for match in LG.match(iu, piece):\n yield match\n\ndef main():\n parser = argparse.ArgumentParser(description='Enumerates H in G')\n\n parser.add_argument('H', help='Pattern graph H')\n parser.add_argument('G', help='Host graph G')\n parser.add_argument('--validate', action='store_true')\n parser.add_argument('--debug', action='store_true')\n\n args = parser.parse_args()\n\n # Set up logging\n ch = logging.StreamHandler(sys.stdout)\n if args.debug:\n ch.setLevel(logging.DEBUG)\n else:\n ch.setLevel(logging.INFO)\n log.addHandler(ch)\n log.setLevel(logging.DEBUG)\n\n # Load pattern and graph\n H = load_graph(args.H)\n log.info(\"Loaded pattern graph with {} vertices and {} edges\".format(len(H), H.num_edges()))\n log.info(H)\n\n G = load_graph(args.G)\n log.info(\"Loaded host graph with {} vertices and {} edges\".format(len(G), G.num_edges()))\n\n # Reduce to \\delta(H)-core\n minDeg = min(H.degree_sequence())\n G = G.compute_core(minDeg)\n\n log.info(\"Reduced host graph to {} vertices and {} edges\".format(len(G), 
G.num_edges()))\n\n LG, _ = G.to_lgraph()\n LG.compute_wr(len(H)-1)\n\n pieces = set()\n for P, _ in H.enum_patterns():\n pieces.update(P.decompose())\n\n log.info(\"Computed all {} pieces\".format(len(pieces)))\n\n matches = [defaultdict(SortedSet) for _ in range(len(pieces))]\n total = 0\n for i, piece in enumerate(pieces):\n log.info(\"Computed matches for piece {}\".format(piece))\n count = 0\n for iu in LG:\n for match in LG.match(iu, piece):\n boundary = match.restrict_to(piece.leaves)\n matches[i][boundary].add(iu)\n count += 1\n total += count\n log.info(\" Found {} matches\".format(count))\n log.info(\"Collected {} total matches\".format(total))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pgdr/mandoline","sub_path":"mandoline/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"13967997849","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\n\nhostName = \"localhost\"\nserverPort = 8080\n\nclass handler(BaseHTTPRequestHandler):\n def calculator(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(str('''\n \n

Hello World!

\n \n ''').encode())\n return\n\n \n def do_GET(self):\n if self.path == '/':\n self.calculator()\n\nwebServer = HTTPServer((hostName, serverPort), handler)\nprint(\"Server started http://%s:%s\" % (hostName, serverPort))\n\ntry:\n webServer.serve_forever()\nexcept KeyboardInterrupt:\n pass\n\nwebServer.server_close()\nprint(\"Server stopped.\")","repo_name":"sreegithub19/programming_languages","sub_path":"Python/training/_10_http.py","file_name":"_10_http.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"7386652008","text":"from argparse import ArgumentParser\nimport yaml\nimport numpy as np\nfrom keras.models import load_model\nfrom fx_replicator import (\n build_model, load_wave, save_wave, sliding_window, LossFunc\n)\n\ndef main():\n\n args = parse_args()\n\n with open(args.config_file) as fp:\n config = yaml.safe_load(fp)\n\n input_timesteps = config[\"input_timesteps\"]\n output_timesteps = config[\"output_timesteps\"]\n batch_size = config[\"batch_size\"]\n\n data = load_wave(args.input_file)\n\n # padding and rounded up to the batch multiple\n block_size = output_timesteps * batch_size\n prepad = input_timesteps - output_timesteps\n postpad = len(data) % block_size\n padded = np.concatenate((\n np.zeros(prepad, np.float32),\n data,\n np.zeros(postpad, np.float32)))\n x = sliding_window(padded, input_timesteps, output_timesteps)\n x = x[:, :, np.newaxis]\n\n model = load_model(\n args.model_file,\n custom_objects={\"LossFunc\": LossFunc(output_timesteps)})\n \n y = model.predict(x, batch_size=batch_size)\n y = y[:, -output_timesteps:, :].reshape(-1)[:len(data)]\n save_wave(y, args.output_file)\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\n \"--config_file\", \"-c\", default=\"./config.yml\",\n help=\"configuration file (*.yml)\")\n parser.add_argument(\n \"--input_file\", \"-i\",\n help=\"input wave file (48kHz/mono, *.wav)\")\n parser.add_argument(\n \"--output_file\", \"-o\", default=\"./predicted.wav\",\n help=\"output wave file (48kHz/mono, *.wav)\")\n parser.add_argument(\n \"--model_file\", \"-m\",\n help=\"input model file (*.h5)\")\n return parser.parse_args()\n\nif __name__ == '__main__':\n main()\n","repo_name":"tetsu/deep-amp","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19249812753","text":"from __future__ import print_function\nimport sys\nfrom csv import reader\nfrom pyspark import SparkContext\n\nif __name__ == \"__main__\":\n\n sc = SparkContext()\n lines = sc.textFile(sys.argv[1], 1)\n lines = lines.mapPartitions(lambda x: reader(x))\n\n header = lines.first()\n data = lines.filter(lambda x: x != header )\n\n#violation = lines.map(lambda x: (x[7], 1)).reduceByKey(lambda x, y: x + y)\n#output = violation.map(lambda x: x[0] + '\\t' + str(x[1]))\n\n violation = data.map(lambda x: (x[7], 1 ))\n violations_reduced = violation.reduceByKey(lambda x, y: x + y)\n output = violations_reduced.map(lambda x: x[0] + '\\t' + str(x[1])).sortBy(lambda x: x[0], True) \n\n output.saveAsTextFile(\"task2.out\")\n\n sc.stop()\n","repo_name":"Pruthviraj98/Panacea-CSGY6513","sub_path":"hw2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"963531701","text":"# -*- 
coding:utf-8 -*-\nimport threading\n# from tree import tree1\n# from tree2007 import tree1filter\nfrom stockclass import Stock\nfrom data import *\nfrom filter import *\nfrom verify import *\nfrom _datetime import timedelta\n\nclass stockView:\n def __init__(self):\n self.lastClose_bMd = 0.0\n self.lastOpen_bMd = 0.0\n self.lastClose_bUp = 0.0\n self.lastOpen_bUp = 0.0\n self.thisChange = 0.0\n self.thisOpen_bUp = 0.0\n self.thisClose_bUp = 0.0\n self.thisV_b = 0.0\n self.ifHengPan = 0\n self.result = \"GoDown\"\n def fromStock(self, sample, conditionday):\n todayindex = sample.indexof(conditionday)\n yesterdayindex = todayindex - 1\n nextdayindex = todayindex + 1\n if yesterdayindex > -1 and nextdayindex < len(sample.dates) and yesterdayindex > 20 and todayindex > 20:\n bMd = sample.bollMd(conditionday)\n bUp = sample.bollUp(conditionday)\n lastClose = sample.closePrices[yesterdayindex]\n lastOpen = sample.openPrices[yesterdayindex]\n thisChange = sample.changePrices[todayindex]\n thisOpen = sample.openPrices[todayindex]\n thisClose = sample.closePrices[todayindex]\n thisV_b = sample.v_b(sample.v_ma5[todayindex], sample.volume[todayindex])\n if bMd == 0.0 or bUp == 0.0:\n return None\n self.lastClose_bMd = floatFormat((lastClose-bMd)/bMd)\n self.lastOpen_bMd = floatFormat((lastOpen-bMd)/bMd)\n self.lastClose_bUp = floatFormat((lastClose-bUp)/bUp)\n self.lastOpen_bUp = floatFormat((lastOpen-bUp)/bUp)\n self.thisChange = floatFormat(sample.changePrices[todayindex])\n self.thisOpen_bUp = floatFormat((thisOpen-bUp)/bUp)\n self.thisClose_bUp = floatFormat((thisClose-bUp)/bUp)\n self.thisV_b = floatFormat(sample.v_b(sample.v_ma5[todayindex], sample.volume[todayindex]))\n self.ifHengPan = boolToInt(sample.checkIfHengPan(conditionday))\n if sample.changePrices[nextdayindex] > 0.05:\n self.result = \"High\"\n else:\n if sample.changePrices[nextdayindex] >=0 and sample.changePrices[nextdayindex] <= 0.05:\n self.result = \"Low\"\n else:\n if sample.changePrices[nextdayindex] < 0 and sample.changePrices[nextdayindex] >= -0.05:\n self.result = \"MinusLow\"\n else:\n self.result = \"MinusHigh\"\n return self\n else:\n return None\n \n\ndef printGoodStock(stocks, filter, day):\n goodstocks = []\n today = day\n goodstocks = filter(stocks, today)\n logging.info(filter.__name__ + \" Good stocks: \")\n if len(goodstocks) == 0:\n logging.info(\"No good stock found.\")\n for idx1, data1 in enumerate(goodstocks):\n if idx1 < len(goodstocks) - 1:\n for idx2, data2 in enumerate(goodstocks[idx1:]):\n if idx2 < len(goodstocks) - 1:\n if goodstocks[idx2].score < goodstocks[idx2 + 1].score:\n temp = goodstocks[idx2]\n goodstocks[idx2] = goodstocks[idx2 + 1]\n goodstocks[idx2 + 1] = temp\n for stock in goodstocks:\n logging.info(\"{0}\\t{1}\\t{2}\\tlast day of data on {3}\".format(stock.id, stock.name, stock.score, stock.dates[len(stock.dates) - 1]))\n\n# run the model to print the good stocks for next day and verify the model. 
threshold is 0.0 by default\ndef subprocessfunc(data, day):\n # from tree2007 import tree1filter\n from tree7y600pre import tree7y600prefilter\n printGoodStock(data, tree7y600prefilter, day)\n verify(data, tree7y600prefilter, day, 0.0)\n\n#To predict the result of some specific stock given in datas\ndef subprocessPredictfunc(datas, day):\n # from tree2007 import tree1filter\n from tree7y600pre import tree7y600prePrediction\n for data in datas:\n score = tree7y600prePrediction(data, day)\n logging.info(\"Stock {1} - High: {0[0]}, Low: {0[1]}, MinusLow: {0[2]}, MinusHigh: {0[3]}\".format(score, data.id))\n \n#main\n#prefixes = ['60050']\n\nprefixes = []\nprefs = ['600','601', '603']\nfor pref in prefs:\n for i in range(10):\n prefixes.append(pref + str(i).zfill(1))\n\nlogging.basicConfig(filename= datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")+ '.log',level=logging.DEBUG)\n\n\n# if(sys.getrecursionlimit() < 4000):\n# sys.setrecursionlimit(4000)\n# logging.info(\"Current stack limit: {0}\".format(sys.getrecursionlimit()))\n\nstartday = datetime.strptime(\"2015-07-01\", '%Y-%m-%d').date()\nendday = datetime.strptime(\"2015-10-16\", '%Y-%m-%d').date()\nstocks = fetchData_mongo(prefixes, startday, endday)\n# writeToArffFile(stocks,\"stock_7year.arff\")\n\nthreading.stack_size(231072000)\nsubthread = threading.Thread(target=subprocessfunc, args=(stocks, endday))\n# subthread = threading.Thread(target=subprocessPredictfunc, args=(stocks, endday))\nlogging.info(\"subthread started.\")\nsubthread.start()\nsubthread.join()\n\n# printGoodStock(stocks, tree1filter)\n# verify(stocks, tree1filter, date.today(), 0.0)\nlogging.shutdown()\nprint (\"Script ends.\")\n","repo_name":"josephzhong/stock","sub_path":"stockMain.py","file_name":"stockMain.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35345648333","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.core import urlresolvers\nfrom citation_manager import models\n\nregister = template.Library()\n\n@register.filter(is_safe=True)\n@stringfilter\ndef author_links(text):\n\ttext = text.replace(' ', ' ')\n\tfor author in models.Author.objects.filter(user_id__isnull=False):\n\t\tauthor_page = urlresolvers.reverse('person', args=(author.user_id.pk,))\n\t\ttext = text.replace(author.published_name, '%s' % (author_page, author.published_name))\n\treturn text\n\n@register.inclusion_tag('citation_manager/includes/publications.html')\ndef list_publications(pubs):\n\treturn {'pubs':pubs}","repo_name":"dlaz/robotlab-site","sub_path":"citation_manager/templatetags/citation_tags.py","file_name":"citation_tags.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27801419775","text":"# -*- coding: utf-8 -*-\nimport random\n\nimport numpy as np\n\nfrom memory import ExperienceBuffer\nfrom task import Task\n\nDv = 50 # 车的最大通信范围\nFv = 4000 # 车最大计算能力 MHZ\nalpha = 0.25\nMAX_TASK = 10 # 任务队列最大长度\n\nCAPACITY = 20000 # 缓冲池大小\nTASK_DISTRIBUTE = 4 # 可分的任务段数\nTASK_SOLT = 20 # 任务产生时隙\n\nnp.random.seed(0)\n\n\nclass Vehicle:\n # 位置:x,y 速度、方向:-1左,1右\n def __init__(self, id, loc_x, loc_y, direction, velocity=20):\n # 车的位置信息\n self.loc_x = loc_x\n self.loc_y = loc_y\n self.loc = [loc_x, loc_y]\n self.velocity = velocity # m/s\n self.direction = direction\n self.id = id\n # 功率和信道增益\n self.alpha = alpha\n # 通信范围\n self.range = 
Dv\n # 邻居表\n self.neighbor = []\n # 最近的mec\n self.mec_lest = None\n # 当前时间\n self.cur_frame = 0\n # 接受的任务的列表\n self.accept_task = []\n # 接受任务的数量\n self.sum_needDeal_task = 0\n # 此时刻有多少动作选则我\n self.len_action = 0\n # 当前可用资源\n self.resources = round((1 - np.random.randint(1, 5) / 10) * Fv, 2) # MHz\n # 表示当前是否有任务正在传输(0:没有,1:有)\n self.trans_task = 0\n # 当前处理的任务(用于计算奖励,不用于状态信息)\n self.cur_task = None\n # 任务队列\n self.total_task = []\n # 任务队列的长度\n self.len_task = len(self.total_task)\n # 当前状态信息\n self.otherState = []\n # 当前任务队列状态\n self.taskState = []\n # 去除邻居的状态信息用于邻居车观察和全局critic的处理\n self.excludeNeighbor_state = []\n # 缓冲池\n self.buffer = ExperienceBuffer(capacity=CAPACITY)\n # 总奖励\n self.reward = []\n # 任务溢出的数量\n self.overflow = 0\n # 需等待时长\n self.hold_on = 0\n # 上一个任务产生的时间\n self.lastCreatWorkTime = 0\n\n self.create_work()\n\n # 获得位置\n @property\n def get_location(self):\n return self.loc\n\n # 设置位置\n def set_location(self, loc_x, loc_y):\n self.loc_x = loc_x\n self.loc_y = loc_y\n self.loc = [self.loc_x, self.loc_y]\n\n # 获得x\n @property\n def get_x(self):\n return self.loc_x\n\n # 获得y\n @property\n def get_y(self):\n return self.loc_y\n\n # 产生任务 传入当前时间\n def create_work(self):\n # 每隔一段时间进行一次任务产生\n if (self.cur_frame - self.lastCreatWorkTime) % TASK_SOLT == 0:\n # 每次有0.6的概率产生任务\n if random.random() < 0.6:\n if self.len_task < MAX_TASK: # 队列不满\n task = Task(self, self.cur_frame)\n self.lastCreatWorkTime = self.cur_frame\n self.total_task.append(task)\n self.len_task += 1\n print(\"第{}辆车产生了任务\".format(self.id))\n self.overflow = 0\n else:\n print(\"第{}辆车任务队列已满\".format(self.id))\n self.overflow += 1\n\n \"\"\"\n 获得状态\n \"\"\"\n\n def get_state(self):\n self.otherState = []\n self.excludeNeighbor_state = []\n self.taskState = []\n\n # 位置信息 4\n self.otherState.extend(self.loc)\n self.otherState.append(self.velocity)\n self.otherState.append(self.direction)\n self.excludeNeighbor_state.extend(self.loc)\n self.excludeNeighbor_state.append(self.velocity)\n self.excludeNeighbor_state.append(self.direction)\n\n # 资源信息(可用资源)\n self.otherState.append(self.resources)\n self.excludeNeighbor_state.append(self.resources)\n\n # 当前是否有任务在传输\n self.excludeNeighbor_state.append(self.trans_task)\n self.otherState.append(self.trans_task)\n\n # 正在传输的任务信息\n # if self.trans_task is not None:\n # self.otherState.append(self.trans_task.need_trans_size)\n # self.excludeNeighbor_state.append(self.trans_task.need_trans_size)\n # else:\n # self.otherState.append(0)\n # self.excludeNeighbor_state.append(0)\n self.otherState.append(self.len_task) # 当前队列长度\n self.excludeNeighbor_state.append(self.len_task)\n\n # 邻居表 7*数量\n for neighbor in self.neighbor:\n self.otherState.extend(neighbor.position) # 位置\n self.otherState.append(neighbor.velocity) # 速度\n self.otherState.append(neighbor.direction) # 方向\n self.otherState.append(neighbor.resources) # 可用资源\n\n # 最近mec的状态 6\n if self.mec_lest is not None:\n self.otherState.extend(self.mec_lest.get_state())\n\n # 任务状态信息\n for i in range(MAX_TASK):\n if i < self.len_task:\n task = self.total_task[i]\n self.taskState.append([task.create_time, task.need_trans_size, task.need_precess_cycle, task.max_time])\n else:\n self.taskState.append([0, 0, 0, 0])\n\n return self.excludeNeighbor_state\n","repo_name":"chenyuhaoCYH/DRL","sub_path":"experiment/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} 
+{"seq_id":"38569570389","text":"'''\nhttps://leetcode.com/problems/partition-to-k-equal-sum-subsets/discuss/2281522/Python-or-90ms-Faster-than-92-or-95-Less-Memory\n\n'''\n\nclass Solution:\n def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n \n s = sum(nums)\n n=len(nums)\n \n if s%k!=0:\n return False\n \n subset_val = s//k\n \n arr=[0]*(k)\n \n def recur(idx,subset_val):\n if idx==n:\n return True\n \n curr = nums[idx]\n \n for j in range(k):\n if arr[j]+curr<=subset_val:\n arr[j]+=curr\n if recur(idx+1,subset_val):\n return True\n arr[j]-=curr\n if arr[j]==0:return False\n \n return recur(0,subset_val)\n \n ","repo_name":"iamheavymetalx7/LeetCode-Submissions","sub_path":"0698-partition-to-k-equal-sum-subsets/0698-partition-to-k-equal-sum-subsets.py","file_name":"0698-partition-to-k-equal-sum-subsets.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8065018900","text":"from matplotlib import pyplot as plt\n\nfrom model.name2idx import V\n\n\ndef timecourse(sim):\n plt.figure(figsize=(16, 12))\n plt.rcParams['font.size'] = 18\n plt.rcParams['axes.linewidth'] = 1\n plt.rcParams['lines.linewidth'] = 2\n\n plt.subplots_adjust(wspace=0.4, hspace=0.5)\n\n # IKK activity (Fig.2E) ----------------------------------------------------\n plt.subplot(2, 2, 1)\n plt.plot(\n sim.ta[200:], 102*sim.Ya[0, 200:, V.IKK2]+305*sim.Ya[0, 200:, V.IKK3], \n color='navy', label='IKK2+IKK3'\n )\n plt.plot(\n sim.ta[200:], 102*sim.Ya[0, 200:, V.IKK2], \n color='tomato', label='IKK2'\n )\n plt.plot(\n sim.ta[200:], 305*sim.Ya[0, 200:, V.IKK3], \n color='brown', label='IKK3'\n )\n plt.xlim(-0.5, 9.5)\n plt.xticks([0, 1.5, 3, 4.5, 6, 7.5, 9])\n plt.ylim(0, 120)\n plt.xlabel('Time (min)')\n plt.ylabel('IKK activity\\n(relative % max.)')\n plt.yticks([0, 20, 40, 60, 80, 100])\n plt.legend(loc='upper left', frameon=False)\n\n # BAY (Fig.3D right) -------------------------------------------------------\n plt.subplot(2, 2, 2)\n plt.plot(\n sim.ta[200:], 115*sim.Ya[0, 200:, V.TAK1a], \n color='navy', label='WT'\n )\n plt.plot(\n sim.tb[200:], 115*sim.Yb[200:, V.TAK1a], \n color='brown', label='BAY'\n )\n plt.xlim(-0.5, 9.5)\n plt.xticks([0, 3, 6, 9])\n plt.ylim(0, 120)\n plt.xlabel('Time (min)')\n plt.ylabel('TAK1 activity\\n(relative % max.)')\n plt.yticks([0, 50, 100])\n plt.legend(ncol=2, loc='upper left', frameon=False)\n\n # Feedback(-) (Fig.3F right) -----------------------------------------------\n plt.subplot(2, 2, 3)\n plt.plot(\n sim.ta[200:], 115*sim.Ya[0, 200:, V.TAK1a], \n color='navy', label='Intact'\n )\n plt.plot(\n sim.ta[200:], 115*sim.Ya[1, 200:, V.TAK1a], \n color='lime', label='Feedback(-)'\n )\n plt.xlim(-0.5, 9.5)\n plt.xticks([0, 3, 6, 9])\n plt.ylim(0, 120)\n plt.xlabel('Time (min)')\n plt.ylabel('TAK1 activity\\n(relative % max.)')\n plt.yticks([0, 50, 100])\n plt.legend(ncol=2, loc='upper right', frameon=False)\n\n # Feedback(-)+P668(↓) (Fig.3G right) ---------------------------------------\n plt.subplot(2, 2, 4)\n plt.plot(\n sim.ta[200:], 115*sim.Ya[0, 200:, V.TAK1a], \n color='navy', label='Intact'\n )\n plt.plot(\n sim.ta[200:], 115*sim.Ya[2, 200:, V.TAK1a], \n color='orchid', label='Feedback(-)+\\nP668(↓)'\n )\n plt.xlim(-0.5, 9.5)\n plt.xticks([0, 3, 6, 9])\n plt.ylim(0, 120)\n plt.xlabel('Time (min)')\n plt.ylabel('TAK1 activity\\n(relative % max.)')\n plt.yticks([0, 50, 100])\n plt.legend(ncol=2, loc='upper right', frameon=False)\n\n 
plt.show()","repo_name":"okadalabipr/Shinohara2014","sub_path":"plot_func.py","file_name":"plot_func.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72436666650","text":"from . import fasta\nfrom . import fastq\n\nclass Iterator(object):\n def __init__(self, fname):\n self.flist = []\n if fname.endswith(\".txt\"):\n for line in open(fname):\n n = line.strip()\n if len(n) > 0:\n self.flist.append(n)\n else:\n self.flist.append(fname)\n\n self.index = 0\n self.iterator = self.__create_iterator(self.flist[0])\n\n def __create_iterator(self, fname):\n if fname.endswith('.fasta') or fname.endswith(\".fasta.gz\"):\n print(\"-\", fname)\n return fasta.Iterator(fname)\n elif fname.endswith('.fastq'):\n return fastq.Iterator(fname)\n elif fname.endswith(\".txt\"):\n return Iterator(fname)\n else:\n return None\n\n def __iter__(self): return self\n\n \n def __next__(self):\n while self.index < len(self.flist):\n try:\n return self.iterator.__next__()\n except StopIteration:\n self.index += 1\n if self.index < len(self.flist):\n self.iterator = self.__create_iterator(self.flist[self.index])\n else:\n break\n \n\n raise StopIteration\n","repo_name":"lemene/mbio","sub_path":"mbio/io/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30890249758","text":"import logging\nfrom typing import Dict, List, Optional, Union\n\nfrom bkapi_client_core.exceptions import APIGatewayResponseError\nfrom django.conf import settings\nfrom typing_extensions import Protocol\n\nfrom paasng.infras.bkmonitorv3.backend.apigw import Client\nfrom paasng.infras.bkmonitorv3.backend.esb import get_client_by_username\nfrom paasng.infras.bkmonitorv3.definitions import BkMonitorSpace\nfrom paasng.infras.bkmonitorv3.exceptions import (\n BkMonitorApiError,\n BkMonitorGatewayServiceError,\n BkMonitorSpaceDoesNotExist,\n)\nfrom paasng.infras.bkmonitorv3.params import QueryAlertsParams\n\nlogger = logging.getLogger(__name__)\n\n\nclass BkMonitorBackend(Protocol):\n \"\"\"Describes protocols of calling API service\"\"\"\n\n def metadata_get_space_detail(self, *args, **kwargs) -> Dict:\n ...\n\n def metadata_create_space(self, *args, **kwargs) -> Dict:\n ...\n\n def metadata_update_space(self, *args, **kwargs) -> Dict:\n ...\n\n def search_alert(self, *args, **kwargs) -> Dict:\n ...\n\n def promql_query(self, *args, **kwargs) -> Dict:\n ...\n\n def as_code_import_config(self, *args, **kwargs) -> Dict:\n ...\n\n\nclass BKMonitorSpaceManager:\n \"\"\"BK Monitor Space Management API provider\"\"\"\n\n def __init__(self, backend: BkMonitorBackend):\n self.client = backend\n\n def create_space(self, space: BkMonitorSpace) -> BkMonitorSpace:\n \"\"\"在蓝鲸监控上创建应用对应的空间\"\"\"\n data = {\n \"space_name\": space.space_name,\n \"space_id\": space.space_id,\n \"space_type_id\": space.space_type_id.value,\n \"creator\": space.creator,\n }\n try:\n resp = self.client.metadata_create_space(data=data)\n except APIGatewayResponseError as e:\n raise BkMonitorGatewayServiceError('Failed to create space on BK Monitor') from e\n\n if not resp.get('result'):\n logger.error('Failed to create space on BK Monitor, resp:%s \\ndata: %s', resp, data)\n raise BkMonitorApiError(resp['message'])\n\n resp_data = resp.get('data', {})\n return BkMonitorSpace(\n space_type_id=resp_data[\"space_type_id\"],\n 
space_id=resp_data[\"space_id\"],\n space_name=resp_data[\"space_name\"],\n creator=resp_data[\"creator\"],\n id=resp_data[\"id\"],\n space_uid=resp_data[\"space_uid\"],\n extra_info=resp_data,\n )\n\n def update_space(self, space: BkMonitorSpace) -> BkMonitorSpace:\n \"\"\"更新空间\"\"\"\n data = {\n \"space_name\": space.space_name,\n \"space_id\": space.space_id,\n \"space_type_id\": space.space_type_id,\n \"creator\": space.creator,\n }\n try:\n resp = self.client.metadata_update_space(\n data=data,\n )\n except APIGatewayResponseError as e:\n raise BkMonitorGatewayServiceError('Failed to update app space on BK Monitor') from e\n\n if not resp.get('result'):\n logger.info(f'Failed to update app space on BK Monitor, resp:{resp} \\ndata: {data}')\n raise BkMonitorApiError(resp['message'])\n\n resp_data = resp.get('data', {})\n return BkMonitorSpace(\n space_type_id=resp_data[\"space_type_id\"],\n space_id=resp_data[\"space_id\"],\n space_name=resp_data[\"space_name\"],\n creator=resp_data[\"creator\"],\n id=resp_data[\"id\"],\n space_uid=resp_data[\"space_uid\"],\n extra_info=resp_data,\n )\n\n def get_space_detail(self, space: BkMonitorSpace) -> BkMonitorSpace:\n \"\"\"获取空间详情\"\"\"\n data = {\"space_type_id\": space.space_type_id, \"space_id\": space.space_id}\n try:\n resp = self.client.metadata_get_space_detail(data=data)\n except APIGatewayResponseError as e:\n raise BkMonitorGatewayServiceError('Failed to get app space detail on BK Monitor') from e\n\n # 目前监控的API返回值只有 true 和 false,没有更详细的错误码来确定是否空间已经存在\n # 监控侧暂时也没有规划添加错误码来标识空间是否已经存在\n if not resp.get('result'):\n logger.info('Failed to get space detail of %s on BK Monitor, resp: %s', space, resp)\n raise BkMonitorSpaceDoesNotExist(resp['message'])\n\n resp_data = resp.get('data', {})\n return BkMonitorSpace(\n space_type_id=resp_data[\"space_type_id\"],\n space_id=resp_data[\"space_id\"],\n space_name=resp_data[\"space_name\"],\n creator=resp_data[\"creator\"],\n id=resp_data[\"id\"],\n space_uid=resp_data[\"space_uid\"],\n extra_info=resp_data,\n )\n\n\nclass BkMonitorClient:\n \"\"\"API provided by BK Monitor\n\n :param backend: client 后端实际的 backend\n \"\"\"\n\n def __init__(self, backend: BkMonitorBackend):\n self.client = backend\n\n def query_alerts(self, query_params: QueryAlertsParams) -> List:\n \"\"\"查询告警\n\n :param query_params: 查询告警的条件参数\n \"\"\"\n try:\n resp = self.client.search_alert(json=query_params.to_dict())\n except APIGatewayResponseError:\n # 详细错误信息 bkapi_client_core 会自动记录\n raise BkMonitorGatewayServiceError('an unexpected error when request bkmonitor apigw')\n\n if not resp.get('result'):\n raise BkMonitorApiError(resp['message'])\n\n return resp.get('data', {}).get('alerts', [])\n\n def promql_query(self, bk_biz_id: Optional[str], promql: str, start: str, end: str, step: str) -> List:\n \"\"\"\n 通过 promql 语法访问蓝鲸监控,获取容器 cpu / 内存等指标数据\n\n :param bk_biz_id: 集群绑定的蓝鲸业务 ID\n :param promql: promql 查询语句,可参考 PROMQL_TMPL\n :param start: 起始时间戳,如 \"1622009400\"\n :param end: 结束时间戳,如 \"1622009500\"\n :param step: 步长,如:\"1m\"\n :returns: 时序数据 Series\n \"\"\"\n params: Dict[str, Union[str, int, None]] = {\n 'promql': promql,\n 'start_time': start,\n 'end_time': end,\n 'step': step,\n 'bk_biz_id': bk_biz_id,\n }\n\n # TODO: 监控功能对接蓝鲸应用空间时需要将参数修改成传递 space_uid\n headers = {'X-Bk-Scope-Space-Uid': f'bkcc__{bk_biz_id}'}\n try:\n resp = self.client.promql_query(headers=headers, data=params)\n except APIGatewayResponseError:\n # 详细错误信息 bkapi_client_core 会自动记录\n raise BkMonitorGatewayServiceError('an unexpected error when request 
bkmonitor apigw')\n\n if resp.get('error'):\n raise BkMonitorApiError(resp['error'])\n\n return resp.get('data', {}).get('series', [])\n\n def as_code_import_config(\n self, configs: Dict, biz_or_space_id: int, config_group: str, overwrite: bool = False, incremental: bool = True\n ):\n \"\"\"通过 ascode 下发告警规则\n\n :param biz_or_space_id: 业务或空间 ID\n :param config_group: 配置分组组名, 默认 default\n :param overwrite: 是否跨分组覆盖同名配置,\n :param incremental: 是否增量更新\n \"\"\"\n try:\n resp = self.client.as_code_import_config(\n data={\n \"bk_biz_id\": biz_or_space_id,\n \"configs\": configs,\n \"app\": config_group,\n \"overwrite\": overwrite,\n \"incremental\": incremental,\n }\n )\n except APIGatewayResponseError:\n raise BkMonitorGatewayServiceError('an unexpected error when request bkmonitor apigw')\n\n if not resp.get('result'):\n raise BkMonitorApiError(resp['message'])\n\n\ndef _make_bk_minotor_backend() -> BkMonitorBackend:\n if settings.ENABLE_BK_MONITOR_APIGW:\n apigw_client = Client(\n endpoint=settings.BK_API_URL_TMPL,\n stage=settings.BK_MONITOR_APIGW_SERVICE_STAGE,\n )\n apigw_client.update_bkapi_authorization(\n bk_app_code=settings.BK_APP_CODE,\n bk_app_secret=settings.BK_APP_SECRET,\n )\n return apigw_client.api\n\n # ESB 开启了免用户认证,但限制用户名不能为空,因此给默认用户名\n esb_client = get_client_by_username(\"admin\")\n return esb_client.monitor_v3\n\n\ndef make_bk_monitor_client() -> BkMonitorClient:\n return BkMonitorClient(_make_bk_minotor_backend())\n\n\ndef make_bk_monitor_space_manager() -> BKMonitorSpaceManager:\n return BKMonitorSpaceManager(_make_bk_minotor_backend())\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/infras/bkmonitorv3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"6197779522","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ni=0\nv = []\ncounter = [0,0,0,0,0,0]\nfor i in range(100):\n\n x= np.random.randint(1,7)\n v.append(x)\n if(v[i]==1):\n counter[0] += 1\n elif(v[i]==2):\n counter[1] += 1\n elif(v[i]==3):\n counter[2] += 1 \n elif(v[i]==4):\n counter[3] += 1 \n elif(v[i]==5):\n counter[4] += 1 \n elif(v[i]==6):\n counter[5] += 1 \n \nplt.hist(v, bins=np.linspace(1,6), align='mid', ec='b')\nplt.show() \n\n","repo_name":"markomamic22/PSU_LV","sub_path":"LV2/Drugi.py","file_name":"Drugi.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25498575162","text":"class Node: # Klasa Node obslugujaca wezly listy z dowiazaniami\r\n def __init__(self, initdata): # Tworzenie obiektu klasy Node\r\n self.data = initdata # z Dana initdata\r\n self.next = None # Oraz adresem nastepnego rownym None\r\n\r\n\r\n def getData(self): # Funkcja zwracajaca wartosc Node\r\n return self.data\r\n\r\n\r\n def getNext(self): # Funkcja zwracajaca adres do nastepnego Node\r\n return self.next\r\n\r\n\r\n def setData(self, newdata): # Funkcja zmieniajaca wartosc Node\r\n self.data = newdata\r\n\r\n\r\n def setNext(self, newnext): # Funkcja ustwiajaca inny Node jako nastepny\r\n self.next = newnext\r\n\r\n\r\nclass OrderedList: # Klasa obiektu listy z dowiazaniami, uporzadkowana.\r\n def __init__(self): # Tworzenie obiektu klasy\r\n self.head = None # Na poczatku head = None poniewaz nie ma zadnych Node\r\n\r\n\r\n def __str__(self): # Zmiana metody print() po to by moc wyswietlac obiekty klasy OderedList\r\n Elementy = [] # 
Stworzenie pustej listy\r\n Element = self.head # Ustalenie pierwszego elementu\r\n while(Element != None): # Dopoki nie dotre do ostatniego elementu rownego None\r\n Elementy.append(Element.getData()) # Dopisywanie do listy wartosci wezlow\r\n Element = Element.getNext() # Przejscie na kolejny wezel\r\n return str(Elementy) # Zwrocenie tabelki jako string\r\n\r\n\r\n def isEmpty(self): # Funkcja sprawdzajaca czy obiekt jest pusty\r\n return self.head == None # Jesli head jest rowny None oznacza ze nie ma zadnych wezlow czyli jest pusty\r\n\r\n\r\n def add(self, item): # Funkcja dodajaca wezel z item do listy\r\n Element = self.head # Ustalenie pierwszego elementu\r\n Ostatni = None # ustalenie elementu ostatnio mijanego czyli poprzedniego\r\n while Element != None: # Przejscie po wszystkich wezlach listy\r\n if Element.getData() < item: # Jesli wartosc w obecnym wezle jest mniejsza od wstawianego item\r\n break # Konczy petle by obecny byl odpowiedni wezel\r\n else: # Jesli nie\r\n Ostatni = Element # To wartosc poprzedniego jest rowna obecnemu\r\n Element = Element.getNext() # A obecny elemeny przyjmuje nastepny\r\n Tymczasowy = Node(item) # Stworzenie nowego wezla ktory zostanie wstawiony\r\n if Ostatni == None: # Jesli Poprzedni jest rowny None czyli obecny element to poczatek listy\r\n Tymczasowy.setNext(self.head)\r\n self.head = Tymczasowy # Tymczasowy zostaje wstawiony przed head i staje sie nowym head\r\n else: # Jesli nie\r\n Tymczasowy.setNext(Element)\r\n Ostatni.setNext(Tymczasowy) # Tymczasowy zostaje wepchniety miedzy wartosc mniejsza i wieksza\r\n\r\n\r\n def size(self): # Funkcja sprawdzajaca ilosc elementow w obiekcie\r\n Element = self.head # Ustwienie pierwszego elementu\r\n count = 0 # Ustawienie licznika\r\n while Element != None: # petla iterujaca po wszystkich wezlach\r\n count = count + 1 # Zwiekszenie licznika\r\n Element = Element.getNext() # Przejscie na kolejny element\r\n\r\n return count # Zwraca wartosc licznika\r\n\r\n\r\n def search(self, item): # Funkcja sprawdzajaca czy jest item na liscie oraz na jakim miejscu\r\n Indeks = 0 # Ustalenie poczatkowej wartosci indeksu\r\n Znalezione = False # Ustawienie poczatkowej wartosci Znalezione\r\n Element = self.head # Ustawienie wezla poczatkowego\r\n if Element.getData() == item: # Jesli wartosc wezla jest rowna item\r\n Znalezione = True # Zmiana Znalezione na True\r\n else: # Jesli nie\r\n while(Element != None): # Petla iterujaca po wszystkich wezlach\r\n if Element.getData() == item: # Jesli wartosc wezla jest rowna item\r\n Znalezione = True # Zmiana Znalezione na True\r\n return Indeks, Znalezione # Zwraca wartosc Indeks oraz Czy jest Znalezione\r\n Element = Element.getNext() # Przejscie na kolejny element\r\n Indeks += 1 # Zwiekszenie indeks o 1\r\n return Indeks, Znalezione # Zwraca wartosc Indeks oraz Czy jest Znalezione\r\n\r\n\r\n def remove(self): # Usuniecie najwiekszego elementu z listy\r\n self.head = self.head.getNext() # Usuniecie poprzez ustanowienie drugiego elementu pierwszym\r\n\r\n\r\nif __name__ == '__main__':\r\n Lista = OrderedList()\r\n print('Czy lista jest pusta? ' + str(Lista.isEmpty()))\r\n Lista.add(3)\r\n print(Lista)\r\n Lista.add(4)\r\n print(Lista)\r\n Lista.add(7)\r\n print(Lista)\r\n Lista.add(1)\r\n print(Lista)\r\n Lista.add(8)\r\n print(Lista)\r\n print('Czy lista jest pusta? 
' + str(Lista.isEmpty()))\r\n print('Rozmiar Listy: ' + str(Lista.size()))\r\n Liczba = 4\r\n Indeks, Wynik = Lista.search(Liczba)\r\n if Wynik == True:\r\n print('Liczba ' + str(Liczba) + ' znajduje sie na pozycji ' + str(Indeks))\r\n else:\r\n print('Liczba ' + str(Liczba) + ' nie znajduje sie na liscie.')\r\n Liczba = 9\r\n Indeks, Wynik = Lista.search(Liczba)\r\n if Wynik == True:\r\n print('Liczba ' + str(Liczba) + ' znajduje sie na pozycji ' + str(Indeks))\r\n else:\r\n print('Liczba ' + str(Liczba) + ' nie znajduje sie na liscie.')\r\n print(Lista)\r\n Lista.remove()\r\n print(Lista)\r\n Lista.remove()\r\n print(Lista)\r\n Lista.remove()\r\n print(Lista)\r\n Lista.remove()\r\n print(Lista)\r\n Lista.remove()\r\n print(Lista)\r\n\r\n","repo_name":"JozAle/List","sub_path":"Wiazania.py","file_name":"Wiazania.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20338792098","text":"\"\"\"Contains the Isotherm base class.\"\"\"\n\nimport typing as t\n\nfrom pygaps import logger\nfrom pygaps.core.adsorbate import Adsorbate\nfrom pygaps.core.material import Material\nfrom pygaps.units.converter_mode import _LOADING_MODE\nfrom pygaps.units.converter_mode import _MATERIAL_MODE\nfrom pygaps.units.converter_mode import _PRESSURE_MODE\nfrom pygaps.units.converter_mode import c_temperature\nfrom pygaps.units.converter_unit import _PRESSURE_UNITS\nfrom pygaps.units.converter_unit import _TEMPERATURE_UNITS\nfrom pygaps.utilities.exceptions import ParameterError\nfrom pygaps.utilities.hashgen import isotherm_to_hash\n\nSHORTHANDS = {\n 'm': \"material\",\n 't': \"temperature\",\n 'a': \"adsorbate\",\n}\n\n\nclass BaseIsotherm():\n \"\"\"\n Class which contains the general data for an isotherm, real or model.\n\n The isotherm class is the parent class that both PointIsotherm and\n ModelIsotherm inherit. It is designed to contain the information about\n an isotherm (such as material, adsorbate, data units etc.) 
but without\n any of the data itself.\n\n Think of this class as a extended python dictionary.\n\n Parameters\n ----------\n material : str\n Name of the material on which the isotherm is measured.\n adsorbate : str\n Isotherm adsorbate.\n temperature : float\n Isotherm temperature.\n\n Other Parameters\n ----------------\n pressure_mode : str, optional\n The pressure mode, either 'absolute' pressure or 'relative'\n ('relative%') in the form of p/p0.\n pressure_unit : str, optional\n Unit of pressure, if applicable.\n loading_basis : str, optional\n Whether the adsorbed amount is in terms of either 'volume_gas'\n 'volume_liquid', 'molar', 'mass', or a fraction/percent basis.\n loading_unit : str, optional\n Unit in which the loading basis is expressed.\n material_basis : str, optional\n Whether the underlying material is in terms of 'per volume'\n 'per molar amount' or 'per mass' of material.\n material_unit : str, optional\n Unit in which the material basis is expressed.\n\n Notes\n -----\n The class is also used to prevent duplication of code within the child\n classes, by calling the common inherited function before any other specific\n implementation additions.\n\n The minimum arguments required to instantiate the class are\n ``material``, ``temperature', ``adsorbate``.\n \"\"\"\n\n # strictly required attributes\n _required_params = [\n 'material',\n 'temperature',\n 'adsorbate',\n ]\n # unit-related attributes and their defaults\n _unit_params = {\n 'pressure_mode': 'absolute',\n 'pressure_unit': 'bar',\n 'material_basis': 'mass',\n 'material_unit': 'g',\n 'loading_basis': 'molar',\n 'loading_unit': 'mmol',\n 'temperature_unit': 'K',\n }\n # other special reserved parameters\n # subclasses extend this\n _reserved_params = [\n \"_material\",\n \"_adsorbate\",\n \"_temperature\",\n \"m\",\n \"t\",\n \"a\",\n ]\n\n ##########################################################\n # Instantiation and classmethods\n\n def __init__(\n self,\n material: t.Union[str, dict, Material] = None,\n adsorbate: t.Union[str, Adsorbate] = None,\n temperature: t.Union[float, str] = None,\n **properties: dict,\n ):\n \"\"\"\n Instantiate is done by passing a dictionary with the parameters,\n as well as the info about units, modes and data columns.\n\n \"\"\"\n # commonly used shorthands\n for shorthand, prop in SHORTHANDS.items():\n data = properties.pop(shorthand, None)\n if data:\n if prop == \"material\":\n material = data\n elif prop == \"adsorbate\":\n adsorbate = data\n elif prop == \"temperature\":\n temperature = data\n\n # Must-have properties of the isotherm\n #\n # Basic checks\n if None in [material, adsorbate, temperature]:\n raise ParameterError(\n f\"Isotherm MUST have the following properties: {self._required_params}\"\n )\n\n self.material = material\n self.adsorbate = adsorbate\n self.temperature = temperature\n\n # Isotherm units\n #\n for uparam, udefault in self._unit_params.items():\n if uparam not in properties:\n logger.warning(f\"WARNING: '{uparam}' was not specified, assumed as '{udefault}'\")\n properties[uparam] = udefault\n\n # TODO deprecation\n if self._unit_params['loading_basis'] == 'volume':\n self._unit_params['loading_basis'] = 'volume_gas'\n logger.warning(\n \"Loading basis as 'volume' is unclear and deprecated. 
\"\n \"Assumed as 'volume_gas'.\"\n )\n\n self.pressure_mode = properties.pop('pressure_mode')\n self.pressure_unit = properties.pop('pressure_unit')\n if self.pressure_mode.startswith('relative'):\n self.pressure_unit = None\n self.material_basis = properties.pop('material_basis')\n self.material_unit = properties.pop('material_unit')\n self.loading_basis = properties.pop('loading_basis')\n self.loading_unit = properties.pop('loading_unit')\n self.temperature_unit = properties.pop('temperature_unit')\n\n # Check basis / mode\n if self.pressure_mode not in _PRESSURE_MODE:\n raise ParameterError(\n f\"Mode selected for pressure ({self.pressure_mode}) is not an option. \"\n f\"See viable values: {_PRESSURE_MODE.keys()}\"\n )\n\n if self.loading_basis not in _LOADING_MODE:\n raise ParameterError(\n f\"Basis selected for loading ({self.loading_basis}) is not an option. \"\n f\"See viable values: {_LOADING_MODE.keys()}\"\n )\n\n if self.material_basis not in _MATERIAL_MODE:\n raise ParameterError(\n f\"Basis selected for material ({self.material_basis}) is not an option. \"\n f\"See viable values: {_MATERIAL_MODE.keys()}\"\n )\n\n # Check units\n if self.pressure_mode == 'absolute' and self.pressure_unit not in _PRESSURE_UNITS:\n raise ParameterError(\n f\"Unit selected for pressure ({self.pressure_unit}) is not an option. \"\n f\"See viable values: {_PRESSURE_UNITS.keys()}\"\n )\n\n if self.loading_basis not in [\n \"percent\", \"fraction\"\n ] and self.loading_unit not in _LOADING_MODE[self.loading_basis]:\n raise ParameterError(\n f\"Unit selected for loading ({self.loading_unit}) is not an option. \"\n f\"See viable values: {_LOADING_MODE[self.loading_basis].keys()}\"\n )\n\n if self.loading_basis not in [\n \"percent\", \"fraction\"\n ] and self.material_unit not in _MATERIAL_MODE[self.material_basis]:\n raise ParameterError(\n f\"Unit selected for material ({self.material_unit}) is not an option. \"\n f\"See viable values: {_MATERIAL_MODE[self.loading_basis].keys()}\"\n )\n\n if self.temperature_unit not in _TEMPERATURE_UNITS:\n raise ParameterError(\n f\"Unit selected for temperature ({self.temperature_unit}) is not an option. \"\n f\"See viable values: {_TEMPERATURE_UNITS.keys()}\"\n )\n\n # Other named properties of the isotherm\n\n # Save the rest of the properties as metadata\n self.properties = properties\n\n ##########################################################\n # Overloaded and own functions\n\n @property\n def iso_id(self) -> str:\n \"\"\"Return an unique identifier of the isotherm.\"\"\"\n return isotherm_to_hash(self)\n\n @property\n def material(self) -> Material:\n \"\"\"Return underlying material.\"\"\"\n return self._material\n\n @material.setter\n def material(self, value: t.Union[str, dict, Material]):\n if isinstance(value, dict):\n name = value.pop('name', None)\n try:\n self._material = Material.find(name)\n self._material.properties.update(**value)\n except ParameterError:\n self._material = Material(name, **value)\n return\n try:\n self._material = Material.find(value)\n except ParameterError:\n self._material = Material(value)\n\n @property\n def adsorbate(self) -> Adsorbate:\n \"\"\"Return underlying adsorbate.\"\"\"\n return self._adsorbate\n\n @adsorbate.setter\n def adsorbate(self, value: t.Union[str, Adsorbate]):\n try:\n self._adsorbate = Adsorbate.find(value)\n except ParameterError:\n self._adsorbate = Adsorbate(value)\n logger.warning(\n \"Specified adsorbate is not in internal list \"\n \"(or name cannot be resolved to an existing one). 
\"\n \"Thermodynamic backend disabled for this gas/vapour.\"\n )\n\n @property\n def temperature(self) -> float:\n \"\"\"Return underlying temperature, always in kelvin.\"\"\"\n if self.temperature_unit == \"K\":\n return self._temperature\n return c_temperature(self._temperature, self.temperature_unit, \"K\")\n\n @temperature.setter\n def temperature(self, value: t.Union[float, str]):\n self._temperature = float(value)\n\n @property\n def units(self) -> dict:\n \"\"\"Return a dictionary of all isotherm units\"\"\"\n return {unit: getattr(self, unit) for unit in self._unit_params}\n\n def __eq__(self, other_isotherm) -> bool:\n \"\"\"\n Overload the equality operator of the isotherm.\n\n Since id's should be unique and representative of the\n data inside the isotherm, all we need to ensure equality\n is to compare the two hashes of the isotherms.\n \"\"\"\n return self.iso_id == other_isotherm.iso_id\n\n def __repr__(self) -> str:\n \"\"\"Print key isotherm parameters.\"\"\"\n return f\"<{type(self).__name__} {self.iso_id}>: '{self.adsorbate}' on '{self.material}' at {self.temperature} K\"\n\n def __str__(self) -> str:\n \"\"\"Print a short summary of all the isotherm parameters.\"\"\"\n string = \"\"\n\n # Required\n string += f\"Material: { str(self.material) }\\n\"\n string += f\"Adsorbate: { str(self.adsorbate) }\\n\"\n string += f\"Temperature: { str(self.temperature) }K\\n\"\n\n # Units/basis\n string += \"Units: \\n\"\n string += f\"\\tUptake in: {self.loading_unit}/{self.material_unit}\\n\"\n if self.pressure_mode.startswith('relative'):\n string += \"\\tRelative pressure\\n\"\n else:\n string += f\"\\tPressure in: {self.pressure_unit}\\n\"\n\n if self.properties:\n string += \"Other properties: \\n\"\n for prop, val in self.properties.items():\n string += (f\"\\t{prop}: {str(val)}\\n\")\n\n return string\n\n def to_dict(self) -> dict:\n \"\"\"\n Returns a dictionary of the isotherm class\n Is the same dictionary that was used to create it.\n\n Returns\n -------\n dict\n Dictionary of all parameters.\n \"\"\"\n parameter_dict = vars(self).copy()\n\n # These line are here to ensure that material/adsorbate are copied as a string\n parameter_dict['adsorbate'] = str(parameter_dict.pop('_adsorbate'))\n material = parameter_dict.pop('_material')\n if material.properties:\n parameter_dict['material'] = material.to_dict()\n else:\n parameter_dict['material'] = str(material)\n parameter_dict['temperature'] = parameter_dict.pop('_temperature')\n\n # Remove reserved parameters\n for param in self._reserved_params:\n parameter_dict.pop(param, None)\n\n # Add metadata\n parameter_dict.update(parameter_dict.pop('properties'))\n\n return parameter_dict\n\n def to_json(self, path=None, **kwargs) -> t.Union[None, str]:\n \"\"\"\n Convert the isotherm to a JSON representation.\n\n Parameters\n ----------\n path\n File path or object. If not specified, the result is returned as a string.\n kwargs\n Custom arguments to be passed to \"json.dump\", like `indent`.\n\n Returns\n -------\n None or str\n If path is None, returns the resulting json as a string.\n Otherwise returns None.\n \"\"\"\n from pygaps.parsing.json import isotherm_to_json\n return isotherm_to_json(self, path, **kwargs)\n\n def to_csv(self, path=None, separator=',', **kwargs) -> t.Union[None, str]:\n \"\"\"\n Convert the isotherm to a CSV representation.\n\n Parameters\n ----------\n path\n File path or object. If not specified, the result is returned as a string.\n separator : str, optional\n Separator used int the csv file. 
Defaults to '',''.\n\n Returns\n -------\n None or str\n If path is None, returns the resulting csv as a string.\n Otherwise returns None.\n \"\"\"\n from pygaps.parsing.csv import isotherm_to_csv\n return isotherm_to_csv(self, path, separator, **kwargs)\n\n def to_xl(self, path, **kwargs):\n \"\"\"\n Save the isotherm as an Excel file.\n\n Parameters\n ----------\n path\n Path where to save Excel file.\n\n \"\"\"\n from pygaps.parsing.excel import isotherm_to_xl\n return isotherm_to_xl(self, path, **kwargs)\n\n def to_aif(self, path=None, **kwargs) -> t.Union[None, str]:\n \"\"\"\n Convert the isotherm to a AIF representation.\n\n Parameters\n ----------\n path\n File path or object. If not specified, the result is returned as a string.\n\n Returns\n -------\n None or str\n If path is None, returns the resulting AIF as a string.\n Otherwise returns None.\n \"\"\"\n from pygaps.parsing.aif import isotherm_to_aif\n return isotherm_to_aif(self, path, **kwargs)\n\n def to_db(\n self,\n db_path: str = None,\n verbose: bool = True,\n autoinsert_material: bool = True,\n autoinsert_adsorbate: bool = True,\n **kwargs\n ):\n \"\"\"\n Upload the isotherm to an sqlite database.\n\n Parameters\n ----------\n db_path : str, None\n Path to the database. If none is specified, internal database is used.\n autoinsert_material: bool, True\n Whether to automatically insert an isotherm material if it is not found\n in the database.\n autoinsert_adsorbate: bool, True\n Whether to automatically insert an isotherm adsorbate if it is not found\n in the database.\n verbose : bool\n Extra information printed to console.\n\n \"\"\"\n from pygaps.parsing.sqlite import isotherm_to_db\n return isotherm_to_db(\n self,\n db_path=db_path,\n autoinsert_material=autoinsert_material,\n autoinsert_adsorbate=autoinsert_adsorbate,\n verbose=verbose,\n **kwargs\n )\n\n def convert_temperature(\n self,\n unit_to: str,\n verbose: bool = False,\n ):\n \"\"\"\n Convert isotherm temperature from one unit to another.\n\n Parameters\n ----------\n unit_to : str\n The unit into which the internal temperature should be converted to.\n verbose : bool\n Print out steps taken.\n\n \"\"\"\n self._temperature = c_temperature(self._temperature, self.temperature_unit, unit_to)\n self.temperature_unit = unit_to\n\n if verbose:\n logger.info(f\"Changed temperature unit to '{unit_to}'.\")\n\n # Figure out the adsorption and desorption branches\n @staticmethod\n def _splitdata(data, pressure_key: bool):\n \"\"\"\n Split isotherm data into an adsorption and desorption part and\n return a column which marks the transition between the two.\n \"\"\"\n from pygaps.utilities.math_utilities import split_ads_data\n return split_ads_data(data, pressure_key)\n","repo_name":"pauliacomi/pyGAPS","sub_path":"src/pygaps/core/baseisotherm.py","file_name":"baseisotherm.py","file_ext":"py","file_size_in_byte":16422,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"32"} +{"seq_id":"70309589532","text":"\"\"\"\nThis script uses Tracknet to track shuttlecock in a dataset of videos.\n\"\"\"\n\nimport argparse\nimport os\n\nimport cv2\nimport numpy as np\nfrom predict import predict\nfrom smooth_trajectory import smooth_trajectory\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset_path\", type=str, required=True)\n parser.add_argument(\"--load_weights\", type=str, required=True)\n parser.add_argument(\"--save_video\", action=\"store_true\")\n return parser.parse_args()\n\n\ndef 
plot_shuttle_cock(coords, video_path, output_path):\n cap = cv2.VideoCapture(video_path)\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n out = cv2.VideoWriter(output_path, fourcc, 30.0, (1280, 720))\n\n for coord in coords:\n ret, frame = cap.read()\n if not ret:\n break\n\n if not np.isnan(coord).any():\n cv2.circle(frame, (int(coord[0]), int(coord[1])), 5, (0, 0, 255), -1)\n\n out.write(frame)\n\n cap.release()\n out.release()\n\n\ndef is_amateur(video_path):\n \"\"\"Check if the video is amateur or professional\"\"\"\n cap = cv2.VideoCapture(video_path)\n\n # Get the middle frame\n len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n cap.set(cv2.CAP_PROP_POS_FRAMES, len // 2)\n _, img = cap.read()\n cap.release()\n\n top_left_corner = img[: img.shape[0] // 4, : img.shape[1] // 3, :]\n\n return top_left_corner.sum() > 30000000\n\n\ndef mask_amateur_vid(video_path):\n \"\"\"Add a polygon mask to the right side of the video to blockout noises\"\"\"\n cap = cv2.VideoCapture(video_path)\n output_path = video_path.replace(\".mp4\", \"_masked.mp4\")\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n out = cv2.VideoWriter(output_path, fourcc, 30.0, (1280, 720))\n\n mask_pts = np.array([[890, 350], [1280, 500], [1280, 0], [890, 0]])\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n cv2.fillPoly(frame, pts=[mask_pts], color=(0, 0, 0))\n\n out.write(frame)\n\n cap.release()\n out.release()\n\n return output_path\n\n\ndef main(args):\n dataset_path = args.dataset_path\n load_weights = args.load_weights\n save_video = args.save_video\n\n for data in sorted(os.listdir(dataset_path)):\n print(f\"Processing {os.path.join(dataset_path, data)}...\")\n\n data_path = os.path.join(dataset_path, data)\n\n video_path = os.path.join(data_path, f\"{data}.mp4\")\n\n if is_amateur(video_path):\n video_path = mask_amateur_vid(video_path)\n\n coords = predict(video_path, load_weights)\n coords = smooth_trajectory(coords)\n\n output_file = f\"{data}_trajectory.csv\"\n output_path = os.path.join(data_path, output_file)\n\n with open(output_path, \"w\") as f:\n f.write(\"Frame,Visibility,X,Y\\n\")\n for frame, coord in enumerate(coords):\n if np.isnan(coord).any():\n f.write(f\"{frame},0,0,0\\n\")\n else:\n f.write(f\"{frame},1,{int(coord[0])},{int(coord[1])}\\n\")\n\n if save_video:\n plot_shuttle_cock(coords, video_path, output_path.replace(\".csv\", \".mp4\"))\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n","repo_name":"jerrykal/AI_CUP_2023_Spring_Computer_Vision","sub_path":"src/shuttlecock_tracker/process_dataset.py","file_name":"process_dataset.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25941230890","text":"import requests, threading, time, json, random, pygame, sys\nfrom Classes.Levels import Menus\nfrom Classes.Settings import *\n\n\npygame.init()\nclock = pygame.time.Clock()\nwindows = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\npygame.display.set_caption(\"3.14 Doors\")\n\nsurface = pygame.Surface((WIN_WIDTH, WIN_HEIGHT))\n\nMENU = Menus.Menu(surface)\nLEVEL_SELECTOR = Menus.LevelSelector(surface)\nSETTINGS = Menus.Settings(surface)\nUPGRADE = Menus.Upgrade(surface)\nMENU_BETWEEN = Menus.MenuBetweenLevelCreator(surface)\nGAME = None\nLEVEL_CREATOR = None\n\nrunGame = True\npygame.key.set_repeat(1, 10)\nwhile runGame:\n windows.fill(Color.Black)\n if 
(Menus.currentStage == \"Menu\"):\n MENU.Update(windows)\n if (GAME):\n GAME = None\n if (LEVEL_CREATOR):\n LEVEL_CREATOR = None\n elif (Menus.currentStage == \"Level Selector\"):\n LEVEL_SELECTOR.Update(windows)\n if (GAME):\n GAME = None\n if (LEVEL_CREATOR):\n LEVEL_CREATOR = None\n elif (Menus.currentStage == \"Settings\"):\n SETTINGS.Update(windows)\n elif (Menus.currentStage == \"Upgrade\"):\n UPGRADE.Update(windows)\n elif (Menus.currentStage == \"Game\"):\n if (not GAME):\n GAME = Menus.Game(windows)\n elif (GAME.reload):\n GAME = Menus.Game(windows)\n GAME.Update(windows)\n elif (Menus.currentStage == \"MenuB\"):\n MENU_BETWEEN.Update(windows)\n if (LEVEL_CREATOR):\n LEVEL_CREATOR = None\n elif (Menus.currentStage == \"Level Creator\"):\n if (not LEVEL_CREATOR):\n LEVEL_CREATOR = Menus.LevelCreator(windows)\n LEVEL_CREATOR.Update(windows)\n\n clock.tick(FPS)\n pygame.display.flip()\n\n\npygame.quit()","repo_name":"shogo-makishima/3.14-Doors","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21244991318","text":"from datetime import timedelta\n\nfrom eggroll.core.proto import deepspeed_pb2\nfrom google.protobuf.duration_pb2 import Duration\nfrom torch.distributed import Store\n\nfrom ..client import BaseClient\nfrom .commands import RendezvousStoreCommands\n\n\nclass EggrollStore(Store):\n def __init__(self, host, port, prefix, timeout: timedelta = timedelta(hours=24)):\n self._prefix = prefix\n self._timeout = timeout\n super().__init__()\n\n self._client = BaseClient(host=host, port=port)\n\n def get(self, key, timeout: timedelta = None):\n if isinstance(key, str):\n key = key.encode()\n if timeout is None:\n timeout = self._timeout\n seconds = int(timeout.total_seconds())\n nanos = int((timeout - timedelta(seconds=seconds)).microseconds * 1000)\n response = self._client.do_sync_request(\n input=deepspeed_pb2.StoreGetRequest(\n prefix=self._prefix, key=key, timeout=Duration(seconds=seconds, nanos=nanos)\n ),\n output_type=deepspeed_pb2.StoreGetResponse,\n command_uri=RendezvousStoreCommands.GET,\n )\n if response.is_timeout:\n raise RuntimeError(\"Socket Timeout\")\n return response.value\n\n def set(self, key, value):\n if isinstance(key, str):\n key = key.encode()\n return self._client.do_sync_request(\n deepspeed_pb2.StoreSetRequest(prefix=self._prefix, key=key, value=value),\n output_type=deepspeed_pb2.StoreSetResponse,\n command_uri=RendezvousStoreCommands.SET,\n )\n\n def add(self, key, amount):\n if isinstance(key, str):\n key = key.encode()\n response = self._client.do_sync_request(\n deepspeed_pb2.StoreAddRequest(prefix=self._prefix, key=key, amount=amount),\n output_type=deepspeed_pb2.StoreAddResponse,\n command_uri=RendezvousStoreCommands.ADD,\n )\n return response.amount\n\n def destroy(self):\n return destroy(self._client, self._prefix)\n\n\ndef destroy(client, prefix):\n return client.do_sync_request(\n deepspeed_pb2.StoreDestroyRequest(prefix=prefix),\n output_type=deepspeed_pb2.StoreDestroyResponse,\n command_uri=RendezvousStoreCommands.DESTROY,\n )\n","repo_name":"FederatedAI/eggroll","sub_path":"python/eggroll/deepspeed/store/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"32"} +{"seq_id":"29992720676","text":"\"\"\"Markov models.\"\"\"\n\n\nimport numpy as np\nfrom math import log \n\n\nclass 
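EggrollStore.get() above has to split a Python timedelta into the whole-seconds and nanoseconds fields of protobuf's Duration; the same arithmetic in isolation, with no eggroll dependency:

```python
from datetime import timedelta

def to_duration_fields(timeout: timedelta):
    """Split a timedelta into the (seconds, nanos) pair protobuf's Duration uses."""
    seconds = int(timeout.total_seconds())
    nanos = int((timeout - timedelta(seconds=seconds)).microseconds * 1000)
    return seconds, nanos

assert to_duration_fields(timedelta(seconds=1, microseconds=500)) == (1, 500_000)
```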
MarkovModel:\n \"\"\"Representation of a Markov model.\"\"\"\n\n init_probs: list[float]\n trans: list[list[float]]\n\n def __init__(self,\n init_probs: list[float],\n trans: list[list[float]]):\n \"\"\"Create model from initial and transition probabilities.\"\"\"\n # Sanity check...\n k = len(init_probs)\n assert k == len(trans)\n for row in trans:\n assert k == len(row)\n\n self.init_probs = init_probs\n self.trans = trans\n\n\ndef likelihood(x: list[int], mm: MarkovModel) -> float:\n \"\"\"\n Compute the likelihood of mm given x.\n\n This is the same as the probability of x given mm,\n i.e., P(x ; mm).\n \"\"\"\n if not x:\n return 1\n \n i = 1\n probability = mm.init_probs[x[0]]\n while i < len(x):\n probability *= mm.trans[x[i-1]][x[i]]\n i += 1 \n return probability\n\ndef log_likelihood(x: list[int], mm: MarkovModel) -> float:\n \"\"\"\n Computes the log likelihood of mm given x\n \"\"\"\n if not x:\n return log(1)\n \n i = 1\n probability = log(mm.init_probs[x[0]])\n while i < len(x):\n probability += log(mm.trans[x[i-1]][x[i]])\n i += 1 \n return probability","repo_name":"birc-ctib-2022/markov-models-MEskerod","sub_path":"src/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30062060627","text":"\"\"\"\r\n\n\nWrite a function that transforms all letters from `[a, m]` to `0` and letters\nfrom `[n, z]` to `1` in a string.\n\n### Examples\n\n convert_binary(\"house\") ➞ \"01110\"\n \n convert_binary(\"excLAIM\") ➞ \"0100000\"\n \n convert_binary(\"moon\") ➞ \"0111\"\n\n### Notes\n\nConversion should be case **insensitive** (see example #2).\n\n\"\"\"\r\n\ndef convert_binary(string):\n zero_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm']\n zero_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M']\n one_lower = ['n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n one_upper = ['N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n result = ''\n for char in string:\n if (char in zero_lower) or (char in zero_upper):\n result += '0'\n elif (char in one_lower) or (char in one_upper):\n result += '1'\n return result\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"Ddmh9KYg7xA4m9uE7_23.py","file_name":"Ddmh9KYg7xA4m9uE7_23.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33825209322","text":"# Leetcode problem - 287. Find the Duplicate Number\n# Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.\n# There is only one repeated number in nums, return this repeated number.\n# You must solve the problem without modifying the array nums and uses only constant extra space.\n\n# The idea here is that since the array length is n+1 and the range of the values is [1,n]\n# 1. There will be atleast one duplicate (since we have to fill N+1 positions with N numbers)\n# 2. If we look at the values in the list as an index, there will be one index to which multiple\n# values will be pointing to, in the case below, both the values at index 3,4 are pointing to\n# index 2. 
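The convert_binary solution above enumerates four alphabet lists by hand; assuming the input contains only letters (as in the exercise), an equivalent and more compact version lower-cases once and compares against 'm':

```python
def convert_binary(string: str) -> str:
    return ''.join('0' if ch <= 'm' else '1' for ch in string.lower())

assert convert_binary("house") == "01110"
assert convert_binary("excLAIM") == "0100000"
assert convert_binary("moon") == "0111"
```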
This is similar to a cycle in linked list and can be solved using Floyd's Algorithm.\n# Example:\n# 0 1 2 3 4 <-- index\n# Input: nums = [1,3,4,2,2]\n# 0 --> 3 --> 2 --> 4 \n# |<--- |\n\n# Time:O(n) Space: O(1)\ndef findDuplicate(nums):\n # 2 pointers starting from the index 0 \n slow = 0\n fast = 0\n while True:\n # move slow pointer forward by 1 position, fast by 2\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow == fast:\n break # fast and slow pointers met\n \n slow2 = 0 # create another pointer starting at index 0\n while True:\n # move both the slow pointers forward by 1 position\n slow = nums[slow]\n slow2 = nums[slow2]\n if slow == slow2:\n # slow pointers met, this is the starting of the cycle,\n # return slow pointer which is the duplicate value\n return slow\n\n\nif __name__ == \"__main__\":\n nums = [1,3,4,2,2]\n print(f\"duplicate in {nums} is: {findDuplicate(nums)}\")\n nums = [3,1,3,4,2]\n print(f\"duplicate in {nums} is: {findDuplicate(nums)}\")\n","repo_name":"SharmaJi-Engineer/Leetcode","sub_path":"python/findDuplicate.py","file_name":"findDuplicate.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30296375769","text":"import argparse\nimport shutil\nimport torch\nfrom typing import Callable\nfrom .config import Config\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef create(ModelConfig: Config) -> None:\n parser = argparse.ArgumentParser(description='Create model config')\n parser.add_argument('model_dir')\n args = parser.parse_args()\n config = ModelConfig()\n config.save(args.model_dir)\n\n\ndef train(ModelConfig: Config, train_func: Callable[[Config], None]) -> None:\n parser = argparse.ArgumentParser(description='Train using model config')\n parser.add_argument('model_dir')\n args = parser.parse_args()\n config = ModelConfig(args.model_dir)\n shutil.rmtree(config.model_dir)\n config.save()\n\n train_func(config)\n\ndef visualize(ModelConfig: Config, visualize_func: Callable[[Config], None]) -> None:\n parser = argparse.ArgumentParser(description='Visualize model')\n parser.add_argument('model_dir')\n parser.add_argument('epoch', type=int, nargs='?')\n args = parser.parse_args()\n config = ModelConfig(args.model_dir)\n\n epoch = args.epoch\n if epoch is None:\n epoch = config.n_epochs\n\n # NOTE: ProblemModel is not defined or imported in this module; the\n # surrounding project must provide it for visualize() to run\n model = ProblemModel(config)\n checkpoint = torch.load(config.get_checkpoint_path(epoch))\n model.load_state_dict(checkpoint['model'])\n\n visualize_func(model)\n","repo_name":"samedii/didactic-meme","sub_path":"didactic_meme/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34994807952","text":"from django.http import JsonResponse\nfrom .models import DeviceModel\nfrom django.core.exceptions import ObjectDoesNotExist\nimport json\n\n# Create your views here.\n\n\ndef hello(request):\n return JsonResponse({\"message\": \"Hi\"})\n\n\ndef device(request):\n if request.method == \"POST\":\n device_ob = DeviceModel.objects.create(\n mac_address=json.loads(request.body).get('mac_address')\n )\n device_ob.save()\n return JsonResponse(status=201, data={\n 'mac_address': device_ob.mac_address,\n 'is_active': device_ob.is_active\n }, safe=False)\n else:\n try:\n device_ob = DeviceModel.objects.get(pk=request.GET.get('mac_address'))\n return JsonResponse(status=200, data={\n 'mac_address': device_ob.mac_address,\n 'is_active': 
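create(), train() and visualize() above are designed to be wired up as per-project entry points; a hypothetical caller might look like this (ModelConfig and my_train_func are placeholders for the consuming project's own code, not part of this library):

```python
# hypothetical wiring in a consuming project's cli.py
from didactic_meme.command_line import create, train

from myproject.config import ModelConfig       # placeholder: subclass of Config
from myproject.training import my_train_func   # placeholder: takes a Config

def cli_create():
    create(ModelConfig)                # reads model_dir from sys.argv

def cli_train():
    train(ModelConfig, my_train_func)  # wipes model_dir, saves config, trains
```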
device_ob.is_active\n })\n except ObjectDoesNotExist:\n return JsonResponse(status=404, data=\"Does Not exist\", safe=False)\n","repo_name":"kulkarnisaumitra98/door-app","sub_path":"doorApp/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20426034183","text":"from sqlalchemy import func\n\nfrom model import User, Avatar, Place, Event, connect_to_db, db\nfrom server import app\n\nimport pandas as pd \n\ndefault_url = 'https://images.unsplash.com/photo-1537151672256-6caf2e9f8c95?ixlib=rb-1.2.1&q=85&fm=jpg&crop=entropy&cs=srgb&dl=ipet-photo-1061142-unsplash.jpg'\n\ndef set_val_user_id():\n \"\"\"Set value for the next user_id after seeding database\"\"\"\n\n # Get the Max user_id in the database\n result = db.session.query(func.max(User.user_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next user_id to be max_id + 1\n query = \"SELECT setval('users_user_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id + 1})\n db.session.commit()\n\ndef load_avatars(filename):\n\n df = pd.read_excel(filename)\n avatar_list = list(df['Name'])\n\n for url in avatar_list:\n avatar = Avatar(url=url)\n\n db.session.add(avatar)\n\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n connect_to_db(app)\n db.create_all()\n\n excel_file = 'avatars.xlsx'\n load_avatars(excel_file)\n\n user = User(user_id=1,fname='Jenny',lname='Trieu',email='jennytrieu10@gmail.com', password=\"evil\", url = default_url)\n place = Place(user_id=1, place_name = 'WinterWonderland', place_address = '100 Main St, San Francisco, CA 94110', place_imURL = default_url)\n event = Event(user_id=1, event_name = 'Doggy Dayzz', eventbrite_id = '1234asdf', event_address = '200 Main St, San Francisco, CA 94110', event_date = 'January 1, 2019', event_imURL = default_url)\n db.session.add(user)\n db.session.add(place)\n db.session.add(event)\n db.session.commit()\n set_val_user_id()\n\n\n ","repo_name":"jtrieu2/PressPaws","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19362790066","text":"import tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\nfrom tornado.options import define,options,parse_command_line\nfrom utils.settings import TEMPLATE_PATH,STATIC_PATH\nfrom apps.views import RegisterHandler,LoginHandler,ChatHandler\n\ndefine('port',default=8080,help='default port',type=int)\n\ndef make_app():\n return tornado.web.Application(handlers=[\n (r'/signup/',RegisterHandler),\n (r'/signin/',LoginHandler),\n (r'/chat/',ChatHandler),\n ],autoreload=True,debug=True,template_path=TEMPLATE_PATH,static_path=STATIC_PATH,cookie_secret='adfafa12sad=-1223'\n)\n\nif __name__ == \"__main__\":\n parse_command_line()\n app = make_app()\n server = tornado.httpserver.HTTPServer(app)\n server.listen(options.port,address='0.0.0.0')\n tornado.ioloop.IOLoop.instance().start()","repo_name":"gaohj/nzflask_bbs","sub_path":"chart_demo聊天室/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18969466515","text":"import sys, os, glob\nsys.path.append(os.getcwd()+\"/freelabel\")\n\nfrom django.shortcuts import render\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import 
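From the client side, the device view above takes a JSON POST to register a MAC address and a GET with a query parameter to look one up; roughly like this, assuming the view is routed at /device/ (the URLconf is not shown):

```python
import requests

BASE = "http://localhost:8000/device/"  # assumed route for the device view

# register a device (the view returns 201 with the stored fields)
r = requests.post(BASE, json={"mac_address": "aa:bb:cc:dd:ee:ff"})
print(r.status_code, r.json())

# look it up again (200 if found, 404 otherwise)
r = requests.get(BASE, params={"mac_address": "aa:bb:cc:dd:ee:ff"})
print(r.status_code, r.json())
```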
User\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.shortcuts import render_to_response\n\n# Import the Category model\nfrom freelabel.models import Category, Page\n\nfrom freelabel.forms import UserForm\n\nimport numpy as np\nimport json\nimport urllib.request as ur\n\nfrom skimage.draw import line\nfrom ourLib import startRGR, traceLine, cmpToGT, saveGTasImg, saveAnnsAsPNG, \\\n tracePolyline, readLocalImg, traceCircle, traceRect\n\nfrom random import shuffle\n\nimport scipy.io as sio\n\nimport datetime, math\n\nfrom threading import Thread\n\n# for local folder usage (https://stackoverflow.com/questions/39801718/how-to-run-a-http-server-which-serves-a-specific-path)\nfrom http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler\n# import SimpleHTTPServer\n\nclass HTTPHandler(SimpleHTTPRequestHandler):\n # def do_POST(self):\n # print(\"here\")\n # if self.path.startswith('/kill_server'):\n # print(\"Server is going down, run it again manually!\")\n # def kill_me_please(server):\n # server.shutdown()\n # server.server_close()\n # # httpd = HTTPServer('', (\"\", 8889))\n # t=Thread(target=kill_me_please,args=(self.server,))\n # t.start() \n # self.send_error(500) \n # print(\"move on\")\n # return \n\n \"\"\"This handler uses server.base_path instead of always using os.getcwd()\"\"\"\n def translate_path(self, path):\n path = SimpleHTTPRequestHandler.translate_path(self, path)\n relpath = os.path.relpath(path, os.getcwd())\n fullpath = os.path.join(self.server.base_path, relpath)\n return fullpath \n\nclass HTTPServer(BaseHTTPServer):\n \"\"\"The main server, you pass in base_path which is the path you want to serve requests from\"\"\"\n def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler):\n self.base_path = base_path\n BaseHTTPServer.__init__(self, server_address, RequestHandlerClass)\n\n# used to return numpy arrays via AJAX to JS side\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n# defines which page (.html) is loaded first\ndef main(request):\n return render(request, 'freelabel/register.html')\n\n# renders the main playing page\ndef play(request): \n return render(request, 'freelabel/main.html') \n\n####\ndef playCustom(request): \n return render(request, 'freelabel/customset.html')\n\ndef playCustomScratch(request):\n return render(request, 'freelabel/customsetScratch.html')\n\ndef threadfunction(web_dir):\n\n PORT = 8889\n # web_dir = '/home/philipe/Pictures/test/'\n httpd = HTTPServer(web_dir, (\"\", 0)) \n \n httpd.handle_request()\n\ndef setcustomfolder(httpd):\n # If the request is a HTTP POST, try to pull out the relevant information.\n # if request.method == 'POST':\n # web_dir = request.POST.get('folderpath')\n # web_dir = '/home/philipe/Pictures/test/'\n\n httpd.serve_forever()\n\n # print(\"###### DONE ###########\")\n\n # return render(request, 'freelabel/register.html') \n\ndef loadcustom(request):\n \n localFolder = request.POST.get('folderpath')\n setname = request.POST.get('datasetname')\n outputFolder = request.POST.get('outputpath')\n\n username = request.user.username\n\n outputFolder = os.path.join(outputFolder,username,setname)\n\n if not os.path.exists(outputFolder):\n os.makedirs(outputFolder)\n\n # # web_dir = '/home/philipe/Pictures/test/'\n # httpd = HTTPServer(localFolder, (\"\", PORT))\n # 
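The NumpyEncoder defined above is what lets these views stash numpy arrays in the Django session as JSON; the round trip, assuming the class is in scope, looks like:

```python
import json
import numpy as np

anns = np.zeros((2, 3), dtype=int)
payload = json.dumps({'userAnns': anns}, cls=NumpyEncoder)  # ndarray -> nested lists
restored = np.array(json.loads(payload)['userAnns'])        # nested lists -> ndarray
assert restored.shape == (2, 3)
```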
httpd.handle_request()\n\n httpd = HTTPServer(localFolder, (\"\", 0))\n sockinfo = httpd.socket.getsockname()\n PORT = sockinfo[1]\n\n t=Thread(target=setcustomfolder,args=[httpd])\n t.start()\n\n # get list of files in folder of custom dataset\n imgList = []\n cnnList = []\n\n files_ = glob.glob(os.path.join(localFolder,\"*.jpg\"))\n files_.extend(glob.glob(os.path.join(localFolder,\"*.png\")))\n files_.extend(glob.glob(os.path.join(localFolder,\"*.JPEG\")))\n files_.extend(glob.glob(os.path.join(localFolder,\"*.jpeg\")))\n imgList = [os.path.basename(x) for x in files_]\n\n # in case of loading pre-segmentation maps\n for it,x in enumerate(files_):\n cnnList.append(imgList[it][0:-4] + \".png\")\n\n # imgList = ['/' + s for s in imgList]\n print(imgList)\n catList = ['eraser']\n # load text file with list of categories in the dataset\n if os.path.exists(os.path.join(localFolder,'categories.txt')):\n f = open(os.path.join(localFolder,'categories.txt'), 'r')\n for elem in f.readlines():\n catList.append(elem)\n f.close()\n else:\n catList.append('background')\n catList.append('building')\n\n # check if there is already a sequence of images for this user.\n # If not, creates one\n filename = 'static/lists/imgs_' + setname + '_' + username + '.txt'\n if not os.path.exists(filename):\n shuffledIds = shuffleList(filename,len(imgList))\n # cnnList = cnnList[shuffledIds]\n\n idsList = np.loadtxt(filename, delimiter=',') \n\n idsList = list(idsList)\n\n # get current total score and next image to be labeled\n filename = 'static/lists/info'+ setname +'_' + username + '.txt'\n if not os.path.exists(filename):\n nextId = 0\n else: \n info = np.loadtxt(filename) \n nextId = int(info)\n # to append bar if needed\n localFolder = os.path.join(localFolder,\"\")\n return HttpResponse(json.dumps({'PORT':PORT,'imgList': imgList,'cnnList': cnnList,'catList':catList,\\\n 'idsList': idsList,'username': username,'nextId':nextId,\\\n 'localFolder':localFolder, 'outputFolder':outputFolder }), content_type=\"application/json\")\n\n# redirecting for compatibility with older versions\ndef refine(request):\n refineCustom(request)\n\ndef refineCustom(request): \n # get array of user traces from json \n jsonAnns = json.loads(request.session['userAnns'])\n # convert it to numpy\n userAnns = np.array(jsonAnns[\"userAnns\"])\n\n # get coordinates of trace to be drawn\n traces = request.POST.getlist('trace[]') \n\n userAnns = drawTrace(userAnns,traces)\n\n # check if both classes have been annotated\n # list of annotated classes\n clsList = np.unique(userAnns)\n clsList = np.delete(clsList,0) # remove class 0\n numCls = clsList.size # number of classes\n\n if numCls > 1:\n username = request.user.username\n\n # flag indicating if annotation shall be merged with presegmentation\n mergePreSeg = True if request.POST.get('mergePreSeg') == 'true' else False\n\n # get URL of image\n url = request.POST.get('img')\n\n # get random ID that defines mask filename\n ID = request.POST.get('ID')\n # weight of traces, which defines the spacing between samples in RGR\n weight_ = int(request.POST.get('weight'))\n\n # theta_m: regulates weight of color-similarity vs spatial-proximity\n # divide by to adjust from [1,10] to [.1,1]\n m = float(request.POST.get('m'))/10\n\n # remove older files\n for filename in glob.glob(\"static/\"+username+\"/refined*\"):\n os.remove(filename)\n\n # open image URL\n img = readLocalImg(url)\n # download image and convert to numpy array\n img = np.asarray(img, dtype=\"uint8\")\n\n # call RGR and get mask as 
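Binding the throwaway file server to port 0, as loadcustom does above, lets the OS pick a free ephemeral port, which is then read back from the socket; the same trick with only the standard library:

```python
from http.server import HTTPServer, SimpleHTTPRequestHandler

httpd = HTTPServer(("", 0), SimpleHTTPRequestHandler)  # port 0 -> OS chooses
port = httpd.socket.getsockname()[1]
print(f"serving on ephemeral port {port}")
```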
return\n im_color = startRGR(username,img,userAnns,ID,weight_,m,url,mergePreSeg)\n askForAnns = False\n else:\n askForAnns = True\n\n request.session['userAnns'] = json.dumps({'userAnns': userAnns}, cls=NumpyEncoder)\n # return render(request, 'freelabel/main.html')\n return HttpResponse(json.dumps({'askForAnns': askForAnns}), content_type=\"application/json\")\n\ndef writeCustomLog(request):\n\n # get the username\n username = request.user.username\n\n jsonAnns = json.loads(request.session['userAnns'])\n anns = np.array(jsonAnns[\"userAnns\"])\n\n # total score and next i in list of images to load\n next_i = int(request.POST.get('next_i')) \n filename = 'static/lists/infoCustom_' + username + '.txt'\n np.savetxt(filename,[next_i], fmt='%d', delimiter=',') \n\n #id of image\n img_file = request.POST.get('img_file') \n\n # get newest ID of file once window reload \n # file_ID = request.POST.get('fileID') \n file_ID = username\n # save .mat with final mask and annotations, just in case we need it afterwards\n finalMask = np.load('static/'+username+'/lastmask.npy')\n \n setname = request.POST.get('datasetname')\n\n outputfolder = request.POST.get('outputfolder')\n directory = os.path.join(outputfolder,file_ID,setname)\n\n # if not os.path.exists(directory):\n # os.makedirs(directory)\n\n base_ = os.path.basename(img_file)\n filename = directory + '/' + os.path.splitext(base_)[0]\n sio.savemat(filename+ '.mat', mdict={'finalMask': finalMask, 'anns': anns})\n\n saveAnnsAsPNG(filename,finalMask)\n\n # compute percentage of how many pixels were annotated by the user\n total_anns = np.count_nonzero(anns)\n total_anns = 100*(total_anns/anns.size)\n\n # filename = 'static/log/Results_' + file_ID + '.txt'\n filename = 'static/log/Log'+setname+'_' + username + '.txt'\n\n # if file exists, only append data\n if not os.path.exists(filename):\n a = open(filename, 'w+')\n a.close()\n\n #time spend\n time = request.POST.get('time')\n\n #number of traces\n trace_number = request.POST.get('trace_number')\n\n #length of all traces\n\n #number of clicks on \"refine\"\n refine_number = request.POST.get('refine_number')\n\n #accuracies obtained \n accuracies = request.POST.getlist('accuracies[]')\n\n # string containing all info for this image: \n str_ = str(os.path.basename(img_file)) + ';' + str(time) + ';' + \\\n str(trace_number) + ';' + '%.3f'%(float(total_anns)) + ';' + \\\n str(refine_number)\\\n\n if accuracies is None:\n accuracies = 0\n\n for acc_ in accuracies:\n str_ = str_ + ',' + '%.3f'%(float(acc_))\n\n # get array of accuracies for each class + average. If empty (i.e. 
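refineCustom above only launches RGR once at least two classes have been traced; the same check in isolation, filtering label 0 by value (which, unlike deleting index 0, also stays correct if no unlabelled pixels remain):

```python
import numpy as np

def has_enough_classes(user_anns: np.ndarray) -> bool:
    cls_list = np.unique(user_anns)
    cls_list = cls_list[cls_list != 0]  # 0 means "no annotation here"
    return cls_list.size > 1

assert not has_enough_classes(np.array([[0, 1], [1, 0]]))  # one class only
assert has_enough_classes(np.array([[0, 1], [2, 0]]))      # two classes
```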
no refinement performed yet)\n str_ = str_ + '\\n'\n\n a=open(filename, \"a+\")\n a.write(str_)\n a.close()\n\n # remove older files\n for filename in glob.glob(\"static/\"+username+\"/GTimage*\"):\n os.remove(filename) \n\n return render(request, 'freelabel/main.html') \n\n####\ndef playVideo(request):\n return render(request, 'freelabel/video.html')\n\ndef shuffleList(filename,lst_length):\n str_ = ''\n\n shuffled_ = np.random.permutation(lst_length)\n np.savetxt(filename, shuffled_, fmt='%d', delimiter=',')\n return shuffled_\n\n# initialize array with user traces for this iamge\ndef initanns(request):\n\n username = request.user.username\n\n # delete pre-existent mask .npy file\n if os.path.exists('static/'+username+'/lastmask.npy',):\n os.remove('static/'+username+'/lastmask.npy',) \n\n img_size = request.POST.getlist('img_size[]') \n \n height = int(img_size[0])\n width = int(img_size[1])\n\n # create array with users annotations (same dimensions as image)\n userAnns = np.zeros((height,width),dtype=int)\n\n np.save('static/'+username+'/lastmask.npy', userAnns)\n\n # using sessions allow us to keep updating and accessing this same variable back and forth here in the views.py\n request.session['userAnns'] = json.dumps({'userAnns': userAnns}, cls=NumpyEncoder) \n request.session.save()\n # get bounding boxes\n # download url as a local file\n #\n # return HttpResponse(json.dumps({'bbList': bbList}), content_type=\"application/json\")\n return render(request, 'freelabel/main.html')\n\n\ndef cmpGT(request):\n username = request.user.username\n\n # get URL of ground truth file\n urlGT = request.POST.get('GT')\n # download this URL as local file GT.mat\n ur.urlretrieve(urlGT, \"static/\"+username+\"/GT.mat\")\n\n # call function that computes accuracies\n acc = cmpToGT(username)\n\n return HttpResponse(json.dumps({'acc': acc}, cls=NumpyEncoder), content_type=\"application/json\")\n\ndef showFinalImg(request):\n username = request.user.username\n\n # get random ID that defines mask filename\n ID = int(request.POST.get('ID'))\n\n # remove older files\n for filename in glob.glob(\"static/\"+username+\"/GTimage*\"):\n os.remove(filename) \n\n # call asImg and get image \n im_color = saveGTasImg(username,ID);\n\n return render(request, 'freelabel/main.html')\n\ndef drawTrace(userAnns,traces):\n\n img = np.uint8(userAnns)\n\n for itline in range(0,len(traces)):\n traceStr = traces[itline]\n trace = [x.strip() for x in traceStr.split(',')]\n\n # each trace \"coordinate\" contains: x,y,thickness,category,\n # so a line is defined by (trace[i],trace[i+1])--(trace[i+4],trace[i+5]), \n # with thickness=trace[i+2] (or trace[i+6]) and category=trace[i+3](or trace[i+7]) \n pts = np.empty(shape=[0, 2])\n for i in range(0,len(trace)-6,5):\n \n # trace line between coordinates\n c0 = int(trace[i]) # i.e. x0\n r0 = int(trace[i+1]) # i.e. 
y0\n \n c1 = int(trace[i+5])\n r1 = int(trace[i+6])\n\n pts = np.append(pts,[[c0,r0]],axis=0)\n pts = np.append(pts,[[c1,r1]],axis=0)\n\n thick = int(trace[i+2])\n # workaround to the fact that JS variable can't handle negatives, but -1 indicates to CV to fill\n if thick > 8:\n thick = -1\n catId = int(trace[i+3])\n type_ = int(trace[i+4])\n\n if type_ == 0:\n userAnns = tracePolyline(img,pts,catId,thick)\n else:\n if type_ == 1:\n userAnns = traceCircle(img, pts, catId,thick)\n else:\n userAnns = traceRect(img,pts,catId,thick)\n\n return userAnns \n\ndef register(request):\n\n # A boolean value for telling the template whether the registration was successful.\n # Set to False initially. Code changes value to True when registration succeeds.\n registered = False\n\n # If it's a HTTP POST, we're interested in processing form data.\n if request.method == 'POST':\n # Attempt to grab information from the raw form information.\n # Note that we make use of both UserForm and UserProfileForm.\n user_form = UserForm(data=request.POST)\n # profile_form = UserProfileForm(data=request.POST)\n\n # If the two forms are valid...\n if user_form.is_valid():\n # Save the user's form data to the database.\n user = user_form.save()\n\n # Now we hash the password with the set_password method.\n # Once hashed, we can update the user object.\n user.set_password(user.password)\n user.save()\n \n # Update our variable to tell the template registration was successful.\n registered = True\n\n # Invalid form or forms - mistakes or something else?\n # Print problems to the terminal.\n # They'll also be shown to the user.\n else:\n print (user_form.errors)\n\n # Not a HTTP POST, so we render our form using two ModelForm instances.\n # These forms will be blank, ready for user input.\n else:\n user_form = UserForm()\n \n # Render the template depending on the context.\n return render(request,\n 'freelabel/register.html',\n {'user_form': user_form, 'registered': registered} ) \n\ndef user_login(request):\n\n # If the request is a HTTP POST, try to pull out the relevant information.\n if request.method == 'POST':\n # Gather the username and password provided by the user.\n # This information is obtained from the login form.\n # We use request.POST.get('') as opposed to request.POST[''],\n # because the request.POST.get('') returns None, if the value does not exist,\n # while the request.POST[''] will raise key error exception\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n # Use Django's machinery to attempt to see if the username/password\n # combination is valid - a User object is returned if it is.\n user = authenticate(username=username, password=password)\n\n # If we have a User object, the details are correct.\n # If None (Python's way of representing the absence of a value), no user\n # with matching credentials was found.\n if user:\n # Is the account active? 
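drawTrace above parses each trace as a flat comma-separated string in which every point contributes five fields (x, y, thickness, category, shape type) and consecutive points define a segment; a tiny encoder for that wire format, matching how the loop indexes trace[i]..trace[i+6]:

```python
def encode_trace(points, thickness, category, shape_type=0):
    """Flatten (x, y) points into the 5-fields-per-point string drawTrace parses."""
    fields = []
    for x, y in points:
        fields += [str(x), str(y), str(thickness), str(category), str(shape_type)]
    return ','.join(fields)

# two points -> one segment from (10, 20) to (30, 40)
print(encode_trace([(10, 20), (30, 40)], thickness=3, category=1))
# -> 10,20,3,1,0,30,40,3,1,0
```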
It could have been disabled.\n if user.is_active:\n # If the account is valid and active, we can log the user in.\n # We'll send the user back to the homepage.\n login(request, user)\n\n # show log in time \n username = request.user.username\n filename = 'static/log/Log_' + username + '.txt'\n\n # if file exists, only append data\n if not os.path.exists(filename):\n a = open(filename, 'w+')\n a.close()\n\n login_time = datetime.datetime.now()\n\n print(login_time)\n\n str_ = \"#\" + str(login_time) + '\\n'\n\n a=open(filename, \"a+\")\n a.write(str_)\n a.close()\n\n directory = 'static/'+username\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n return HttpResponseRedirect('/freelabel/')\n # return render(request, 'freelabel/login.html', {})\n else:\n # An inactive account was used - no logging in!\n return HttpResponse(\"Your freelabel account is disabled.\")\n else:\n # Bad login details were provided. So we can't log the user in.\n print (\"Invalid login details: {0}, {1}\".format(username, password))\n return HttpResponse(\"Invalid login details supplied.\")\n\n # The request is not a HTTP POST, so display the login form.\n # This scenario would most likely be a HTTP GET.\n else:\n # No context variables to pass to the template system, hence the\n # blank dictionary object...\n return render(request, 'freelabel/login.html', {})\n\n# Use the login_required() decorator to ensure only those logged in can access the view.\n@login_required\ndef user_logout(request):\n # show log in time \n username = request.user.username\n\n filename = 'static/log/Log_' + username + '.txt'\n\n # if file exists, only append data\n if not os.path.exists(filename):\n a = open(filename, 'w+')\n a.close()\n\n logout_time = datetime.datetime.now()\n\n\n print(logout_time)\n\n\n str_ = \"!\" + str(logout_time) + '\\n'\n\n a=open(filename, \"a+\")\n a.write(str_)\n a.close()\n\n\n # Since we know the user is logged in, we can now just log them out.\n logout(request)\n\n # Take the user back to the homepage.\n return HttpResponseRedirect('/freelabel/register') \n","repo_name":"philadias/freelabel","sub_path":"freelabel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19588,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"32"} +{"seq_id":"4270023387","text":"\r\n# !/usr/bin/python\r\ntry:\r\n import sys, os, arcpy, logging\r\n import arcpy\r\n from arcpy.sa import *\r\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + \"\\\\.site_packages\\\\riverpy\\\\\")\r\n import config\r\n import fGlobal as fGl\r\nexcept:\r\n print(\"ExceptionERROR: Missing fundamental packages (required: arcpy, os, sys, logging).\")\r\n\r\ntry:\r\n from rpy2 import robjects\r\nexcept:\r\n print(\"WARNING: Missing package (rpy2 - required for RiverBuilder).\")\r\n\r\n\r\nclass RiverBuilder:\r\n def __init__(self, units):\r\n # pipes to riverbuilder.r\r\n # units = STR (either \"us\" or \"si\")\r\n self.logger = logging.getLogger(\"logfile\")\r\n\r\n self.dir2rb = config.dir2mt + \"/RiverBuilder/\"\r\n self.dir_out = config.dir2mt + \"Output/RiverBuilder\"\r\n\r\n self.R = robjects.r\r\n\r\n # set unit system variables\r\n if (\"us\" in str(units)) or (\"si\" in str(units)):\r\n self.units = units\r\n else:\r\n self.units = \"us\"\r\n self.logger.info(\"WARNING: Invalid unit_system identifier. 
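user_login and user_logout above append timestamps to the per-user log with a '#' prefix for logins and '!' for logouts; a small reader for that format (the pairing is naive and assumes alternating entries):

```python
def read_sessions(path):
    logins, logouts = [], []
    with open(path) as f:
        for line in f:
            if line.startswith('#'):
                logins.append(line[1:].strip())
            elif line.startswith('!'):
                logouts.append(line[1:].strip())
    return list(zip(logins, logouts))  # [(login_time, logout_time), ...]
```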
unit_system must be either \\'us\\' or \\'si\\'.\")\r\n self.logger.info(\" Setting unit_system default to \\'us\\'.\")\r\n\r\n if self.units == \"us\":\r\n self.ft2m = config.ft2m\r\n else:\r\n self.ft2m = 1.0\r\n\r\n def run_riverbuilder(self, input_file_name):\r\n # input_file_name = STR of RiverBuilder Input.txt file that must be stored in self.dir\r\n self.R.setwd(self.dir2rb)\r\n self.R.source('riverbuilder.r')\r\n # self.R.get(\"riverbuilder\") # uncomment if next command doesn't work\r\n self.R.riverbuilder(input_file_name, self.dir_out, overwrite='TRUE') # difference to R: TRUE as STR\r\n return self.dir_out\r\n\r\n def __call__(self, *args, **kwargs):\r\n print(\"Class Info: = RiverBuilder (%s)\" % os.path.dirname(__file__))\r\n print(dir(self))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"RiverArchitect/program","sub_path":"ModifyTerrain/cRiverBuilder.py","file_name":"cRiverBuilder.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"26694839676","text":"# import budget data\nimport os\nimport csv\n\n\nfpath = os.path.join(\"Resources\", \"budget_data.csv\")\n\nbd_rows = {}\nmonths = 0\nnet_tot_amt = 0\nchange = 0\nchange_list = {}\nchange_tot = 0\naver_change = 0\ngreatest_inc = {}\ngreatest_dec = {}\n\nwith open(fpath) as bd_csv:\n bd_reader = csv.reader(bd_csv, delimiter=\",\")\n bd_header = next(bd_reader)\n for r in bd_reader:\n bd_rows[r[0]] = r[1]\n\n# The total number of months included in the dataset\n months = len(list(bd_rows))\n #print (f\"{months}\")\n\n# The net total amount of \"Profit/Losses\" over the entire period\n bd_csv.seek(0)\n next(bd_reader)\n net_tot_amt = sum(int(r[1]) for r in bd_reader)\n #print(net_tot_amt)\n\n# Calculate the changes in \"Profit/Losses\" over the entire period, then find the average of those changes\n bd_csv.seek(0)\n next(bd_reader)\n count = 0\n for r in bd_reader:\n if count == 0:\n first = int(r[1])\n else: \n second = int(r[1])\n change = second - first\n change_list[count] = change\n change_tot += change\n first = second\n count += 1\n aver_change = round(change_tot/(months-1), 2)\n #print(list(change_list.values()))\n #print(aver_change)\n\n# The greatest increase in profits (date and amount) over the entire period\n amount = max(change_list.values())\n amount_idx = (list(change_list.keys()))[list(change_list.values()).index(amount)]\n date = list(bd_rows)\n greatest_inc[date[amount_idx]] = amount\n #print(greatest_inc)\n\n# The greatest decrease in profits (date and amount) over the entire period\n amount = min(change_list.values())\n amount_idx = (list(change_list.keys()))[list(change_list.values()).index(amount)]\n date = list(bd_rows)\n greatest_dec[date[amount_idx]] = amount\n #print(greatest_dec)\n\nprint(\"Financial Analysis\")\nprint(\"-----------------------------\")\nprint(f\"Total Months: {months}\")\nprint(f\"Total: {net_tot_amt}\")\nprint(f\"Average Change: {aver_change}\")\nfor key,value in greatest_inc.items():\n print(f\"Greatest Increase in Profits: {key} (${value})\")\nfor key,value in greatest_dec.items():\n print(f\"Greatest Decrease in Profits: {key} (${value})\")\n\nf = open(\"analysis/analysis.txt\", \"w\")\nf.write(f\"Financial Analysis\\n-----------------------------\\nTotal Months: {months}\\nTotal: {net_tot_amt}\\nAverage Change: {aver_change}\\n\")\nfor key,value in greatest_inc.items():\n f.write(f\"Greatest Increase in Profits: {key} (${value})\\n\")\nfor key,value in greatest_dec.items():\n 
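run_riverbuilder drives an R script through rpy2's robjects.r gateway; stripped of the RiverBuilder specifics, the calling pattern is just this (paths are illustrative):

```python
from rpy2 import robjects

R = robjects.r
R.setwd('/path/to/scripts')   # directory containing the .r file (example path)
R.source('riverbuilder.r')    # defines riverbuilder() in R's global environment
R.riverbuilder('Input.txt', '/path/to/output', overwrite='TRUE')  # R's TRUE passed as a string
```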
f.write(f\"Greatest Decrease in Profits: {key} (${value})\\n\")\nf.close()\n\n","repo_name":"thepianist86/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38177930684","text":"class Solution:\n def findCircleNum(self, isConnected: List[List[int]]) -> int:\n roads = defaultdict(list)\n for i, l in enumerate(isConnected):\n for j, v in enumerate(l):\n if v == 0:\n continue\n if i == j:\n continue\n roads[i].append(j)\n\n visited = [False] * len(isConnected)\n\n def dive(from_):\n visited[from_] = True\n for next_ in roads[from_]:\n if not visited[next_]:\n dive(next_)\n\n provinces = 0\n for i in range(len(isConnected)):\n if not visited[i]:\n provinces += 1\n dive(i)\n\n return provinces\n","repo_name":"blockinhead/algo_python","sub_path":"leetcode/547_number_of_provinces.py","file_name":"547_number_of_provinces.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2472452230","text":"#print(oct(100)[2:])\nnum=int(input())\ndval=0\ni=0\nwhile(num!=0):\n rem=num%10\n dval=dval+rem*pow(2,i)\n num=num//10\n i=i+1\nprint(abs(dval))\n'''\nbinary\n'''\n\n","repo_name":"rajeswari98/Python-Codes","sub_path":"binaryToDecimal.py","file_name":"binaryToDecimal.py","file_ext":"py","file_size_in_byte":165,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21801137969","text":"\n\n################################################################################\n#\n# calc_het.py - this is a script used to calculate missing genotypes, based off of Dan Drinan's hetVsReadDepth.py\n#\n# MF 9/29/2017\n\n# Based ib 2017-March-28 script written by Daniel Drinan (ddrinan@uw.edu)\n\n################################################################################\n\nimport argparse, subprocess\n\nparser = argparse.ArgumentParser()\n# it is either run with '-l' if you want to compare heterozygosity and read depth\n# of a bunch of individuals\n#\n# or, if you are only interested in a single individual, you run with '-i' and\n# '-f' \nparser.add_argument(\"-l\", \"--list\", help=\"Population map, or any white space delimited list of individuals with sample name in first column\")\nparser.add_argument(\"-i\", \"--ind\", help=\"name of individual to investigate (mutually \\\n exclusive to '-l' and requires '-f') - \\\n UNTESTED\")\nparser.add_argument(\"-f\", \"--file\", help=\"location of file with genotypes (assumes \\\n genepop format)\")\nparser.add_argument(\"-o\", \"--output\", help=\"name of output file\")\nparser.add_argument(\"-d\", \"--denominator\", help=\"use 2 if counting a FASTA file \\\n or 4 if counting a FASTQ file\")\nargs = parser.parse_args()\n\noutput_file = open(args.output, 'w')\n\n\n\n\n\n########################\n\n# function to count the proportion of heterozygous loci in a sample\ndef calcMissing(sample_name):\n individuals_genotypes = subprocess.Popen([\"grep \" + sample_name + \" \" + \\\n args.file], stdout=subprocess.PIPE, shell=True)\n (genotypes_out, genotypes_err) = individuals_genotypes.communicate()\n genotypes_out = genotypes_out.split(',')[1] # removing everything except genotypes\n genotypes_out = genotypes_out.split()\n\n tmp_missing = 0.0 # number of genotypes missing\n tmp_total = 0.0000000000000001 # number of genotyped loci\n\n for item in 
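The provinces solution above does a recursive DFS over an adjacency list; for large inputs a union-find version avoids recursion-depth limits and gives the same count (shown as an alternative, not a change to the submitted answer):

```python
def find_circle_num_uf(is_connected):
    n = len(is_connected)
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    provinces = n
    for i in range(n):
        for j in range(i + 1, n):
            if is_connected[i][j]:
                ri, rj = find(i), find(j)
                if ri != rj:
                    parent[ri] = rj
                    provinces -= 1  # two components merged
    return provinces

assert find_circle_num_uf([[1, 1, 0], [1, 1, 0], [0, 0, 1]]) == 2
```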
genotypes_out:\n tmp_total += 1\n if item.count('0') == len(item): # if true, a genotype does not exist\n tmp_missing += 1\n return float(tmp_missing)/float(tmp_total)\n\n\n\n#############################\n##\n## main\n##\n\n#############################\noutput_file.write('sample prop_missing\\n')\n\nif args.list:\n\n list_file = open(args.list, 'r')\n\n for line in list_file:\n sample_name = line.strip().split()[0]\n\n # extract the list of genotypes for the individual from the genepop file\n\n tmp_proportion_missing = calcMissing(sample_name)\n\n tmp_output = sample_name + ' ' + str(tmp_proportion_missing) + '\\n'\n\n output_file.write(tmp_output)\n\n\n list_file.close()\n\noutput_file.close()","repo_name":"mfisher5/PCod-Compare-repo","sub_path":"analyses/calc_missing_genotypes.py","file_name":"calc_missing_genotypes.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"71347366490","text":"#https://www.acmicpc.net/problem/5427\nimport sys\nfrom collections import deque\n\nT = int(input())\n\nfor l in range(T):\n C, R = map(int, sys.stdin.readline().split())\n dx = [1, 0, -1, 0]\n dy = [0, 1, 0, -1]\n board = []\n fire = []\n move = []\n for i in range(R):\n line = list(str(input()))\n board.append(line.copy())\n fire.append(line.copy())\n move.append(line.copy())\n fire_queue = deque()\n move_queue = deque()\n ans = \"IMPOSSIBLE\"\n\n for i in range(R):\n for r in range(C):\n if fire[i][r] == \"*\":\n fire[i][r] = 0\n move[i][r] = \"#\"\n fire_queue.append((i, r))\n elif fire[i][r] == \".\":\n fire[i][r] = -1\n move[i][r] = -1\n elif fire[i][r] == \"@\":\n fire[i][r] = -1\n move[i][r] = 0\n move_queue.append((i, r))\n\n while fire_queue:\n x, y = fire_queue.popleft()\n for dir in range(0, 4):\n nx = x + dx[dir]\n ny = y + dy[dir]\n if nx < 0 or nx >= R or ny < 0 or ny >= C:\n continue\n if fire[nx][ny] == \"#\":\n continue\n if fire[nx][ny] >= 0:\n continue\n fire[nx][ny] = fire[x][y] + 1\n fire_queue.append((nx, ny))\n\n flag = 1\n while move_queue and flag:\n x, y = move_queue.popleft()\n for dir in range(0, 4):\n nx = x + dx[dir]\n ny = y + dy[dir]\n if nx < 0 or nx >= R or ny < 0 or ny >= C:\n ans = move[x][y] + 1\n flag = 0\n break\n if move[nx][ny] == \"#\" or move[nx][ny] >= 0:\n continue\n if move[x][y] + 1 >= fire[nx][ny] and fire[nx][ny] != -1:\n continue\n move[nx][ny] = move[x][y] + 1\n move_queue.append((nx, ny))\n print(ans)\n\n#https://www.acmicpc.net/problem/7562\n\nimport sys\nfrom collections import deque\n\nT = int(input())\ndx = [2, 1, -1, -2, 2, 1, -1, -2]\ndy = [1, 2, 2, 1, -1, -2, -2, -1]\n\n'''def print_arr(arr):\n for i in range(len(arr)):\n print(arr[i])'''\n\nfor i in range(T):\n queue = deque()\n l = int(input())\n cur_x, cur_y = map(int, sys.stdin.readline().split())\n move_x, move_y = map(int, sys.stdin.readline().split())\n vis = [[-1 for r in range(l)] for i in range(l)]\n vis[cur_x][cur_y] = 0\n queue.append((cur_x, cur_y))\n\n while vis[move_x][move_y] == -1:\n x, y = queue.popleft()\n for dir in range(8):\n nx = x + dx[dir]\n ny = y + dy[dir]\n if nx < 0 or nx >= l or ny < 0 or ny >= l:\n continue\n if vis[nx][ny] != -1:\n continue\n vis[nx][ny] = vis[x][y] + 1\n queue.append((nx, ny))\n 
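In the fire-escape BFS above (problem 5427) the ordering matters: fire arrival times are flooded first, and the person may step into a cell only by arriving strictly before the fire, since fire spreads in the same minute. The guard condition in isolation:

```python
def can_enter(person_time, fire_time):
    """fire_time == -1 means the fire never reaches the cell."""
    if fire_time == -1:
        return True
    return person_time + 1 < fire_time  # arriving on the same minute loses
```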
print(vis[move_x][move_y])","repo_name":"SUNMI-KIM/algorithm_workbook","sub_path":"backjoon/2023_06_09.py","file_name":"2023_06_09.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33850282721","text":"import rpyc\nimport sys\nimport argparse\nimport random\nimport threading\nimport time\n\nCHECK_PERIOD = 0.05\n\n\n\nargv = argparse.ArgumentParser()\nargv.add_argument(\"config\")\nargv.add_argument(\"node_id\")\nargv.add_argument(\"port\")\nargv = argv.parse_args()\n\ndef flush_print(*values):\n print(*values, flush=True)\n\n\n\n'''\nA RAFT RPC server class.\n\nPlease keep the signature of the is_leader() method unchanged (though\nimplement the body of that function correctly. You will need to add\nother methods to implement ONLY the leader election part of the RAFT\nprotocol.\n'''\nclass RaftNode(rpyc.Service):\n\n\n \"\"\"\n Initialize the class using the config file provided and also initialize\n any datastructures you may need.\n \"\"\"\n def __init__(self, config, id):\n self.voted = False\n self.heartbeat_period = 0.2\n self.check_period = 0.05\n flush_print(id)\n # flush_print(self.read_config(config))\n self.config = self.read_config(config)\n self.N = len(self.config)\n self.id = id\n # self.other_nodes = self.get_other_nodes(self.config, self.id)\n # self.other_nodes[\"1\"].root.say_hi()\n self.send_heartbeat()\n self.is_leader = False\n self.elect_time = random.randint(100, 350) / 100 #second\n flush_print(f\"elect time {self.elect_time}\")\n self.last_heard = time.time()\n self.term = 0\n self.num_votes = 0\n threading.Thread(target=self.start_beating).start()\n threading.Thread(target=self.monitor_leader_beat).start()\n\n\n\n def heartbeat_send_thread(self, id, ip, port):\n try:\n c = rpyc.connect(ip, port)\n c.root.receive_heartbeat(self.id, self.is_leader, self.term)\n except OSError:\n # flush_print(f\"node {id} is down\")\n return\n\n def monitor_leader_beat(self):\n while True:\n if self.is_leader:\n self.last_heard = time.time()\n time.sleep(self.check_period)\n if time.time() - self.last_heard > self.elect_time:\n self.send_candidate_message()\n else:\n time.sleep(self.check_period)\n\n def exposed_vote(self, id, term):\n if self.term >= term:\n return\n self.term = term\n self.last_heard = time.time()\n ip, port = self.config[id]\n try:\n c = rpyc.connect(ip, port)\n c.root.receive_vote(self.id, self.term)\n except OSError:\n # flush_print(f\"node {id} is down\")\n return\n\n def exposed_receive_vote(self, id, term):\n if term == self.term:\n self.num_votes += 1\n if self.num_votes + 1 > self.N / 2:\n self.is_leader = True\n flush_print(f\"node {self.id} is the leader with {self.num_votes+1} votes\")\n\n\n def start_beating(self):\n while True:\n if self.is_leader:\n self.send_heartbeat()\n time.sleep(self.heartbeat_period)\n\n def send_heartbeat(self):\n for id in self.config:\n if id == self.id:\n continue\n ip, port = self.config[id]\n threading.Thread(target=self.heartbeat_send_thread, args=(id, ip, port, )).start()\n\n def candidate_message_thread(self, ip, port):\n try:\n c = rpyc.connect(ip, port)\n c.root.vote(self.id, self.term)\n except OSError:\n # flush_print(f\"node {id} is down\")\n return\n\n def send_candidate_message(self):\n self.last_heard = time.time()\n self.term += 1\n flush_print(f\"became candidate for term {self.term}\")\n self.num_votes = 0\n for id in self.config:\n if id == self.id:\n continue\n ip, port = self.config[id]\n 
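RaftNode above draws its election timeout uniformly from 1.0-3.5 s at startup (random.randint(100, 350) / 100); giving every node a different timeout is what breaks ties, so one follower usually times out first and wins the election. The follower's watchdog test reduces to:

```python
import random
import time

election_timeout = random.randint(100, 350) / 100  # 1.0-3.5 s, different per node
last_heard = time.time()

def leader_is_silent() -> bool:
    """True once no heartbeat has arrived within this node's election timeout."""
    return time.time() - last_heard > election_timeout
```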
threading.Thread(target=self.candidate_message_thread, args=(ip, port, )).start()\n\n def exposed_receive_heartbeat(self, other_id, is_leader, term):\n # flush_print(f\"received heart beat from {other_id}\")\n if is_leader:\n if self.term > term:\n return\n elif self.term < term:\n self.is_leader = False\n self.term = term\n self.last_heard = time.time()\n\n def get_other_nodes(self, config_dict, this_id):\n nodes = {}\n for id in config_dict:\n if id == this_id:\n continue\n ip, port = config_dict[id]\n nodes[id] = rpyc.connect(ip, port)\n return nodes\n\n\n\n '''\n x = is_leader(): returns True or False, depending on whether\n this node is a leader\n\n As per rpyc syntax, adding the prefix 'exposed_' will expose this\n method as an RPC call\n '''\n def exposed_is_leader(self):\n return self.is_leader\n\n def read_config(self, file):\n config_dict = {}\n with open(file) as f:\n N = int(f.readline().split()[-1])\n for i in range(N):\n line = f.readline()\n id, ip, port_num = line.split(\":\")\n id = id[4:]\n ip = ip.strip()\n port_num = int(port_num.strip())\n config_dict[id] = (ip, port_num)\n return config_dict\n\n def exposed_say_hi(self):\n # flush_print(f\"hello my name is {self.id}\")\n return\n\nif __name__ == '__main__':\n from rpyc.utils.server import ThreadPoolServer\n print(\"flag 1\", flush=True)\n server = ThreadPoolServer(RaftNode(argv.config, argv.node_id), port = int(argv.port))\n server.start()\n print(\"flag 2\", flush=True)\n","repo_name":"Kokkini/distributed_system_raft_algorithm","sub_path":"raftnode.py","file_name":"raftnode.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15282888680","text":"aluno = dict()\naluno['nome'] = str(input('Nome: '))\naluno['media'] = float(input('Média: '))\nif aluno['media'] >= 6:\n aluno['situacao'] = '\\033[32maprovado\\033[m'\nelse:\n aluno['situacao'] = '\\033[31mreprovado\\033[m'\n'''print(f'{aluno[\"nome\"]} teve a média {aluno[\"media\"]} e foi {aluno[\"situacao\"]}!')'''\nfor k, v in aluno.items():\n print(f'{k} é igual a {v}')\n","repo_name":"rrmagalhaes/python","sub_path":"cursoEmVideo/exercicios/scripts/desafio090.py","file_name":"desafio090.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25330738078","text":"from pydub import AudioSegment, playback\r\n\r\nAudioSegment.converter = \"ffmpeg\\\\ffmpeg.exe\"\r\n\r\nsound1 = AudioSegment.from_mp3(\"1.mp3\")\r\nsound2 = AudioSegment.from_mp3(\"2.mp3\")\r\noutput = sound1.overlay(sound2) # overlay command\r\n\r\nclass MediaPlayer:\r\n def __init__(self):\r\n l=[\"Mua tren pho bay xa - Tran Minh Hoang.mp3\",\"Nhu Chua Bat Dau - Minh Hoang.mp3\",'Trai Tim Khong Ngu Yen - Tran Minh Hoang.mp3']\r\n self.list=[]\r\n for x in l:\r\n sound = AudioSegment.from_mp3(x)\r\n self.list.append(sound)\r\n def Play(self, index):\r\n for x in self.list:\r\n if index == (self.list.index(x)+1):\r\n playback.play(x)\r\n def MixAndPlay(self, index1, index2):\r\n for x in self.list:\r\n if index1 == (self.list.index(x)+1):\r\n for y in self.list:\r\n if index2 == (self.list.index(y)+1):\r\n output = x.overlay(y)\r\n playback.play(output)\r\n\r\nhoang = MediaPlayer()\r\n# 
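read_config above expects a header whose last token is the node count, followed by one 'node<ID>: <ip>: <port>' line per node (the 'node' prefix is stripped with id[4:]); a config that parses under those rules, with illustrative values:

```python
# example cluster config accepted by read_config(); values are illustrative
config_text = """N: 3
node1: 127.0.0.1: 5001
node2: 127.0.0.1: 5002
node3: 127.0.0.1: 5003
"""
with open("cluster.conf", "w") as f:
    f.write(config_text)
# read_config("cluster.conf") -> {'1': ('127.0.0.1', 5001),
#                                 '2': ('127.0.0.1', 5002),
#                                 '3': ('127.0.0.1', 5003)}
```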
hoang.Play(2)\r\nhoang.MixAndPlay(1,3)\r\n","repo_name":"hiepxanh/C4E4","sub_path":"HoangTran/test_pydub_play.py","file_name":"test_pydub_play.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2760201529","text":"#imports here\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport time\nfrom datetime import datetime\nimport re\nimport os\nimport psycopg2\nfrom psycopg2 import sql\n\n\ndef get_comp_name(driver):\n\thead_container = driver.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb j83agx80 cbu4d94t g5gj957u d2edcug0 hpfvmrgz on77hlbc buofh1pr o8rfisnq ph5uu5jm b3onmgus ihqw7lf3 ecm0bbzt']\")\n\tif len(head_container) > 0:\n\t\tname_container = head_container[0].find_elements_by_css_selector(\"div[class='bi6gxh9e aov4n071']\")\n\telse:\n\t\thead_container = driver.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb j83agx80 cbu4d94t buofh1pr tgvbjcpo']\")\n\t\tname_container = head_container[0].find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb j83agx80 cbu4d94t pfnyh3mw d2edcug0 bp9cbjyn jb3vyjys']\")\n\treturn name_container[0].text\n\t\ndef get_fb_link(link):\n\treturn link\n\ndef get_messenger(link):\n\tmess_pattern = 'https://www.facebook.com/messages/t/'\n\tfb_pattern = 'https://www.facebook.com/'\n\treturn mess_pattern + link[len(fb_pattern):]\n\ndef get_phone_number(info):\n\tphone_pattern = '\\+852\\s[0-9]{4}\\s[0-9]{4}'\n\treturn re.fullmatch(phone_pattern, info.text)\ndef get_email(info):\n\treturn '@' in info.text\ndef get_website(info):\n\treturn 'http' in info.text or '.com' in info.text\ndef get_business_class(info):\n\treturn len(info.find_elements_by_css_selector(\"a[class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso i1ao9s8h esuyzwwr f1sip0of lzcic4wl oo9gr5id gpro0wi8 lrazzd5p']\")) > 0\n\ndef get_other_about_info(about_container, keyword):\n\tphone = '?'\n\tmail = '?'\n\tweb = '?'\n\tbusclass = '?'\n\tdes = ''\n\tsee_more_buttons = about_container.find_elements_by_css_selector(\"div[class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso i1ao9s8h esuyzwwr f1sip0of lzcic4wl oo9gr5id gpro0wi8 lrazzd5p']\")\n\tfor butt in see_more_buttons:\n\t\tif 'See more' in butt.text:\n\t\t\tbutt.click()\n\tinfos = about_container.find_elements_by_css_selector(\"div[class='j83agx80']\")\n\tsecond_struct = False\n\tif len(infos) == 0:\n\t\tinfos = about_container.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb j83agx80 pfnyh3mw jifvfom9 gs1a9yip owycx6da btwxx1t3 discj3wi b5q2rw42 lq239pai mysgfdmx hddg9phg']\")\n\t\tsecond_struct = True\n\t\tbusclass = keyword\n\tfor info in infos:\n\t\tif get_phone_number(info):\n\t\t\tphone = info.text\n\t\telif get_email(info):\n\t\t\tmail = info.text\n\t\telif get_website(info):\n\t\t\tweb = info.text\n\t\telif not second_struct and get_business_class(info):\n\t\t\tbusclass = info.text\n\t\telse:\n\t\t\tdes+=info.text + '\\n'\n\treturn phone, mail, web, busclass, des\n\ndef get_followers(about_container):\n\tnum = ''\n\tlike_follow = 
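MixAndPlay above only sends the overlay to the speakers; if the mix should also be kept, pydub can export the overlaid segment to a file:

```python
from pydub import AudioSegment

a = AudioSegment.from_mp3("1.mp3")
b = AudioSegment.from_mp3("2.mp3")
mixed = a.overlay(b)                     # b is layered on top of a
mixed.export("mixed.mp3", format="mp3")  # keep the mix on disk
```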
about_container.find_elements_by_css_selector(\"div[class='taijpn5t cbu4d94t j83agx80']\")\n\tif len(like_follow) > 0:\n\t\tfor text in like_follow:\n\t\t\tif 'follow' in text.text:\n\t\t\t\tnum = text.text[:-19]\n\t\t\t\tbreak\n\t\tnum = ''.join(num.split(','))\n\telse:\n\t\tul_elem = about_container.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb j83agx80 pfnyh3mw jifvfom9 gs1a9yip owycx6da btwxx1t3 jb3vyjys b5q2rw42 lq239pai mysgfdmx hddg9phg']\")\n\t\tfor text in ul_elem:\n\t\t\tif 'followers' in text.text:\n\t\t\t\tnum = text.text[:-10]\n\t\t\t\tbreak\n\t\tif 'K' in num:\n\t\t\tnum = float(num[:-1])*1000\n\treturn int(num)\n\ndef get_page_created_date(driver, link):\n\tpcd_container = driver.find_elements_by_css_selector(\"span[class='ll8tlv6m j83agx80 wkznzc2l dhix69tm aov4n071']\")\n\tpcd_pattern = 'Page created – '\n\tif len(pcd_container) == 0:\n\t\tabout_link = link + '/about_profile_transparency'\n\t\tdriver.get(about_link)\n\t\ttransparent_container = driver.find_elements_by_css_selector(\"div[class='dati1w0a tu1s4ah4 f7vcsfb0 discj3wi']\")\n\t\tlist_text = transparent_container[0].text.split('\\n')\n\t\tfor id_text in range(len(list_text)):\n\t\t\tif list_text[id_text] == 'Page creation date':\n\t\t\t\treturn list_text[id_text-1]\n\treturn pcd_container[0].text[len(pcd_pattern):]\n\ndef get_about_container(keyword,driver):\n\t'''block_titles = driver.find_elements_by_css_selector(\"span[class='a8c37x1j ni8dbmo4 stjgntxs l9j0dhe7 r8blr3vg']\")\n\t\t\t\tabout_index = 0\n\t\t\t\tfor index in range(len(block_titles)):\n\t\t\t\t\tif 'About' in block_titles[index].text or 'Intro' in block_titles[index].text:\n\t\t\t\t\t\tabout_index = index\n\t\t\t\t\t\tbreak'''\n\tleft_container = driver.find_elements_by_css_selector(\"div[class='sjgh65i0']\")\n\tfor container in left_container:\n\t\tif container.text[:5] == 'About' or container.text[:5] == 'Intro':\n\t\t\tabout_container = container\n\t\t\tbreak\n\ttry:\n\t\tfoll = get_followers(about_container)\n\texcept:\n\t\tfoll = '-1'\n\ttry:\n\t\tphone, mail, web, busclass, des = get_other_about_info(about_container, keyword)\n\texcept:\n\t\tphone, mail, web, busclass, des = 'error', 'error', 'error', 'error', 'error'\n\treturn phone, mail, web, busclass, des, foll\n\ndef get_left_info(driver, link, keyword):\n\t#left_containter = driver.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb fhuww2h9 hpfvmrgz o387gat7 g1e6inuh g5gj957u aov4n071 oi9244e8 bi6gxh9e h676nmdw aghb5jc5 rek2kq2y']\")\n\t#print(len(left_containter))\n\t#print(pcd)\n\tphone, mail, web, busclass, des, foll = get_about_container(keyword, driver)\n\ttry:\n\t\tpcd = get_page_created_date(driver, link)\n\texcept:\n\t\tpcd = 'error'\n\treturn phone, mail, web, busclass, des, foll, pcd\n\n\ndef get_latest_post(right_container):\n\tfeeds = right_container[0].find_elements_by_css_selector(\"div[role='feed']\")\n\tif len(feeds) > 0:\n\t\tpost_container = feeds[-1]\n\telse:\n\t\tpost_container = right_container[0]\n\tposts = post_container.find_elements_by_css_selector(\"div[class='du4w35lb k4urcfbm l9j0dhe7 sjgh65i0']\")\n\tif len(posts) == 0:\n\t\treturn 'No post'\n\tlatest_post = posts[0]\n\tpost_time = latest_post.find_elements_by_css_selector(\"a[class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso i1ao9s8h esuyzwwr f1sip0of lzcic4wl gmql0nx0 gpro0wi8 b1v8xokw']\")[0]\n\t#may change\n\t#print(\"get_latest_post\")\n\treturn 
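get_followers has to normalize two display formats: comma-grouped counts like '12,345' and abbreviated ones like '1.2K'. The conversion on its own:

```python
def parse_follower_count(text: str) -> int:
    text = text.replace(',', '')
    if text.endswith('K'):
        return int(float(text[:-1]) * 1000)  # '1.2K' -> 1200
    return int(text)

assert parse_follower_count('12,345') == 12345
assert parse_follower_count('1.2K') == 1200
```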
post_time.text\n\ndef get_right_info(driver):\n\tright_container = driver.find_elements_by_css_selector(\"div[class='dp1hu0rb d2edcug0 taijpn5t j83agx80 gs1a9yip']\")\n\tif len(right_container) == 0:\n\t\tright_container = driver.find_elements_by_css_selector(\"div[class='rq0escxv l9j0dhe7 du4w35lb fhuww2h9 hpfvmrgz gile2uim pwa15fzy g5gj957u aov4n071 oi9244e8 bi6gxh9e h676nmdw aghb5jc5']\")\n\tlatest_post = get_latest_post(right_container)\n\t#print(\"get_right_info\")\n\treturn latest_post\n\ndef transform_keyword(keyword):\n\ttmp_key = keyword\n\tif '-' in keyword:\n\t\ttmp_key = '_'.join(tmp_key.split('-'))\n\tif ' ' in keyword:\n\t\ttmp_key = '_'.join(tmp_key.split(' '))\n\treturn tmp_key\n\ndef insert_to_db(scroll_num, keyword, res):\n\t(comp, fb, phone, mess, des, mail, web, foll, latest_post, scraping_time, pcd, busclass) = res\n\ttmp_key = transform_keyword(keyword)\n\tconn = psycopg2.connect(DATABASE_URL, sslmode='require')\n\tcur = conn.cursor()\n\ttry:\n\t\tcur.execute(\"\"\"SELECT Count(*) FROM _Master\n\t WHERE Facebook = %s\"\"\", (fb,))\n\t\tdata = cur.fetchone()[0]\n\t\tif data == 0:\n\t\t\tsqlite_insert_with_param = \"\"\"INSERT INTO _Master\n\t\t\t\t\t\t\t(Company, Facebook, Phone, Messenger, Description, Email, Website, NumFollowers, LatestPost, ScrapingTime, PageCreatedDate, BusinessClassification, ScrollNumber, Keyword) \n\t\t\t\t\t\t\t\tSELECT %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n\t\t\t\t\t\t\t\tWHERE NOT EXISTS\n\t\t\t\t\t\t\t\t(SELECT Facebook FROM _Master\n\t\t\t\t\t\t\t\t\tWHERE Facebook = %s);\"\"\"\n\t\t\tdata_tuple = (comp, fb, phone, mess, des, mail, web, foll, latest_post, scraping_time, pcd, busclass, scroll_num, keyword, fb)\n\t\t\tcur.execute(sqlite_insert_with_param, data_tuple)\n\t\t\tconn.commit()\n\t\t\tprint(\"Record \", comp, phone, file=open(file_name, 'a', encoding='utf-8'))\n\t\t\tprint(\"Record \", comp, phone)\n\t\t\ttry:\n\t\t\t\tcur.execute(sql.SQL('''CREATE TABLE {}\n\t\t\t\t\t\t\t(Company text, Facebook text, Phone text, Messenger text, Description text, Email text, Website text, NumFollowers integer,LatestPost text, ScrapingTime text, PageCreatedDate text, BusinessClassification text, ScrollNumber integer, Keyword text)''').format(sql.Identifier(tmp_key)))\n\t\t\texcept:\n\t\t\t\tconn.rollback()\n\t\t\tquery = sql.SQL(\"\"\"INSERT INTO {} (Company, Facebook, Phone, Messenger, Description, Email, Website, NumFollowers, LatestPost, ScrapingTime, PageCreatedDate, BusinessClassification, ScrollNumber, Keyword)\n\t\t\t\t\t\t\t\tSELECT %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s;\"\"\").format(sql.Identifier(tmp_key))\n\t\t\ttup = (comp, fb, phone, mess, des, mail, web, foll, latest_post, scraping_time, pcd, busclass, scroll_num, keyword,)\n\t\t\tcur.execute(query, tup)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tcur.execute(\"\"\"SELECT Count(*) FROM _Master\n\t\t\t\t\t\t\tWHERE Facebook = %s AND LatestPost = %s\"\"\", (fb, latest_post,))\n\t\t\t\tdata = cur.fetchone()[0]\n\t\t\t\tif data == 0:\n\t\t\t\t\tcur.execute(\"\"\"UPDATE _Master\n\t\t\t\t\t\t\tSET LatestPost = %s, ScrapingTime = %s\n\t\t\t\t\t\t\tWHERE Facebook = %s\"\"\", (latest_post, datetime.now().strftime(\"%Y-%m-%d\"), fb))\n\t\t\t\t\tprint(\"Update \", comp, phone, file=open(file_name, 'a', encoding='utf-8'))\n\t\t\t\t\tprint(\"Update \", comp, phone)\n\t\t\t\t\tcur.execute(sql.SQL(\"\"\"UPDATE {} SET LatestPost = %s, ScrapingTime = %s\n\t\t\t\t\t\t\t\t\t\t\tWHERE Facebook = %s\"\"\").format(sql.Identifier(tmp_key)), (latest_post, 
datetime.now().strftime(\"%Y-%m-%d\"), fb)) \n\t\t\t\telse:\n\t\t\t\t\tprint(comp, \" already exists\", file=open(file_name, 'a', encoding='utf-8'))\n\t\t\t\t\tprint(comp, \" already exists\")\n\t\t\texcept:\n\t\t\t\tconn.rollback()\n\texcept:\n\t\tprint(\"db error\")\n\tconn.commit()\n\tcur.close()\n\tconn.close()\n\n\ndef get_page_data(driver, link, keyword, scroll_num):\n\t#print(\"get_page_data\")\n\tdriver.get(link)\n\ttime.sleep(3)\n\ttry:\n\t\tcomp = get_comp_name(driver)\n\texcept:\n\t\tcomp = 'error'\n\t#print(comp)\n\tfb = get_fb_link(link)\n\tmess = get_messenger(link)\n\ttry:\n\t\tlatest_post = get_right_info(driver)\n\texcept:\n\t\tlatest_post = 'error'\n\tphone, mail, web, busclass, des, foll, pcd = get_left_info(driver, link, keyword)\n\tscraping_time = datetime.now().strftime(\"%Y-%m-%d\")\n\tres = (comp, fb, phone, mess, des, mail, web, foll, latest_post, scraping_time, pcd, busclass)\n\t#print(\"before insert\")\n\tinsert_to_db(scroll_num, keyword, res)\n\t#print(\"after insert\")\n\t#print('comp: ', comp, '\\nfb: ', fb, '\\nphone: ', phone, '\\nmess: ', mess, '\\ndes: ', des, '\\nmail: ', mail, '\\nweb: ', web, '\\nfoll: ', foll, '\\nlast_pst:', latest_post, '\\nscraping time: ',scraping_time, '\\npcd: ', pcd, '\\nclass: ', busclass)\n\ndef get_driver():\n\t#print(\"get_driver\")\n\toptions = webdriver.ChromeOptions()\n\toptions.binary_location = os.environ[\"GOOGLE_CHROME_BIN\"]\n\toptions.add_argument(\"--headless\")\n\toptions.add_argument(\"--no-sandbox\")\n\toptions.add_argument(\"--disable-dev-sh-usage\")\n\toptions.add_argument(\"--disable-gpu\")\n\toptions.add_argument(\"--remote-debugging-port=9222\")\n\t#prefs = {\"profile.default_content_setting_values.notifications\" : 2}\n\t#chrome_options.add_experimental_option(\"prefs\",prefs)\n\t#specify the path to chromedriver.exe (download and save on your computer)\n\tdriver = webdriver.Chrome(executable_path= os.environ[\"CHROMEDRIVER_PATH\"], chrome_options=options)\n\t#open the webpage\n\tdriver.get(\"http://www.facebook.com\")\n\n\t#target username\n\tusername = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[name='email']\")))\n\tpassword = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[name='pass']\")))\n\t\n\t#enter username and password\n\tusername.clear()\n\tusername.send_keys(\"xayob70017@dedatre.com\")\n\tpassword.clear()\n\tpassword.send_keys(\"icho2019\")\n\t\n\t#target the login button and click it\n\tbutton = WebDriverWait(driver, 2).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"button[type='submit']\"))).click()\n\treturn driver\n\ndef create_db():\n\t#print(\"create_db\")\n\tconn = psycopg2.connect(DATABASE_URL, sslmode='require')\n\tcur = conn.cursor()\n\ttry:\n\t\tcur.execute('''CREATE TABLE _Master\n\t (Company text, Facebook text, Phone text, Messenger text, Description text, Email text, Website text, NumFollowers integer,LatestPost text, ScrapingTime text, PageCreatedDate text, BusinessClassification text, ScrollNumber integer, Keyword text)''')\n\texcept:\n\t conn.rollback()\n\tconn.commit()\n\tcur.close()\n\tconn.close()\n\ndef get_num_link(keyword):\n\t#print(\"get_num_link\")\n\tconn = psycopg2.connect(DATABASE_URL, sslmode='require')\n\tcur = conn.cursor()\n\tcur.execute(\"\"\"SELECT Count(Facebook) FROM _Master\n\t\t\t\tWHERE Keyword = %s\"\"\", (keyword,))\n\tnum_links = cur.fetchone()[0]\n\tif num_links == 0:\n\t\tcur.close()\n\t\tconn.close()\n\t\treturn 0, 0\n\telse:\n\t\tcur.execute(\"\"\"SELECT 
ScrollNumber FROM _Master\n\t\t\t\t\tWHERE Keyword = %s\n\t\t\t\t\tORDER BY ScrollNumber DESC\"\"\", (keyword,))\n\t\tscroll_pos= cur.fetchone()[0] + 1\n\t\tcur.close()\n\t\tconn.close()\n\t\treturn num_links, scroll_pos\n\ndef check_skip(len_new_link, cur_count):\n\treturn len_new_link == 0 and cur_count == 3\n\ndef main_func():\n\tcreate_db()\n\tdriver1 = get_driver()\n\t#print(driver.page_source)\n\tdriver2 = get_driver()\n\ttime.sleep(3)\n\t\n\tkey_list = ['Advertising', 'marketing', 'Agriculture', 'Arts', 'entertainment','Beauty', 'cosmetic', 'personal care',\n\t'Commercial', 'industrial', 'Education', 'Finance', 'Food', 'drink', 'Hotel', 'Legal', 'Local service',\n\t'Media', 'news company', 'Medical', 'health', 'Non-governmental organisation', 'Non-profit organisation',\n\t'Property', 'Public service', 'government service', 'Science', 'technology', 'engineering', 'Shopping', 'retail',\n\t'Sport', 'recreation', 'Travel', 'transport', 'Vehicle', 'aircraft', 'boat']\n\n\tfilter_var = '&filters=eyJmaWx0ZXJfcGFnZXNfbG9jYXRpb246MCI6IntcIm5hbWVcIjpcImZpbHRlcl9wYWdlc19sb2NhdGlvblwiLFwiYXJnc1wiOlwiMTEzMzE3NjA1MzQ1NzUxXCJ9In0%3D'\n\tdomain = 'https://www.facebook.com/search/pages?q='\n\tscroll_range = 2\n\n\t#temp_break = 0\n\n\tfor keyword in key_list:\n\t\t#if temp_break == 2:\n\t\t#\tbreak\n\t\t#temp_break+=1\n\t\tsearch_link = domain + keyword + filter_var\n\t\tdriver1.get(search_link)\n\t\ttime.sleep(3)\n\t\t#print(driver.page_source)\n\t\tcur_count = 0\n\t\tnum_links, scroll_pos = get_num_link(keyword)\n\t\t#print(\"num_links: \", num_links)\n\t\t#print(\"scroll_pos: \", scroll_pos)\n\t\tfor times_to_ignore in range(0, scroll_pos):\n\t\t\tdriver1.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\t#print(\"scroll over\")\n\t\t\ttime.sleep(2)\n\t\tfor scroll_num in range(scroll_pos, scroll_pos + scroll_range):\n\t\t\tlinks = []\n\t\t\t#print(\"come here\")\n\t\t\tresult_container = driver1.find_elements_by_css_selector(\"a[class='oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x jb3vyjys rz4wbd8a qt6c0cv9 a8nywdso i1ao9s8h esuyzwwr f1sip0of lzcic4wl oo9gr5id gpro0wi8 lrazzd5p dkezsu63']\")\n\t\t\tfor index_link in range(num_links, len(result_container)):\n\t\t\t\tlinks.append(result_container[index_link].get_attribute('href'))\n\t\t\tnum_links+=len(links)\n\t\t\tprint(\"Page\",scroll_num, \"of\",keyword, \":\", len(links), \"links recorded\", file=open(file_name, 'a', encoding='utf-8'))\n\t\t\tprint(\"Page\",scroll_num, \"of\",keyword, \":\", len(links), \"links recorded\")\n\t\t\tif len(links) == 0:\n\t\t\t\tcur_count+=1\n\t\t\t\tif check_skip(len(links), cur_count):\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcur_count = 0\n\t\t\tfor link in links:\n\t\t\t\tget_page_data(driver2, link, keyword, scroll_num)\n\t\t\t\t#\tnum_links-=1\n\t\t\t\t#\tcontinue\n\t\t\tdriver1.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\ttime.sleep(2)\n\nif __name__ == '__main__':\n\tDATABASE_URL = os.environ['DATABASE_URL']\n\tfile_name = datetime.now().strftime(\"%Y_%m_%d_%Hh_%Mm_%Ss\") + '.txt'\n\tmain_func()\n\n","repo_name":"opro1801/WebScraping","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":15848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42211311218","text":"from vediotest.my_recorder import *\nfrom moviepy.editor import *\nimport cv2\nimport numpy as np\nfrom 
vediotest.detect.my_detector import YoloDetector\nfrom vediotest.audio.my_detector import AudioDetector\nimport os\ndef vediotest(ip,savDir,recordTime):\n #录制视频\n vedioresult = 0\n voiceresult = 0\n reco=Recorder(ip,savDir,recordTime)\n reco.record()\n vet,videoPath,aet,audioPath = reco.read(audio=True)\n print ( vet,videoPath,aet,audioPath)\n if aet:\n ad =AudioDetector()\n reta, ra = ad.start(audioPath)\n if reta:\n if ra > 50 :\n print(\"检测到校验音频\")\n voiceresult = 1\n else:\n print(\"未检测到校验音频\")\n voiceresult = 0\n\n else:\n print(\"校验音频失败\")\n voiceresult = -1\n else:\n print(\"录制音频失败\")\n voiceresult = -2\n if vet:\n # 读取视频文件 \n videoCapture = cv2.VideoCapture(videoPath)\n #每秒读取一帧\n success, frame = videoCapture.read()\n i = 0\n timeF = 25\n j=0\n while success :\n i = i + 1\n if (i % timeF == 0):\n j = j + 1\n # save_image(frame,'./results/',j)\n imgPath = savDir+'/' +str(j)+ '.jpg'\n cv2.imwrite(imgPath,frame)\n yd = YoloDetector()\n img = cv2.imread(imgPath)\n results = yd.start(img)\n if results:\n vedioresult=vedioresult+1\n print(\"img test success, result: \", results)\n #print('save image:',j)\n success, frame = videoCapture.read()\n if vedioresult>0:\n print(\"检测到校验视频\")\n else:\n print(\"未检测到校验视频\")\n else:\n print(\"录制视频失败\")\n vedioresult = -2\n assert vedioresult and voiceresult\n\ndef voicetest(ip,savDir,recordTime):\n #录制视频\n reco=Recorder(ip,savDir,recordTime)\n reco.record()\n vet,videoPath,aet,audioPath = reco.read(audio=True)\n if aet:\n ad =AudioDetector()\n reta, ra = ad.start(audioPath)\n if reta:\n if ra > 50 :\n print(\"检测到校验音频\")\n assert True\n else:\n print(\"未检测到校验音频\")\n assert False\n\n else:\n print(\"校验音频失败\")\n assert False\n else:\n print(\"录制音频失败\")\n assert False","repo_name":"hjw199/python-behave-airtest-for-smart-tv","sub_path":"vediotest/vediotest.py","file_name":"vediotest.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12954766214","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nimport itertools\nimport numpy as np\nimport os\n\nfrom collections import Counter\nfrom math import floor\nfrom math import log\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\n'''\n This class provides conversion of text documents into a gram matrix\n to be used in an SVM for text classification purposes.\n\n To receive gram matrix, initialize class and call gram_matrix with\n a list of strings which are to be classified:\n\n >>> WK_kernel = WK()\n >>> WK_kernel.gram_matrix([text1, text2,...])\n\n To receive a vectorized input from string, one must first have made a\n gram matrix as above, then:\n\n >>> WK_kernel.vectorize(text)\n'''\n\nclass WK(object):\n\n def __init__(self, docs):\n self.docs = docs\n self.n = len(docs)\n all_text = Counter(word_tokenize(\" \".join(docs)))\n all_words = self.filterFewOccurences(all_text, 3)\n self.df = self.document_frequency(all_words, docs)\n self.unique_words = self.df\n\n def gram_matrix(self, docs):\n self.docs, docs = docs, docs\n self.n, n = len(docs), len(docs)\n # Join all text to find unique words and word frequency\n all_words = Counter(word_tokenize(\" \".join(docs)))\n all_words = self.filterFewOccurences(all_words, 3)\n self.df = self.document_frequency(all_words, docs)\n self.unique_words = [word for word in self.df.keys()]\n # Calculate feature vector and use these to calc gram matrix\n # feature_vectors = self.featureVectors(docs, self.unique_words, self.df, n)\n # m = 
np.zeros((n, n))\n # for idx, (doc1, doc2) in enumerate(itertools.product(feature_vectors, feature_vectors)):\n # m[floor(idx/n)][idx%n] = np.dot(doc1, doc2) / \\\n # ( np.dot(doc1, doc1) * np.dot(doc2, doc2) )**0.5\n # return m\n\n def kernel(self, doc1, doc2):\n v = self.vectorize(doc1)\n w = self.vectorize(doc2)\n normalize = ( np.dot(v, v) * np.dot(w, w) )**0.5\n if normalize != 0:\n return np.dot(v, w) / ( np.dot(v, v) * np.dot(w, w) )**0.5\n return np.dot(v, w)\n\n def vectorize(self, text):\n v = self.featureVectors([text], self.unique_words, self.df, self.n)[0]\n return v\n\n def filterFewOccurences(self, words, limit):\n for key, count in itertools.dropwhile(lambda key_count:\n key_count[1] > limit, words.most_common()):\n del words[key]\n return words\n\n def document_frequency(self, words, docs):\n for word in words.keys():\n words[word] = sum([1 for doc in docs if word in doc])\n return words\n\n def featureVectors(self, docs, unique_words, df, n):\n vectors = []\n # print(\"DF: \", self.df)\n # print(\"********************************\")\n # print(\"UW: \", self.unique_words)\n for doc in docs:\n doc_count = Counter(word_tokenize(doc))\n v = [\n log( 1 + doc_count[word] ) *\n log( n / self.df[word] )\n for word in unique_words.keys()\n if self.df[word] > 0\n ]\n vectors.append(np.asarray(v))\n return vectors\n\n\n# ___ This code if for testing purposes! ___\n\n# Comment out main() when done with testing.\ndef main():\n docs = [\n \"So she was considering in her own mind (as well as she could, for the hot day made her feel very sleepy and stupid), whether the pleasure of making a daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly a White Rabbit with pink eyes ran close by her.\",\n \"The rabbit-hole went straight on like a tunnel for some way, and then dipped suddenly down, so suddenly that Alice had not a moment to think about stopping herself before she found herself falling down a very deep well.\",\n \"There were doors all round the hall, but they were all locked; and when Alice had been all the way down one side and up the other, trying every door, she walked sadly down the middle, wondering how she was ever to get out again.\"\n ]\n text = \"There were doors all round the hall, but they were all locked; and when Alice had been all the way down one side and up the other, trying every door, she walked sadly down the middle, wondering how she was ever to get out again.\"\n WK_kernel = WK(docs)\n print(\"DF: \", WK_kernel.df)\n print(\"UW: \", WK_kernel.unique_words)\n# print('Gram matrix:\\n', WK_kernel.gram_matrix(indata))\n# print('Feature vector:\\n', WK_kernel.vectorize(text))\n print('~*~ End Of Word Kernel ~*~')\n#\nif __name__ == '__main__':\n main()\n","repo_name":"ThonyPrice/TextClassificationKernel","sub_path":"WK.py","file_name":"WK.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5894247992","text":"from modules.CifraCesar import CifraCesar\nfrom PySimpleGUI import PySimpleGUI as sg\n\n# layout\nsg.theme('reddit')\nlayout = [\n [sg.Text('Digite a chave da cifra')],\n [sg.Input(key='key')],\n [sg.Text('Insira a mensagem')],\n [sg.Input(key='message')],\n [sg.Text('Criptografia:')],\n [sg.Input(readonly=True, key='cifra')],\n [sg.Button('Criptografar')],\n [sg.Button('Descriptografar')],\n]\n\n\n# Window\njanela = sg.Window('Verificação de acesso', layout)\n# ler Dados\n\nwhile True:\n eventos, valores = 
janela.read()\n\n if eventos == sg.WIN_CLOSED:\n break\n if eventos == 'Criptografar':\n try:\n key = int(valores['key'])\n crypt = CifraCesar(key)\n messageEncrypted = crypt.encryptMessage(valores['message'])\n janela['cifra'].update(messageEncrypted)\n except:\n janela['cifra'].update('Utilize apenas números para a chave')\n\n if eventos == 'Descriptografar':\n try:\n crypt.decryptMessage()\n janela['cifra'].update(crypt.getMessage())\n except:\n janela['cifra'].update('Utilize apenas números para a chave')\n","repo_name":"guilherme-biancardi/cifra-cesar-python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33851361498","text":"import os\n\ndef get_files(dir):\n\tdir_files = []\n\tsubdirs = [x[0] for x in os.walk(dir)]\n\tfor subdir in subdirs:\n\t\tfiles = os.walk(subdir).next()[2]\n\t\tif (len(files) > 0):\n\t\t\tfor file in files:\n\t\t\t\tdir_files.append(subdir + \"/\" + file)\n\treturn dir_files\n\ndef desktop_sort(source, target_folder, Files):\n if not os.path.exists(target_folder):\n os.makedirs(target_folder)\n folders = []\n for f in Files:\n extension = f.split('.')[-1]\n if not extension in folders and extension != 'desktop':\n folders.append(extension)\n path = target_folder + '/' + str(extension)\n if not os.path.exists(path):\n os.makedirs(path)\n if extension!='desktop':\n des = target_folder + '/' + extension\n os.popen('cp '+ f + ' ' + des)\n\nsource = '/Desktop/source'\ntarget_folder = '/Documents/target_folder'\nFiles = get_files(source)\ndesktop_sort(source, target_folder, Files)\n","repo_name":"vagdevik/InSolved","sub_path":"desktop_sort.py","file_name":"desktop_sort.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33947940575","text":"import bottle\nimport model\n\nDATOTEKA_S_STANJEM = 'stanje.json'\nSKRIVNOST = 'skrivnost'\nDATOTEKA_BESED = 'besede.txt'\nvislice = model.Vislice(DATOTEKA_S_STANJEM, DATOTEKA_BESED)\n\n\n\n@bottle.get(\"/\")\ndef index():\n return bottle.template('index.tpl')\n\n\n\n\n\n@bottle.post(\"/nova_igra/\")\ndef nova_igra():\n id_igre = vislice.nova_igra()\n bottle.response.set_cookie(\"id_igre\", id_igre, secret=SKRIVNOST, path = \"/\")\n bottle.redirect(\"/igra/\")\n\n@bottle.get(\"/igra/\")\ndef pokazi_igro():\n id_igre = bottle.request.get_cookie(\"id_igre\", secret=SKRIVNOST)\n return bottle.template('igra.tpl',\n igra=vislice.igre[id_igre][0],\n id_igre=id_igre,\n poskus = vislice.igre[id_igre][1]\n )\n\n@bottle.post(\"/igra/\")\ndef ugibaj():\n id_igre = bottle.request.get_cookie(\"id_igre\", secret=SKRIVNOST)\n crka_ugiba = bottle.request.forms.getunicode(\"crka\")\n if not crka_ugiba.isalpha() or len(crka_ugiba) > 1:\n bottle.redirect(\"/napaka/\")\n vislice.ugibaj(id_igre, crka_ugiba)\n bottle.redirect(\"/igra/\")\n\n@bottle.get(\"/napaka/\")\ndef napaka():\n return bottle.template('napaka.tpl')\n\n\n\n@bottle.get(\"/img//\")\ndef serve_pictures(picture):\n return bottle.static_file(picture, root='img')\n\n\n\n\n\n\n\n\n\nbottle.run(reloader=True, debug=True)","repo_name":"Matematik411/Vislice","sub_path":"vislice.py","file_name":"vislice.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8947578124","text":"import streamlit as st\nimport pandas as pd\nfrom io import 
StringIO\nimport func as f\nimport frontend as fr\nimport altair as alt\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import (AutoMinorLocator, MultipleLocator)\nimport json\n\n'''\n# Welds\n'''\n\n# Save / Load\n# with st.sidebar.expander(\"Save / Load\"):\n# fr.download_upload_settings()\n\n# Weld Inputs\nwith st.sidebar.expander(\"Weld Inputs\"):\n weld_inputs_mode = st.selectbox('Weld Input Mode', ['Unique values', 'From Excel'],\n key='weld input mode',\n index=0)\n if weld_inputs_mode == 'Unique values':\n list_of_weld_types = ['double fillet', 'single fillet', 'double partial pen', 'single partial pen', 'full pen']\n w_type = st.selectbox('Weld type', list_of_weld_types, index=0, key='weld type', help=None)\n col1, col2 = st.columns(2)\n with col1:\n a = st.number_input(r'$\\small{a (mm)}$', min_value=3, value=3, step=1, key='a', help='''\n Weld size. For fillet welds:\\n\n $\\\\leq7mm$ -> 1x pass\\n\n $\\\\leq9mm$ -> 2x pass\\n\n $\\\\leq11mm$ -> 4x pass\\n\n ''')\n tpl1 = st.number_input('$\\small{t_{pl1} (mm)}$', min_value=0, value=15, step=1, key='tpl1',\n help=\"Thickness of welded plate\")\n beta_w = st.number_input('$\\small{\\\\beta_w}$', min_value=0.00, value=0.90, step=0.01, key='beta_w', help='''\n S235 -> $\\\\beta_w$ = 0.80\\n\n S275 -> $\\\\beta_w$ = 0.85\\n\n S355 -> $\\\\beta_w$ = 0.90\\n\n S420 -> $\\\\beta_w$ = 0.10\\n\n S460 -> $\\\\beta_w$ = 0.10\\n\n ''')\n with col2:\n fu = st.number_input('$\\small{f_u (MPa)}$', min_value=0, value=470, step=1, key='fu',\n help=\"$\\small{f_u}$ of welded plates (470MPa for S355)\")\n tpl2 = st.number_input('$\\small{t_{pl2} (mm)}$', min_value=0, value=15, step=1, key='tpl2', help=\"Thickness of receiver plate\")\n g_M2 = st.number_input('$\\small{\\\\gamma_{M2}}$', min_value=0.00, value=1.25, step=0.01, key='g_m2', help=None)\n weld_inputs = {\n 'w_type': w_type,\n 'a' : a,\n 'tpl1': tpl1,\n 'tpl2': tpl2,\n 'beta_w': beta_w,\n 'fu': fu,\n 'g_M2': g_M2\n }\n elif weld_inputs_mode == 'From Excel':\n weld_csv = st.file_uploader(\"Choose a file\", type=\"csv\")\n\n data = [[0, 10, 10, 'double fillet', 3, 0.9, 470, 1.25],\n [0.5, 10, 10, 'double fillet', 3, 0.9, 470, 1.25],\n [1, 10, 10, 'double fillet', 3, 0.9, 470, 1.25]]\n weld_input_template = pd.DataFrame(data, columns=[\n 'x', 'tpl1', 'tpl2', 'w_type', 'a', 'beta_w', 'fu', 'g_M2'\n ])\n csv = f.convert_df(weld_input_template)\n st.download_button(\n label=\"Download template\",\n data=csv,\n file_name='weld_input.csv',\n mime='text/csv',\n )\n\n if weld_csv is not None:\n weld_inputs = pd.read_csv(weld_csv)\n else:\n weld_inputs = weld_input_template\n\n\n# Forces Input\nwith st.sidebar.expander(\"Forces Input\"):\n forces_input_mode = st.selectbox('Forces Input Mode', ['Manual', 'From Wingraf'],\n key='forces input mode',\n index=0)\n if forces_input_mode == 'From Wingraf':\n # TODO: give instructions for wingraf\n # Mode\n calc_mode = st.selectbox('Calculation Mode', ['Values along weld', 'Max per weld'],\n key='calc mode',\n index=0,\n help='''\n \"Values along weld\" : meant for a few continuous welds,\n like for bridge longitudinal welds\\n\n \"Max per weld\" : meant for many discrete welds, like for bridge stiffeners welds\n ''')\n forces_input = st.file_uploader(\"Choose a file\")\n elif forces_input_mode == 'Manual':\n forces_input = None\n calc_mode = None\n pass\n\n# Calculated values\nweld_inputs['fw_perp'] = 0.9 * weld_inputs['fu'] / weld_inputs['g_M2']\nweld_inputs['fw_vm'] = weld_inputs['fu'] / (weld_inputs['beta_w'] * weld_inputs['g_M2'])\n\n# 
Calculate graph\nwith st.expander(\"Graph\"):\n if forces_input_mode == 'From Wingraf':\n forces = f.get_forces(forces_input_mode, forces_input, calc_mode)\n weld = weld_inputs\n f.calc_graph(forces, weld, calc_mode)\n elif forces_input_mode == 'Manual':\n # Manual input of forces\n init = pd.DataFrame(\n [\n {\"LC\": \"1\", \"N\": 10, \"M\": 10, \"Vt\": 10, \"Vl\": 10}\n ]\n )\n forces_man = st.experimental_data_editor(init, width=None, height=None, use_container_width=True,\n num_rows=\"dynamic\", disabled=False, key='manual forces')\n df_man = forces_man\n df_man['d'] = forces_man['LC']\n df_man['w_type'] = weld_inputs['w_type']\n df_man['tpl1'] = weld_inputs['tpl1']\n df_man['tpl2'] = weld_inputs['tpl2']\n df_man['a'] = weld_inputs['a']\n df_man['beta_w'] = weld_inputs['beta_w']\n df_man['fu'] = weld_inputs['fu']\n df_man['g_M2'] = weld_inputs['g_M2']\n df_man['fw_vm'] = weld_inputs['fw_vm']\n df_man['fw_perp'] = weld_inputs['fw_perp']\n calc_cut = f.calculate(df_man)\n st.pyplot(fig=f.make_plot_man(calc_cut), clear_figure=None, use_container_width=True)\n\n# Calculated Values\nif weld_inputs_mode == 'Unique values':\n with st.expander(\"Calculated values\"):\n col1, col2, col3, col4 = st.columns(4)\n with col1:\n string = '\\small{' + 'f_{w,\\perp}' + f'= {\"{:.2f}\".format( weld_inputs[\"fw_perp\"] )}' + 'MPa' + '}'\n st.latex(string)\n with col2:\n string = '\\small{' + 'f_{w,vm}' + f'= {\"{:.2f}\".format(weld_inputs[\"fw_vm\"])}' + 'MPa' + '}'\n st.latex(string)\n\n\n","repo_name":"NEY-DPI/Welds","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14607327364","text":"# Made by: Bram Nijhoff\n# Date: 2/12/2022\n\n#!/usr/bin/env python\n# ~ from AIGB_setup import *\nfrom Dashboard import send_sensor_data\nimport Temp_hum_sens\n# ~ from LCD_screen import visualise_screen\nimport time\nfrom co2 import get_co2\nimport AIGB_display\nimport cond_ph\n\n\"\"\"\nWelcome to the Automate Indoor Growing Box (AIGB) control program.\nThe main file is used to control incoming signals of sensors \nand buttons, processing the data by visualising it on the display, \nand sending outgoing signals to the motors and LED-strips.\n\n\nGoal: Automate the process of growing plants.\n\nResult: \tA fully growed herb or lettuce plant, or germinated sprouds \n\t\t\tof plants like vegetables and fruits.\n\"\"\"\n\n\"\"\"\nHow-to:\nFollow the guide on the following website:\nhttps://www.waveshare.com/wiki/2inch_LCD_Module\n\"\"\"\n\n# Dashboard channels\n\ntemp_channel = 10\nhum_channel = 20\nwater_tank_channel = 21\nco2_channel = 30\nph_channel = 40\nph_value_channel = 41\nled_intensity_channel = 50\nled_switch_channel = 51\nled_slide_channel = 52\nled_red_slide_channel = 53\nled_green_slide_channel = 54\nled_blue_slide_channel = 55\nconductivity_channel = 60\n\ni = 0\ntimestamp = 0\n\nif __name__ == \"__main__\":\n\twhile True:\n\t\ttemp, hum, press, alt = Temp_hum_sens.temp_hum_run()\n\t\tco2 = get_co2()\n\t\tph_ec_temp, ph, ec = cond_ph.read_ph_ec() #ec en ph omgewisseld hardwarematig\n\t\t# \t\tprint(temp, hum, press, alt)\n\t\t# visualise_screen(temp, hum, press, alt)\n\t\tsend_sensor_data(temp_channel, temp)\n\t\tsend_sensor_data(hum_channel, hum)\n\t\tsend_sensor_data(co2_channel, co2)\n\t\tsend_sensor_data(conductivity_channel, ec)\n\t\tsend_sensor_data(ph_channel, ph)\n\t\tAIGB_display.view_display(temp, hum, co2, colour=0, lux=0, ph=0)\n\t\t# ~ if (time.time() 
> timestamp + 10):\n\t\t\n\t\t\t# ~ timestamp = time.time()\n\t\t\t# ~ i = i+1\n\tAIGB_display.display_close()\t\n","repo_name":"bwni/Project_AIGB","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"6533773968","text":"from collections import deque\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n\n que = deque()\n que.append((root, 0))\n mem = dict()\n\n while que:\n n, d = que.popleft()\n\n mem[d] = mem.get(d, []) + [n.val]\n\n if n.left:\n que.append((n.left, d + 1))\n\n if n.right:\n que.append((n.right, d + 1))\n\n return list(mem.values())","repo_name":"meowpunch/meowrithm","sub_path":"python/leetcode/Binary Tree Level Order Traversal Solution.py","file_name":"Binary Tree Level Order Traversal Solution.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20094265810","text":"import time\nfrom datetime import datetime\n\nimport discord\nfrom discord import Webhook, RequestsWebhookAdapter\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\n\n\nclass serverManager:\n def __init__(self):\n self.serversBefore = {}\n self.updates = 0\n self.avar = \"https://cdn.discordapp.com/app-icons/973942027563712632/3043f3b6d99b2b737ef7216e8c14c106.png?size=256\"\n\n def addLootrunner(self, lootrunner):\n if self.serversBefore.__contains__(lootrunner.server):\n if lootrunner.disc:\n for player in self.serversBefore[lootrunner.server][\"players\"]:\n if player.disc:\n if player.name == lootrunner.name and player.predict == lootrunner.predict:\n return\n if lootrunner.timeStamp < self.serversBefore[lootrunner.server][\"uptime\"]:\n return\n self.serversBefore[lootrunner.server][\"players\"].append(lootrunner)\n self.serversBefore[lootrunner.server][\"lastLooted\"] = lootrunner.timeStamp\n\n def updateServers(self, wynnApi):\n serversOnline = wynnApi.getServerUptime()\n if len(serversOnline) > 0:\n for serverToCheck in serversOnline:\n if not self.serversBefore.__contains__(serverToCheck):\n self.serversBefore[serverToCheck] = {\n \"uptime\": serversOnline[serverToCheck],\n \"players\": [],\n \"lastLooted\": serversOnline[serverToCheck]\n }\n elif serversOnline[serverToCheck] != self.serversBefore[serverToCheck][\"uptime\"]:\n self.serversBefore[serverToCheck][\"uptime\"] = self.serversBefore[serverToCheck][\"lastLooted\"] = serversOnline[serverToCheck]\n\n servers = self.serversBefore.copy().keys()\n for serverToCheck in servers:\n if not serversOnline.__contains__(serverToCheck):\n del self.serversBefore[serverToCheck]\n\n for servers in self.serversBefore:\n i = 0\n while i < len(self.serversBefore[servers][\"players\"]):\n if (round(time.time() * 1000) - self.serversBefore[servers][\"players\"][i].timeStamp) / 1000 / 60 > 60*4+15:\n self.serversBefore[servers][\"players\"].pop(i)\n i -= 1\n i += 1\n\n def exportServers(self, webhook, idReport):\n if webhook == \"\":\n return\n serversSorted = self.getSortedServers()\n self.updates += 1\n webhook = Webhook.from_url(webhook, adapter=RequestsWebhookAdapter()) # Initializing webhook\n embed = discord.Embed(\n title=f\"Starting the report n^\" + str(self.updates),\n 
color=0x40a0c6)\n embed.timestamp = datetime.utcnow()\n embed.set_footer(text=\"Id: \" + idReport)\n webhook.send(username=\"wynnStalker\", avatar_url=self.avar, embed=embed)\n time.sleep(1)\n\n for servers in serversSorted:\n embed = discord.Embed(\n title=f\"World: `\"+servers['server']+\"`\",\n description=f\"uptime: last looted: Lootrunners: {len(servers['players'])}\\n\",\n color=0x40a0c6)\n listPlayers = \"\"\n listPrediction = \"\"\n for player in servers[\"players\"]:\n if not player.disc:\n listPlayers += f\"{player.name} ({player.blocksNow}-{player.blocksTotal}) {player.mins}mins {player.nameClass} Low: {str(player.low)}\\n\"\n else:\n listPlayers += f\"{player.name} low: False\\n\"\n if player.predict != -1:\n listPrediction += f\"{player.getPredictionName()} \\n\"\n listPrediction.replace(\"*\", \"\\*\")\n if listPlayers != \"\":\n embed.add_field(name=\"Players: \",\n value=listPlayers)\n if listPrediction != \"\":\n embed.add_field(name=\"Prediction spots\",\n value=listPrediction,\n inline=False)\n try:\n webhook.send(username=\"wynnStalker\", avatar_url=self.avar, embed=embed)\n time.sleep(1)\n except discord.errors.HTTPException:\n pass\n\n nowTime = datetime.now()\n stageMinute = int(nowTime.minute / 15)\n nextReportTimeStamp = datetime.timestamp(\n datetime(nowTime.year, nowTime.month,\n nowTime.day + (1 if nowTime.hour == 23 and stageMinute == 3 else 0),\n nowTime.hour + (\n -23 if stageMinute == 3 and nowTime.hour == 23 else 1 if stageMinute == 3 else 0),\n 0 if stageMinute == 3 else 30 if stageMinute == 2 else 30 if stageMinute == 1 else 15)\n )\n\n embed = discord.Embed(\n title=f\"Ended the report n^\" + str(self.updates),\n description=f\"Next report: \",\n color=0x40a0c6)\n embed.timestamp = datetime.utcnow()\n webhook.send(username=\"wynnStalker\", avatar_url=self.avar, embed=embed)\n webhook.send(\"<@&1000312359224623155>\", username=\"wynnStalker\", avatar_url=self.avar)\n\n '''\n - Bottom servers with less then 1.2 hours\n - Middle servers sorted by lootrunners found\n - Top with no lootrunners\n '''\n def getSortedServers(self):\n # Output and temp variable to mess with\n serverTemp = {}\n noLootrunners = []\n withLootunners = []\n less1Hours = []\n # Set lastLooted in every servers\n for server in self.serversBefore:\n serverTemp[server] = {}\n serverTemp[server][\"lastLooted\"] = self.serversBefore[server][\"lastLooted\"]\n serverTemp[server][\"players\"] = self.serversBefore[server][\"players\"]\n serverTemp[server][\"uptime\"] = self.serversBefore[server][\"uptime\"]\n serverTemp[server][\"server\"] = server\n if (time.time() - serverTemp[server][\"uptime\"]/1000) / 60 > 60:\n if len(self.serversBefore[server][\"players\"]) > 0:\n withLootunners.append(serverTemp[server])\n else:\n noLootrunners.append(serverTemp[server])\n else:\n less1Hours.append(serverTemp[server])\n # Now sort\n withLootunners = sorted(withLootunners, key=lambda d: d['lastLooted'])\n noLootrunners = sorted(noLootrunners, key=lambda d: d['players'])\n output = noLootrunners\n output.extend(withLootunners)\n output.extend(less1Hours)\n\n return output\n","repo_name":"TechAle/wynnUtilities","sub_path":"stalker/classes/serverClass.py","file_name":"serverClass.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"28815538839","text":"arrayIndex = [1, 52, 35, 6, 72, 7, 3, 19, 32, 54, 78, 95, 97]\r\n\r\n\r\ndef evenIndexMultiplier(arr):\r\n result = []\r\n for index, number in 
enumerate(arr):\r\n if index % 2 == 0:\r\n result.append(number * 10)\r\n else:\r\n result.append(number)\r\n return result\r\n\r\n\r\nevenIndexes = evenIndexMultiplier(arrayIndex)\r\nprint(evenIndexes)\r\n","repo_name":"singharaj-usai/Python-DSAs","sub_path":"evenIndexMultiplier.py","file_name":"evenIndexMultiplier.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26375951118","text":"import os\nimport json \nfrom tqdm import tqdm\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.bpr import BPR\nfrom utilities.dataset.bpr_dataloader import Scheduler\nfrom utilities.dataset.bpr_dataset import YelpDataset as Dataset\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom ndcg import ndcg\nimport torch.optim as optim\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nimport ray\nfrom ray import tune\nfrom ray.tune import CLIReporter\nfrom ray.tune.schedulers import ASHAScheduler\nfrom ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback\n\nos.environ['RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE'] = '1'\n\nclass ModelTrainer( pl.LightningModule ):\n def __init__( self, config : dict, dataset=None ):\n super().__init__()\n self.dataset = ray.get( dataset )\n self.val_interact, self.val_test_y = self.dataset.val_interact()\n self.test_interact, _ = self.dataset.test_interact()\n self.n_users, self.n_items = self.dataset.n_users, self.dataset.n_items\n self.config = config\n\n self.model = BPR( self.n_users, self.n_items, config['num_latent'] )\n\n def train_dataloader( self ):\n return DataLoader( self.dataset, batch_size=self.config['batch_size'] )\n\n def val_dataloader( self ):\n return DataLoader( TensorDataset( torch.arange( 1 ).reshape( -1, 1 ) ), batch_size=1 )\n\n def test_dataloader( self ):\n return DataLoader( TensorDataset( torch.arange( 1 ).reshape( -1, 1 ) ), batch_size=1 )\n\n def evaluate( self, true_rating, predict_rating, hr_k, recall_k, ndcg_k ):\n user_mask = torch.sum( true_rating, dim=-1 ) > 0\n predict_rating = predict_rating[ user_mask ]\n true_rating = true_rating[ user_mask ]\n\n _, top_k_indices = torch.topk( predict_rating, k=hr_k, dim=1, largest=True )\n hr_score = torch.mean( ( torch.sum( torch.gather( true_rating, dim=1, index=top_k_indices ), dim=-1 ) > 0 ).to( torch.float ) )\n\n _, top_k_indices = torch.topk( predict_rating, k=recall_k, dim=1, largest=True )\n\n recall_score = torch.mean( \n torch.sum( torch.gather( true_rating, dim=1, index = top_k_indices ), dim=1 ) /\n torch.minimum( torch.sum( true_rating, dim=1 ), torch.tensor( [ recall_k ] ) )\n )\n\n ndcg_score = torch.mean( ndcg( predict_rating, true_rating, [ ndcg_k ] ) )\n\n return hr_score.item(), recall_score.item(), ndcg_score.item()\n\n def training_step( self, batch, batch_idx ):\n pos_interact, neg_interact = batch\n batch_size = pos_interact.shape[0]\n\n input_idx = torch.cat( ( pos_interact, neg_interact ), dim=0 )\n res = self.model( input_idx[:,0], input_idx[:,1] )\n pos_res_out, neg_res_out = torch.split( res, split_size_or_sections=batch_size, dim=0 )\n\n loss = - torch.sum( torch.log( torch.sigmoid( pos_res_out - neg_res_out ) ) )\n\n #self.log_dict({ 'loss' : loss.item() })\n\n return loss\n\n def validation_step( self, batch, batch_idx ):\n pass\n\n def on_validation_epoch_end( self ):\n y_pred = self.model( None, None, is_test=True ).cpu()\n 
y_pred = torch.gather( y_pred, 1, self.val_interact )\n\n hr_score, recall_score, ndcg_score = self.evaluate( self.val_test_y, y_pred, self.config['hr_k'], self.config['recall_k'], self.config['ndcg_k'] )\n\n metric = {\n 'hr' : hr_score,\n 'recall' : recall_score,\n 'ndcg' : ndcg_score\n }\n\n self.log_dict({\n 'hr' : hr_score,\n 'recall' : recall_score,\n 'ndcg' : ndcg_score\n })\n\n return metric\n\n\n def test_step( self, batch, batch_idx ):\n y_pred = self.model( None, None, is_test=True ).cpu()\n y_pred = torch.gather( y_pred, 1, self.test_interact )\n\n hr_score, recall_score, ndcg_score = self.evaluate( self.val_test_y, y_pred, self.config['hr_k'], self.config['recall_k'], self.config['ndcg_k'] )\n\n self.log_dict({\n 'hr' : hr_score,\n 'recall' : recall_score,\n 'ndcg' : ndcg_score\n })\n\n def configure_optimizers( self ):\n optimizer = optim.SGD( self.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'] )\n return optimizer\n\ndef train_model( config, checkpoint_dir=None, dataset=None ):\n trainer = pl.Trainer(\n gpus=1,\n max_epochs=64, \n num_sanity_val_steps=0,\n callbacks=[\n Scheduler(),\n TuneReportCheckpointCallback( {\n 'hr' : 'hr',\n 'recall' : 'recall',\n 'ndcg' : 'ndcg'\n },\n on='validation_end',\n filename='checkpoint'\n ),\n EarlyStopping(monitor=\"ndcg\", patience=10, mode=\"max\", min_delta=1e-4)\n ]\n )\n\n model = ModelTrainer( config, dataset )\n\n trainer.fit( model )\n\ndef test_model( config : dict, checkpoint_dir : str, dataset ):\n model = ModelTrainer.load_from_checkpoint( config=config, checkpoint_path=os.path.join( checkpoint_dir, 'checkpoint' ), dataset=dataset )\n\n trainer = pl.Trainer()\n result = trainer.test( model )\n\n save_json = {\n 'checkpoint_dir' : str( checkpoint_dir ),\n 'result' : result[0]\n }\n\n with open('best_model_result.json','w') as f:\n json.dump( save_json, f )\n\ndef tune_model():\n ray.init( num_cpus=8, num_gpus=8 )\n dataset = ray.put( Dataset( './yelp_dataset/', '1' ) )\n config = {\n # grid search parameter\n 'num_latent' : 64,\n 'weight_decay' : tune.grid_search([ 1e-4, 1e-3, 1e-2, 1e-1 ]),\n 'batch_size' : tune.grid_search([ 128, 256, 512, 1024 ]),\n 'lr' : tune.grid_search([ 1e-4, 1e-3, 1e-2, 1e-1 ]),\n\n 'hr_k' : 1,\n 'recall_k' : 10,\n 'ndcg_k' : 10\n }\n\n scheduler = ASHAScheduler(\n grace_period=10,\n reduction_factor=2\n )\n\n analysis = tune.run( \n partial( train_model, dataset=dataset ),\n resources_per_trial={ 'cpu' : 1, 'gpu' : 1 },\n metric='ndcg',\n mode='max',\n num_samples=1,\n verbose=1,\n config=config,\n scheduler=scheduler,\n name=f'yelp_bpr_1',\n local_dir=\".\",\n keep_checkpoints_num=1, \n checkpoint_score_attr='ndcg'\n )\n\n test_model( analysis.best_config, analysis.best_checkpoint, dataset )\n\nif __name__ == '__main__':\n tune_model()\n","repo_name":"NutyHw/MixtureGaussianRec","sub_path":"train_bpr.py","file_name":"train_bpr.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39656588533","text":"'''\n将标注的图形进行修改即可\n'''\nimport os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\ndef xml_to_csv(path):\n xml_list = []\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n 
int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n return xml_df\ndef main():\n # image_path = os.path.join(os.getcwd(), 'annotations') # 这需要给出图像(xml格式)的存储路径\n image_path = 'D:\\\\program file\\\\python program1\\\\untitled\\\\objection detection\\\\payment_detection\\\\annotation' #os.getcwd()\n xml_df = xml_to_csv(image_path)\n xml_df.to_csv('payment_labels.csv', index=None)\n print(\"OK\")\nif __name__ == '__main__' :\n main()","repo_name":"lijibo123/testprogram","sub_path":"xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"38609325985","text":"class node:\n def __init__(self,data):\n self.data = data \n self.next = None \n\nclass LL : \n def __init__(self): \n self.head = None \n \n def appendstart(self,data): \n newnode = node(data)\n newnode.next = self.head \n self.head = newnode \n\n def appendmid(self,data,data1):\n prevnode = node(data)\n newnode = node(data1)\n currnode = self.head \n count = 0\n while currnode:\n if currnode.data == prevnode.data:\n prevnode = currnode\n count = 1\n break \n else: \n count = 0 \n\n currnode = currnode.next\n if count == 0 :\n print(\"not in list \")\n return\n newnode.next = prevnode.next \n prevnode.next = newnode\n \n def appendend(self,data):\n newnode= node(data)\n if self.head == None :\n self.head= newnode \n return\n lastnode = self.head \n while lastnode.next :\n lastnode = lastnode.next \n lastnode.next = newnode \n def print(self):\n currentnode = self.head \n while currentnode:\n print(currentnode.data)\n currentnode = currentnode.next\n\n def deletestart(self):\n currnode = self.head\n self.head = currnode.next \n \n def deleteend(self):\n currnode = self.head\n while currnode:\n if currnode.next is not None :\n prevnode = currnode \n currnode = currnode.next\n prevnode.next = None\n\n\n def deletemid(self,data) : \n tofindnode = node(data)\n currnode = self.head\n while currnode:\n tofindnode = node(data)\n if currnode.data == tofindnode.data :\n break\n else : \n xnode = currnode\n currnode = currnode.next \n xnode.next = currnode.next \n currnode.next = None\n\n def swapnodes(self,data,data1) : \n if data == data1 :\n return \n prev1 = None \n curr1 = self.head\n while curr1 and curr1.data != data :\n prev1 = curr1 \n curr1 = curr1.next \n prev2 = None \n curr2 = self.head \n while curr2 and curr2.data != data1 :\n prev2 = curr2 \n curr2 = curr2.next \n # print(prev1.data, curr1.data , prev2.data, curr2.data)\n temp1 = curr1 \n temp2 = curr2 \n curr2.next = curr1.next \n prev1.next = curr2 \n prev2.next = temp1\n temp1.next = temp2.next\n # print(temp.data)\n\n\n\nll = LL()\nll.appendend(1)\nll.appendend(2)\nll.appendend(3)\nll.appendend(10)\nll.appendend(30)\nll.print()\n\nll.swapnodes(2,10)\nll.print()\n","repo_name":"dubblin27/algorithms","sub_path":"Algo_DS/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18894410138","text":"import os\nfrom json import dumps\nfrom queue import Queue\n\nimport boto3\nfrom src.domain_model import Event, StatusDetails\nfrom src.event_status_manager import EventStatusManager\nfrom src.logger import Logger\nfrom src.operator import Operator\nfrom 
src.utils import get_current_utc_iso, dt_to_iso\n\n\nclass EventHandler(Operator):\n def __init__(self, logger: Logger, source_queue: Queue = None):\n self.esm = EventStatusManager()\n self.kinesis_client = boto3.client(\"kinesis\", region_name=\"us-east-1\", endpoint_url=os.environ.get(\"ENDPOINT_URL\"))\n super(EventHandler, self).__init__(\n name=__name__,\n logger=logger,\n source_queue=source_queue\n )\n\n def process(self):\n while True:\n event = self.get_source()\n if event is not None:\n self.logger.debug(\"Get event\")\n status = \"ACTIVE\" if event.winning_team_abbrev is None else \"INACTIVE\"\n status_details = StatusDetails(\n status=status,\n home_team_abbrev=event.home_team_abbrev,\n away_team_abbrev=event.away_team_abbrev\n )\n cached_status_details = self.esm.get(event.event_id)\n\n # NEW EVENT to exchange\n if not cached_status_details.status and status_details.status == \"ACTIVE\":\n self.handle_new_event(event=event, status_details=status_details)\n # EVENT to be removed from exchange and send winner to outgoing kinesis\n elif cached_status_details.status == \"ACTIVE\" and status_details.status == \"INACTIVE\":\n self.handle_inactive_event(event=event, status_details=status_details)\n\n def handle_new_event(self, event: Event, status_details: StatusDetails):\n # update cache\n self.esm.set(event_id=event.event_id, status_details=status_details)\n\n # tell outgoing stream that new event is available\n payload = {\n \"action\": \"ACTIVE_EVENT\",\n \"timestamp\": get_current_utc_iso(),\n \"value\": {\n \"event_id\": event.event_id,\n \"sport\": event.sport,\n \"home_team_abbrev\": event.home_team_abbrev,\n \"away_team_abbrev\": event.away_team_abbrev,\n \"home_team_name\": event.home_team_name,\n \"away_team_name\": event.away_team_name,\n \"home_team_score\": event.home_team_score,\n \"away_team_score\": event.away_team_score,\n \"winning_team_abbrev\": event.winning_team_abbrev,\n \"losing_team_abbrev\": event.losing_team_abbrev,\n \"date\": dt_to_iso(event.date)\n }\n }\n self.logger.debug(f\"New event: {payload}\")\n try:\n self.kinesis_client.put_record(\n StreamName=os.environ.get(\"OUTGOING_EVENTS_KINESIS_STREAM_NAME\"),\n Data=dumps(payload),\n PartitionKey=event.event_id\n )\n except Exception as e:\n self.esm.delete(event_id=event.event_id)\n raise e\n\n def handle_inactive_event(self, event: Event, status_details: StatusDetails):\n # update cache\n self.esm.set(event_id=event.event_id, status_details=status_details)\n\n # poison pill the event for the incoming stream so that it cancels bets for that event\n payload = {\n \"action\": \"INACTIVE_EVENT\",\n \"timestamp\": get_current_utc_iso(),\n \"value\": {\n \"event_id\": event.event_id,\n \"sport\": event.sport,\n \"home_team_abbrev\": event.home_team_abbrev,\n \"away_team_abbrev\": event.away_team_abbrev,\n \"home_team_name\": event.home_team_name,\n \"away_team_name\": event.away_team_name,\n \"home_team_score\": event.home_team_score,\n \"away_team_score\": event.away_team_score,\n \"winning_team_abbrev\": event.winning_team_abbrev,\n \"losing_team_abbrev\": event.losing_team_abbrev,\n \"date\": dt_to_iso(event.date)\n }\n }\n self.logger.debug(f\"Inactive event: {payload}\")\n try:\n self.kinesis_client.put_record(\n StreamName=os.environ.get(\"INCOMING_BETS_KINESIS_STREAM_NAME\"),\n Data=dumps(payload),\n PartitionKey=event.event_id\n )\n self.kinesis_client.put_record(\n StreamName=os.environ.get(\"OUTGOING_EVENTS_KINESIS_STREAM_NAME\"),\n Data=dumps(payload),\n PartitionKey=event.event_id\n )\n except 
Exception as e:\n # set back to active to revert update to cache\n status_details.status = \"ACTIVE\"\n self.esm.set(event_id=event.event_id, status_details=status_details)\n raise e\n","repo_name":"bexh/bexh-event-connector-aws-ecs","sub_path":"src/event_handler.py","file_name":"event_handler.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11952405060","text":"import numpy as np\nimport scipy.stats\nfrom matplotlib import pyplot as pplt\n\nimport config as c\n\nreqd_ymds = []; reqd_msms = []\npp_names = \"m! y!\" # indeces 0 to 1\npp_ix, pp_values = c.set_pp(pp_names) # Set up config info\nfor n,ix in enumerate(pp_ix):\n if ix == 0: # m (50xx) msm_ids\n reqd_msms = c.check_msm_ids(pp_values[n])\n elif ix == 1: # y (yyyymmdd) dates\n reqd_ymds = c.check_ymds(pp_values[n])\n else:\n exit()\nif len(reqd_ymds) == 0:\n reqd_ymds = [c.start_ymd]\nelif len(reqd_ymds) > 1:\n print(\"More than one ymd specified!\"); exit()\nif len(reqd_msms) == 0:\n reqd_msms = [c.msm_id]\n print(\"reqd_ymds %s, reqd_msms %s\" % (reqd_ymds, reqd_msms))\n\ndef counts_array(msm_id):\n nfn = c.nodes_fn(msm_id)\n print(\"nodes_fn = %s\" % nfn)\n mx_depth = 0\n counts = np.zeros(61)\n nf = open(nfn, \"r\")\n for line in nf:\n la = line.split()\n depth = int(la[2])\n if depth > mx_depth:\n mx_depth = depth\n counts[depth] += 1\n print(\"%s: mx_depth = %d\" % (msm_id, mx_depth))\n\n tot_depths = np.sum(counts)\n yp = 100*counts/tot_depths # Convert to percentages\n #print(yp.astype(float)) #print(yp.astype(int))\n\n xa = np.ones(mx_depth+1); xa[0] = 0\n x = np.cumsum(xa) # x values\n\n return x[:mx_depth+1], np.cumsum(yp)[:mx_depth+1]\n\n'''\ndef h_subplot(ax0,ax1, x,y, msm_id, pc_97):\n ax0.set_xlabel(\"in_count\", fontsize=10)\n ax0.set_ylabel(\"% Nodes\", fontsize=10)\n ax0.set_xlim([-1.5,32])\n ax0.set_xticks([1,3,6,9,12,15,18,21,24,27,30])\n ax1.set_ylim([22,105])\n ax0.set_yticks([30,50,70,90],minor=True)\n ax0.grid(True, which='both')\n ax0.text(21,42, \"msm_id %d\" % msm_id, fontsize=12)\n ax0.text(13,32, \"97%% with in_count <= %2d\" % pc_97, fontsize=12)\n ax0.plot(x[:31],y[:31])\n\n ax1.set_xlabel(\"in_count\", fontsize=10)\n ax1.set_ylabel(\"% Nodes\", fontsize=10)\n ax1.set_xscale('log')\n ax1.set_yscale('linear') # But both axes drawn log-scaled !!\n ax1.set_xlim([21,38000])\n ax1.set_xticks([30, 100, 300, 1000, 3000, 10000, 30000])\n ax1.set_xticklabels(['30', '100', '300', '1000', '3000', '10k', '30k'])\n ax1.set_ylim([96.65,100.2])\n ax1.set_yticks([97.5,98.5,99.5],minor=True)\n ax1.grid(True, which='both')\n ax1.text(4300, 97.05, \"msm_id %d\\n%5d Nodes\" \\\n % (msm_id, len(y)-1), fontsize=12)\n ax1.plot(x[26:],y[26:])\n #ax1.semilogy(x[26:],y[26:]) # This doesn't work!!\n'''\ndef d_subplot(ax, x,y, msm_id):\n ax.set_xlabel(\"depth\", fontsize=10)\n ax.set_ylabel(\"% Nodes\", fontsize=10)\n #ax.set_yscale('log')\n ax.set_xlim([-1,42]) #len(x)+1])\n ax.set_xticks(np.arange(0,45,5))\n ax.set_ylim([-5,105])\n ax.set_yticks([0,30,60,90])\n ax.grid(True, which='both')\n ax.text(30,15, \"msm_id %d\" % msm_id, fontsize=12)\n #ax.text(13,32, \"97%% with in_count <= %2d\" % pc_97, fontsize=12)\n ax.plot(x,y)\n\nn_msms = len(reqd_msms)\n'''\n# W 2,1 horizontal (adjust settings are fractions of window!)\nfig, axes = pplt.subplots(n_msms,2, figsize=(11,2.5*n_msms), squeeze=False)\n#print(\"axes >%s< %s\" % (axes, np.shape(axes)))\npplt.subplots_adjust(left=0.08, bottom=0.06, right=0.95, top=0.93,\n 
wspace=0.24, hspace=0.66) # 0.67\n'''\n\nfig, axes = pplt.subplots(n_msms,1, figsize=(8,1+3*n_msms), squeeze=False)\npplt.subplots_adjust(left=0.12, bottom=0.11, right=0.95, top=0.94,\n wspace=0.20, hspace=0.80) # 0.67\nprint(\"axes >%s< %s\" % (axes, np.shape(axes)))\n\ndt = c.date_from_ymd(reqd_ymds[0], c.start_hhmm)\nfig.suptitle(\"Node depth cumulative %% distributions: %s\" % \\\n dt.strftime(\"%a %d %b %Y (UTC)\"), \\\n fontsize=14,horizontalalignment='center')\n\np_xlim = 41\nfor n,msm_id in enumerate(reqd_msms):\n x,y = counts_array(msm_id)\n #print(\"len(x)=%d, len(y)=%d, x %s, y %s\" % (len(x), len(y), x, y))\n d_subplot(axes[n,0], x[:p_xlim],y[:p_xlim], msm_id)\n\n#pplt.show()\nplot_fn = \"depth_pc.svg\"\npplt.savefig(plot_fn)\n \ndef run_cmd(cmd):\n output, rc = c.run_bash_commands(cmd)\n if rc != 0:\n print(output)\n return rc\n\nrt = run_cmd(\"python3 publishing/tweak-svg-headers.py %s\" % plot_fn)\nif rt != 0:\n print(\">>>>> tweak run failed!\"); exit()\n","repo_name":"nevil-brownlee/Atlas-graphs","sub_path":"publishing/depth-distribution.py","file_name":"depth-distribution.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34535753995","text":"from django import forms\nfrom wiki.editors.base import BaseEditor\n\nclass ModernWidget(forms.Widget):\n template_name = 'wiki/forms/modern.html'\n\n def __init__(self, attrs=None):\n default_attrs = {\n 'class': 'modern',\n 'rows': '10',\n 'cols': '40',\n }\n if attrs:\n default_attrs.update(attrs)\n super().__init__(default_attrs)\n\nclass ModernAdminWidget(ModernWidget):\n template_name = 'wiki/forms/modern-admin.html'\n\nclass Modern(BaseEditor):\n editor_id = 'modern'\n\n def get_admin_widget(self, instance=None):\n return ModernAdminWidget()\n\n def get_widget(self, instance=None):\n return ModernWidget()\n\n class AdminMedia:\n css = {\n 'all': (\n )\n }\n js = (\n )\n\n class Media:\n css = {\n 'all': (\n )\n }\n js = (\n )\n","repo_name":"strafrecht/app_legacy","sub_path":"editors/modern.py","file_name":"modern.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24268882006","text":"from django.shortcuts import render, redirect,HttpResponse,reverse\nfrom django.contrib.auth.decorators import login_required # 프로필 창 로그인 필요\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Profile\nfrom .forms import RegisterProfileForm, ObjectGoalNumberForm\nfrom django import forms\nfrom .forms import CreateUserForm, CustomAuthenticationForm # 회원가입\nfrom django.contrib.auth.forms import AuthenticationForm # 로그인\nfrom django.contrib.auth import login as auth_login,update_session_auth_hash # 로그인 및 비번 변경\nfrom photo.models import Photo\nfrom django.contrib.auth import logout as auth_logout # 로그아웃\nfrom django.contrib import messages\nfrom django.contrib.auth.views import PasswordResetView, PasswordResetConfirmView, PasswordChangeView\nfrom django.urls import reverse_lazy\ndef login(request):\n if request.method == 'POST':\n login_form = CustomAuthenticationForm(request, request.POST)\n if login_form.is_valid():\n auth_login(request, login_form.get_user())\n # else:\n # # raise forms.ValidationError('로그인이 제대로 되지 않았습니다.')\n # # return HttpResponse(\"로그인이 제대로 되지 않았습니다\")\n # messages.error(request, 'Invalid login credentials')\n return redirect('main:first')\n else:\n 
return render(request, 'create_profile/login.html', {'error': '아이디 또는 비밀번호가 올바르지 않습니다'})\n elif request.method == 'GET':\n login_form = CustomAuthenticationForm()\n return render(request, 'create_profile/login.html', {'login_form': login_form})\n\n\ndef login_success(request):\n try:\n profile = request.user.user_profile\n return redirect('main:first')\n except Exception:\n return redirect('create_profile:register')\n\ndef need_login(request):\n return render(request,'create_profile/need_login.html')\n\ndef need_profile(request):\n return render(request,'create_profile/need_profile.html')\n\ndef signup(request):\n if request.method == \"POST\":\n user_form = CreateUserForm(request.POST)\n if user_form.is_valid():\n user = user_form.save()\n auth_login(request, user, backend='django.contrib.auth.backends.ModelBackend') # 소셜로그인이 아닌 로그인 처리의 백엔드 지정\n return redirect('create_profile:register')\n\n elif request.method == \"GET\":\n user_form = CreateUserForm()\n\n return render(request, 'create_profile/signup.html', {\n 'user_form': user_form,\n })\n\n\n@login_required\ndef goal_get(request):\n import datetime\n user = request.user\n profile = request.user.user_profile\n goal_form = ObjectGoalNumberForm(request.POST)\n if request.method == \"POST\":\n if goal_form.is_valid():\n goal_count = request.POST['goal_count']\n profile.goal_count = goal_count\n profile.save()\n return redirect('main:first')\n else:\n goal_form = ObjectGoalNumberForm()\n return render(request, 'create_profile/goal_get.html', {'goal_form': goal_form,'profile':profile})\n\n\ndef logout(request):\n auth_logout(request)\n return redirect('main:first')\n\n# @login_required\n# def profile_look(request, pk):\n# user = User.objects.get(id=pk)\n# try:\n# # profile = request.user.user_profile(id=pk)\n# profile = user.user_profile\n# ctx = {\n# 'profile': profile,\n# }\n# return render(request, 'create_profile/profile.html', ctx)\n# except Exception:\n# if user == request.user:\n# return redirect('create_profile:register')\n# else:\n# return render(request,'http404.html')\n\n@login_required\ndef profile_look(request, pk):\n user = User.objects.get(id=pk)\n profile = user.user_profile\n user_registered_photos = Photo.objects.filter(author=profile)\n ctx = {\n 'profile': profile,\n 'user_registered_photos':user_registered_photos,\n }\n return render(request, 'create_profile/profile.html', ctx)\n\n\ndef show_each_profile(request,user_id):\n user = User.objects.get(id=user_id)\n profile = user.user_profile\n user_registered_photos = Photo.objects.filter(author=profile)\n ctx = {\n 'profile': profile,\n 'user_registered_photos': user_registered_photos,\n }\n return render(request,'create_profile/show_each_profile.html',ctx)\n\n@login_required\ndef register(request):\n user = request.user\n if request.method == 'POST':\n profile_form = RegisterProfileForm(request.POST, request.FILES)\n if profile_form.is_valid():\n profile = Profile.objects.create(\n user=user,\n nickname=profile_form.cleaned_data['nickname'],\n photo=profile_form.cleaned_data['photo'],\n # age=profile_form.cleaned_data['age'],\n # birthday=profile_form.cleaned_data['birthday']\n interested=profile_form.cleaned_data['interested'],\n job=profile_form.cleaned_data['job'],\n description=profile_form.cleaned_data['description'],\n )\n return redirect('create_profile:goal_get')\n else:\n profile_form = RegisterProfileForm()\n\n return render(request, 'create_profile/register.html', {\n 'profile_form': profile_form,\n })\n\n\n\n\n\n# def profile_edit(request, pk):\n# profile = 
Profile.objects.get(pk=pk)\n# if request.method == \"POST\":\n# profile_form = RegisterProfileForm(request.POST, request.FILES, instance=profile)\n# goal_form = ObjectGoalNumberForm(request.POST, instance=profile)\n# if profile_form.is_valid():\n# # profile = profile_form.save()\n# profile_form.save()\n# goal_form.save()\n# return redirect('create_profile:profile_look', request.user.pk)\n# else:\n# profile_form = RegisterProfileForm(instance=profile)\n# goal_form = ObjectGoalNumberForm(instance=profile)\n# ctx = {\n# 'profile_form': profile_form,\n# 'goal_form': goal_form,\n# 'profile': profile,\n# }\n# return render(request, 'create_profile/profile_update.html', ctx)\n\ndef profile_edit(request, pk):\n profile = Profile.objects.get(id=pk)\n if request.method == \"POST\":\n profile_form = RegisterProfileForm(request.POST, request.FILES, instance=profile)\n goal_form = ObjectGoalNumberForm(request.POST, instance=profile)\n # profile = profile_form.save()\n profile_form.save()\n goal_form.save()\n return redirect('create_profile:profile_look', request.user.pk)\n else:\n profile_form = RegisterProfileForm(instance=profile)\n goal_form = ObjectGoalNumberForm(instance=profile)\n ctx = {\n 'profile_form': profile_form,\n 'goal_form': goal_form,\n 'profile': profile,\n }\n return render(request, 'create_profile/profile_update.html', ctx)\n\n# def profile_edit(request,pk):\n# profile = Profile.objects.get(pk=pk)\n# if request.method == \"POST\":\n# photo = request.FILES['photo']\n# nickname = request.POST['nickname']\n# age = request.POST['age']\n# job = request.POST['job']\n# description = request.POST['description']\n# goal_count = request.POST['goal_count']\n# return redirect('create_profile:profile_look', request.user.pk)\n# else:\n# profile_form = RegisterProfileForm(instance=profile)\n# goal_form = ObjectGoalNumberForm(instance=profile)\n# ctx = {\n# 'profile_form': profile_form,\n# 'goal_form': goal_form,\n# 'profile':profile,\n# }\n# return render(request,'create_profile/profile_update.html',ctx)","repo_name":"pirogramming/Life_Finder","sub_path":"config/create_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70469871773","text":"from random import random, seed, shuffle, gauss\nimport graphics\nfrom math import ceil, sqrt\nfrom functools import reduce\nfrom time import time\nfrom ucb import main\nfrom Particles import Particle\n\ndefault_num_particles = 20\ndefault_steps = 10000\n\n##################\n# Initialization #\n##################\n\ndef make_particles(n):\n \"\"\"Construct a list of n particles in two dimensions, initially distributed\n evenly but with random velocities. 
The resulting list is not spatially\n sorted.\"\"\"\n seed(1000)\n sx = ceil(sqrt(n))\n sy = (n + sx - 1) // sx\n start_id = Particle.next_id\n Particle.box_size = sqrt(Particle.density * n)\n particles = [Particle(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0,0.0,0.0,gauss(10.5,0.5),1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0) for _ in range(n)]\n size = Particle.box_size\n\n # Make sure particles are not spatially sorted\n shuffle(particles)\n\n for p in particles:\n # Distribute particles evenly to ensure proper spacing\n i = p.id - start_id\n p.rtd0._x = size * (1 + i % sx) / (1 + sx)\n p.rtd0._y = size * (1 + i / sx) / (1 + sy)\n\n # Assign random velocities within a bound\n p.rtd1._x = random() * 2 - 1\n p.rtd1._y = random() * 2 - 1\n\n return particles\n\ndef init_graphics(distribution, total, update_interval=1, size=600):\n \"\"\"Initialize the visualization, if update_interval is nonzero. distribution\n is the set of particles, divided into lists for each thread or process.\n total is the total number of particles. size is the base size of the\n simulation; the window size will be slightly larger.\"\"\"\n if not update_interval:\n return None, None\n\n psize = ceil(sqrt(10000 / total)) # particle size\n # Adjust window size so that particle edges do not go off the screen\n wsize = size + psize * 2 + 5\n win = graphics.GraphWin('Particle Simulation', wsize, wsize,\n autoflush=False)\n win.setBackground('white')\n\n # Initialize particle graphics\n Particle.scale_pos = size / Particle.box_size\n energy = 0\n for t in range(len(distribution)):\n particles = distribution[t]\n for p in particles:\n p.init_graphic(win, psize, t)\n energy += p.energy\n\n # Initialize step number\n text = graphics.Text(graphics.Point(wsize // 2, 20),\n 'step = 0, energy = ' + str(energy))\n text.setSize(18)\n text.draw(win)\n\n return win, text\n\ndef update_step(win, text, step, energy, update_interval):\n \"\"\"Update the visualization if appropriate given the step number and update\n interval.\"\"\"\n if update_interval and step % update_interval == 0:\n format_str = 'step = {0}, energy = {1}'\n text.setText(format_str.format(step, round(1000 * energy)))\n win.update()\n\n#####################\n# Serial Simulation #\n#####################\n\ndef serial_simulation(n, steps, num_threads=1, normalize_energy=False,\n update_interval=1):\n \"\"\"Simulate n particles sequentially for steps steps. num_threads should\n always be 1. 
update_interval is the visualization update interval.\"\"\"\n assert num_threads == 1, 'serial_simulation cannot use multiple threads'\n\n # Create particles\n particles = make_particles(n)\n #initial_energy = reduce(lambda x, p: x + p.energy, particles, 0)\n\n # Initialize visualization\n win, text = init_graphics((particles,), n, update_interval)\n\n # Perform simulation\n start = time()\n for step in range(steps):\n # Compute forces\n for p1 in particles:\n # p1.rtd2._x = p1.rtd2._y = 0 # reset accleration to 0\n p1.set_force_to_zero()\n p1.predict()\n p1.boundary()\n for p2 in particles:\n if p2.id is not p1.id:\n p1.apply_force(p2)\n p1.correct()\n\n # Move particles\n for p in particles:\n # Energy normalization\n p.rtd1._x *= Particle.energy_correction\n p.rtd1._y *= Particle.energy_correction\n\n # Update visualization\n energy = 0\n for p in particles:\n p.move_graphic()\n energy += p.energy\n update_step(win, text, step, energy, update_interval)\n\n # Energy normalization\n if normalize_energy:\n Particle.energy_correction = sqrt(initial_energy / energy)\n end = time()\n\n print('serial simulation took {0} seconds'.format(end - start))\n\n@main\ndef run(*args):\n simulation, num_threads = serial_simulation, 1\n num_particles, steps = default_num_particles, default_steps\n normalize_energy = False\n update_interval = 0\n i = 0\n while i < len(args):\n if args[i] == '-t':\n simulation = thread_simulation\n num_threads = int(args[i+1])\n elif args[i] == '-p':\n simulation = process_simulation\n num_threads = int(args[i+1])\n elif args[i] == '-n':\n num_particles = int(args[i+1])\n elif args[i] == '-s':\n steps = int(args[i+1])\n elif args[i] == '-g' or args[i] == '-v':\n update_interval = 1\n i -= 1\n elif args[i] == '-u':\n update_interval = int(args[i+1])\n elif args[i] == '-dt':\n Particle.dt = float(args[i+1])\n elif args[i] == '-e':\n normalize_energy = True\n i -= 1\n else:\n if args[i] != '-h' and args[i] != '-help':\n print('unknown argument:', args[i], file=sys.stderr)\n print('Options:\\n' +\n ' -t run with threads\\n' +\n ' -p run with processes\\n' +\n ' -n simulate particles\\n' +\n ' -s run for timesteps\\n' +\n ' -v, -g enable visualization\\n' +\n ' -u update visualization every steps\\n' +\n ' -dt use as length of timestep\\n',\n ' -e normalize total energy in each timestep',\n file=sys.stderr)\n return\n i += 2\n simulation(num_particles, steps, num_threads, normalize_energy, update_interval)\n","repo_name":"bryanfoley/Python","sub_path":"verlet.py","file_name":"verlet.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11474496973","text":"from mimetypes import guess_all_extensions\r\nimport numbers\r\nimport random\r\nnumber=random.randint(1, 9)\r\nchance=0\r\n\r\nprint(\"Number guessing game\")\r\nwhile chance<5:\r\n guess=int(input(\"Guess the number between 1-9: \"))\r\n if guess == number:\r\n print('Congratulation! 
YOU WON!')\r\n break\r\n if not chance<5:\r\n print('YOU LOSE!')\r\n chance=chance+1","repo_name":"tywab/pr-97","sub_path":"guessgame.py","file_name":"guessgame.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29953849993","text":"#!/bin/python3\n\n\ndef flavors(m, n, c_i):\n min_i = 0\n max_i = n - 1\n\n c_i_sorted = sorted(c_i)\n\n while True:\n amount = c_i_sorted[min_i] + c_i_sorted[max_i]\n\n if amount > m:\n max_i = max_i - 1\n elif amount < m:\n min_i = min_i + 1\n else:\n index_1 = c_i.index(c_i_sorted[min_i]) + 1\n index_2 = c_i.index(c_i_sorted[max_i]) + 1\n i = 0\n while index_2 == index_1:\n i = i + 1\n index_2 = c_i.index(c_i_sorted[max_i], i) + 1\n\n return sorted([index_1, index_2])\n\n\nif __name__ == \"__main__\":\n t = int(input().strip())\n for t_i in range(t):\n m = int(input().strip())\n n = int(input().strip())\n c_i = list(map(int, input().strip().split(' ')))\n flavors_index = flavors(m, n, c_i)\n print(str(flavors_index[0]) + \" \" + str(flavors_index[1]))\n","repo_name":"thelfensdrfer/hackerrank","sub_path":"python/algorithms/search/icecream-parlor/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37223871073","text":"import pygame\nfrom qiskit import BasicAer, QuantumRegister, ClassicalRegister, QuantumCircuit, execute\nfrom qiskit.tools.visualization import plot_histogram\n\nfrom .. import load_image, file_path\n\nDEFAULT_NUM_SHOTS = 1000\n\n\nclass MeasurementsHistogram(pygame.sprite.Sprite):\n \"\"\"Displays a histogram with measurements\"\"\"\n def __init__(self, circuit, num_shots=DEFAULT_NUM_SHOTS):\n pygame.sprite.Sprite.__init__(self)\n self.image = None\n self.rect = None\n self.set_circuit(circuit, num_shots)\n\n # def update(self):\n # # Nothing yet\n # a = 1\n\n def set_circuit(self, circuit, num_shots=DEFAULT_NUM_SHOTS):\n backend_sim = BasicAer.get_backend('qasm_simulator')\n qr = QuantumRegister(circuit.n_qubits, 'q')\n cr = ClassicalRegister(circuit.n_qubits, 'c')\n meas_circ = QuantumCircuit(qr, cr)\n meas_circ.barrier(qr)\n meas_circ.measure(qr, cr)\n complete_circuit = circuit + meas_circ\n\n job_sim = execute(complete_circuit, backend_sim, shots=num_shots)\n\n result_sim = job_sim.result()\n\n counts = result_sim.get_counts(complete_circuit)\n print(counts)\n\n histogram = plot_histogram(counts)\n filename = 'bell_histogram.png'\n full_path = file_path('images', filename)\n histogram.savefig(full_path)\n\n self.image, self.rect = load_image(filename, -1)\n self.image.convert()\n","repo_name":"HuangJunye/Qiskit-for-GameDev","sub_path":"pygame/qgame/viz/measurements_histogram.py","file_name":"measurements_histogram.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"32"} +{"seq_id":"7120390344","text":"import os\nimport sys\n\ndef makeDir(dir_name):\n try:\n os.mkdir(dir_name)\n except FileExistsError:\n pass\n\n\ndef outputDirName(num):\n return \"stat_%03d_results\" % (num)\n\n\ndef title(text):\n print(\"=\" * 50)\n print(text)\n print(\"=\" * 50)\n print(\"\")\n\n\ndef printResults(feats, cls, predicted):\n sys.stdout.write(\"Features:\\t[\");\n for i in feats[:-1]:\n sys.stdout.write(\"%.02f, \" % (i));\n sys.stdout.write(\"%.02f]\\n\" % (feats[-1]));\n\n print(\"Class:\\t\\t%s\" % (cls))\n 
print(\"Predicted:\\t%s\" % (predicted))\n print(\"\")\n\n\ndef printByCondition(condition, df, clf, limit=None):\n count = 0;\n\n def prepare(df, index=None):\n if index:\n return zip(df[0][:index], df[1][:index])\n return zip(df[0], df[1])\n\n for feats, cls in prepare(df):\n predicted = clf.predict([feats])\n\n if condition(feats, cls, predicted):\n printResults(feats, cls, predicted)\n count += 1\n\n if limit is not None and count >= limit:\n return\n","repo_name":"petermlm/MastersDegreeThesis","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18182071234","text":"import torch\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\n\nLR = 0.1 # 设置初始学习率\niteration = 10\nmax_epoch = 200\n\n# --------- fake data and optimizer ---------\nweights = torch.randn((1), requires_grad=True)\ntarget = torch.zeros((1))\n\n# 构建虚拟优化器,为了lr_scheduler关联优化器\noptimizer = optim.SGD([weights], lr=LR, momentum=0.9)\n\n# ------------- 3 Exponential LR -----------\n# flag = 0\nflag = 1\nif flag:\n gamma = 0.95\n scheduler_lr = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)\n\n lr_list, epoch_list = list(), list()\n for epoch in range(max_epoch):\n lr_list.append(scheduler_lr.get_lr())\n epoch_list.append(epoch)\n\n for i in range(iteration):\n loss = torch.pow((weights - target), 2)\n\n loss.backward()\n # 优化器参数更新\n optimizer.step()\n optimizer.zero_grad()\n # 学习率更新\n scheduler_lr.step()\n\n plt.plot(epoch_list, lr_list, label=\"Exponential LR Scheduler\\ngamma:{}\".format(gamma))\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Learning rate\")\n plt.legend()\n plt.savefig('./ExponentialLR.jpg')\n # plt.show()\n","repo_name":"yifuxiong/DETR-PyTorch","sub_path":"lr_scheduler.py","file_name":"lr_scheduler.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72768558811","text":"import csv\nimport json\n\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom nltk.corpus.reader import framenet\n\n\ndef load_sentences(path: str):\n \"\"\"Load Framenet sentence generator\"\"\"\n fn = framenet.FramenetCorpusReader(path, fileids=None)\n return fn.sents()\n\n\ndef save_sentence(sentence, filename):\n \"\"\"Save sentence and frame information\"\"\"\n with open(filename, mode='w') as f:\n fieldnames = ['frame', 'sentence', 'frame_definition']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n\n writer.writeheader()\n\n writer.writerow({\n \"frame\": sentence.frame.name, \n \"sentence\": sentence.text, \n \"frame_definition\": sentence.frame.definition\n })\n\n\ndef save_sentence_data(framenet_path, save_root, num_samples=100_000):\n \"\"\"Save sentence data as individual files\n\n Every sample has two documents, a sentence from \n Framenet as well as the frame definition. 
This \n can be used for text summarization Seq2Seq models.\n \n Note:\n As a proof of concept, this only saves the first\n `num_samples`.\n\n Args:\n framenet_path: path to the `fndata-1.7` dataset\n save_root: root path to save individual sentence csvs\n num_samples: number of samples to save\n \"\"\"\n sentences = load_sentences(framenet_path)\n\n root = Path(save_root)\n root.mkdir(parents=True, exist_ok=True)\n\n for idx in tqdm(range(num_samples)):\n sent = sentences[idx]\n # Name the file based on frame type\n path = root.joinpath(f\"{sent.frame.name}_{idx}.csv\")\n save_sentence(sent, path)\n\n\ndef load_sentence(filename):\n \"\"\"Load a single sentence, frame definition pair\"\"\"\n with open(filename, mode='r') as f:\n reader = csv.DictReader(f)\n\n # Each file we processed is just a single dicionary,\n # we can grab that and run.\n for line in reader:\n sentence = dict(line)\n\n return sentence\n\n\ndef load_sentence_data(save_root):\n \"\"\"Load all raww sentence, frame definition pairs\n\n We will get a list of dictionaries, each\n representing a raw text sentence, frame \n definition pair.\n \"\"\"\n root = Path(save_root).glob(\"**/*\")\n files = [x for x in root if x.is_file()]\n\n sentences = []\n for file in tqdm(files):\n sentences.append(\n load_sentence(file)\n )\n\n return sentences\n\n\ndef save_json(sentence, filename):\n \"\"\"Save sentence frame pairs as json\"\"\"\n data = {\n \"frame\": sentence.frame.name, \n \"sentence\": sentence.text, \n \"frame_definition\": sentence.frame.definition\n }\n\n with open(filename, 'w') as f:\n json.dump(data, f)\n\n\ndef save_sentence_json(framenet_path, save_root, num_samples=100_000):\n \"\"\"Save sentence data as individual files as json\n\n Every sample has two documents, a sentence from \n Framenet as well as the frame definition. This \n can be used for text summarization Seq2Seq models.\n \n Note:\n As a proof of concept, this only saves the first\n `num_samples`.\n\n Args:\n framenet_path: path to the `fndata-1.7` dataset\n save_root: root path to save individual sentence csvs\n num_samples: number of samples to save\n \"\"\"\n sentences = load_sentences(framenet_path)\n\n root = Path(save_root)\n root.mkdir(parents=True, exist_ok=True)\n\n for idx in tqdm(range(num_samples)):\n sent = sentences[idx]\n # Name the file based on frame type\n path = root.joinpath(f\"{sent.frame.name}_{idx}.json\")\n save_json(sent, path)\n\n\ndef data_paths(root):\n \"\"\"Get the paths for the processed data\n\n Note:\n This first requires that you use `frame`'s cli\n to preprocess the framenet data into json files.\n\n Args:\n root: root path to the json preprocessed data\n\n Raises:\n `ValueError`: if the files do not exist in the `root` \n directory. Make sure to prepocess the data using `frame`'s\n frame.cli:preprocess-framenet before loading the data.\n \"\"\"\n path = Path(root).glob(\"**/*\")\n paths = [str(p) for p in path]\n\n if len(paths) == 0:\n raise ValueError(\n f\"Preprocessed data not found at <{root}>! 
Use frame.cli:preprocess-framenet\"\n )\n\n return paths\n\n","repo_name":"yngtodd/frame","sub_path":"frame/framenet.py","file_name":"framenet.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15425698088","text":"import random\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom RGB2RAW import RGB2Bayer_RG\r\n\r\ndef black_level_correct_more(img, color_num=255):\r\n #E-BLC\r\n mask = np.zeros(img.shape, img.dtype)\r\n mask[:, :, 1] = color_num\r\n dst = cv2.addWeighted(img, 0.7, mask, 0.3, 0)\r\n return dst\r\n\r\ndef black_level_correct_less(img, color_num=255):\r\n #I-BLC\r\n mask = np.zeros(img.shape, img.dtype)\r\n mask[:, :, 2] = color_num\r\n dst = cv2.addWeighted(img, 0.7, mask, 0.3, 0)\r\n return dst\r\n\r\n# '''\r\n#test\r\nori_img = cv2.imread(\"../VOCdevkit/VOC2007/JPEGImages/000000.jpg\")\r\n# cv2.imshow('original img', ori_img)\r\nblack_src = black_level_correct_less(ori_img)\r\ncv2.imshow('black img', black_src)\r\ncv2.waitKey(0)\r\n# '''\r\n","repo_name":"realcorruption/image-noise-pattern","sub_path":"I-BLC_E-BLC.py","file_name":"I-BLC_E-BLC.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18354712663","text":"from typing import List, Tuple\n\n\ndef find_e(inp: List[List[str]], goal: Tuple[int, int], queue: List[Tuple[Tuple[int, int], int]]) -> int:\n offsets = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\n visited = set()\n while queue:\n coord, distance = queue.pop(0)\n\n if coord in visited:\n continue\n\n if coord == goal:\n return distance\n\n letter = inp[coord[0]][coord[1]]\n allowed = [chr(i) for i in range(ord('a'), ord(letter) + 2)]\n\n visited.add(coord)\n\n for offset in offsets:\n y = coord[0] + offset[0]\n x = coord[1] + offset[1]\n\n if y < 0 or y >= len(inp):\n continue\n if x < 0 or x >= len(inp[y]):\n continue\n\n if inp[y][x] in allowed:\n queue.append(((y, x), distance + 1))\n\n return -1\n\n\ndef solution(inp: List[str]) -> int:\n grid = [list(row) for row in inp]\n\n starting_position = (-1, -1)\n ending_position = (-1, -1)\n\n for row in range(len(inp)):\n for col in range(len(inp[row])):\n if inp[row][col] == 'S':\n starting_position = (row, col)\n elif inp[row][col] == 'E':\n ending_position = (row, col)\n\n grid[starting_position[0]][starting_position[1]] = 'a'\n grid[ending_position[0]][ending_position[1]] = 'z'\n\n queue = [(starting_position, 0)]\n return find_e(grid, ending_position, queue)\n\n\ndef result(inp: List[str]) -> int:\n return solution(inp)\n\n\ndef test(examples: List[List[str]]) -> None:\n example = 0\n exp = 31\n res = result(examples[example])\n assert res == exp, f\"example {example}: result was {res}, expected {exp}\"\n","repo_name":"Arham4/advent-of-code","sub_path":"2022/day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"4819716818","text":"import collections\nimport copy\n\nfrom creme import base\nfrom creme import utils\n\n\n__all__ = ['OneVsRestClassifier']\n\n\nclass OneVsRestClassifier(base.Wrapper, base.MultiClassifier):\n \"\"\"One-vs-the-rest (OvR) multiclass strategy.\n\n This strategy consists in fitting one binary classifier per class. Because we are in a\n streaming context, the number of classes isn't known from the start, hence new classifiers are\n instantiated on the fly. 
Likewise, the predicted probabilities will only include the classes\n seen up to a given point in time.\n\n Parameters:\n classifier: A binary classifier, although a multi-class classifier will work too.\n\n Attributes:\n classifiers (dict): A mapping between classes and classifiers.\n\n Example:\n\n >>> from creme import datasets\n >>> from creme import linear_model\n >>> from creme import metrics\n >>> from creme import model_selection\n >>> from creme import multiclass\n >>> from creme import preprocessing\n\n >>> X_y = datasets.ImageSegments()\n\n >>> scaler = preprocessing.StandardScaler()\n >>> ovr = multiclass.OneVsRestClassifier(linear_model.LogisticRegression())\n >>> model = scaler | ovr\n\n >>> metric = metrics.MacroF1()\n\n >>> model_selection.progressive_val_score(X_y, model, metric)\n MacroF1: 0.774148\n\n \"\"\"\n\n def __init__(self, classifier: base.BinaryClassifier):\n self.classifier = classifier\n self.classifiers = {}\n\n @property\n def _wrapped_model(self):\n return self.classifier\n\n def fit_one(self, x, y):\n\n # Instantiate a new binary classifier if the class is new\n if y not in self.classifiers:\n self.classifiers[y] = copy.deepcopy(self.classifier)\n\n # Train each label's associated classifier\n for label, model in self.classifiers.items():\n model.fit_one(x, y == label)\n\n return self\n\n def predict_proba_one(self, x):\n y_pred = {\n label: model.predict_proba_one(x)[True]\n for label, model in self.classifiers.items()\n }\n return utils.math.softmax(y_pred)\n","repo_name":"Sahanduiuc/creme","sub_path":"creme/multiclass/ovr.py","file_name":"ovr.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"71931784411","text":"# 답은 정확히 다 나오는데 어떤 부분이 틀렸는지 모르겠다.\n# d <-처럼 한개만 나온경우를 고려안해줘서..?\n# 다른 사람 코드 홀수개가 한개만 나온경우를 고려안해준거같은데 확인해보자\n\nimport sys\ninput = sys.stdin.readline\n\ntmp = list(map(str, input().strip()))\n\n\ntmp.sort() # 알파벳 순서대로 정렬\n\n\n# 딕셔너리를 선언해 알파벳과 개수 추가\ncount = {}\nfor s in tmp:\n if s in count:\n count[s]+=1\n else:\n count[s]=1\n\n\n# 짝수의 개수, 홀수의 개수가 몇개인지 알아보기 위해\ncount_list = list(count.values()) # 각 키의 개수들을 반환\ncount_even =0\ncount_odd = 0\nfor i in count_list:\n if i%2==0:\n count_even+=1\n else:\n count_odd+=1\n\n\n\nresult = '' # 반씩 반씩 더해서 출력할 예정\ntmp2 = []\nif count_odd >1:\n print(\"I'm Sorry Hansoo\")\nelse:\n if count_odd==1 and count_even==0: # d처럼 한개만 홀수개수 한개만 나온경우\n print(tmp[0]*len(tmp))\n else:\n for s in count:\n if count[s]%2==0:# 짝수개면\n result+=s*(count[s]//2)\n else: # 홀수개면\n result+=s*(count[s]//2)\n tmp2.append(s)\n if len(tmp2)!=0: # tmp2가 비어있지 않은경우, 즉 홀수 개수가 1개 나온 경우\n result = result + tmp2[0]+result[::-1]\n print(result)\n else:\n print(result+result[::-1])\n\n\n\n","repo_name":"epzlfnql/algorithm_python","sub_path":"week7/팰린드롬만들기(1213).py","file_name":"팰린드롬만들기(1213).py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7443687996","text":"# taken from http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little\n\nfrom setuptools import setup, find_packages\n\n# random values\n__version__ = '0.1.0'\n\n# this part taken from https://github.com/dr-guangtou/riker\nwith open('requirements.txt') as infd:\n INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()]\n\n# code taken from above\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\nsetup(name='mlfinder',\n version=__version__,\n 
description='Find possible microlensing events.',\n long_description=readme(),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Text Processing :: Linguistic',\n ],\n keywords='astronomy',\n url='https://github.com/JudahRockLuberto/mlfinder',\n author='Judah Luberto',\n author_email='jluberto@ucsc.edu',\n license='MIT',\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n include_package_data=True,\n zip_safe=False,\n python_requires='>=3.6')\n","repo_name":"JudahRockLuberto/mlfinder","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"919641833","text":"\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**9)\n\nh,w = map(int,input().split())\ns = [list(input().rstrip()) for _ in range(h)]\n\ndx = [0,0,-1,1]\ndy = [-1,1,0,0]\n\nresult = 'Yes'\n\nfor i in range(h):\n for j in range(w):\n if s[i][j] == '#':\n for d in range(4):\n idx,jdy = i+dx[d],j + dy[d]\n if idx in range(h) and jdy in range(w):\n if s[idx][jdy] == \"#\":\n break\n else:\n result = 'No'\n break\n\nprint(result)\n","repo_name":"Eggngineer/atcoder","sub_path":"ABCs/ABC096/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9963852054","text":"from fastapi import FastAPI\nimport telegram_send\n\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nfrom fastapi.middleware.cors import CORSMiddleware\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nclass Message(BaseModel):\n firstname: str\n lastname : str\n email : str\n phone : str\n message : str\n\n@app.get(\"/\")\ndef index():\n return \"This is working\"\n\n@app.get(\"/test\")\ndef test():\n telegram_send.send(messages=[\"Test success\"])\n return \"success\"\n\n@app.post(\"/message\")\ndef message(message: Message):\n # telegram_send.send(messages=[message])\n print(message)\n return message\n\n\n ","repo_name":"Sendeyo/max_api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39443185550","text":"from typing import List\nimport onnx\nfrom onnx2pytorch import ConvertModel\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom constants import CONTOURS_BINS_PER_SEMITONE, N_FREQ_BINS_CONTOURS\nfrom q_transform import NormalizedCQT, log_base_b\n\n\nclass BasicPitch(nn.Module):\n def __init__(\n self,\n n_harmonics: int = 8,\n n_filters_contour: int = 32,\n n_filters_onsets: int = 32,\n n_filters_notes: int = 32,\n ):\n super().__init__()\n self.normalized_cqt = NormalizedCQT(n_harmonics, use_batchnorm=True)\n harmonics = [0.5] + list(range(1, n_harmonics)) if n_harmonics > 1 else [1]\n self.harmonic_stacking = HarmonicStacking(CONTOURS_BINS_PER_SEMITONE, harmonics, N_FREQ_BINS_CONTOURS)\n self.contour = BasicPitchContour(n_filters_contour)\n\n def forward(self, x):\n x = self.normalized_cqt(x)\n x = self.harmonic_stacking(x)\n x = self.contour(x)\n return {\"contour\": x}\n\n\nclass BasicPitchContour(nn.Module):\n def __init__(self, n_filters_contour: int = 32):\n super().__init__()\n # 
self.contour_conv_1 = nn.Conv2d(8, n_filters_contour, (5, 5), padding=\"same\")\n # self.contour_batch_norm_1 = nn.BatchNorm2d(n_filters_contour)\n # self.contour_activation_1 = nn.ReLU()\n self.contour_conv_2 = nn.Conv2d(8, 8, (3, 3 * 13), padding=\"same\")\n self.contour_batch_norm_2 = nn.BatchNorm2d(8)\n self.contour_activation_2 = nn.ReLU()\n self.contour_conv_3 = nn.Conv2d(8, 1, (5, 5), padding=\"same\")\n self._load_weights_from_onnx()\n\n def _load_weights_from_onnx(self):\n onnx_model = onnx.load('/home/bgrimm/basic_pitch.onnx')\n pytorch_model = ConvertModel(onnx_model)\n m = list(pytorch_model.modules())\n # self.contour_conv_1.load_state_dict(m[-17].state_dict())\n self.contour_conv_2.load_state_dict(m[-15].state_dict())\n self.contour_conv_3.load_state_dict(m[-13].state_dict())\n\n def forward(self, x):\n # x = self.contour_conv_1(x)\n # x = self.contour_batch_norm_1(x)\n # x = F.relu(x)\n x = self.contour_conv_2(x)\n x = self.contour_batch_norm_2(x)\n x = F.relu(x)\n x = self.contour_conv_3(x)\n x = F.sigmoid(x)\n x = x.squeeze(1)\n return x\n\n\nclass HarmonicStacking(nn.Module):\n \"\"\"Harmonic stacking layer\n\n Input shape: (n_batch, n_times, n_freqs, 1)\n Output shape: (n_batch, n_times, n_output_freqs, len(harmonics))\n\n n_freqs should be much larger than n_output_freqs so that information from the upper\n harmonics is captured.\n\n Attributes:\n bins_per_semitone: The number of bins per semitone of the input CQT\n harmonics: List of harmonics to use. Should be positive numbers.\n shifts: A list containing the number of bins to shift in frequency for each harmonic\n n_output_freqs: The number of frequency bins in each harmonic layer.\n \"\"\"\n\n def __init__(self, bins_per_semitone: int, harmonics: List[float], n_output_freqs: int):\n \"\"\"Downsample frequency by stride, upsample channels by 4.\"\"\"\n super().__init__()\n self.bins_per_semitone = bins_per_semitone\n self.harmonics = harmonics\n shifts = 12.0 * bins_per_semitone * log_base_b(torch.tensor(harmonics), 2)\n self.shifts = torch.round(shifts, decimals=2).int()\n self.n_output_freqs = n_output_freqs\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # (n_batch, n_times, n_freqs, 1)\n assert len(x.shape) == 4\n channels = []\n for shift in self.shifts:\n if shift == 0:\n padded = x\n elif shift > 0:\n paddings = (0, shift)\n padded = F.pad(x[:, :, :, shift:], paddings)\n elif shift < 0:\n paddings = (-shift, 0)\n padded = F.pad(x[:, :, :, :shift], paddings)\n else:\n raise ValueError\n channels.append(padded)\n\n x = torch.cat(channels, dim=1)\n x = x[:, :, :, :self.n_output_freqs] # return only the first n_output_freqs frequency channels\n return x\n\n\n","repo_name":"bradgrimm/quick_pitch","sub_path":"quick_pitch.py","file_name":"quick_pitch.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17517993545","text":"from __future__ import annotations\nimport os, shutil\n\nfrom src.controller.AbstractController import AbstractController\nfrom src.controller.ProjectManager import ProjectManager\n\nimport unittest\n\n\nclass TestAbstractController(unittest.TestCase):\n __BASE_PATH = f'{os.path.dirname(__file__)}/../resources/test_resources'\n\n def setUp(self):\n os.mkdir(TestAbstractController.__BASE_PATH)\n\n def tearDown(self):\n shutil.rmtree(TestAbstractController.__BASE_PATH)\n\n def test_get_project(self):\n c = AbstractController()\n p = c.get_project()\n\n self.assertIs(p, 
ProjectManager().get_project())\n\n ProjectManager().new()\n\n self.assertIs(c.get_project(), ProjectManager().get_project())\n self.assertIsNot(p, ProjectManager().get_project())\n self.assertIsNot(p, c.get_project())\n\n def test_save(self):\n target = f'{TestAbstractController.__BASE_PATH}/project'\n\n c = AbstractController()\n c.get_project().set_path(target)\n\n c.save()\n\n c._AbstractController__saving_thread.join()\n\n self.assertTrue(os.path.isdir(target))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"einDamian/discrete-choice","sub_path":"src/test/controller/test_AbstractController.py","file_name":"test_AbstractController.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19273299150","text":"# importing libraries\nimport json\nimport requests\nimport pandas as pd\n\n# downloading data into a dictionary\nr = requests.get('https://api.binance.com/api/v3/exchangeInfo')\nrdict = r.json()\n\n# looping over rate limits\ndata = []\nfor i,rate in enumerate(rdict[\"rateLimits\"]):\n data.append([rate[\"rateLimitType\"],rate[\"interval\"],rate[\"intervalNum\"],rate[\"limit\"]])\n\n# outputing data to csv\ncolumns = [\"rateLimitType\",\"interval\",\"intervalNum\",\"limit\"]\ndf = pd.DataFrame(data=data,columns=columns)\ndf.to_csv(\"rates.csv\",index=False)\n\n# looping over symbol data\ndata = []\nfor i,symbol in enumerate(rdict[\"symbols\"]):\n data.append([symbol[\"symbol\"],symbol[\"status\"]])\n\n# outputing data to csv\ncolumns = [\"symbol\",\"status\"]\ndf = pd.DataFrame(data=data,columns=columns)\ndf.to_csv(\"symbols.csv\",index=False)\n\n\n\n","repo_name":"Smeths/Binance-API","sub_path":"Exchange Info End Point/apiPut.py","file_name":"apiPut.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41468941069","text":"import argparse\nfrom . import run_database_server\n\nparser = argparse.ArgumentParser(description=\"VecDB\")\n\nparser.add_argument(\n \"--host\", type=str, default=\"127.0.0.1\", help=\"Host IP address to bind to\"\n)\nparser.add_argument(\"--port\", type=int, default=6969, help=\"Host port to bind to\")\nparser.add_argument(\n \"--dir\", type=str, default=\"./vecdb\", help=\"Path to database directory\"\n)\nparser.add_argument(\n \"--keyfile\",\n type=str,\n help=\"Path to key file with openai api key and org id. 
key=value format\",\n)\n\nargs = parser.parse_args()\n\ntry:\n with open(args.keyfile) as f:\n key, org = map(lambda x: x.split(\"=\")[1].strip(), f.read().split(\"\\n\")[:2])\nexcept FileNotFoundError:\n raise ValueError(f\"Could not find keyfile at {args.keyfile}\")\n\n\nrun_database_server(\n host=args.host,\n port=args.port,\n vec_dir=args.dir,\n openai_api_key=key,\n openai_org_id=org,\n)\n","repo_name":"purplelemons-dev/vecdb","sub_path":"src/vecdb/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30017678317","text":"\ndef is_untouchable(n):\n if n < 2:\n return 'Invalid Input'\n \n limit = n**2\n totals = [1 for _ in range(limit)]\n \n for i in range(2, limit):\n j = i\n while j < limit - 1:\n totals[j] += i\n j += i \n touchable = [i for i in range(2, limit) if totals[i] - i == n]\n return True if not touchable else touchable\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"uojwbCn2yyqqk9Wpf_16.py","file_name":"uojwbCn2yyqqk9Wpf_16.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30255560492","text":"import streamlit as st\nimport time\n\nwith open(\"code.txt\") as f:\n code = f.read().strip()\n\n# check function\ndef check_attempt(attempt):\n for i in range(len(code)):\n if code[i] != attempt[i]:\n return False\n \n time.sleep(0.03)\n \n return True\n\nst.title(\"Timing Side Channel Attack\")\n\nst.write(\"The following piece of check function for a PIN is vulnerable to a time side channel attack.\")\n\nst.code(\"\"\"\ndef check(attempt):\n for i in range(len(code)):\n if code[i] != attempt[i]:\n return False\n \n time.sleep(0.03)\n \n return True\n\"\"\", language=\"python\")\n\nst.write(\"The function takes longer to evaluate for every correct digit. By comparing the time it takes to check a PIN attempt, an attacker could deduce if a digit is correct or not. \")\nst.code(\"\"\"\nstart = time.time()\ncheck(attempt)\nend = time.time()\n\nif (end - start) > last_diff: # correct digit! Move on!\n\"\"\", language=\"python\")\n\nst.write(\"\")\ncode = st.text_input(\"Enter a PIN to be cracked:\", placeholder=\"Ex. 
937462\")\nst.write(f\"Attempting to crack: {code}\")\n\ncrack_btn = st.button(\"Crack\")\n\nif crack_btn:\n with st.empty():\n cracked = False\n\n attempt_list = [\"0\"] * len(code)\n\n # combination checker\n combination_checks = 0\n\n for i in range(len(code)):\n\n last_time = None\n\n if cracked:\n break\n\n for num in \"01234567890\":\n attempt_list[i] = str(num)\n attempt = ''.join(attempt_list)\n print(f\"Testing {attempt}...\")\n st.metric(f\"Testing #{combination_checks}...\", value=attempt)\n \n start = time.time()\n if check_attempt(attempt): \n cracked = True\n st.metric(f\"Cracked!\", value=attempt)\n break\n end = time.time()\n combination_checks += 1\n \n diff = end - start\n\n if last_time == None:\n last_time = diff\n continue\n \n #print(f\"\\t{diff} vs {last_time}\")\n #print(f\"\\t{round(diff, 2)} vs {round(last_time, 2)}\")\n\n if round(diff, 3) > round(last_time, 3) + 0.025:\n break\n\n last_time = diff\n\n if cracked:\n\n print()\n print(\"Cracked!\")\n print(f\"The code was {''.join(attempt_list)}\")\n print()\n\n brute = str(10**len(code))\n print(f\"Brute force Attempts : {brute} combinations\")\n print(f\"Side Channel Attempts: {str(combination_checks).ljust(len(brute))} combinations\")\n\n st.write(f\"**Brute force Attempts:** {brute} combinations\")\n st.write(f\"**Side Channel Attempts:** {str(combination_checks).ljust(len(brute))} combinations\")\n","repo_name":"midnight-mouse/side-channels","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25491839573","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/simplified-fractions/\n# Author: Miao Zhang\n# Date: 2021-05-07\n\nclass Solution:\n def simplifiedFractions(self, n: int) -> List[str]:\n res = []\n for i in range(1, n):\n for j in range(i + 1, n + 1):\n if math.gcd(i, j) == 1:\n res.append(str(i) + '/' + str(j))\n return res\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/simplifiedFractions/simplifiedFractions.py","file_name":"simplifiedFractions.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"49065587242","text":"# coding:utf-8\nimport os, sys, time, random, yaml\nimport argparse, pickle\nimport numpy as np\nimport pandas as pd\nimport SimpleITK as sitk\n\nsys.path.append(os.path.normpath(os.path.join(os.path.dirname(os.path.abspath( __file__ )), '../..')))\nimport util.ioFunction_version_4_3 as IO\n\ndef reset_seed(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--inputImageFile', '-i', help='input image file')\n parser.add_argument('--outputDir', '-o', help='output directory')\n parser.add_argument('--patch_coordinate_list', '-p', help='csv file that include patch origin')\n\n parser.add_argument('--num_patches', type=int, default=500,\n help='number of patches that you want to extract')\n parser.add_argument('--patch_side', type=int, default=64, help='patch side')\n parser.add_argument('--seed', type=int, default=0, help='seed for random')\n\n args = parser.parse_args()\n\n reset_seed(args.seed)\n\n print('----- Save configs -----')\n fn = os.path.splitext(os.path.basename(args.inputImageFile))[0]\n result_dir = args.outputDir\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n with 
open('{}/{}_configs.yml'.format(result_dir, fn), 'w') as f:\n f.write(yaml.dump(vars(args), default_flow_style=False))\n\n print('----- Read dataset -----')\n coordinate_list = pd.read_csv(args.patch_coordinate_list, names=(\"x\",\"y\",\"z\")).values.tolist()\n nda = IO.read_mhd_and_raw(args.inputImageFile)\n\n print('----- Extract patches ------')\n save_list = []\n for n in range(args.num_patches):\n idx = np.random.randint(0, len(coordinate_list))\n x, y, z = coordinate_list[idx]\n save_list.append(coordinate_list[idx])\n x_s, x_e = x, x + args.patch_side\n y_s, y_e = y, y + args.patch_side\n z_s, z_e = z, z + args.patch_side\n patch = nda[z_s:z_e, y_s:y_e, x_s:x_e]\n\n sitkImg = sitk.GetImageFromArray(patch)\n sitkImg.SetSpacing([1,1,1])\n sitk.WriteImage(sitkImg, '{}/{}_{:04d}.mhd'.format(result_dir, fn, n))\n\n np.savetxt('{}/coordinate.csv'.format(result_dir), np.asarray(save_list, dtype=int), delimiter=',')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"RyosukeKawai/3DSRcycleGAN","sub_path":"source/analyze_patches_using_pca/extract_patch.py","file_name":"extract_patch.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30759733740","text":"from API.Table import *\nfrom preprocessor import Preprocessor\n\nall_combos = np.array([], dtype=np.str)\nall_hands = np.array(list(Hand), dtype=np.object)\n\nfor hand in all_hands:\n all_combos = np.hstack((all_combos, hand.to_combos()))\n\n\nclass Game:\n def __init__(self):\n print(\"New Game\")\n self.hand = None\n self.hand_df_line = None\n self.level = None\n self.hero = None\n self.table = Table()\n self.temporary_history = np.array([])\n self.pp = Preprocessor()\n self.history = None\n\n def input_amount(self, msg: str = \"How many chips?\\n\"):\n try:\n amount = float(input(msg))\n if amount < 0:\n raise ValueError\n return amount\n except ValueError:\n print(\"Choose a positive numeric value\")\n return self.input_amount()\n\n def input_level(self):\n try:\n nb = int(input(\"Level Number\\n\"))\n try:\n bb = float(input(\"Level Big Blind\\n\"))\n try:\n ante = float(input(\"Level ante\\n\"))\n if ante > 0.3 * bb or ante < 0:\n raise ValueError\n level = Level(level=nb, ante=ante, bb=bb)\n self.set_level(level=level)\n print(level)\n except ValueError:\n ante = 0.125 * bb\n level = Level(level=nb, ante=ante, bb=bb)\n self.set_level(level=level)\n print(\"Without a correct ante, it's 1/8th of BB\")\n print(level)\n except ValueError:\n print(\"Big blind must be a float. Try again\")\n self.input_level()\n except ValueError:\n print(\"Level must be an int. 
Try again\")\n self.input_level()\n\n def input_new_player(self):\n table = self.hand.table\n name = input(\"Player Name?\\n\")\n occupied_seats = [player.seat for player in table.players]\n free_seats = [k for k in range(1, table.max_players + 1) if k not in occupied_seats]\n try:\n seat = int(input(\"Seat?\\n\"))\n if seat not in free_seats:\n raise ValueError\n stack = self.input_amount(\"Stack?\\n\")\n player = Player(name=name, seat=seat, stack=stack)\n self.hand.table.add_player(player=player)\n print(\"New Player:\", player.name)\n except ValueError:\n print(f\"Choose a seat between 1 and {table.max_players}, and a positive numeric stack.\")\n self.input_new_player()\n\n def input_hero(self):\n try:\n seat = int(input(\"Choose Hero's Seat\\n\"))\n if seat not in range(1, self.table.max_players+1):\n raise ValueError\n stack = float(input(\"What's your stack?\\n\"))\n if stack < 0:\n raise ValueError\n hero = Player(name=\"Manggy94\", seat=seat, stack=stack)\n hero.is_hero = True\n self.hand.table.add_player(player=hero)\n self.hero = hero\n print(f\"You're at seat n°{hero.seat} with {hero.stack} chips\")\n except ValueError:\n print(f\"Choissez un siège comprise entre 1 et {self.table.max_players}, et un stack numérique positif.\")\n self.input_hero()\n\n def input_players(self):\n table = self.hand.table\n self.input_hero()\n choice = None\n while choice != 0 and len(table.players) < table.max_players:\n try:\n choice = int(input(\"Voulez vous ajouter un autre joueur? \\n0:Non\\n1:Oui\\n\"))\n if choice not in range(2):\n raise ValueError\n except ValueError:\n print(\"Mauvaise saisie, complétion par défaut\")\n choice = 0\n if choice:\n self.input_new_player()\n else:\n try:\n choice2 = int(input(\"Voulez vous compléter la table? \\n0:Non\\n1:Oui\\n\"))\n if choice2 not in range(2):\n raise ValueError\n except ValueError:\n print(\"Mauvaise saisie, complétion par défaut\")\n choice2 = 1\n if choice2:\n self.complete_table()\n print(\"Liste des joueurs à la table:\")\n s_dict = table.players.seat_dict\n print([s_dict[i].name for i in sorted(s_dict)])\n\n def complete_table(self):\n table = self.hand.table\n occupied_seats = table.players.seat_dict\n new_seats = [k for k in range(1, table.max_players+1) if k not in occupied_seats]\n for seat in new_seats:\n player = Player(name=f\"Villain {seat}\", seat=seat, stack=20000.0)\n table.add_player(player)\n\n def input_max_players(self):\n try:\n max_players = int('0'+input(\"Combien de joueurs max à cette table?\\n\"))\n if max_players in range(2, 10):\n self.hand.table.max_players = max_players\n print(f\"Nombre Maximum de joueurs à la table: {self.hand.table.max_players}\\n\")\n else:\n raise ValueError\n except ValueError:\n print(\"Vous devez choisir un entier entre 2 et 10\")\n self.input_max_players()\n\n def input_tournament(self):\n table = self.hand.table\n try:\n choice = int(input(\"Voulez-vous changer le format du tournoi? \\n0:Non\\n1:Oui\\n\"))\n if choice not in [0, 1]:\n raise ValueError\n except ValueError:\n print(\"Vous devez choisir entre 0 pour Non et 1 pour Oui\")\n return self.input_tournament()\n if choice:\n name = input(\"Nom du tournoi\\n\")\n buyin = self.input_buyin()\n tournament = Tournament(name=name, buyin=buyin)\n hand.tournament = tournament\n print(f\"Touroi {table.tournament.name}\\nBuy-in {table.tournament.buyin}€\")\n\n def new_game(self):\n try:\n choice = int(input(\"Voulez-vous créér une nouvelle table ou changer l'actuelle? 
\\n0:Non\\n1:Oui\\n\"))\n if choice not in [0, 1]:\n raise ValueError\n except ValueError:\n print(\"Vous devez choisir entre 0 pour Non et 1 pour Oui\")\n return self.new_game()\n if choice:\n self.hand = None\n self.hand = HandHistory()\n self.hand.table = self.table\n self.input_max_players()\n self.hand.table.tournament = Tournament()\n self.input_tournament()\n self.input_level()\n self.input_players()\n\n def set_level(self, level: Level):\n self.hand.level = level\n self.level = level\n\n def input_buyin(self):\n try:\n buyin = float(input(\"Buy-in (en €)\\n\"))\n return buyin\n except ValueError:\n print(\"Vous devez insérer un Buy-In numérique\")\n return self.input_buyin()\n\n def choose_bb(self):\n bb_seat = int(input(\"Choisissez le siège de la BB\\n\"))\n bb_pl = self.hand.table.players.seat_dict[bb_seat]\n bb_pl.position = Position(\"BB\")\n self.hand.table.players.positions[str(bb_pl.position)] = bb_pl.seat\n self.hand.table.distribute_positions()\n self.hand.button = self.hand.table.players.positions[\"BTN\"].seat\n s_dict = self.hand.table.players.seat_dict\n print(f\"Résumé des positions: {[(s_dict[i].seat, s_dict[i].name, s_dict[i].position) for i in sorted(s_dict)]}\")\n\n def pregame_posting(self):\n for pl in self.hand.table.players:\n self.hand.table.post_ante(pl, self.hand.level.ante)\n print(f\"{pl.name} posts ante {self.hand.level.ante}\")\n if pl.position == Position(\"SB\"):\n self.hand.table.bet(pl, self.hand.level.sb)\n elif pl.position == Position(\"BB\"):\n self.hand.table.bet(pl, self.hand.level.bb)\n\n def input_hero_combo(self):\n try:\n combo = Combo(input(\"Entrez vos 2 cartes\\n\"))\n self.hand.table.draw_card(combo.first)\n self.hand.table.draw_card(combo.second)\n self.hero.combo = combo\n print(f\"On vous a distribué:{combo}\")\n except ValueError:\n print(\"Il faut 2 cartes style 'AdKd'\")\n return self.input_hero_combo()\n\n def play_hand(self):\n self.hand.table.find_active_players(self.hand.table.current_street)\n self.input_street_actions()\n print(self.hand.table.current_street.remaining_players)\n if len(self.hand.table.current_street.remaining_players) > 1:\n self.hand.table.make_flop()\n self.input_flop()\n if len(self.hand.table.current_street.remaining_players) > 1:\n self.hand.table.make_turn()\n self.input_turn()\n if len(self.hand.table.current_street.remaining_players) > 1:\n self.hand.table.make_river()\n self.input_river()\n if len(self.hand.table.current_street.remaining_players) > 1:\n self.hand.table.make_showdown()\n self.input_showdown()\n\n else:\n raise TableEvaluationError\n\n def input_action(self, pl: Player, street: Street):\n phr = \"\"\n to_call = pl.to_call(self.hand.table)\n odds = pl.pot_odds(self.hand.table)\n req_eq = pl.req_equity(self.hand.table)\n print(f\"{pl.name} ({pl.position}) plays with {pl.stack} chips. 
Pot: {self.hand.table.pot}, {to_call} to call, \"\n f\"Odds:{odds} vs 1.\\nRequired Equity: {req_eq}\\n\")\n ch = [\"Fold\", \"Check\", \"Call\", \"Bet\", \"Raise\"]\n if to_call != 0:\n ch.remove(\"Check\")\n ch.remove(\"Bet\")\n else:\n ch.remove(\"Call\")\n ch.remove(\"Raise\")\n if to_call >= pl.stack:\n ch.remove(\"Raise\")\n for x in ch:\n phr += f\"{ch.index(x)}:{x} \"\n try:\n choice = int(input(f\"{phr}\"))\n if choice not in range(len(ch)):\n raise ValueError\n else:\n action = None\n move = ch[choice]\n if move in cst.Action(\"fold\").value:\n action = Action(player=pl, move=cst.Action(\"fold\"), value=0.0)\n elif move in cst.Action(\"check\").value:\n action = Action(player=pl, move=cst.Action(\"check\"), value=0.0)\n elif move in cst.Action(\"call\").value:\n action = Action(player=pl, move=cst.Action(\"calls\"), value=to_call)\n elif move in cst.Action(\"bet\").value:\n amount = self.input_amount()\n action = Action(player=pl, move=cst.Action(\"bets\"), value=amount)\n elif move in cst.Action(\"raise\").value:\n amount = self.input_amount()\n action = Action(player=pl, move=cst.Action(\"raises\"), value=amount-to_call)\n print(action)\n self.hand.table.add_action(street=street, action=action)\n except ValueError:\n print(\"Choisissez une action entre 0 et 4\")\n self.input_action(pl, street)\n\n def input_sd_action(self, pl: Player):\n try:\n combo = Combo(input(f\"{pl.name} shows:\"))\n pl.shows(combo=combo)\n except ValueError:\n print(\"Il faut 2 cartes style 'AdKd'\")\n return self.input_sd_action(pl=pl)\n\n def input_street_actions(self):\n street = self.hand.table.current_street\n current_pl = street.next_player()\n street.current_pl = current_pl\n while (street.init_pl is not street.current_pl) and len(street.remaining_players) > 1:\n if street.current_pl.can_play(self.hand.table):\n self.input_action(pl=street.current_pl, street=street)\n else:\n print(f\"{street.current_pl.is_all_in}, {street.current_pl.played}, \"\n f\"{street.current_pl.to_call(self.hand.table)}\")\n next_pl = street.next_player()\n if street.current_pl == next_pl:\n break\n street.current_pl = next_pl\n print(f\"End of {street.name}\")\n\n def input_flop(self):\n fc1 = Card(input(\"First flop card?\"))\n fc2 = Card(input(\"Second flop card?\"))\n fc3 = Card(input(\"Third flop card?\"))\n self.table.draw_flop(fc1, fc2, fc3)\n self.input_street_actions()\n\n def input_turn(self):\n tc = Card(input(\"Turn card?\"))\n self.table.draw_turn(tc)\n self.input_street_actions()\n\n def input_river(self):\n rc = Card(input(\"River card?\"))\n self.table.draw_river(rc)\n self.input_street_actions()\n\n def input_showdown(self):\n pass\n\n def new_hand(self):\n self.new_game()\n self.choose_bb()\n self.input_hero_combo()\n self.pregame_posting()\n self.play_hand()\n","repo_name":"Manggy94/PokerBrain","sub_path":"Game/gameAPI.py","file_name":"gameAPI.py","file_ext":"py","file_size_in_byte":13075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36572032885","text":"from random import uniform\nimport numpy as np\n\nclass Neuron:\n \n def __init__(self, number_of_connections, neuron_index):\n self.neuron_index = neuron_index\n self.transfer_function_input = 0\n self.output_value = 0\n self.eta = 0.01\n self.alpha = 0.9\n self.gradient = 0\n self.output_weights = []\n for connection in range(number_of_connections + 1):\n self.output_weights.append({\"weight\":self.random_weight(), \"delta_weight\":0, \"gradient\":0})\n\n\n def feed_forward(self, 
previous_layer):\n summed_values = 0.0\n for neuron_index in range(len(previous_layer)):\n summed_values += previous_layer[neuron_index].output_value * \\\n previous_layer[neuron_index].output_weights[self.neuron_index]['weight']\n #print([neuron_index, previous_layer[neuron_index].output_weights[self.neuron_index]['weight'], previous_layer[neuron_index].output_value])\n\n self.transfer_function_input = summed_values\n self.output_value = self.transfer_function(summed_values)\n\n\n def sumDOW(self, next_layer):\n sum = 0.0\n for n in range(len(next_layer)):\n sum += self.output_weights[n]['weight'] * next_layer[n].gradient * Neuron.transfer_function_derivative(self.transfer_function_input)\n\n return sum \n\n def calc_output_gradients(self, target_val, prev_layer):\n delta = self.output_value - target_val\n self.gradient = delta * Neuron.transfer_function_derivative(self.transfer_function_input)\n for n in range(len(prev_layer)):\n prev_layer[n].output_weights[self.neuron_index]['gradient'] += self.gradient * prev_layer[n].output_value\n\n\n def calc_hidden_gradients(self, prev_layer, next_layer):\n self.gradient = self.sumDOW(next_layer)\n for n in range(len(prev_layer)):\n prev_layer[n].output_weights[self.neuron_index]['gradient'] = self.gradient * prev_layer[n].output_value\n\n\n def update_output_weights(self, prev_layer):\n for n in range(len(prev_layer)):\n neuron = prev_layer[n]\n old_delta_weight = neuron.output_weights[self.neuron_index]['delta_weight']\n new_delta_weight = -self.eta * neuron.output_weights[self.neuron_index]['gradient'] + self.alpha * old_delta_weight\n neuron.output_weights[self.neuron_index]['gradient'] = 0\n\n neuron.output_weights[self.neuron_index]['weight'] += new_delta_weight\n neuron.output_weights[self.neuron_index]['delta_weight'] = new_delta_weight\n\n\n \n @staticmethod\n def random_weight():\n return uniform(0, 1)\n\n @staticmethod\n def transfer_function(x):\n return 1/(1+np.exp(-x))\n\n @staticmethod\n def transfer_function_derivative(x):\n return Neuron.transfer_function(x)*(1.0 - Neuron.transfer_function(x))\n\n","repo_name":"matthew-butterfly19/diagnosis-of-the-disease","sub_path":"PythonApplication1/Neuron.py","file_name":"Neuron.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32579658704","text":"#\n# @lc app=leetcode.cn id=838 lang=python3\n#\n# [838] 推多米诺\n#\n\n\n# @lc code=start\nclass Solution:\n def pushDominoes(self, dominoes: str) -> str:\n n = len(dominoes)\n while True:\n new = list(dominoes)\n for i in range(n):\n if dominoes[i] == '.':\n if i > 0 and dominoes[i - 1] == 'R' and i < n - 1 and dominoes[i + 1] == 'L':\n continue\n elif i > 0 and dominoes[i - 1] == 'R':\n new[i] = 'R'\n elif i < n - 1 and dominoes[i + 1] == 'L':\n new[i] = 'L'\n new_dominoes = ''.join(new)\n if new_dominoes == dominoes:\n break\n dominoes = new_dominoes\n return dominoes\n\n\n# @lc code=end\n","repo_name":"Phil2ng/LeetCode","sub_path":"838.推多米诺.py","file_name":"838.推多米诺.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14395543664","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 2 10:54:03 2020\n\n@author: Kirty\n\"\"\"\n\n# Given an array of pairs, find all symmetric pairs in it\n# hashing of pair 1\n\narr=[[11,20],[30,40],[5,10],[40,30],[10,5]]\n\nd = {}\n\nfor i in range(len(arr)):\n first = arr[i][0] \n sec = arr[i][1]\n if sec 
in d.keys() and d[sec]==first:\n print((sec,first))\n else:\n d[first]=sec\n\n\n# time complexity - O(n)\n# aux space = O(n)\n\n \n \n","repo_name":"kirtymeena/DSA","sub_path":"3.hashing/hashing of pair 1.py","file_name":"hashing of pair 1.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9530412358","text":"'''\n一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为 “Start” )。\n\n机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为 “Finish” )。\n\n问总共有多少条不同的路径?\n'''\n\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n res = self.process(0, 0, m, n)\n print(res)\n res1 = self.process1(m - 1, n - 1)\n print(res1)\n\n cache = [[None] * (n + 1) for _ in range(m + 1)]\n # for row in cache:\n # print(row)\n res2 = self.process_cache(m - 1, n - 1, cache)\n print(res2)\n self.process_dp(m, n)\n\n def process_cache(self, m, n, cache):\n if cache[m][n] is None:\n if m == 0 or n == 0:\n cache[m][n] = 1\n else:\n cache[m][n] = self.process1(m - 1, n) + self.process1(m, n - 1)\n\n return cache[m][n]\n\n def process_dp(self, m, n):\n dp = [[None] * (n) for _ in range(m)]\n for i in range(m):\n for j in range(n):\n if j == 0 or i == 0:\n dp[i][j] = 1\n else:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n print(dp[m-1][n-1])\n\n def process(self, i, j, m, n):\n # 走到最右只能向下走,走到最下的时候只能向右走\n if i == m - 1 or j == n - 1:\n return 1\n return self.process(i + 1, j, m, n) + self.process(i, j + 1, m, n)\n\n def process1(self, m, n):\n # 最上行只能从左边过来,最左列只能从上面下来\n if m == 0 or n == 0:\n return 1\n # (i,j)格子可以从(i-1,j)和(i, j-1)格子过来\n return self.process1(m - 1, n) + self.process1(m, n - 1)\n\n\nif __name__ == '__main__':\n s = Solution()\n s.uniquePaths(3, 7)\n","repo_name":"imlifeilong/MyAlgorithm","sub_path":"leetcode/动态规划/62不同路径.py","file_name":"62不同路径.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7803661186","text":"from argparse import ArgumentParser\nimport time\nimport yarp\n\nimport matplotlib.pyplot as plt\n\nparser = ArgumentParser(description='Visualize step response and manually tune a PID.')\nparser.add_argument('--prefix', required=True, type=str, help='robot port prefix')\nparser.add_argument('--carrier', default='unix_stream', type=str, help='carrier used for reading robot state')\nparser.add_argument('--joint', required=True, type=int, help='joint id')\nparser.add_argument('--type', default='pos', type=str, help='PID type (pos, vel)')\nparser.add_argument('--kp', type=float, help='proportional gain')\nparser.add_argument('--ki', type=float, help='integral gain')\nparser.add_argument('--kd', type=float, help='derivative gain')\nparser.add_argument('--kff', type=float, help='feed-forward gain')\nparser.add_argument('--initial', type=float, help='joint value to be achieved on start [deg]')\nparser.add_argument('--step', type=float, default=1.0, help='step size [deg]')\nparser.add_argument('--duration', type=float, default=5.0, help='command duration [s]')\nparser.add_argument('--sampling', type=float, default=1e-3, help='sampling period [s]')\nparser.add_argument('--period', type=float, help='command period [s] (enables staircase reference)')\nparser.add_argument('--store', action='store_true', help='preserve new PID coefficients')\nargs = parser.parse_args()\n\nif args.duration <= 0.0:\n raise ValueError('Duration must be positive')\n\nif args.sampling <= 0.0:\n raise ValueError('Sampling period must be 
positive')\n\nif args.period is not None and args.period <= 0.0:\n raise ValueError('Command period must be positive')\n\nyarp.Network.init()\n\nif not yarp.Network.checkNetwork():\n raise RuntimeError('YARP server not running')\n\noptions = yarp.Property()\noptions.put('device', 'remote_controlboard')\noptions.put('remote', args.prefix)\noptions.put('local', '/pidTuning' + args.prefix)\noptions.put('carrier', args.carrier)\n\ndd = yarp.PolyDriver(options)\n\nif not dd.isValid():\n raise RuntimeError('Unable to open device')\n\ntime.sleep(0.5) # wait for first data to arrive\n\nlimits = dd.viewIControlLimits()\nmode = dd.viewIControlMode()\nenc = dd.viewIEncoders()\npid = dd.viewIPidControl()\npos = dd.viewIPositionControl()\nposd = dd.viewIPositionDirect()\n\nif not 0 <= args.joint < enc.getAxes():\n raise ValueError('Joint id %d out of range' % args.joint)\n\nmin = yarp.DVector(1)\nmax = yarp.DVector(1)\n\nif not limits.getLimits(args.joint, min, max):\n raise RuntimeError('Unable to get joint limits')\n\nprint('Joint limits: %f [deg], %f [deg]' % (min[0], max[0]))\n\npidType = yarp.encode(args.type)\nv_pid = yarp.PidVector(1)\n\nif not pid.getPid(pidType, args.joint, v_pid):\n raise RuntimeError('Unable to get PID')\n\nprint('Current PID: kp=%f, ki=%f, kd=%f, kff=%f' % (v_pid[0].kp, v_pid[0].ki, v_pid[0].kd, v_pid[0].kff))\n\nif args.initial is not None:\n print('Moving joint q%d to %f [deg]' % (args.joint, args.initial))\n\n if not mode.setControlMode(args.joint, yarp.VOCAB_CM_POSITION):\n raise RuntimeError('Unable to set POS mode')\n\n if not pos.positionMove(args.joint, args.initial):\n raise RuntimeError('Unable to move joint')\n\n while True:\n time.sleep(0.1)\n if pos.checkMotionDone(): break\n\n time.sleep(1.0) # give it some time to settle\n\ninitial = enc.getEncoder(args.joint)\nprint('Initial position: %f [deg]' % initial)\n\nif not mode.setControlMode(args.joint, yarp.VOCAB_CM_POSITION_DIRECT):\n raise RuntimeError('Unable to set POSD mode')\n\nusing_new_pid = False\n\nif args.kp is not None or args.ki is not None or args.kd is not None or args.kff is not None:\n new_pid = yarp.Pid(v_pid[0].kp, v_pid[0].kd, v_pid[0].ki,\n v_pid[0].max_int, v_pid[0].scale, v_pid[0].max_output,\n v_pid[0].stiction_up_val, v_pid[0].stiction_down_val, v_pid[0].kff)\n\n if args.kp is not None: new_pid.kp = args.kp\n if args.ki is not None: new_pid.ki = args.ki\n if args.kd is not None: new_pid.kd = args.kd\n if args.kff is not None: new_pid.kff = args.kff\n\n if not pid.setPid(pidType, args.joint, new_pid):\n raise RuntimeError('Unable to set new PID')\n\n print('New PID: kp=%f, ki=%f, kd=%f, kff=%f' % (new_pid.kp, new_pid.ki, new_pid.kd, new_pid.kff))\n using_new_pid = True\n\nprint('Starting... 
step=%f [deg], duration=%f [s], sampling=%f [ms]' % (args.step, args.duration, args.sampling * 1000))\n\nt = []\ny_enc = []\ny_ref = []\nstart = time.time()\n\ndef doStep(target, samples):\n reference = abs(target - initial)\n\n for i in range(samples):\n if not posd.setPosition(args.joint, target):\n raise RuntimeError('Unable to move joint')\n\n now = time.time()\n t.append(now - start)\n y_enc.append(abs(enc.getEncoder(args.joint) - initial))\n y_ref.append(reference)\n\n # https://stackoverflow.com/a/25251804/10404307\n time.sleep(args.sampling - ((now - start) % args.sampling))\n\nif args.period is not None:\n target = initial\n samples = int(args.period / args.sampling)\n n_steps = int(args.duration / args.period)\n\n print('Staircase reference: period=%f [s], n_steps=%d, distance=%f [deg], speed=%f [deg/s]' % (\n args.period, n_steps, abs(n_steps * args.step), abs(args.step) / args.period\n ))\n\n for i in range(n_steps):\n target += args.step\n doStep(target, samples)\nelse:\n target = initial + args.step\n samples = int(args.duration / args.sampling)\n doStep(target, samples)\n\nprint('Done')\nplt.plot(t, y_enc, 'b')\nplt.plot(t, y_ref, 'r')\nplt.xlabel('time (s)')\nplt.ylabel('position (deg)')\nplt.title('Step response')\n\nplt.show()\n\nif using_new_pid:\n if args.store:\n print('Preserving new PID coefficients')\n else:\n if not pid.setPid(pidType, args.joint, v_pid[0]):\n raise RuntimeError('Unable to restore old PID')\n\n print('Restored old PID')\n","repo_name":"roboticslab-uc3m/tools","sub_path":"programs/pidTuning.py","file_name":"pidTuning.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29765251503","text":"### Cadastro ###\r\nimport time #importei o time para ajudar na barra de carregamento\r\nimport random # importei o random para gerar um numero aleatorio\r\nfrom produtos import * # importei o modulo produtos\r\n#print(opcao)\r\nglobal cadastro # defini o modulo cadastro como sendo uma variavel global para nao da erro nas demais funcoes\r\n#### Listas ####\r\ncadastro = [] # lista de cadastro\r\ncod_cachorro_lista = [] # lista dos codigos dos cachoros\r\nverifica_aux = [] # lista que verifica o codigo inserido\r\ncarrinho_lista=[] # lista dos produtos comprados\r\n#####################\r\nglobal opcao # defini opcao como sendo global para ter acesso a todo o programa\r\nverifica = 0 # variaveis definidas\r\ncod_cachorro = 0 # variaveis definidas\r\nopcao = int(input(\"Digite sua Opção: \")) # pergunta a opcao do usuario.\r\ncodigo = 49946325 # Minha senha de ADM , para ter acesso a funcoes do programa\r\nwhile opcao != 5: # criei um loop que enquanto a opcao for diferente de 5\r\n if opcao >=6: # agora se a opao or maior ou igual a 6 ele imprime o erro\r\n print(\"Opcão invalida!\")\r\n opcao = int(input(\"Digite sua Opcão: \")) # pergunta novamente a opcao desejada\r\n else:\r\n def cadastros(opcao): # Funcao principal do programa cria cadastros e os armazena\r\n global cod_cachorro # variavel global de codigo_ cachorro\r\n # parte de cadastro\r\n if opcao == 1: # se a opcao do usuario for igual a 1 entao ele cadastra\r\n print(\"- Cadastro -\") #imprime\r\n nome_cliente = input(\"Digite seu nome: \") # pega o nome do cliente\r\n cpf_cliente = input(\"Digite seu CPF: \") # pega o cpf do cliente\r\n end = input(\"Digite seu endereço: \") # pega o endereço do cliente\r\n nome_cachorro = input(\"Digite o nome do cachorro: \") # nome cachorro\r\n cod_cachorro = 
random.randint(1,500) # gera um codigo aleatorio para os cachorros\r\n cadastro.append([nome_cliente,cpf_cliente,end,nome_cachorro,cod_cachorro]) #criei um vetor que salva tudo na lista cadastro\r\n cod_cachorro_lista.append(cod_cachorro) # logo em seguida armezados\r\n for i in range(40): # criei um loop para fazer a barra de carregamento\r\n time.sleep(0.1) # usei o sleep() para da um delay\r\n print(\"#\",end= '')\r\n print() \r\n print(\"Cadastrado!\")\r\n print()\r\n print(\"O codigo de\",nome_cachorro,\"é\",cod_cachorro)\r\n return cadastro,cod_cachorro # em seguida retorna a lista cadastro e o codigo do cachrro\r\n \r\n\r\n\r\n def listar(opcao,codigo): #FUNCAO LISTAR PARA LISTAR OS PRODUTOS\r\n global cont #variavel cont para gerar erro 2x\r\n cont = 0\r\n if opcao == 2: #se a opcao for 2 entao entra na parte do ADM\r\n teste_cod = int(input(\"SÓ ADM, CODIGO: \")) \r\n if teste_cod == codigo: # se o codigo for igual ao codigo_pass entao tem acesso\r\n print(\"Acesso autorizado!\")\r\n for i in cadastro: # loop proucura dentro de cadastro cada elemento informando o conteudo\r\n print(\"Nome:\",i[0],\"CPF\",i[1],\"ENDEREÇO:\",i[2],\"Nome Cachorro:\",i[3],\"CODIGO CACHORRO:\",i[4])\r\n else: # caso contrario coddigo_pass esta errado o cont imprime 2x um erro e tira da funcao\r\n print(\"Codigo errado\")\r\n teste_cod = int(input(\"SÓ ADM, SENHA: \"))\r\n cont += 1 \r\n if cont == 3: # preciso ajeitar o cont para resolver o problema do erro de password\r\n return cont # retorna cont\r\n\r\n def search(opcao,cadastro): #funcao que procura os clientes cadastrados\r\n aux = ''\r\n #cont1 = True\r\n sair = True \r\n \r\n if opcao == 3: # 3> entao pergunta nome e cpf\r\n nome_search = ''\r\n cpf_search = ''\r\n nome_search = input(\"Digite o nome: \")\r\n cpf_search = input(\"CPF: \")\r\n while sair:\r\n for disc in cadastro:\r\n if nome_search == disc[0] and cpf_search == disc[1]: # se for True essa operacao\r\n aux = 'Encontrado' # cliente encontrado / senao ele nao imprime nada e sai\r\n \r\n sair = input(\"SAIDA [s]: \").upper()\r\n if sair == \"S\":\r\n sair = False\r\n \r\n return print(aux)\r\n \r\n \r\n \r\n def produtos(opcao): #Funcao de produtos parametro opcao == 4\r\n global carrinho # global carrinho\r\n carrinho = 0\r\n auxiliar = 0\r\n preco_total = 0\r\n total = 0\r\n res= True\r\n if opcao == 4:\r\n print(\"Para ter acesso a loja insira o codigo do cachorro!! 
\")\r\n time.sleep(0.2)\r\n global verifica\r\n verifica = int(input(\"Digite o código do cachorro: \"))\r\n verifica_aux.append(verifica)\r\n if verifica in cod_cachorro_lista: # se o codigo do cachorro estiver inserido dentro da lista entao ele tem acesso\r\n lista_produtos() # layout dos produtos\r\n while res == True:\r\n print()\r\n print(\"--------------------------------------------------------\")\r\n escolha = int(input(\"Digite o numero do produto que deseja comprar: \"))\r\n print(\"--------------------------------------------------------\")\r\n if escolha == 1:\r\n print(\"O preço do produto é de:\",preco_tapete,\"Reais!\")\r\n auxiliar = preco_tapete\r\n \r\n elif escolha == 2:\r\n print(\"O preço do produto é de\",preco_racao,\"Reais\")\r\n auxiliar = preco_racao\r\n \r\n elif escolha == 3:\r\n print(\"O preço do produto é de\",preco_pingente,\"Reais\")\r\n auxiliar = preco_pingente\r\n elif escolha == 4:\r\n print(\"O preço do produto é de\",preco_coleira,\"Reais\")\r\n auxiliar = preco_coleira\r\n elif escolha == 5:\r\n print(\"O preço do produto é de\",preco_guia,\"Reais\")\r\n auxiliar = preco_guia\r\n elif escolha == 6:\r\n print(\"O preço do produto é de\",preco_conjunto,\"Reais\")\r\n auxiliar = preco_conjunto\r\n elif escolha == 7:\r\n print(\"O preço do produto é de\",preco_bebedouro,\"Reais\")\r\n auxiliar = preco_bebedouro\r\n elif escolha == 8:\r\n print(\"O preço do produto é de\",preco_comedouro,\"Reais\")\r\n auxiliar = preco_comedouro\r\n \r\n \r\n res = input(\"FINALIZAR COMPRA [S/N]: \").upper()\r\n carrinho += auxiliar # variavel que acumula o total de compras realizadas\r\n #\r\n print()\r\n print(\"O valor total da compra é de\",carrinho,\"Reais!\")\r\n print()\r\n if res == \"S\":\r\n total = carrinho # entao total recebe a variavel que acumula\r\n break\r\n else:\r\n res = True\r\n carrinho_lista.append(total) #a lista adiciona todos os totais\r\n elif cod_cachorro == 0:\r\n print(\"é necessario fazer o cadastro para entrar no programa de produtos!\")\r\n \r\n else:\r\n print(\"Codigo cachorro invalido!\")\r\n \r\n return verifica,carrinho_lista #retorna verifia e a lista de compras\r\n \r\n def arquivo(cadastro,produtos):\r\n \"\"\"\r\n Essa Funcao joga todos os cadastros dentro de um arquivo e os amazena\r\n \"\"\"\r\n salva = 0\r\n cont = 0\r\n arquivo = open(\"Cadastros_salvos.txt\",\"w\")\r\n print(\"-------------- Cadastros Salvos --------------- \",file = arquivo)\r\n for k in cadastro:\r\n nome = k[0]\r\n cpf = k[1]\r\n endereço = k[2]\r\n nome_cachorro = k[3]\r\n codigo_cachorro = k[4]\r\n print(\"Nome:\",nome,file=arquivo)\r\n print(\"CPF:\",cpf,file=arquivo)\r\n print(\"ENDEREÇO:\",endereço,file=arquivo)\r\n print(\"Nome do Cachorro:\",nome_cachorro,file=arquivo)\r\n print(\"Codigo_ cachorro:\",codigo_cachorro,file=arquivo)\r\n print(\"\\n\",file=arquivo)\r\n if codigo_cachorro in verifica_aux:\r\n #print(carrinho_lista)\r\n for i in carrinho_lista:\r\n salva = i\r\n cont += 1\r\n print(\"Compra realizada do cadastro:\",cont,\"total\",salva,\"Reais\",file=arquivo)\r\n #print(carrinho_lista)\r\n \r\n print(\"------------------------------------------------\",file=arquivo)\r\n arquivo.close()\r\n \r\n \r\n \r\n \r\n \r\n\r\n cadastros(opcao)\r\n listar(opcao,codigo)\r\n search(opcao,cadastro)\r\n produtos(opcao)\r\n arquivo(cadastro,produtos)\r\n encerrar = input(\"Deseja sair [S/N]: \").upper()\r\n if encerrar == \"S\":\r\n print(\"Voce saiu do Programa!\")\r\n break\r\n else:\r\n opcao = int(input(\"Digite sua opcao: \"))\r\n 
\r\n","repo_name":"lsbloo/Gestao-Comercial","sub_path":"petshop/cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":10187,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29928268097","text":"\ndef missing_letter(lst):\n \n a = 'a b c d e f g h i j k l m n o p q r s t u v w x y z' + ' a b c d e f g h i j k l m n o p q r s t u v w x y z'.upper()\n a = a.split()\n \n indexes = []\n \n for l8r in lst:\n for n in range(0, len(a)):\n tl8r = a[n]\n if tl8r == l8r:\n indexes.append(n)\n \n indexes = sorted(indexes)\n \n wanted = []\n \n for n in range(indexes[0], indexes[-1] + 1):\n wanted.append(a[n])\n \n for item in wanted:\n if item not in lst:\n return item\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"CqNoAPcQrckobTacs_3.py","file_name":"CqNoAPcQrckobTacs_3.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28577011254","text":"from dataclasses import dataclass\nimport numpy as np\nimport re\nimport sys, os, subprocess\nfrom pathlib import Path\nfrom datetime import datetime\nimport re\nimport torch\n\nfrom tiramisu_programs.schedule_utils import TimeOutException\n\n\nclass CPP_File(object):\n\n @classmethod\n def compile_and_run_tiramisu_code(cls,\n config,\n file_path,\n log_message=\"No message\"):\n \"\"\"Compiles and runs a C++ file.\n\n Args:\n config (RLAutoSchedulerConfig): The experiment config.\n file_path (str): The path to the C++ file to compile.\n log_message (str, optional): _description_. Defaults to \"No message\".\n\n Returns:\n bool: Whether or not the compilation and running was successful.\n \"\"\"\n # print(\"inside compile and run\")\n os.environ[\"FUNC_DIR\"] = (\"/\".join(Path(file_path).parts[:-1]) if len(\n Path(file_path).parts) > 1 else \".\") + \"/\"\n os.environ[\"FILE_PATH\"] = file_path\n\n failed = cls.launch_cmd(config.tiramisu.compile_tiramisu_cmd,\n file_path)\n if failed:\n print(f\"Error occured while compiling {file_path}\")\n with open(file_path) as file:\n print(file.read(), file=sys.stderr, flush=True)\n return False\n else:\n failed = cls.launch_cmd(config.tiramisu.run_tiramisu_cmd,\n file_path)\n if failed:\n print(f\"Error occured while running {file_path}\")\n return False\n return True\n\n @classmethod\n def launch_cmd(cls,\n step_cmd,\n file_path,\n cmd_type=None,\n nb_executions=None,\n initial_exec_time=None):\n \"\"\"Execute a command on the shell.\n\n Args:\n step_cmd (str): The command to execute.\n file_path (str): The file besides which the error message is output in case of error.\n cmd_type (str, optional): Can take three values: \"initial_exec\" for commands used to get intital execution time,\"sched_eval\" for commands used to evauate a schedule, and None for everything else. Defaults to None.\n nb_executions (int, optional): The number of times to execyte the shell command. Defaults to None.\n initial_exec_time (float, optional): The program intial execution time. It is used with the \"sched_eval\" option . 
Defaults to None.\n\n Raises:\n TimeOutException: The shell command exceeded the timeout.\n\n Returns:\n bool: Whether or not the command failed.\n \"\"\"\n failed = False\n try:\n if cmd_type == \"initial_exec\":\n out = subprocess.run(\n step_cmd,\n check=True,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n timeout=15 * nb_executions,\n )\n # print(\"after running initial exec\")\n elif cmd_type == \"sched_eval\":\n out = subprocess.run(\n step_cmd,\n check=True,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n timeout=15 + 10 * nb_executions * initial_exec_time / 1000,\n )\n # print(\"after running sched eval\")\n\n else:\n out = subprocess.run(\n step_cmd,\n check=True,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n except subprocess.TimeoutExpired:\n raise TimeOutException\n\n except Exception as e:\n print(\n f\"\\n# {str(datetime.now())} ---> Error running {step_cmd} \\n\" +\n e.stderr.decode(\"UTF-8\"),\n file=sys.stderr,\n flush=True,\n )\n out = e\n failed = True\n else: # no exception rised\n if \"error\" in out.stderr.decode(\"UTF-8\"):\n print(\n f\"\\n# {str(datetime.now())} ---> Error running {step_cmd} \\n\"\n + out.stderr.decode(\"UTF-8\"),\n file=sys.stderr,\n flush=True,\n )\n failed = True\n if failed:\n func_folder = (\"/\".join(Path(file_path).parts[:-1])\n if len(Path(file_path).parts) > 1 else \".\") + \"/\"\n with open(func_folder + \"error.txt\", \"a\") as f:\n f.write(\"\\nError running \" + step_cmd +\n \"\\n---------------------------\\n\" +\n out.stderr.decode(\"UTF-8\") + \"\\n\")\n return failed\n\n @classmethod\n def get_cpp_file(cls, Dataset_path, func_name):\n \"\"\"Backup the dataset generator files into the folder Dataset_copies, stored locally.\n\n Args:\n Dataset_path (str): The path to the dataset.\n func_name (str): The function to copy\n\n Returns:\n str: The new copied function path.\n \"\"\"\n file_name = func_name + \"_generator.cpp\"\n original_path = Dataset_path + \"/\" + func_name + \"/\" + file_name\n dc_path = Path(Dataset_path).parts[:-1]\n target_path = \"{}/Dataset_copies/{}\".format(\".\", func_name)\n\n if not os.path.isdir(\"./Dataset_copies/\"):\n os.mkdir(\"./Dataset_copies/\")\n\n if os.path.isdir(target_path):\n os.system(\"rm -r {}\".format(target_path))\n # print(\"directory removed\")\n\n os.mkdir(target_path)\n os.system(\"cp -r {} {}\".format(original_path, target_path))\n return target_path + \"/\" + file_name\n \n @classmethod\n def clean_cpp_file(cls, Dataset_path, func_name):\n \"\"\"Backup the dataset generator files into the folder Dataset_copies, stored locally.\n\n Args:\n Dataset_path (str): The path to the dataset.\n func_name (str): The function to copy\n\n Returns:\n str: The new copied function path.\n \"\"\"\n target_path = \"{}/Dataset_copies/{}\".format(\".\", func_name)\n\n if os.path.isdir(\"./Dataset_copies/\") and os.path.isdir(target_path):\n os.system(\"rm -r {}\".format(target_path))\n return True\n else:\n return False","repo_name":"Tiramisu-Compiler/tiramisu","sub_path":"utils/rl_autoscheduler/tiramisu_programs/cpp_file.py","file_name":"cpp_file.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","stars":876,"dataset":"github-code","pt":"32"} +{"seq_id":"40287686748","text":"# greedy 방법을 이용하는 문제\n# 숫자 자리수가 최대 1,000,000자리이기 때문에 combination을 사용하면 초과가 될 것 같음\n# 제거만 가능하지 순서를 바꾸는건 아니기 때문에 stack을 이용하여 큰수를 넣고 자리수도 맞춰주는 방법 이용!\n# 간단하지만 생각하기까지 시간이 꽤 걸리는구나..\ndef solution(number, k):\n answer = 
[]\n \n for i in number :\n while k > 0 and answer and answer[-1] < i :\n answer.pop()\n k -= 1\n answer.append(i)\n \n answer = ''.join(answer[:len(answer) - k]) # 제거 횟수를 다 사용하지 않았을때 남은 횟수만큼 리스트 뒷부분을 잘라 주기\n\n\n return answer","repo_name":"simsang1l/Programmers","sub_path":"python/level2/큰_수_만들기.py","file_name":"큰_수_만들기.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38883826947","text":"import pygame\nfrom game.definitions import WHITE\nfrom .utils import draw_text\nimport os\nimport json\nfrom datetime import datetime\nclass Benchmark:\n def __init__(self,clock):\n self.fps = 0\n self.time = 0\n self.clock = clock\n self.list = [100,200,300,400,500,600,700,800,900,1000]\n \n self.data = {\n \n }\n try:\n with open('data/benchmark.txt') as bench_file:\n self.data = json.load(bench_file)\n except:\n print(\"No benchmark files found\") \n\n\n def update(self):\n self.time = round(pygame.time.get_ticks() * 0.1)\n if(self.time in self.list ):\n self.fps += round(self.clock.get_fps())\n self.list.remove(self.time)\n if self.time == 1000: \n now = datetime.now().strftime(\"%d/%m %H:%M:%S\")\n self.data[\" \" + os.getlogin() + \" \" + now] = self.fps/10 \n with open('data/benchmark.txt','w') as bench_file:\n json.dump(self.data,bench_file)\n\n def draw(self,screen):\n draw_text(screen,'Benchmark {}'.format(self.data).replace(\"{\",\"\").replace(\"}\",\"\"),20,(255,100,0),(10,100))\n","repo_name":"Ilirys/AgeOfCheapEmpire","sub_path":"game/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28519439773","text":"#prosedural\n\nstart = 96\nkotak=start **2\nincrement = kotak + 1\ncube = increment ** 3\ndecrement = cube - 1\nresult = print(decrement)\n\n#fungsi\ndef call(x,f):\n return f(x)\n\nsquare = lambda x : x*x\ninc = lambda x : x+1\nkubus = lambda x : x*x*x\ndec = lambda x: x - 1\nfuncs=[square,inc,kubus,dec]\n\nfrom functools import reduce\nprint(reduce(call,funcs,96))","repo_name":"julius-risky/praxis-academy","sub_path":"novice/02-01/latihan/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30756605273","text":"class Solution:\n # Approach: Sliding Window + Hashmap, Complexity: O(n), O(n)\n def lengthOfLongestSubstring(self, s: str) -> int:\n # Trivial case with no window possible.\n if not s:\n return 0\n\n # Initialize window with first element.\n seen = {s[0]}\n l = 0\n res = 1\n\n for r, ch in enumerate(s):\n if ch in seen:\n # Pop elements from the window until the duplicate is found.\n while l < r and s[l] != ch:\n seen.remove(s[l])\n l += 1\n # Remove the duplicate. Its later occurence is in the window\n # so no need to remove it from the set.\n if l != r:\n l += 1\n else:\n # Add unique element to window and check if the current\n # substring is longer than the previous longest.\n seen.add(ch)\n res = max(res, r-l+1)\n return res\n","repo_name":"ihadouken/lc","sub_path":"3.longest-substring-without-repeating-characters.py","file_name":"3.longest-substring-without-repeating-characters.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25638479550","text":"#! 
/usr/bin/python3\n\nimport sys\nimport argparse\nfrom parse_fasta import parse_fasta\n\n\nif __name__ == \"__main__\":\n if len(sys.argv[1:]) == 0:\n sys.argv.append(\"-h\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"alignment\", help=\"FASTA formatted alignment\")\n parser.add_argument(\"reference\", help=\"Sequence to extract states\")\n args = parser.parse_args()\n\n seqs = dict([x for x in parse_fasta(args.alignment)])\n\n try:\n for s in seqs[args.reference]:\n print(s)\n except KeyError:\n print(\"sequence is not in alignment\")\n sys.exit()","repo_name":"NatJWalker-Hale/alignment_and_tree_tools","sub_path":"src/print_states_in_col.py","file_name":"print_states_in_col.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39608935366","text":"#!/usr/bin/env python\n\nimport datetime\nimport os.path as osp\n\nimport torch\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom dorec.datasets import build_dataset\nfrom dorec.core import Config\nfrom dorec.core.utils import save_yaml, makedirs, get_logger, AverageMeter\n\nfrom .utils import worker_init_fn, parse_device, DataParallel\nlogger = get_logger(modname=__name__)\n\n\nclass RunnerBase(object):\n \"\"\"Base class for running train, test or inference\n Args:\n config (dorec.utils.Config): loaded config from /configs/config.yml\n is_test (bool): indicates whether do test\n \"\"\"\n\n def __init__(self, config, is_test):\n assert isinstance(config, Config)\n assert isinstance(is_test, bool)\n self._config = config\n self._is_test = is_test\n\n # Reset parameters\n self._init_parameters()\n self._reset_phase_parameters()\n\n # Set working directory\n self._set_work_dir()\n\n # Summary writer\n self.writer = SummaryWriter(log_dir=self.work_dir)\n\n # Display and save parameters\n logger.info(self.config.pretty_text)\n cfg_save_path = osp.join(self.work_dir, self.config_name + \".yml\")\n save_yaml(cfg_save_path, self.config.to_dict(), mode=\"w\")\n logger.info(\"Config file is saved to: {}\".format(cfg_save_path))\n\n # Global epoch\n self.epoch = 0\n self.viz_cnt = 0\n\n def _init_parameters(self):\n \"\"\"Initialize basic parameters from config\"\"\"\n self._config_name = self.config.name\n self._checkpoint = self.config.checkpoint\n self._work_dir = self.config.work_dir\n\n self._task = self.config.task\n self._parameters_cfg = self.config.parameters\n self._model_cfg = self.config.model\n self._dataset_cfg = self.config.dataset\n self._optimizer_cfg = self.config.optimizer\n self._scheduler_cfg = self.config.scheduler\n self._loss_cfg = self.config.loss\n self._evaluation_cfg = self.config.evaluation\n\n def _set_work_dir(self):\n date_info = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self._work_dir = osp.join(\n self.config.work_dir, self.config_name + \"_\" + date_info)\n self._checkpoint_dir = osp.join(self.work_dir, \"checkpoints\")\n self._vizdir = osp.join(self.work_dir, \"viz\")\n\n makedirs(self.work_dir, exist_ok=True)\n makedirs(self.checkpoint_dir, exist_ok=True)\n makedirs(self.vizdir, exist_ok=True)\n logger.info(\"Working directory is: {}\".format(self.work_dir))\n\n def _reset_phase_parameters(self):\n \"\"\"if change phase, reset parameters depends on phase\"\"\"\n if self.is_test:\n phase = \"test\"\n logger.info(\"****Test mode****\")\n self._batch_size = int(self._dataset_cfg.test.get(\"batch_size\", 1))\n self._device = 
parse_device(\n self._parameters_cfg.test.device,\n self._parameters_cfg.test.gpu_ids)\n self._max_epoch = int(\n self._parameters_cfg.test.get(\"max_epoch\", 1))\n else:\n phase = \"train\"\n logger.info(\"****Train mode****\")\n self._batch_size = int(\n self._dataset_cfg.train.get(\"batch_size\", 1))\n self._device = parse_device(\n self._parameters_cfg.train.device,\n self._parameters_cfg.train.gpu_ids)\n self._max_epoch = int(\n self._parameters_cfg.train.get(\"max_epoch\", 1))\n\n self.epoch = 0\n self.viz_cnt = 0\n\n logger.info(\"Parameters is reseted for: {}\".format(phase))\n\n @property\n def config(self):\n return self._config\n\n @property\n def config_name(self):\n return self._config_name\n\n @property\n def is_test(self):\n return self._is_test\n\n @is_test.setter\n def is_test(self, b):\n assert isinstance(b, bool), \"expected bool, but got {}\".format(type(b))\n self._is_test = b\n phase = \"test\" if b else \"train\"\n logger.info(\"Switched phase to: {}\".format(phase))\n self._reset_phase_parameters()\n\n @property\n def task(self):\n return self._task\n\n @property\n def model_cfg(self):\n return self._model_cfg\n\n @property\n def dataset_cfg(self):\n return self._dataset_cfg\n\n @property\n def parameters_cfg(self):\n return self._parameters\n\n @property\n def optimizer_cfg(self):\n return self._optimizer_cfg\n\n @property\n def scheduler_cfg(self):\n return self._scheduler_cfg\n\n @property\n def loss_cfg(self):\n return self._loss_cfg\n\n @property\n def evaluation_cfg(self):\n return self._evaluation_cfg\n\n @property\n def input_type(self):\n return self._dataset_cfg.input_type\n\n @property\n def use_dims(self):\n return self._dataset_cfg.use_dims\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def max_epoch(self):\n return self._max_epoch\n\n @property\n def device(self):\n return self._device\n\n @device.setter\n def device(self, d):\n self._device = d\n\n @property\n def work_dir(self):\n return self._work_dir\n\n @property\n def checkpoint(self):\n return self._checkpoint\n\n @checkpoint.setter\n def checkpoint(self, p):\n assert isinstance(p, str), \"expected str, but got {}\".format(type(p))\n self._checkpoint = p\n\n @property\n def checkpoint_dir(self):\n return self._checkpoint_dir\n\n @property\n def vizdir(self):\n return self._vizdir\n\n def train(self):\n \"\"\"Execute train\"\"\"\n raise NotImplementedError\n\n def test(self):\n \"\"\"Exectute test\"\"\"\n raise NotImplementedError\n\n def _evaluate(self, model, dataloader, avg_meters, optimizer=None):\n \"\"\"Execute evaluation for validation or test\"\"\"\n raise NotImplementedError\n\n def inference(self, root):\n \"\"\"Execute inference\"\"\"\n raise NotImplementedError\n\n def visualize(self, inputs, outputs, max_try=1, vis_random=True):\n \"\"\"visualize model outputs result for inputs image\"\"\"\n raise NotImplementedError\n\n def show_data(self, max_try=10):\n \"\"\"show reuslt of data augmentation\"\"\"\n raise NotImplementedError\n\n def load_data(self):\n \"\"\"Load dataset\n Returns:\n - train_dataloader, val_dataloader (tuple[DataLoader]): if is_test=False\n - test_dataloader (torch.utils.data.DataLoader): if is_test=True\n \"\"\"\n dataset_cfg = self.dataset_cfg.copy()\n train_dataset_cfg = dataset_cfg.pop(\"train\")\n test_dataset_cfg = dataset_cfg.pop(\"test\")\n\n if self.dataset_cfg.get(\"val\") is not None:\n val_dataset_cfg = dataset_cfg.pop(\"val\")\n val_dataset_cfg.update(dataset_cfg)\n val_dataset_cfg.task = self.task\n else:\n 
val_dataset_cfg = None\n\n train_dataset_cfg.update(dataset_cfg)\n train_dataset_cfg.task = self.task\n test_dataset_cfg.update(dataset_cfg)\n test_dataset_cfg.task = self.task\n\n if self.is_test:\n test_dataset = build_dataset(test_dataset_cfg)\n logger.info(\"Loaded dataset Test: {}\".format(len(test_dataset)))\n\n assert len(test_dataset) >= self.batch_size, \\\n \"batch size must be smaller than total number of data\"\n\n test_dataloader = DataLoader(\n test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=8,\n pin_memory=True,\n drop_last=True,\n worker_init_fn=worker_init_fn\n )\n return test_dataloader\n\n if val_dataset_cfg is not None:\n train_dataset = build_dataset(train_dataset_cfg)\n val_dataset = build_dataset(val_dataset_cfg)\n else:\n # Split train : val = 8 : 2\n dataset = build_dataset(train_dataset_cfg)\n num_data = len(dataset)\n train_size = int(num_data * 0.8)\n val_size = num_data - train_size\n train_dataset, val_dataset = random_split(\n dataset, [train_size, val_size])\n\n if len(train_dataset) <= self.batch_size:\n raise ValueError(\n \"batch size must be smaller than total number of data\")\n if len(val_dataset) <= (self.batch_size + 3) // 4:\n raise ValueError(\n \"batch size must be smaller than total number of data\")\n\n logger.info(\"Loaded dataset Train: {}\".format(len(train_dataset)))\n logger.info(\"Loaded dataset Validation: {}\".format(len(val_dataset)))\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=8,\n pin_memory=True,\n drop_last=True,\n worker_init_fn=worker_init_fn\n )\n\n val_dataloader = DataLoader(\n val_dataset,\n batch_size=(self.batch_size + 3) // 4,\n shuffle=True,\n num_workers=8,\n pin_memory=True,\n drop_last=True,\n worker_init_fn=worker_init_fn\n )\n return train_dataloader, val_dataloader\n\n def load_checkpoint(self, model, optimizer=None, checkpoint=None):\n \"\"\"Load checkpoint from chekpoint path\n if checkpoint_path is None, only load model on GPU\n Args:\n model (torch.nn.Module)\n optimizer (torch.optim.Optimizer, optional)\n chekpoint (str, optional)\n init (bool, optional)\n Returns:\n model (torch.nn.Module)\n optimizer (Optional[torch.optim.Optimizer])\n \"\"\"\n # Load checkpoint\n if checkpoint is not None:\n logger.info(\"model is loaded with: {}\".format((checkpoint)))\n state_dict = torch.load(checkpoint, map_location=self.device)\n if \"model\" in state_dict.keys():\n model.load_state_dict(state_dict[\"model\"])\n else:\n model.load_state_dict()\n if optimizer is not None:\n optimizer.load_state_dict(state_dict[\"optimizer\"])\n for state in optimizer.state.values():\n for k, v in state.items():\n state[k] = v.to(self.device)\n\n # Apply model parralel\n if torch.cuda.device_count() > 1 and not self.device == torch.device(\"cpu\"):\n model_name = model.name\n model = DataParallel(model)\n model.name = model_name\n\n model = model.to(self.device)\n\n if self.is_test:\n model.eval()\n else:\n model.train()\n\n return model, optimizer\n\n def save_checkpoint(self, model, checkpoint_path, optimizer=None):\n \"\"\"save model to checkpoint path as .pth\n Args:\n model (torch.nn.Module)\n optimizer (torch.optim.Optimizer)\n checkpoint_path (str): Path to save checkpoint\n \"\"\"\n if isinstance(model, torch.nn.DataParallel):\n model_state = model.module.state_dict()\n else:\n model_state = model.state_dict()\n\n optimizer_state = optimizer.state_dict() if optimizer is not None else None\n\n state = {\n \"model\": model_state,\n 
\"optimizer\": optimizer_state\n }\n\n logger.info(\"Saving model to: {}\".format(checkpoint_path))\n torch.save(state, checkpoint_path)\n\n def set_avg_meters(self):\n \"\"\"Set AverageMeter\n Returns:\n avg_meters (dict[str, any])\n \"\"\"\n avg_meters = {\"loss\": dict(), \"score\": dict()}\n avg_meters[\"loss\"][\"total\"] = AverageMeter()\n for tsk in self.task:\n avg_meters[\"loss\"][tsk] = AverageMeter()\n avg_meters[\"score\"][tsk] = dict()\n for mth in list(self.evaluation_cfg[tsk].methods):\n avg_meters[\"score\"][tsk][mth] = AverageMeter()\n\n return avg_meters\n\n def _reset_avg_meters(self, avg_meters):\n \"\"\"\n Args:\n avg_meters (dict[str, any])\n \"\"\"\n for tsk in self.task:\n avg_meters[\"loss\"][tsk].reset()\n for mth in list(self.evaluation_cfg[tsk].methods):\n avg_meters[\"score\"][tsk][mth].reset()\n\n def _record_results(self, avg_meters, losses, scores):\n \"\"\"Record loss and score to AverageMeter()\n Args:\n avg_meters (dict[str, AverageMeters])\n losses (dict[str, torch.Tensor])\n scores (dict[str, float])\n \"\"\"\n avg_meters[\"loss\"][\"total\"].update(\n losses[\"total\"].item(), self.batch_size)\n\n for tsk in self.task:\n avg_meters[\"loss\"][tsk].update(\n losses[tsk].item(), self.batch_size)\n score_avg_meters = avg_meters[\"score\"][tsk]\n score_dict = scores[tsk]\n for mth in list(self.evaluation_cfg[tsk].methods):\n score_avg_meters[mth].update(\n score_dict[mth], self.batch_size)\n\n def _logging_results(self, avg_meters):\n \"\"\"Logging results\n Args:\n avg_meters (dict[str, AverageMeter])\n \"\"\"\n loss_msg = \"total: {}\\n\".format(avg_meters[\"loss\"][\"total\"].val)\n score_msg = \"\"\n for tsk in self.task:\n loss_msg += \"{}: {}\\n\".format(\n tsk, avg_meters[\"loss\"][tsk].val)\n score_msg += \"{}:\\n\".format(tsk)\n for mth in list(self.evaluation_cfg[tsk].methods):\n score_msg += \" [{}]: {}\".format(\n mth, avg_meters[\"score\"][tsk][mth].val)\n score_msg += \"\\n\"\n\n msg = r\"\"\"\n|Epoch|: {}\n|Loss|:\n{}\n|Score|:\n{}\n \"\"\".format(self.epoch, loss_msg, score_msg)\n logger.info(msg)\n\n def _update_summary_writer(self, train_avg_meters, val_avg_meters, epoch):\n \"\"\"Update summary writer with AverageMeters()\n Args:\n train_avg_meters (dict[dict[str, AverageMeters()]])\n val_avg_meters (dict[dict[str, AverageMeters()]])\n epoch (int)\n \"\"\"\n self.writer.add_scalars(\n \"toal loss\",\n {\"train\": train_avg_meters[\"loss\"][\"total\"].val,\n \"val\": val_avg_meters[\"loss\"][\"total\"].val}, epoch\n )\n\n for tsk in self.task:\n self.writer.add_scalars(\n tsk + \" loss\",\n {\"train\": train_avg_meters[\"loss\"][tsk].val,\n \"val\": val_avg_meters[\"loss\"][tsk].val}, epoch\n )\n self.writer.add_scalars(\n tsk + \" score\",\n {\"train\": train_avg_meters[\"score\"][tsk].val,\n \"val\": val_avg_meters[\"score\"][tsk].val}, epoch\n )\n","repo_name":"ktro2828/dorec","sub_path":"dorec/runners/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7822566332","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport geopandas as gpd\r\nfrom shapely import wkt\r\nfrom shapely.geometry import LineString, Polygon\r\nimport h3\r\nimport mapclassify\r\nimport folium\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport contextily as cx\r\nfrom PIL import UnidentifiedImageError\r\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\r\nfrom 
matplotlib.figure import Figure\r\nimport contextily as ctx\r\nfrom IPython.display import display\r\nfrom matplotlib_scalebar.scalebar import ScaleBar\r\nfrom matplotlib.collections import QuadMesh\r\nfrom pathlib import Path\r\nfrom matplotlib import colors as mcolors\r\nfrom matplotlib.text import Text\r\nfrom mycolorpy import colorlist as mcp\r\nfrom requests.exceptions import ConnectionError as r_ConnectionError\r\nfrom pandas.io.sql import DatabaseError\r\n\r\nfrom urbantrips.kpi import kpi\r\nfrom urbantrips.carto import carto\r\nfrom urbantrips.geo import geo\r\nfrom urbantrips.geo.geo import (\r\n normalizo_lat_lon, crear_linestring)\r\nfrom urbantrips.utils.utils import (\r\n leer_configs_generales,\r\n traigo_db_path,\r\n iniciar_conexion_db,\r\n leer_alias,\r\n duracion)\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef plotear_recorrido_lowess(id_linea, etapas, recorridos_lowess, alias):\r\n \"\"\"\r\n Esta funcion toma un id_linea, un df de etapas, un gdf de recorridos_lowess\r\n y un alias y produce una viz de las etapas y el recorrido\r\n \"\"\"\r\n e = etapas.loc[etapas.id_linea == id_linea, :]\r\n r = recorridos_lowess.loc[recorridos_lowess.id_linea == id_linea, :]\r\n\r\n if (len(e) > 0) & (len(r) > 0):\r\n try:\r\n fig, ax = plt.subplots(figsize=(3, 3), dpi=150)\r\n\r\n ax.scatter(e.longitud, e.latitud, color='orange', s=.3)\r\n r.plot(color='black', lw=.8, legend=False, ax=ax)\r\n\r\n ax.set_title(f'Linea {id_linea}', fontsize=6)\r\n ax.axis('off')\r\n\r\n db_path = os.path.join(\"resultados\", \"png\",\r\n f\"{alias}linea_{id_linea}.png\")\r\n\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n plt.close(fig)\r\n except (AttributeError, ValueError):\r\n pass\r\n\r\n else:\r\n print(f\"No se pudo producir un grafico para el id_linea {id_linea}\")\r\n\r\n\r\n@duracion\r\ndef visualize_route_section_load(id_linea=False, rango_hrs=False,\r\n day_type='weekday',\r\n n_sections=10, section_meters=None,\r\n indicador='cantidad_etapas', factor=1,\r\n factor_min=50,\r\n save_gdf=False):\r\n \"\"\"\r\n Visualize the load per route section data per route\r\n\r\n Parameters\r\n ----------\r\n id_linea : int, list of ints or bool\r\n route id present in the ocupacion_por_linea_tramo table.\r\n rango_hrs : tuple or bool\r\n tuple holding hourly range (from,to) and from 0 to 24.\r\n day_type: str\r\n type of day. It can take `weekday`, `weekend` or a specific\r\n day in format 'YYYY-MM-DD'\r\n n_sections: int\r\n number of sections to split the route geom\r\n section_meters: int\r\n section lenght in meters to split the route geom. If specified,\r\n this will be used instead of n_sections.\r\n indicator: str\r\n Tipe of section load to display. 
'cantidad_etapas' (amount of legs)\r\n or `prop_etapas` (proportion of legs)\r\n factor: int\r\n scaling factor to use for line width to plot section load\r\n factor_min: int\r\n minimum width of linea for low section loads to be displayed\r\n\r\n \"\"\"\r\n sns.set_style(\"whitegrid\")\r\n\r\n if id_linea:\r\n\r\n if type(id_linea) == int:\r\n id_linea = [id_linea]\r\n\r\n table = get_route_section_load(\r\n id_linea=id_linea,\r\n rango_hrs=rango_hrs,\r\n day_type=day_type,\r\n n_sections=n_sections,\r\n section_meters=section_meters)\r\n\r\n # Create a viz for each route\r\n table.groupby('id_linea').apply(\r\n viz_etapas_x_tramo_recorrido,\r\n indicator=indicador,\r\n factor=factor,\r\n factor_min=factor_min,\r\n return_gdfs=False,\r\n save_gdf=save_gdf,\r\n )\r\n\r\n\r\ndef get_route_section_load(id_linea=False, rango_hrs=False, day_type='weekday',\r\n n_sections=10, section_meters=None,):\r\n \"\"\"\r\n Get the load per route section data\r\n\r\n Parameters\r\n ----------\r\n id_linea : int, list of ints or bool\r\n route id present in the ocupacion_por_linea_tramo table.\r\n rango_hrs : tuple or bool\r\n tuple holding hourly range (from,to) and from 0 to 24.\r\n day_type: str\r\n type of day. It can take `weekday`, `weekend` or a specific\r\n day in format 'YYYY-MM-DD'\r\n n_sections: int\r\n number of sections to split the route geom\r\n section_meters: int\r\n section lenght in meters to split the route geom. If specified,\r\n this will be used instead of n_sections.\r\n\r\n Returns\r\n -------\r\n table : pandas.Data.Frame\r\n dataframe with load per section per route\r\n\r\n recorridos : geopandas.GeoDataFrame\r\n geodataframe with route geoms\r\n\r\n \"\"\"\r\n\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n\r\n # route id filter\r\n if id_linea:\r\n\r\n if type(id_linea) == int:\r\n id_linea = [id_linea]\r\n\r\n lineas_str = \",\".join(map(str, id_linea))\r\n else:\r\n lineas_str = ''\r\n\r\n # create query to get data from db\r\n q = load_route_section_load_data_q(\r\n lineas_str, rango_hrs, n_sections, section_meters, day_type\r\n )\r\n\r\n # Read data from section load table\r\n table = pd.read_sql(q, conn_data)\r\n\r\n if len(table) == 0:\r\n print(\"No hay datos de carga por tramo para estos parametros.\")\r\n print(\" id_linea:\", id_linea,\r\n \" rango_hrs:\", rango_hrs,\r\n \" n_sections:\", n_sections,\r\n \" section_meters:\", section_meters,\r\n \" day_type:\", day_type)\r\n\r\n return table\r\n\r\n\r\ndef load_route_section_load_data_q(\r\n lineas_str, rango_hrs, n_sections, section_meters, day_type\r\n):\r\n \"\"\"\r\n Creates a query that gets route section load data from the db\r\n for a specific set of lineas, hours, section meters and day type\r\n\r\n Parameters\r\n ----------\r\n lineas_str : str\r\n list of lines to query in a string format separated by comma\r\n rango_hrs : tuple or bool\r\n tuple holding hourly range (from,to) and from 0 to 24.\r\n day_type: str\r\n type of day. It can take `weekday`, `weekend` or a specific\r\n day in format 'YYYY-MM-DD'\r\n n_sections: int\r\n number of sections to split the route geom\r\n section_meters: int\r\n section lenght in meters to split the route geom. 
If specified,\r\n this will be used instead of n_sections.\r\n\r\n Returns\r\n -------\r\n str\r\n query that gets data\r\n\r\n \"\"\"\r\n\r\n # hour range filter\r\n if rango_hrs:\r\n hora_min_filter = f\"= {rango_hrs[0]}\"\r\n hora_max_filter = f\"= {rango_hrs[1]}\"\r\n else:\r\n hora_min_filter = \"is NULL\"\r\n hora_max_filter = \"is NULL\"\r\n\r\n q = f\"\"\"\r\n select * from ocupacion_por_linea_tramo\r\n where hora_min {hora_min_filter}\r\n and hora_max {hora_max_filter}\r\n and day_type = '{day_type}'\r\n \"\"\"\r\n\r\n if lineas_str != '':\r\n q = q + f\" and id_linea in ({lineas_str})\"\r\n\r\n if section_meters:\r\n q = q + f\" and section_meters = {section_meters}\"\r\n\r\n else:\r\n q = (\r\n q +\r\n f\" and n_sections = {n_sections} and section_meters is NULL\"\r\n )\r\n q = q + \";\"\r\n return q\r\n\r\n\r\ndef viz_etapas_x_tramo_recorrido(df,\r\n indicator='cantidad_etapas', factor=1,\r\n factor_min=50, return_gdfs=False,\r\n save_gdf=False):\r\n \"\"\"\r\n Plots and saves a section load viz for a given route\r\n\r\n Parameters\r\n ----------\r\n df: pandas.DataFrame\r\n table for a given route in section load db table\r\n route geom: geopandas.GeoSeries\r\n route geoms with id_route as index\r\n indicator: str\r\n Tipe of section load to display. 'cantidad_etapas' (amount of legs)\r\n or `prop_etapas` (proportion of legs)\r\n factor: int\r\n scaling factor to use for line width to plot section load\r\n factor_min: int\r\n minimum width of linea for low section loads to be displayed\r\n return_gdfs: bool\r\n if functions will return section load geodataframes per direction\r\n\r\n Returns\r\n -------\r\n gdf_d0 : geopandas.GeoDataFrame\r\n geodataframe with section load data and sections geoms.\r\n\r\n gdf_d1 : geopandas.GeoDataFrame\r\n geodataframe with section load data and sections geoms.\r\n \"\"\"\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n id_linea = df.id_linea.unique()[0]\r\n s = f\"select nombre_linea from metadata_lineas\" +\\\r\n f\" where id_linea = {id_linea};\"\r\n id_linea_str = pd.read_sql(s, conn_insumos)\r\n\r\n if len(id_linea_str) > 0:\r\n id_linea_str = id_linea_str.nombre_linea.item()\r\n else:\r\n id_linea_str = ''\r\n\r\n day = df['day_type'].unique().item()\r\n\r\n if day == 'weekend':\r\n day_str = 'Fin de semana tipo'\r\n elif day == 'weekday':\r\n day_str = 'Dia de semana tipo'\r\n else:\r\n day_str = day\r\n\r\n section_ids = df.section_id.unique()\r\n\r\n print('Produciendo grafico de ocupacion por tramos', id_linea)\r\n\r\n # set a expansion factor for viz purposes\r\n df['buff_factor'] = df[indicator]*factor\r\n\r\n # Set a minimum for each section to be displated in map\r\n df['buff_factor'] = np.where(\r\n df['buff_factor'] <= factor_min, factor_min, df['buff_factor'])\r\n\r\n cols = ['id_linea', 'day_type', 'n_sections', 'sentido',\r\n 'section_id', 'hora_min', 'hora_max', 'cantidad_etapas',\r\n 'prop_etapas', 'buff_factor']\r\n\r\n df_d0 = df.loc[df.sentido == 'ida', cols]\r\n df_d1 = df.loc[df.sentido == 'vuelta', cols]\r\n\r\n # Create geoms for route in both directions\r\n df_geom = df.query(\"sentido == 'ida'\")\\\r\n .sort_values('section_id')\\\r\n .reset_index(drop=True)\r\n\r\n geom = [LineString(\r\n [[df_geom.loc[i, 'x'], df_geom.loc[i, 'y']],\r\n [df_geom.loc[i+1, 'x'], df_geom.loc[i+1, 'y']]]\r\n ) for i in df_geom.index[:-1]]\r\n gdf = gpd.GeoDataFrame(pd.DataFrame(\r\n {'section_id': df_geom.section_id.iloc[:-1]}),\r\n geometry=geom, crs='epsg:4326')\r\n\r\n # Arrows\r\n flecha_ida_wgs84 = 
gdf.loc[gdf.section_id == 0.0, 'geometry']\r\n flecha_ida_wgs84 = list(flecha_ida_wgs84.item().coords)\r\n flecha_ida_inicio_wgs84 = flecha_ida_wgs84[0]\r\n flecha_ida_fin_wgs84 = flecha_ida_wgs84[1]\r\n\r\n flecha_vuelta_wgs84 = gdf.loc[gdf.section_id ==\r\n max(gdf.section_id), 'geometry']\r\n flecha_vuelta_wgs84 = list(flecha_vuelta_wgs84.item().coords)\r\n flecha_vuelta_inicio_wgs84 = flecha_vuelta_wgs84[0]\r\n flecha_vuelta_fin_wgs84 = flecha_vuelta_wgs84[1]\r\n\r\n # Use a projected crs in meters\r\n epsg = geo.get_epsg_m()\r\n gdf = gdf.to_crs(epsg=epsg)\r\n\r\n gdf_d0 = gdf\\\r\n .merge(df_d0, on='section_id', how='left')\\\r\n .fillna(0)\r\n\r\n gdf_d1 = gdf\\\r\n .merge(df_d1, on='section_id', how='left')\\\r\n .fillna(0)\r\n\r\n # save data for dashboard\r\n gdf_d0_dash = gdf_d0.to_crs(epsg=4326).copy()\r\n gdf_d1_dash = gdf_d1.to_crs(epsg=4326).copy()\r\n\r\n # creando buffers en base a\r\n gdf_d0['geometry'] = gdf_d0.geometry.buffer(gdf_d0.buff_factor)\r\n gdf_d1['geometry'] = gdf_d1.geometry.buffer(gdf_d1.buff_factor)\r\n\r\n # creating plot\r\n f = plt.figure(tight_layout=True, figsize=(20, 15))\r\n gs = f.add_gridspec(nrows=3, ncols=2)\r\n ax1 = f.add_subplot(gs[0:2, 0])\r\n ax2 = f.add_subplot(gs[0:2, 1])\r\n ax3 = f.add_subplot(gs[2, 0])\r\n ax4 = f.add_subplot(gs[2, 1])\r\n\r\n font_dicc = {'fontsize': 18,\r\n 'fontweight': 'bold'}\r\n\r\n # create a squared box\r\n minx, miny, maxx, maxy = gdf_d0.total_bounds\r\n box = create_squared_polygon(minx, miny, maxx, maxy, epsg)\r\n box.plot(ax=ax1, color='#ffffff00')\r\n box.plot(ax=ax2, color='#ffffff00')\r\n\r\n # get branches' geoms\r\n branch_geoms = get_branch_geoms_from_line(id_linea=id_linea)\r\n\r\n if branch_geoms is not None:\r\n branch_geoms = branch_geoms.to_crs(epsg=epsg)\r\n branch_geoms.plot(ax=ax1, color='Purple',\r\n alpha=0.4, linestyle='dashed')\r\n branch_geoms.plot(ax=ax2, color='Orange',\r\n alpha=0.4, linestyle='dashed')\r\n\r\n gdf.plot(ax=ax1, color='black')\r\n gdf.plot(ax=ax2, color='black')\r\n\r\n try:\r\n gdf_d0.plot(ax=ax1, column=indicator, cmap='BuPu',\r\n scheme='fisherjenks', k=5, alpha=.6)\r\n gdf_d1.plot(ax=ax2, column=indicator, cmap='Oranges',\r\n scheme='fisherjenks', k=5, alpha=.6)\r\n except ValueError:\r\n gdf_d0.plot(ax=ax1, column=indicator, cmap='BuPu', alpha=.6)\r\n gdf_d1.plot(ax=ax2, column=indicator, cmap='Oranges', alpha=.6)\r\n\r\n ax1.set_axis_off()\r\n ax2.set_axis_off()\r\n\r\n ax1.set_title('IDA', fontdict=font_dicc)\r\n ax2.set_title('VUELTA', fontdict=font_dicc)\r\n\r\n # Set title and plot axis\r\n if indicator == 'cantidad_etapas':\r\n title = 'Segmentos del recorrido - Cantidad de etapas'\r\n y_axis_lable = 'Cantidad de etapas por sentido'\r\n elif indicator == 'prop_etapas':\r\n title = 'Segmentos del recorrido - Porcentaje de etapas totales'\r\n y_axis_lable = 'Porcentaje del total de etapas'\r\n else:\r\n raise Exception(\r\n \"Indicador debe ser 'cantidad_etapas' o 'prop_etapas'\")\r\n\r\n if not df.hora_min.isna().all():\r\n from_hr = df.hora_min.unique()[0]\r\n to_hr = df.hora_max.unique()[0]\r\n hr_str = f' {from_hr}-{to_hr} hrs'\r\n else:\r\n hr_str = ''\r\n\r\n title = title + hr_str + ' - ' + day_str + \\\r\n f\" {id_linea_str} (id_linea: {id_linea})\"\r\n f.suptitle(title, fontsize=18)\r\n\r\n # Matching bar plot with route direction\r\n flecha_eo_xy = (0.4, 1.1)\r\n flecha_eo_text_xy = (0.05, 1.1)\r\n flecha_oe_xy = (0.6, 1.1)\r\n flecha_oe_text_xy = (0.95, 1.1)\r\n\r\n labels_eo = [''] * len(section_ids)\r\n labels_eo[0] = 'INICIO'\r\n labels_eo[-1] = 
'FIN'\r\n labels_oe = [''] * len(section_ids)\r\n labels_oe[-1] = 'INICIO'\r\n labels_oe[0] = 'FIN'\r\n\r\n # check if route geom is drawn from west to east\r\n geom_dir_east = flecha_ida_inicio_wgs84[0] < flecha_vuelta_fin_wgs84[0]\r\n\r\n # Set arrows in barplots based on reout geom direction\r\n if geom_dir_east:\r\n\r\n flecha_ida_xy = flecha_eo_xy\r\n flecha_ida_text_xy = flecha_eo_text_xy\r\n labels_ida = labels_eo\r\n\r\n flecha_vuelta_xy = flecha_oe_xy\r\n flecha_vuelta_text_xy = flecha_oe_text_xy\r\n labels_vuelta = labels_oe\r\n\r\n # direction 0 east to west\r\n df_d0 = df_d0.sort_values('section_id', ascending=True)\r\n df_d1 = df_d1.sort_values('section_id', ascending=True)\r\n\r\n else:\r\n flecha_ida_xy = flecha_oe_xy\r\n flecha_ida_text_xy = flecha_oe_text_xy\r\n labels_ida = labels_oe\r\n\r\n flecha_vuelta_xy = flecha_eo_xy\r\n flecha_vuelta_text_xy = flecha_eo_text_xy\r\n labels_vuelta = labels_eo\r\n\r\n df_d0 = df_d0.sort_values('section_id', ascending=False)\r\n df_d1 = df_d1.sort_values('section_id', ascending=False)\r\n\r\n sns.barplot(data=df_d0, x=\"section_id\",\r\n y=indicator, ax=ax3, color='Purple',\r\n order=df_d0.section_id.values)\r\n\r\n sns.barplot(data=df_d1, x=\"section_id\",\r\n y=indicator, ax=ax4, color='Orange',\r\n order=df_d1.section_id.values)\r\n\r\n # Axis\r\n ax3.set_xticklabels(labels_ida)\r\n ax4.set_xticklabels(labels_vuelta)\r\n\r\n ax3.set_ylabel(y_axis_lable)\r\n ax3.set_xlabel('')\r\n\r\n ax4.get_yaxis().set_visible(False)\r\n\r\n ax4.set_ylabel('')\r\n ax4.set_xlabel('')\r\n max_y_barplot = max(df_d0[indicator].max(), df_d1[indicator].max())\r\n ax3.set_ylim(0, max_y_barplot)\r\n ax4.set_ylim(0, max_y_barplot)\r\n\r\n ax3.spines.right.set_visible(False)\r\n ax3.spines.top.set_visible(False)\r\n ax4.spines.left.set_visible(False)\r\n ax4.spines.right.set_visible(False)\r\n ax4.spines.top.set_visible(False)\r\n\r\n # For direction 0, get the last section of the route geom\r\n flecha_ida = gdf.loc[gdf.section_id == max(gdf.section_id), 'geometry']\r\n flecha_ida = list(flecha_ida.item().coords)\r\n flecha_ida_inicio = flecha_ida[1]\r\n flecha_ida_fin = flecha_ida[0]\r\n\r\n # For direction 1, get the first section of the route geom\r\n flecha_vuelta = gdf.loc[gdf.section_id == 0.0, 'geometry']\r\n flecha_vuelta = list(flecha_vuelta.item().coords)\r\n # invert the direction of the arrow\r\n flecha_vuelta_inicio = flecha_vuelta[0]\r\n flecha_vuelta_fin = flecha_vuelta[1]\r\n\r\n ax1.annotate('', xy=(flecha_ida_inicio[0],\r\n flecha_ida_inicio[1]),\r\n xytext=(flecha_ida_fin[0],\r\n flecha_ida_fin[1]),\r\n arrowprops=dict(facecolor='black',\r\n edgecolor='black'),\r\n )\r\n\r\n ax2.annotate('', xy=(flecha_vuelta_inicio[0],\r\n flecha_vuelta_inicio[1]),\r\n xytext=(flecha_vuelta_fin[0],\r\n flecha_vuelta_fin[1]),\r\n arrowprops=dict(facecolor='black',\r\n edgecolor='black'),\r\n )\r\n\r\n ax3.annotate('Sentido', xy=flecha_ida_xy, xytext=flecha_ida_text_xy,\r\n size=16, va=\"center\", ha=\"center\",\r\n xycoords='axes fraction',\r\n arrowprops=dict(facecolor='Purple',\r\n shrink=0.05, edgecolor='Purple'),\r\n )\r\n ax4.annotate('Sentido', xy=flecha_vuelta_xy, xytext=flecha_vuelta_text_xy,\r\n size=16, va=\"center\", ha=\"center\",\r\n xycoords='axes fraction',\r\n arrowprops=dict(facecolor='Orange',\r\n shrink=0.05, edgecolor='Orange'),\r\n )\r\n\r\n prov = cx.providers.Stamen.TonerLite\r\n try:\r\n cx.add_basemap(ax1, crs=gdf_d0.crs.to_string(), source=prov)\r\n cx.add_basemap(ax2, crs=gdf_d1.crs.to_string(), source=prov)\r\n except 
(UnidentifiedImageError, ValueError):\r\n prov = cx.providers.CartoDB.Positron\r\n cx.add_basemap(ax1, crs=gdf_d0.crs.to_string(), source=prov)\r\n cx.add_basemap(ax2, crs=gdf_d1.crs.to_string(), source=prov)\r\n except (r_ConnectionError):\r\n pass\r\n\r\n alias = leer_alias()\r\n\r\n for frm in ['png', 'pdf']:\r\n archivo = f\"{alias}_{day}_segmentos_id_linea_\"\r\n archivo = archivo+f\"{id_linea}_{indicator}_{hr_str}.{frm}\"\r\n db_path = os.path.join(\"resultados\", frm, archivo)\r\n f.savefig(db_path, dpi=300)\r\n plt.close(f)\r\n\r\n if save_gdf:\r\n gdf_d0 = gdf_d0.to_crs(epsg=4326)\r\n gdf_d1 = gdf_d1.to_crs(epsg=4326)\r\n\r\n f_0 = f'segmentos_id_linea_{id_linea}_{indicator}{hr_str}_0.geojson'\r\n f_1 = f'segmentos_id_linea_{id_linea}_{indicator}{hr_str}_1.geojson'\r\n\r\n db_path_0 = os.path.join(\"resultados\", \"geojson\", f_0)\r\n db_path_1 = os.path.join(\"resultados\", \"geojson\", f_1)\r\n\r\n gdf_d0.to_file(db_path_0, driver='GeoJSON')\r\n gdf_d1.to_file(db_path_1, driver='GeoJSON')\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n\r\n gdf_d0_dash['wkt'] = gdf_d0_dash.geometry.to_wkt()\r\n gdf_d1_dash['wkt'] = gdf_d1_dash.geometry.to_wkt()\r\n\r\n gdf_d_dash = pd.concat([gdf_d0_dash, gdf_d1_dash], ignore_index=True)\r\n\r\n gdf_d_dash['nombre_linea'] = id_linea_str\r\n\r\n cols = ['id_linea',\r\n 'nombre_linea',\r\n 'day_type',\r\n 'n_sections',\r\n 'sentido',\r\n 'section_id',\r\n 'hora_min',\r\n 'hora_max',\r\n 'cantidad_etapas',\r\n 'prop_etapas',\r\n 'buff_factor',\r\n 'wkt']\r\n\r\n gdf_d_dash = gdf_d_dash[cols]\r\n\r\n gdf_d_dash_ant = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM ocupacion_por_linea_tramo\r\n \"\"\",\r\n conn_dash,\r\n )\r\n\r\n gdf_d_dash_ant = gdf_d_dash_ant[~(\r\n (gdf_d_dash_ant.id_linea.isin(\r\n gdf_d_dash.id_linea.unique().tolist())) &\r\n (gdf_d_dash_ant.day_type.isin(\r\n gdf_d_dash.day_type.unique().tolist())) &\r\n (gdf_d_dash_ant.n_sections.isin(\r\n gdf_d_dash.n_sections.unique().tolist())) &\r\n ((gdf_d_dash_ant.hora_min == from_hr)\r\n & (gdf_d_dash_ant.hora_max == to_hr))\r\n )]\r\n\r\n gdf_d_dash = pd.concat(\r\n [gdf_d_dash_ant, gdf_d_dash], ignore_index=True)\r\n\r\n gdf_d_dash.to_sql(\"ocupacion_por_linea_tramo\", conn_dash,\r\n if_exists=\"replace\", index=False)\r\n\r\n conn_dash.close()\r\n\r\n if return_gdfs:\r\n return gdf_d0, gdf_d1\r\n\r\n\r\ndef plot_voronoi_zones(voi, hexs, hexs2, show_map, alias):\r\n fig = Figure(figsize=(13.5, 13.5), dpi=100)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n plt.rcParams.update({\"axes.facecolor\": '#d4dadc',\r\n 'figure.facecolor': '#d4dadc'})\r\n voi = voi.to_crs(3857)\r\n voi.geometry.boundary.plot(edgecolor='grey', linewidth=.5, ax=ax)\r\n # ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron,\r\n # attribution=None, attribution_size=10)\r\n\r\n try:\r\n cx.add_basemap(ax, source=ctx.providers.CartoDB.Positron,\r\n attribution=None, attribution_size=10)\r\n except (r_ConnectionError, ValueError):\r\n pass\r\n\r\n voi['coords'] = voi['geometry'].apply(\r\n lambda x: x.representative_point().coords[:])\r\n voi['coords'] = [coords[0] for coords in voi['coords']]\r\n voi.apply(lambda x: ax.annotate(\r\n text=x['Zona_voi'],\r\n xy=x.geometry.centroid.coords[0],\r\n ha='center',\r\n color='darkblue',\r\n ), axis=1)\r\n ax.set_title('Zonificación', fontsize=12)\r\n ax.axis('off')\r\n\r\n if show_map:\r\n\r\n display(fig)\r\n\r\n # Display figura temporal\r\n fig = Figure(figsize=(13.5, 13.5), dpi=70)\r\n canvas = FigureCanvas(fig)\r\n ax = 
fig.add_subplot(111)\r\n hexs.to_crs(3857).plot(markersize=hexs['fex']/500, ax=ax)\r\n hexs2.to_crs(3857).boundary.plot(ax=ax, lw=.3)\r\n try:\r\n ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron,\r\n attribution=None, attribution_size=10)\r\n except (r_ConnectionError, ValueError):\r\n pass\r\n ax.axis('off')\r\n\r\n # graba resultados\r\n file_path = os.path.join(\"resultados\", \"png\", f\"{alias}Zona_voi_map.png\")\r\n fig.savefig(file_path, dpi=300)\r\n print('Zonificación guardada en', file_path)\r\n\r\n file_path = os.path.join(\"resultados\", \"pdf\", f\"{alias}Zona_voi_map.pdf\")\r\n fig.savefig(file_path, dpi=300)\r\n voi = voi.to_crs(4326)\r\n\r\n file_path = os.path.join(\"resultados\", f\"{alias}Zona_voi.geojson\")\r\n voi[['Zona_voi', 'geometry']].to_file(file_path)\r\n\r\n\r\ndef imprimir_matrices_od(viajes,\r\n savefile='viajes',\r\n title='Matriz OD',\r\n var_fex=\"\",\r\n desc_dia='',\r\n tipo_dia=''):\r\n\r\n alias = leer_alias()\r\n\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n zonas = pd.read_sql_query(\r\n \"\"\"\r\n SELECT * from zonas\r\n \"\"\",\r\n conn_insumos,\r\n )\r\n\r\n conn_insumos.close()\r\n zonas[f'h3_r6'] = zonas['h3'].apply(h3.h3_to_parent, res=6)\r\n zonas[f'h3_r7'] = zonas['h3'].apply(h3.h3_to_parent, res=7)\r\n\r\n df, matriz_zonas = traigo_zonificacion(\r\n viajes, zonas, h3_o='h3_o', h3_d='h3_d')\r\n\r\n if len(var_fex) == 0:\r\n var_fex = 'var_fex'\r\n df[var_fex] = 1\r\n\r\n for i in matriz_zonas:\r\n var_zona = i[1]\r\n matriz_order = i[2]\r\n\r\n imprime_od(\r\n df,\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD General',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Todos los viajes'\r\n )\r\n\r\n imprime_od(\r\n df[(df.cant_etapas > 1)],\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD viajes con transferencia',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}_transferencias\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Con transferencias'\r\n )\r\n\r\n imprime_od(\r\n df[(df.distance_osm_drive <= 5)],\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD viajes cortos (<5kms)',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}_corta_distancia\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Corta distancia (<5kms)'\r\n )\r\n\r\n # Imprime hora punta manana, mediodia, tarde\r\n\r\n df_tmp = df.groupby(['dia', 'hora'], as_index=False)[\r\n var_fex].sum().reset_index()\r\n df_tmp = df_tmp.groupby(['hora'])[var_fex].mean().reset_index()\r\n\r\n try:\r\n manana = df_tmp[(df_tmp.hora.astype(int) >= 6) & (\r\n df_tmp.hora.astype(int) < 12)][var_fex].idxmax()\r\n except ValueError:\r\n manana = None\r\n\r\n try:\r\n mediodia = df_tmp[(df_tmp.hora.astype(int) >= 12) & (\r\n df_tmp.hora.astype(int) < 16)][var_fex].idxmax()\r\n except ValueError:\r\n mediodia = None\r\n try:\r\n tarde = 
df_tmp[(df_tmp.hora.astype(int) >= 16) & (\r\n df_tmp.hora.astype(int) < 22)][var_fex].idxmax()\r\n except ValueError:\r\n tarde = None\r\n\r\n if manana != None:\r\n imprime_od(\r\n df[(df.hora.astype(int) >= manana-1) &\r\n (df.hora.astype(int) <= manana+1)],\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD viajes punta mañana',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_manana\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Punta mañana'\r\n )\r\n\r\n if mediodia != None:\r\n imprime_od(\r\n df[(df.hora.astype(int) >= mediodia-1) &\r\n (df.hora.astype(int) <= mediodia+1)],\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD viajes punta mediodí­a',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_mediodia\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Punta mediodí­a'\r\n\r\n )\r\n\r\n if tarde != None:\r\n imprime_od(\r\n df[(df.hora.astype(int) >= tarde-1) &\r\n (df.hora.astype(int) <= tarde+1)],\r\n zona_origen=f\"{var_zona}_o\",\r\n zona_destino=f\"{var_zona}_d\",\r\n var_fex=var_fex,\r\n x_rotation=90,\r\n normalize=True,\r\n cmap=\"Reds\",\r\n title='Matriz OD viajes punta tarde',\r\n figsize_tuple='',\r\n matriz_order=matriz_order,\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_tarde\",\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n var_zona=var_zona,\r\n filtro1='Punta tarde'\r\n )\r\n\r\n\r\ndef imprime_lineas_deseo(df,\r\n h3_o='',\r\n h3_d='',\r\n var_fex='',\r\n title='Lí­neas de deseo',\r\n savefile='lineas_deseo',\r\n k_jenks=5,\r\n filtro1='',\r\n desc_dia='',\r\n tipo_dia=''\r\n ):\r\n \"\"\"\r\n Esta funcion toma un df de viajes con destino validado\r\n nombres de columnas con el h3 de origen y destino\r\n un nombre con la columna del factor de expansion\r\n y nombres para el titulo del mapa y el archivo\r\n y produce un mapa con lineas de deseo para todas las\r\n geografias presentes en la tabla zonas\r\n \"\"\"\r\n\r\n pd.options.mode.chained_assignment = None\r\n alias = leer_alias()\r\n\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n zonas = pd.read_sql_query(\r\n \"\"\"\r\n SELECT * from zonas\r\n \"\"\",\r\n conn_insumos,\r\n )\r\n\r\n conn_insumos.close()\r\n\r\n zonas[f'h3_r6'] = zonas['h3'].apply(h3.h3_to_parent, res=6)\r\n zonas[f'h3_r7'] = zonas['h3'].apply(h3.h3_to_parent, res=7)\r\n\r\n zonas = gpd.GeoDataFrame(\r\n zonas,\r\n geometry=gpd.points_from_xy(zonas['longitud'], zonas['latitud']),\r\n crs=4326,\r\n )\r\n\r\n if len(h3_o) == 0:\r\n h3_o = 'h3_o_norm'\r\n if len(h3_d) == 0:\r\n h3_d = 'h3_d_norm'\r\n\r\n if len(var_fex) == 0:\r\n var_fex = 'fex'\r\n df[var_fex] = 1\r\n\r\n # Clasificar od en terminos de zonas\r\n df, matriz_zonas = traigo_zonificacion(df,\r\n zonas,\r\n h3_o=h3_o,\r\n h3_d=h3_d,\r\n res_agg=True)\r\n\r\n for m in matriz_zonas:\r\n var_zona = m[1]\r\n\r\n lineas_deseo(df,\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='viridis_r',\r\n porc_viajes=100,\r\n title=title,\r\n savefile=f\"{alias}{savefile}_{var_zona}\",\r\n show_fig=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n 
tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Todos los viajes'\r\n )\r\n\r\n lineas_deseo(df[(df.cant_etapas > 1)],\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='crest',\r\n porc_viajes=90,\r\n title=f'{title}\\nViajes con transferencias',\r\n savefile=f\"{alias}{savefile}_{var_zona}_transferencias\",\r\n show_fig=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Con transferencias'\r\n )\r\n\r\n lineas_deseo(df[(df.distance_osm_drive <= 5)],\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='magma_r',\r\n porc_viajes=90,\r\n title=f'{title}\\nViajes de corta distancia (<5kms)',\r\n savefile=f\"{alias}{savefile}_{var_zona}_corta_distancia\",\r\n show_fig=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Corta distancia (<5kms)'\r\n )\r\n\r\n # Imprime hora punta manana, mediodia, tarde\r\n\r\n df_tmp = df\\\r\n .groupby(['dia', 'hora'], as_index=False)\\\r\n .factor_expansion_linea.sum()\\\r\n .rename(columns={'factor_expansion_linea': 'cant'})\\\r\n .reset_index()\r\n df_tmp = df_tmp.groupby(['hora']).cant.mean().reset_index()\r\n try:\r\n manana = df_tmp[(df_tmp.hora.astype(int) >= 6) & (\r\n df_tmp.hora.astype(int) < 12)].cant.idxmax()\r\n except ValueError:\r\n manana = None\r\n\r\n try:\r\n mediodia = df_tmp[(df_tmp.hora.astype(int) >= 12) & (\r\n df_tmp.hora.astype(int) < 16)].cant.idxmax()\r\n except ValueError:\r\n mediodia = None\r\n\r\n try:\r\n tarde = df_tmp[(df_tmp.hora.astype(int) >= 16) & (\r\n df_tmp.hora.astype(int) < 22)].cant.idxmax()\r\n except ValueError:\r\n tarde = None\r\n\r\n if manana != None:\r\n lineas_deseo(df[\r\n (df.hora.astype(int) >= manana-1) &\r\n (df.hora.astype(int) <= manana+1)],\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='magma_r',\r\n porc_viajes=90,\r\n title=f'{title}\\nViajes en hora punta mañana',\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_manana\",\r\n show_fig=False,\r\n normalizo_latlon=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Punta Mañana')\r\n\r\n if mediodia != None:\r\n lineas_deseo(df[\r\n (df.hora.astype(int) >= mediodia-1) &\r\n (df.hora.astype(int) <= mediodia+1)],\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='magma_r',\r\n porc_viajes=90,\r\n title=f'{title}\\nViajes en hora punta mediodia',\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_mediodia\",\r\n show_fig=False,\r\n normalizo_latlon=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Punta Mediodí­a')\r\n\r\n if tarde != None:\r\n lineas_deseo(df[\r\n (df.hora.astype(int) >= tarde-1) &\r\n (df.hora.astype(int) <= tarde+1)],\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='magma_r',\r\n porc_viajes=90,\r\n title=f'{title}\\nViajes en hora punta tarde',\r\n savefile=f\"{alias}{savefile}_{var_zona}_punta_tarde\",\r\n show_fig=False,\r\n normalizo_latlon=False,\r\n k_jenks=k_jenks,\r\n alias=alias,\r\n desc_dia=desc_dia,\r\n tipo_dia=tipo_dia,\r\n zona=var_zona,\r\n filtro1='Punta Tarde')\r\n\r\n\r\ndef indicadores_hora_punta(viajesxhora_dash, desc_dia, tipo_dia):\r\n\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n\r\n try:\r\n indicadores = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM 
hora_punta\r\n \"\"\",\r\n conn_data,\r\n )\r\n except DatabaseError as e:\r\n print(\"No existe la tabla indicadores, construyendola...\")\r\n indicadores = pd.DataFrame([])\r\n\r\n ind_horas = pd.DataFrame([])\r\n\r\n for modo in viajesxhora_dash.Modo.unique():\r\n\r\n vi = viajesxhora_dash[viajesxhora_dash.Modo ==\r\n modo].reset_index(drop=True)\r\n descrip = ''\r\n if modo != 'Todos':\r\n descrip = f' ({modo.capitalize()})'\r\n\r\n ind_horas = pd.concat([\r\n ind_horas,\r\n pd.DataFrame([\r\n [desc_dia,\r\n tipo_dia,\r\n f'Hora punta mañana{descrip}',\r\n vi.iloc[vi[(vi.Hora >= '05') & (\r\n vi.Hora < '12')].Viajes.idxmax()].Hora,\r\n 'viajesxhora',\r\n 0,\r\n 0]\r\n ], columns=['dia', 'tipo_dia', 'detalle', 'indicador', 'tabla', 'nivel', 'porcentaje'])])\r\n ind_horas = pd.concat([\r\n ind_horas,\r\n pd.DataFrame([\r\n [desc_dia,\r\n tipo_dia,\r\n f'Hora punta mediodí­a{descrip}',\r\n vi.iloc[vi[(vi.Hora >= '12') & (\r\n vi.Hora < '15')].Viajes.idxmax()].Hora,\r\n 'viajesxhora',\r\n 0,\r\n 0]\r\n ], columns=['dia', 'tipo_dia', 'detalle', 'indicador', 'tabla', 'nivel', 'porcentaje'])])\r\n ind_horas = pd.concat([\r\n ind_horas,\r\n pd.DataFrame([\r\n [desc_dia,\r\n tipo_dia,\r\n f'Hora punta tarde{descrip}',\r\n vi.iloc[vi[(vi.Hora >= '15') & (\r\n vi.Hora < '22')].Viajes.idxmax()].Hora,\r\n 'viajesxhora',\r\n 0,\r\n 0]\r\n ], columns=['dia', 'tipo_dia', 'detalle', 'indicador', 'tabla', 'nivel', 'porcentaje'])])\r\n\r\n ind_horas['indicador'] = ind_horas['indicador'].astype(float)\r\n if len(indicadores) > 0:\r\n indicadores = indicadores[~((indicadores.dia == desc_dia) & (\r\n indicadores.tipo_dia == tipo_dia) & (\r\n indicadores.tabla == 'viajesxhora')\r\n )]\r\n indicadores = pd.concat([indicadores, ind_horas])\r\n indicadores.to_sql(\"hora_punta\", conn_data,\r\n if_exists=\"replace\", index=False)\r\n\r\n conn_data.close()\r\n\r\n\r\ndef imprime_graficos_hora(viajes,\r\n title='Cantidad de viajes en transporte público',\r\n savefile='viajes',\r\n var_fex='',\r\n desc_dia='',\r\n tipo_dia=''):\r\n\r\n pd.options.mode.chained_assignment = None\r\n configs = leer_configs_generales()\r\n db_path = traigo_db_path\r\n alias = leer_alias()\r\n\r\n df_aux = pd.DataFrame([(str(x).zfill(2))\r\n for x in list(range(0, 24))], columns=['hora'])\r\n df_aux['dia'] = viajes.head(1).dia.values[0]\r\n df_aux['cant'] = 0\r\n df_aux['modo'] = viajes.modo.unique()[0]\r\n\r\n if not var_fex:\r\n viajes['cant'] = 1\r\n else:\r\n viajes['cant'] = viajes[var_fex]\r\n\r\n viajesxhora = pd.concat([viajes, df_aux], ignore_index=True)\r\n viajesxhora['hora'] = viajesxhora.hora.astype(str).str[:2].str.zfill(2)\r\n\r\n viajesxhora = viajesxhora.groupby(\r\n ['dia', 'hora']).cant.sum().reset_index()\r\n viajesxhora = viajesxhora.groupby(['hora']).cant.mean().reset_index()\r\n\r\n viajesxhora['cant'] = viajesxhora['cant'].round().astype(int)\r\n\r\n savefile_ = f'{savefile}_x_hora'\r\n\r\n viajesxhora_dash = viajesxhora.copy()\r\n viajesxhora_dash['modo'] = 'Todos'\r\n\r\n # Viajes por hora\r\n with sns.axes_style(\r\n {\"axes.facecolor\": \"#cadce0\",\r\n 'figure.facecolor': '#cadce0',\r\n }):\r\n fig = Figure(figsize=(10, 3), dpi=100)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n viajesxhora.plot(ax=ax, legend=False, label=False)\r\n ax.set_title(title, fontsize=8)\r\n ax.set_xlabel('Hora', fontsize=8)\r\n ax.set_ylabel('Viajes', fontsize=8)\r\n ax.set_xticks(list(range(0, 24)))\r\n ax.tick_params(labelsize=6)\r\n\r\n print(\"Nuevos archivos en resultados: \", f'{alias}{savefile_}')\r\n 
db_path = os.path.join(\"resultados\", \"png\", f\"{alias}{savefile_}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\"resultados\", \"pdf\", f\"{alias}{savefile_}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n # Viajes por hora y modo de transporte\r\n viajesxhora = pd.concat([viajes, df_aux], ignore_index=True)\r\n viajesxhora['hora'] = viajesxhora.hora.astype(str).str[:2].str.zfill(2)\r\n viajesxhora = viajesxhora.groupby(\r\n ['dia', 'hora', 'modo'], as_index=False).cant.sum()\r\n viajesxhora = viajesxhora.groupby(\r\n ['hora', 'modo'], as_index=False).cant.mean()\r\n\r\n viajesxhora.loc[viajesxhora.modo.str.contains(\r\n 'Multi'), 'modo'] = 'Multietapa'\r\n viajesxhora = viajesxhora.groupby(['hora', 'modo'])[\r\n 'cant'].sum().reset_index()\r\n\r\n viajesxhora['cant'] = viajesxhora['cant'].round().astype(int)\r\n\r\n # guarda distribución de viajes para dashboard\r\n viajesxhora_dash = pd.concat(\r\n [viajesxhora_dash, viajesxhora], ignore_index=True)\r\n\r\n viajesxhora_dash['tipo_dia'] = tipo_dia\r\n viajesxhora_dash['desc_dia'] = desc_dia\r\n\r\n viajesxhora_dash = viajesxhora_dash[[\r\n 'tipo_dia', 'desc_dia', 'hora', 'cant', 'modo']]\r\n viajesxhora_dash.columns = ['tipo_dia',\r\n 'desc_dia', 'Hora', 'Viajes', 'Modo']\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n\r\n query = f\"\"\"\r\n DELETE FROM viajes_hora\r\n WHERE desc_dia = \"{desc_dia}\"\r\n and tipo_dia = \"{tipo_dia}\"\r\n \"\"\"\r\n\r\n conn_dash.execute(query)\r\n conn_dash.commit()\r\n\r\n modos = viajesxhora_dash.Modo.unique().tolist()\r\n hrs = [str(i).zfill(2) for i in range(0, 24)]\r\n for modo in modos:\r\n for hr in hrs:\r\n if len(viajesxhora_dash.loc[(viajesxhora_dash.Modo == modo) & (viajesxhora_dash.Hora == hr)]) == 0:\r\n\r\n viajesxhora_dash = pd.concat([\r\n viajesxhora_dash,\r\n pd.DataFrame([[tipo_dia,\r\n desc_dia,\r\n hr,\r\n 0,\r\n modo]],\r\n columns=viajesxhora_dash.columns)\r\n ])\r\n\r\n viajesxhora_dash.to_sql(\"viajes_hora\", conn_dash,\r\n if_exists=\"append\", index=False)\r\n\r\n conn_dash.close()\r\n\r\n indicadores_hora_punta(viajesxhora_dash, desc_dia, tipo_dia)\r\n\r\n # Viajes por hora\r\n savefile_ = f'{savefile}_modo'\r\n with sns.axes_style(\r\n {\"axes.facecolor\": \"#cadce0\",\r\n 'figure.facecolor': '#cadce0',\r\n }):\r\n\r\n fig = Figure(figsize=(10, 3), dpi=100)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n for i in viajesxhora.modo.unique():\r\n viajesxhora[viajesxhora.modo == i].reset_index().plot(\r\n ax=ax, y='cant', legend=True, label=i)\r\n\r\n ax.set_title(title, fontsize=8)\r\n ax.set_xlabel('Hora', fontsize=8)\r\n ax.set_ylabel('Viajes', fontsize=8)\r\n ax.set_xticks(list(range(0, 24)))\r\n ax.tick_params(labelsize=6)\r\n print(\"Nuevos archivos en resultados: \", f'{alias}{savefile_}')\r\n db_path = os.path.join(\"resultados\", \"png\", f\"{alias}{savefile_}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\"resultados\", \"pdf\", f\"{alias}{savefile_}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n # Distribución de viajes\r\n savefile_ = f'{savefile}_dist'\r\n vi = viajes[(viajes.distance_osm_drive.notna())\r\n & (viajes.distance_osm_drive > 0)\r\n & (viajes.h3_o != viajes.h3_d)]\r\n\r\n if len(vi) == 0:\r\n return None\r\n\r\n vi['distance_osm_drive'] = vi['distance_osm_drive'].astype(int)\r\n\r\n vi_modo = vi\\\r\n .groupby(['distance_osm_drive', 'modo'], as_index=False)\\\r\n 
.factor_expansion_linea.sum()\\\r\n .rename(columns={'factor_expansion_linea': 'cant'})\r\n\r\n vi = vi\\\r\n .groupby('distance_osm_drive', as_index=False)\\\r\n .factor_expansion_linea.sum()\\\r\n .rename(columns={'factor_expansion_linea': 'cant'})\r\n\r\n vi = vi.loc[vi.cant > 0, ['distance_osm_drive', 'cant']\r\n ].sort_values('distance_osm_drive')\r\n\r\n vi['pc'] = round(vi.cant / vi.cant.sum() * 100, 5)\r\n vi['csum'] = vi.pc.cumsum()\r\n vi = vi[vi.csum <= 99.5]\r\n vi['Viajes (en miles)'] = round(vi.cant/1000)\r\n\r\n vi_modo['pc'] = round(vi_modo.cant / vi_modo.cant.sum() * 100, 5)\r\n vi_modo['csum'] = vi_modo.pc.cumsum()\r\n vi_modo = vi_modo[vi_modo.csum <= 99.5]\r\n\r\n # guarda distribución de viajes para dashboard\r\n\r\n vi_dash = vi.copy()\r\n vi_dash['modo'] = 'Todos'\r\n vi_dash = pd.concat([vi_dash, vi_modo], ignore_index=True)\r\n\r\n vi_dash['tipo_dia'] = tipo_dia\r\n vi_dash['desc_dia'] = desc_dia\r\n\r\n vi_dash = vi_dash[['desc_dia', 'tipo_dia',\r\n 'distance_osm_drive', 'cant', 'modo']]\r\n vi_dash.columns = ['desc_dia', 'tipo_dia', 'Distancia', 'Viajes', 'Modo']\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n query = f\"\"\"\r\n DELETE FROM distribucion\r\n WHERE desc_dia = \"{desc_dia}\"\r\n and tipo_dia = \"{tipo_dia}\"\r\n \"\"\"\r\n conn_dash.execute(query)\r\n conn_dash.commit()\r\n\r\n vi_dash.to_sql(\"distribucion\", conn_dash, if_exists=\"append\", index=False)\r\n conn_dash.close()\r\n\r\n ytitle = \"Viajes\"\r\n if vi.cant.mean() > 1000:\r\n vi['cant'] = round(vi['cant']/1000)\r\n ytitle = \"Viajes (en miles)\"\r\n\r\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \"#cadce0\",\r\n 'figure.facecolor': '#cadce0', \"grid.linestyle\": \":\"})\r\n\r\n fig = Figure(figsize=(8, 4), dpi=200)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n sns.histplot(x='distance_osm_drive', weights='cant',\r\n data=vi, bins=len(vi), ax=ax) # element='poly',\r\n ax.set_title(title, fontsize=12)\r\n ax.set_xlabel(\"Distancia (kms)\", fontsize=10)\r\n ax.set_ylabel(ytitle, fontsize=10)\r\n ax.set_xticks(list(range(0, len(vi)+1, 5)))\r\n\r\n fig.tight_layout()\r\n\r\n print(\"Nuevos archivos en resultados: \", f'{alias}{savefile_}')\r\n db_path = os.path.join(\"resultados\", \"png\", f\"{alias}{savefile_}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\"resultados\", \"pdf\", f\"{alias}{savefile_}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n\r\ndef imprime_burbujas(df,\r\n res=7,\r\n h3_o='h3_o',\r\n alpha=.4,\r\n cmap='viridis_r',\r\n var_fex='',\r\n porc_viajes=90,\r\n title='burbujas',\r\n savefile='burbujas',\r\n show_fig=False,\r\n k_jenks=5):\r\n\r\n pd.options.mode.chained_assignment = None\r\n configs = leer_configs_generales()\r\n db_path = traigo_db_path\r\n alias = leer_alias()\r\n\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n zonas = pd.read_sql_query(\r\n \"\"\"\r\n SELECT * from zonas\r\n \"\"\",\r\n conn_insumos,\r\n )\r\n\r\n conn_data.close()\r\n conn_insumos.close()\r\n\r\n zonas = gpd.GeoDataFrame(\r\n zonas,\r\n geometry=gpd.points_from_xy(zonas['longitud'], zonas['latitud']),\r\n crs=4326)\r\n\r\n if len(var_fex) == 0:\r\n var_fex = 'fex'\r\n df[var_fex] = 1\r\n\r\n df_agg = crea_df_burbujas(df,\r\n zonas,\r\n h3_o=h3_o,\r\n var_fex=var_fex,\r\n porc_viajes=porc_viajes,\r\n res=res\r\n )\r\n\r\n df_agg[var_fex] = df_agg[var_fex].round().astype(int)\r\n\r\n if len(df_agg) > 0:\r\n\r\n 
multip = df_agg[var_fex].head(1).values[0] / 500\r\n\r\n fig = Figure(figsize=(13.5, 13.5), dpi=100)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n zonas[zonas['h3'].isin(df[h3_o].unique())].to_crs(\r\n 3857).plot(ax=ax, alpha=0)\r\n try:\r\n df_agg.to_crs(3857).plot(ax=ax,\r\n alpha=alpha,\r\n cmap=cmap,\r\n markersize=df_agg[var_fex] / multip,\r\n column=var_fex,\r\n scheme='FisherJenks',\r\n k=k_jenks,\r\n legend=True,\r\n legend_kwds={\r\n 'loc': 'upper right',\r\n 'title': 'Viajes',\r\n 'fontsize': 8,\r\n 'title_fontsize': 10,\r\n }\r\n )\r\n try:\r\n ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron,\r\n attribution=None, attribution_size=10)\r\n except (r_ConnectionError, ValueError):\r\n pass\r\n\r\n ax.set_title(title, fontsize=12)\r\n\r\n leg = ax.get_legend()\r\n # leg._loc = 3\r\n\r\n for lbl in leg.get_texts():\r\n label_text = lbl.get_text()\r\n lower = label_text.split(',')[0]\r\n upper = label_text.split(',')[1]\r\n new_text = f'{float(lower):,.0f} - {float(upper):,.0f}'\r\n lbl.set_text(new_text)\r\n\r\n ax.add_artist(\r\n ScaleBar(1, location='lower right', box_alpha=0, pad=1))\r\n ax.axis('off')\r\n\r\n if len(savefile) > 0:\r\n print(\"Nuevos archivos en resultados: \", f'{alias}{savefile}')\r\n db_path = os.path.join(\"resultados\", \"png\",\r\n f\"{alias}{savefile}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\"resultados\", \"pdf\",\r\n f\"{alias}{savefile}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n if show_fig:\r\n display(fig)\r\n\r\n except (ValueError) as e:\r\n print(e)\r\n\r\n\r\ndef traigo_zonificacion(viajes,\r\n zonas,\r\n h3_o='h3_o',\r\n h3_d='h3_d',\r\n res_agg=False):\r\n \"\"\"\r\n Esta funcion toma la tabla viajes\r\n la tabla zonas, los nombres de las columnas con el h3 de origen y destino\r\n y un parametro para usar h3 con resolucion mas agregada\r\n y clasifica los origenes y destinos de los viajes para cada zona\r\n \"\"\"\r\n configs = leer_configs_generales()\r\n\r\n matriz_zonas = []\r\n vars_zona = []\r\n if 'Zona_voi' in zonas.columns:\r\n\r\n matriz_zonas = [['',\r\n 'Zona_voi',\r\n [str(x) for x in list(\r\n range(1, len(zonas.Zona_voi.unique())+1))]\r\n ]]\r\n vars_zona = ['Zona_voi']\r\n\r\n if res_agg:\r\n zonas[f'h3_r6'] = zonas['h3'].apply(h3.h3_to_parent, res=6)\r\n zonas[f'h3_r7'] = zonas['h3'].apply(h3.h3_to_parent, res=7)\r\n\r\n matriz_zonas += [['', f'h3_r6', ''],\r\n ['', f'h3_r7', '']]\r\n vars_zona += [f'h3_r6']\r\n vars_zona += [f'h3_r7']\r\n\r\n if configs[\"zonificaciones\"]:\r\n for n in range(0, 5):\r\n\r\n try:\r\n file_zona = configs[\"zonificaciones\"][f\"geo{n+1}\"]\r\n var_zona = configs[\"zonificaciones\"][f\"var{n+1}\"]\r\n\r\n try:\r\n matriz_order = configs[\"zonificaciones\"][f\"orden{n+1}\"]\r\n except KeyError:\r\n matriz_order = \"\"\r\n\r\n if matriz_order is None:\r\n matriz_order = \"\"\r\n\r\n if var_zona in zonas.columns:\r\n matriz_zonas += [[file_zona, var_zona, matriz_order]]\r\n vars_zona += [var_zona]\r\n except KeyError:\r\n pass\r\n\r\n vars_o = [h3_o] + [f'{x}_o' for x in vars_zona]\r\n vars_d = [h3_d] + [f'{x}_d' for x in vars_zona]\r\n\r\n zonas_tmp = zonas[['h3']+vars_zona]\r\n zonas_tmp.columns = vars_o\r\n viajes = viajes.merge(\r\n zonas_tmp,\r\n on=h3_o\r\n )\r\n\r\n zonas_tmp = zonas[['h3']+vars_zona]\r\n zonas_tmp.columns = vars_d\r\n viajes = viajes.merge(\r\n zonas_tmp,\r\n on=h3_d\r\n )\r\n return viajes, matriz_zonas\r\n\r\n\r\ndef imprime_od(\r\n df,\r\n zona_origen,\r\n 
zona_destino,\r\n var_fex=\"\",\r\n normalize=False,\r\n margins=False,\r\n matriz_order=\"\",\r\n matriz_order_row=\"\",\r\n matriz_order_col=\"\",\r\n path_resultados=Path(),\r\n savefile=\"\",\r\n title=\"Matriz OD\",\r\n figsize_tuple='',\r\n fontsize=12,\r\n fmt=\"\",\r\n cbar=False,\r\n x_rotation=0,\r\n y_rotation=0,\r\n cmap=\"Blues\",\r\n total_color=\"navy\",\r\n total_background_color=\"white\",\r\n show_fig=False,\r\n alias='',\r\n desc_dia='',\r\n tipo_dia='',\r\n var_zona='',\r\n filtro1='',\r\n):\r\n\r\n if len(fmt) == 0:\r\n if normalize:\r\n fmt = \".1%\"\r\n else:\r\n fmt = \".1f\"\r\n df = df[(df[zona_origen].notna()) & (df[zona_destino].notna())].copy()\r\n\r\n fill_value = mcolors.to_rgba(total_background_color)\r\n\r\n if len(matriz_order) > 0:\r\n matriz_order_row = matriz_order\r\n matriz_order_col = matriz_order\r\n\r\n if len(var_fex) == 0:\r\n var_fex = 'fex'\r\n df[var_fex] = 1\r\n\r\n df = df.groupby(['dia', zona_origen, zona_destino],\r\n as_index=False)[var_fex].sum()\r\n df = df.groupby([zona_origen, zona_destino], as_index=False)[\r\n var_fex].mean()\r\n\r\n df[var_fex] = df[var_fex].round().astype(int)\r\n\r\n if len(df) > 0:\r\n\r\n vals = df.loc[~(df[zona_origen].isin(\r\n df[zona_destino].unique())), zona_origen].unique()\r\n for i in vals:\r\n df = pd.concat([df,\r\n pd.DataFrame(\r\n [[i, i, 0]],\r\n columns=[\r\n zona_origen,\r\n zona_destino,\r\n var_fex\r\n ])\r\n ])\r\n vals = df.loc[~(df[zona_destino].isin(\r\n df[zona_origen].unique())), zona_destino].unique()\r\n for i in vals:\r\n df = pd.concat([df,\r\n pd.DataFrame(\r\n [[i, i, 0]],\r\n columns=[\r\n zona_destino,\r\n zona_origen,\r\n var_fex])\r\n ])\r\n\r\n od_heatmap = pd.crosstab(\r\n index=df[zona_origen],\r\n columns=df[zona_destino],\r\n values=df[var_fex],\r\n aggfunc=\"sum\",\r\n normalize=normalize,\r\n )\r\n\r\n if len(figsize_tuple) == 0:\r\n figsize_tuple = (len(od_heatmap)+1, len(od_heatmap)+1)\r\n\r\n matriz_order = [i for i in matriz_order if i in od_heatmap.columns]\r\n matriz_order_row = [\r\n i for i in matriz_order_row if i in od_heatmap.columns]\r\n matriz_order_col = [\r\n i for i in matriz_order_col if i in od_heatmap.columns]\r\n\r\n if len(matriz_order_col) > 0:\r\n od_heatmap = od_heatmap[matriz_order_col]\r\n if len(matriz_order_row) > 0:\r\n\r\n od_heatmap = (\r\n od_heatmap.reset_index()\r\n .sort_values(\r\n zona_origen,\r\n key=lambda s: s.apply(matriz_order_row.index),\r\n ignore_index=True,\r\n )\r\n .set_index(zona_origen)\r\n )\r\n\r\n for _ in od_heatmap.columns:\r\n od_heatmap.loc[od_heatmap[_] == 0, _] = None\r\n\r\n if margins:\r\n od_heatmap_sintot = od_heatmap.copy()\r\n od_heatmap[\"Total\"] = od_heatmap.sum(axis=1)\r\n od_heatmap = pd.concat(\r\n [od_heatmap, pd.DataFrame([od_heatmap.sum()], index=[\"Total\"])]\r\n )\r\n\r\n od_heatmap_tmp = od_heatmap.copy()\r\n od_heatmap_tmp[\"Total\"] = 0\r\n od_heatmap_tmp.iloc[len(od_heatmap_tmp) - 1] = 0\r\n\r\n fig = Figure(figsize=figsize_tuple, dpi=150)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n sns.heatmap(\r\n od_heatmap_tmp,\r\n cmap=cmap,\r\n annot=True,\r\n fmt=fmt,\r\n annot_kws={\"size\": fontsize},\r\n square=True,\r\n linewidth=0.5,\r\n cbar=cbar,\r\n ax=ax,\r\n )\r\n\r\n # find your QuadMesh object and get array of colors\r\n facecolors_anterior = ax.findobj(QuadMesh)[0].get_facecolors()\r\n\r\n fig = Figure(figsize=figsize_tuple, dpi=150)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n sns.heatmap(\r\n od_heatmap,\r\n cmap=cmap,\r\n annot=True,\r\n 
fmt=fmt,\r\n square=True,\r\n linewidth=0.5,\r\n cbar=cbar,\r\n ax=ax,\r\n xticklabels=True,\r\n yticklabels=True,\r\n )\r\n\r\n ax.set_title(title, fontsize=fontsize)\r\n ax.set_ylabel(\"Origen\", fontsize=fontsize)\r\n ax.set_xlabel(\"Destino\", fontsize=fontsize)\r\n\r\n # move x and y ticks\r\n ax.xaxis.set_label_position(\"top\")\r\n ax.yaxis.set_label_position(\"right\")\r\n\r\n ax.set_xticklabels(\r\n od_heatmap.columns.tolist(),\r\n rotation=x_rotation,\r\n ha=\"right\",\r\n )\r\n\r\n if margins:\r\n\r\n # find your QuadMesh object and get array of colors\r\n quadmesh = ax.findobj(QuadMesh)[0]\r\n facecolors = quadmesh.get_facecolors()\r\n\r\n # replace background heatmap colors\r\n for i in range(0, len(facecolors)):\r\n if (((i + 1) % len(od_heatmap.columns)) != 0) & (\r\n i < (len(facecolors) - len(od_heatmap.columns))\r\n ):\r\n facecolors[i] = facecolors_anterior[i]\r\n else:\r\n facecolors[i] = fill_value\r\n\r\n # set modified colors\r\n quadmesh.set_facecolors = facecolors\r\n\r\n # modify all text to black or white\r\n lst = []\r\n for _ in od_heatmap_sintot.columns:\r\n lst += od_heatmap[_].tolist()\r\n val_min = pd.DataFrame(lst, columns=[\"valor\"]).drop_duplicates()\r\n val_min[\"val_type\"] = pd.qcut(\r\n val_min.valor,\r\n q=4,\r\n labels=[\"1\", \"2\", \"3\", \"4\"],\r\n )\r\n val_min = val_min[val_min.val_type == \"4\"].valor.min()\r\n\r\n col_totals = np.arange(\r\n len(od_heatmap.columns) - 1,\r\n (len(od_heatmap.columns)) * len(od_heatmap),\r\n len(od_heatmap.columns),\r\n ).tolist()\r\n ii = 0\r\n for i in ax.findobj(Text):\r\n\r\n if (ii in col_totals[:-1]) | (\r\n (ii >= len(facecolors) - len(od_heatmap.columns))\r\n & (ii < len(facecolors) - 1)\r\n ):\r\n i.set_color(total_color)\r\n else:\r\n try:\r\n value_i = (\r\n str(i)\r\n .replace(\"Text(\", \"\")\r\n .replace(\")\", \"\")\r\n .replace(\"'\", \"\")\r\n .split(\",\")[2]\r\n .replace(\" \", \"\")\r\n )\r\n\r\n if value_i == \"Total\":\r\n i.set_color(total_color)\r\n\r\n if ii <= len(facecolors) - 1:\r\n value_i = float(value_i)\r\n cond = (value_i >= val_min) | (\r\n ii == len(facecolors) - 1)\r\n if cond:\r\n i.set_color(\"white\")\r\n\r\n if value_i == 0:\r\n facecolors[ii] = fill_value\r\n i.set_color(\"white\")\r\n\r\n except:\r\n pass\r\n ii += 1\r\n\r\n fig.tight_layout()\r\n\r\n if len(savefile) > 0:\r\n\r\n savefile = savefile+'_matrizod'\r\n\r\n print(\"Nuevos archivos en resultados: \", savefile)\r\n\r\n db_path = os.path.join(\"resultados\", \"png\", f\"{savefile}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\"resultados\", \"pdf\", f\"{savefile}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\r\n \"resultados\", \"matrices\", f\"{savefile}.xlsx\")\r\n\r\n if normalize:\r\n dash_tot = df.copy()\r\n od1 = pd.crosstab(\r\n index=df[zona_origen],\r\n columns=df[zona_destino],\r\n values=df[var_fex],\r\n aggfunc=\"sum\",\r\n normalize=False,\r\n margins=margins,\r\n )\r\n\r\n pd.concat(\r\n [od1, pd.DataFrame([[], []]), od_heatmap],\r\n ).to_excel(db_path)\r\n\r\n else:\r\n od_heatmap.to_excel(path_resultados / (db_path))\r\n\r\n if show_fig:\r\n display(fig)\r\n\r\n # Guardo datos para el dashboard\r\n if 'h3_r' not in var_zona:\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n\r\n df = df[[zona_origen, zona_destino, var_fex]].copy()\r\n df.columns = ['Origen', 'Destino', 'Viajes']\r\n\r\n df['desc_dia'] = desc_dia\r\n df['tipo_dia'] = tipo_dia\r\n df['var_zona'] = var_zona.replace('h3_r', 
'H3 Resolucion ')\r\n df['filtro1'] = filtro1\r\n\r\n df_ant = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM matrices\r\n \"\"\",\r\n conn_dash,\r\n )\r\n\r\n df_ant = df_ant[~(\r\n (df_ant.desc_dia == desc_dia) &\r\n (df_ant.tipo_dia == tipo_dia) &\r\n (df_ant.var_zona == var_zona\r\n .replace('h3_r', 'H3 Resolucion ')) &\r\n (df_ant.filtro1 == filtro1)\r\n )]\r\n\r\n df = pd.concat([df_ant, df], ignore_index=True)\r\n\r\n if len(matriz_order_row) == 0:\r\n matriz_order_row = od_heatmap.reset_index()[\r\n zona_origen].unique()\r\n if len(matriz_order_col) == 0:\r\n matriz_order_col = od_heatmap.columns\r\n\r\n n = 1\r\n cols = []\r\n for i in matriz_order_row:\r\n cols += [str(n).zfill(3)+'_'+str(i)]\r\n n += 1\r\n df['Origen'] = df.Origen.replace(matriz_order_row, cols)\r\n\r\n n = 1\r\n cols = []\r\n for i in matriz_order_col:\r\n cols += [str(n).zfill(3)+'_'+str(i)]\r\n n += 1\r\n df['Destino'] = df.Destino.replace(matriz_order_row, cols)\r\n\r\n df.to_sql(\"matrices\", conn_dash, if_exists=\"replace\", index=False)\r\n conn_dash.close()\r\n\r\n\r\ndef lineas_deseo(df,\r\n zonas,\r\n var_zona,\r\n var_fex,\r\n h3_o,\r\n h3_d,\r\n alpha=.4,\r\n cmap='viridis_r',\r\n porc_viajes=100,\r\n title='Lí­neas de deseo',\r\n savefile='lineas_deseo',\r\n show_fig=True,\r\n normalizo_latlon=True,\r\n k_jenks=5,\r\n alias='',\r\n desc_dia='',\r\n tipo_dia='',\r\n zona='',\r\n filtro1='',\r\n ):\r\n\r\n hexs = zonas[(zonas.fex.notna()) & (zonas.fex != 0)]\\\r\n .groupby(var_zona, as_index=False)\\\r\n .size().drop(['size'], axis=1)\r\n\r\n hexs = hexs.merge(\r\n zonas[(zonas.fex.notna()) & (zonas.fex != 0)]\r\n .groupby(var_zona)\r\n .apply(lambda x: np.average(x['longitud'], weights=x['fex']))\r\n .reset_index()\r\n .rename(columns={0: 'longitud'}), how='left')\r\n\r\n hexs = hexs.merge(\r\n zonas[(zonas.fex.notna()) & (zonas.fex != 0)]\r\n .groupby(var_zona)\r\n .apply(lambda x: np.average(x['latitud'], weights=x['fex']))\r\n .reset_index()\r\n .rename(columns={0: 'latitud'}), how='left')\r\n\r\n tmp_o = f'{var_zona}_o'\r\n tmp_d = f'{var_zona}_d'\r\n\r\n if 'h3_' in tmp_o:\r\n tmp_h3_o = tmp_o\r\n tmp_h3_d = tmp_d\r\n else:\r\n tmp_h3_o = h3_o\r\n tmp_h3_d = h3_d\r\n\r\n # Normalizo con nueva zonificación (ESTO HACE QUE TODOS LOS ORIGENES\r\n # Y DESTINOS TENGAN UN MISMO SENTIDO)\r\n if (tmp_o != tmp_h3_o) & (tmp_d != tmp_h3_d):\r\n df_agg = df.groupby(['dia', tmp_h3_o, tmp_h3_d, tmp_o,\r\n tmp_d], as_index=False).agg({var_fex: 'sum'})\r\n else:\r\n df_agg = df.groupby(['dia', tmp_h3_o, tmp_h3_d],\r\n as_index=False).agg({var_fex: 'sum'})\r\n\r\n if normalizo_latlon:\r\n df_agg = normalizo_lat_lon(df_agg,\r\n h3_o=tmp_h3_o,\r\n h3_d=tmp_h3_d,\r\n origen=tmp_o,\r\n destino=tmp_d,\r\n )\r\n\r\n tmp_o = f'{var_zona}_o_norm'\r\n tmp_d = f'{var_zona}_d_norm'\r\n\r\n # Agrego a res de gráfico latlong\r\n df_agg = df_agg.groupby(['dia', tmp_o, tmp_d], as_index=False).agg(\r\n {var_fex: 'sum'})\r\n\r\n df_agg = df_agg.groupby([tmp_o, tmp_d], as_index=False).agg(\r\n {var_fex: 'mean'})\r\n\r\n df_agg[var_fex] = df_agg[var_fex].round().astype(int)\r\n\r\n df_agg = df_agg.merge(\r\n hexs.rename(columns={var_zona: tmp_o,\r\n 'latitud': 'lat_o',\r\n 'longitud': 'lon_o'})\r\n )\r\n df_agg = df_agg.merge(\r\n hexs.rename(columns={var_zona: tmp_d,\r\n 'latitud': 'lat_d',\r\n 'longitud': 'lon_d'})\r\n )\r\n\r\n df_agg = df_agg.sort_values(\r\n var_fex, ascending=False).reset_index(drop=True)\r\n df_agg['cumsum'] = round(\r\n df_agg[var_fex].cumsum() / df_agg[var_fex].sum() * 100)\r\n\r\n df_agg = 
df_agg[df_agg['cumsum'] <= porc_viajes]\r\n\r\n df_agg = df_agg[df_agg[tmp_o] != df_agg[tmp_d]]\r\n\r\n if len(df_agg) > 0:\r\n try:\r\n df_agg = crear_linestring(\r\n df_agg, 'lon_o', 'lat_o', 'lon_d', 'lat_d')\r\n\r\n multip = df_agg[var_fex].head(1).values[0] / 10\r\n\r\n fig = Figure(figsize=(13.5, 13.5), dpi=150)\r\n canvas = FigureCanvas(fig)\r\n ax = fig.add_subplot(111)\r\n\r\n zonas[zonas['h3'].isin(df[h3_o].unique())].to_crs(\r\n 3857).plot(ax=ax, alpha=0)\r\n\r\n # En caso de que no haya suficientes casos para 5 jenks\r\n try:\r\n df_agg.to_crs(3857).plot(ax=ax,\r\n alpha=alpha,\r\n cmap=cmap,\r\n lw=df_agg[var_fex]/multip,\r\n column=var_fex,\r\n scheme='FisherJenks',\r\n k=k_jenks,\r\n legend=True,\r\n legend_kwds={\r\n 'loc': 'upper right',\r\n 'title': 'Viajes',\r\n 'fontsize': 8,\r\n 'title_fontsize': 10,\r\n }\r\n )\r\n try:\r\n ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron,\r\n attribution=None, attribution_size=10)\r\n except (r_ConnectionError, ValueError):\r\n pass\r\n\r\n leg = ax.get_legend()\r\n # leg._loc = 3\r\n for lbl in leg.get_texts():\r\n label_text = lbl.get_text()\r\n lower = label_text.split(',')[0]\r\n upper = label_text.split(',')[1]\r\n new_text = f'{float(lower):,.0f} - {float(upper):,.0f}'\r\n lbl.set_text(new_text)\r\n\r\n title_ = f'{title}: {var_zona}s'\r\n\r\n ax.set_title(title_, fontsize=12)\r\n ax.add_artist(\r\n ScaleBar(1, location='lower right', box_alpha=0, pad=1))\r\n ax.axis('off')\r\n\r\n fig.tight_layout()\r\n\r\n if len(savefile) > 0:\r\n\r\n savefile = savefile+'_lineas_deseo'\r\n\r\n print(\"Nuevos archivos en resultados: \",\r\n savefile)\r\n db_path = os.path.join(\r\n \"resultados\", \"png\", f\"{savefile}.png\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n db_path = os.path.join(\r\n \"resultados\", \"pdf\", f\"{savefile}.pdf\")\r\n fig.savefig(db_path, dpi=300, bbox_inches=\"tight\")\r\n\r\n # Guarda geojson para el dashboard\r\n # if not 'h3_r' in var_zona:\r\n df_folium = df_agg.copy()\r\n df_folium.columns = ['Origen', 'Destino', 'Viajes',\r\n 'lon_o', 'lat_o', 'lon_d', 'lat_d',\r\n 'cumsum', 'geometry']\r\n\r\n df_folium = df_folium[[\r\n 'Origen', 'Destino', 'Viajes', 'lon_o', 'lat_o',\r\n 'lon_d', 'lat_d']]\r\n\r\n df_folium['desc_dia'] = desc_dia\r\n df_folium['tipo_dia'] = tipo_dia\r\n df_folium['var_zona'] = var_zona.replace(\r\n 'h3_r', 'H3 Resolucion ')\r\n df_folium['filtro1'] = filtro1\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n var_zona_q = var_zona.replace('h3_r', 'H3 Resolucion ')\r\n\r\n query = f\"\"\"\r\n DELETE FROM lineas_deseo\r\n WHERE\r\n desc_dia = '{desc_dia}' and\r\n tipo_dia = '{tipo_dia}' and\r\n var_zona = '{var_zona_q}' and\r\n filtro1 = '{filtro1}'\r\n \"\"\"\r\n\r\n conn_dash.execute(query)\r\n conn_dash.commit()\r\n\r\n df_folium.to_sql(\"lineas_deseo\", conn_dash,\r\n if_exists=\"append\", index=False)\r\n conn_dash.close()\r\n\r\n crear_mapa_folium(df_agg,\r\n cmap,\r\n var_fex,\r\n savefile=f\"{savefile}.html\",\r\n k_jenks=k_jenks)\r\n\r\n if show_fig:\r\n display(fig)\r\n except (ValueError) as e:\r\n print(e)\r\n\r\n except (ValueError) as e:\r\n pass\r\n\r\n\r\ndef crea_df_burbujas(df,\r\n zonas,\r\n h3_o='h3_o',\r\n var_fex='',\r\n porc_viajes=100,\r\n res=7):\r\n\r\n zonas['h3_o_tmp'] = zonas['h3'].apply(h3.h3_to_parent, res=res)\r\n\r\n hexs = zonas[(zonas.fex.notna()) & (zonas.fex != 0)].groupby(\r\n 'h3_o_tmp', as_index=False).size().drop(['size'], axis=1)\r\n\r\n hexs = hexs.merge(\r\n zonas[(zonas.fex.notna()) & (zonas.fex != 0)]\r\n 
.groupby('h3_o_tmp')\r\n .apply(lambda x: np.average(x['longitud'], weights=x['fex']))\r\n .reset_index().rename(columns={0: 'longitud'}), how='left')\r\n\r\n hexs = hexs.merge(\r\n zonas[(zonas.fex.notna()) & (zonas.fex != 0)]\r\n .groupby('h3_o_tmp')\r\n .apply(lambda x: np.average(x['latitud'], weights=x['fex']))\r\n .reset_index()\r\n .rename(columns={0: 'latitud'}), how='left')\r\n\r\n df['h3_o_tmp'] = df[h3_o].apply(h3.h3_to_parent, res=res)\r\n\r\n # Agrego a res de gráfico latlong\r\n df_agg = df.groupby(['dia', 'h3_o_tmp'],\r\n as_index=False).agg({var_fex: 'sum'})\r\n df_agg = df_agg.groupby(\r\n ['h3_o_tmp'], as_index=False).agg({var_fex: 'mean'})\r\n\r\n df_agg = df_agg.merge(\r\n hexs.rename(columns={'latitud': 'lat_o',\r\n 'longitud': 'lon_o'})\r\n )\r\n\r\n df_agg = gpd.GeoDataFrame(\r\n df_agg,\r\n geometry=gpd.points_from_xy(df_agg['lon_o'], df_agg['lat_o']),\r\n crs=4326,)\r\n\r\n df_agg = df_agg.sort_values(\r\n var_fex, ascending=False).reset_index(drop=True)\r\n df_agg['cumsum'] = round(df_agg[var_fex].cumsum() /\r\n df_agg[var_fex].sum() * 100)\r\n df_agg = df_agg[df_agg['cumsum'] < porc_viajes]\r\n\r\n return df_agg\r\n\r\n\r\ndef crear_mapa_folium(df_agg,\r\n cmap,\r\n var_fex,\r\n savefile,\r\n k_jenks=5):\r\n\r\n bins = [df_agg[var_fex].min()-1] + \\\r\n mapclassify.FisherJenks(df_agg[var_fex], k=k_jenks).bins.tolist()\r\n range_bins = range(0, len(bins)-1)\r\n bins_labels = [\r\n f'{int(bins[n])} a {int(bins[n+1])} viajes' for n in range_bins]\r\n df_agg['cuts'] = pd.cut(df_agg[var_fex], bins=bins, labels=bins_labels)\r\n\r\n from folium import Figure\r\n fig = Figure(width=800, height=800)\r\n m = folium.Map(location=[df_agg.lat_o.mean(\r\n ), df_agg.lon_o.mean()], zoom_start=9, tiles='cartodbpositron')\r\n\r\n title_html = \"\"\"\r\n
<h3 align=\"center\"><b>Your map title</b></h3>
\r\n \"\"\"\r\n m.get_root().html.add_child(folium.Element(title_html))\r\n\r\n line_w = 0.5\r\n\r\n colors = mcp.gen_color(cmap=cmap, n=k_jenks)\r\n\r\n n = 0\r\n for i in bins_labels:\r\n\r\n df_agg[df_agg.cuts == i].explore(\r\n m=m,\r\n color=colors[n],\r\n style_kwds={'fillOpacity': 0.3, 'weight': line_w},\r\n name=i,\r\n tooltip=False,\r\n )\r\n n += 1\r\n line_w += 3\r\n\r\n folium.LayerControl(name='xx').add_to(m)\r\n\r\n fig.add_child(m)\r\n\r\n db_path = os.path.join(\"resultados\", \"html\", savefile)\r\n m.save(db_path)\r\n\r\n\r\ndef save_zones():\r\n \"\"\"\r\n Esta función guarda las geografí­as de las zonas para el dashboard\r\n \"\"\"\r\n print('Creando zonificación para dashboard')\r\n\r\n configs = leer_configs_generales()\r\n\r\n try:\r\n zonificaciones = configs['zonificaciones']\r\n except KeyError:\r\n zonificaciones = []\r\n\r\n geo_files = [['zona_voi.geojson', 'Zona_voi']]\r\n\r\n if zonificaciones:\r\n for n in range(0, 5):\r\n\r\n try:\r\n file_zona = zonificaciones[f\"geo{n+1}\"]\r\n var_zona = zonificaciones[f\"var{n+1}\"]\r\n geo_files += [[file_zona, var_zona]]\r\n\r\n except KeyError:\r\n pass\r\n\r\n zonas = pd.DataFrame([])\r\n for i in geo_files:\r\n file = os.path.join(\"data\", \"data_ciudad\", f'{i[0]}')\r\n if os.path.isfile(file):\r\n df = gpd.read_file(file)\r\n df = df[[i[1], 'geometry']]\r\n df.columns = ['Zona', 'geometry']\r\n df['tipo_zona'] = i[1]\r\n zonas = pd.concat([zonas, df])\r\n\r\n zonas = zonas.dissolve(by=['tipo_zona', 'Zona'], as_index=False)\r\n zonas['wkt'] = zonas.geometry.to_wkt()\r\n zonas = zonas.drop(['geometry'], axis=1)\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n zonas.to_sql(\"zonas\", conn_dash, if_exists=\"replace\", index=False)\r\n conn_dash.close()\r\n\r\n\r\ndef particion_modal(viajes_dia, etapas_dia, tipo_dia, desc_dia):\r\n\r\n particion_viajes = viajes_dia.groupby(\r\n 'modo', as_index=False).factor_expansion_linea.sum().round()\r\n particion_viajes['modal'] = (particion_viajes['factor_expansion_linea'] /\r\n viajes_dia.factor_expansion_linea.sum() * 100\r\n ).round()\r\n particion_viajes = particion_viajes.sort_values(\r\n 'modal', ascending=False).drop(['factor_expansion_linea'], axis=1)\r\n particion_viajes['tipo'] = 'viajes'\r\n particion_viajes['tipo_dia'] = tipo_dia\r\n particion_viajes['desc_dia'] = desc_dia\r\n particion_etapas = etapas_dia.groupby(\r\n 'modo', as_index=False).factor_expansion_linea.sum().round()\r\n\r\n particion_etapas['modal'] = (particion_etapas['factor_expansion_linea'] /\r\n etapas_dia.factor_expansion_linea.sum() * 100\r\n ).round()\r\n particion_etapas = particion_etapas.sort_values(\r\n 'modal', ascending=False).drop(['factor_expansion_linea'], axis=1)\r\n particion_etapas['tipo'] = 'etapas'\r\n particion_etapas['desc_dia'] = desc_dia\r\n particion_etapas['tipo_dia'] = tipo_dia\r\n particion = pd.concat(\r\n [particion_viajes, particion_etapas], ignore_index=True)\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n\r\n query = f'DELETE FROM particion_modal WHERE desc_dia = \"{desc_dia}\" & tipo_dia = \"{tipo_dia}\"'\r\n conn_dash.execute(query)\r\n conn_dash.commit()\r\n particion['modo'] = particion.modo.str.capitalize()\r\n particion.to_sql(\"particion_modal\", conn_dash,\r\n if_exists=\"append\", index=False)\r\n conn_dash.close()\r\n\r\n\r\ndef plot_dispatched_services_wrapper():\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n\r\n q = \"\"\"\r\n select *\r\n from services_by_line_hour\r\n where dia = 'weekday';\r\n \"\"\"\r\n service_data = 
pd.read_sql(q, conn_data)\r\n\r\n if len(service_data) > 0:\r\n service_data.groupby(['id_linea']).apply(\r\n plot_dispatched_services_by_line_day)\r\n\r\n conn_data.close()\r\n\r\n\r\ndef plot_dispatched_services_by_line_day(df):\r\n \"\"\"\r\n Reads services' data and plots how many services\r\n by line, type of day (weekday weekend), and hour.\r\n Saves it in results dir\r\n\r\n Parameters\r\n ----------\r\n df : pandas.DataFrame\r\n dataframe with dispatched services by hour from\r\n services_by_line_hour table with\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n line_id = df.id_linea.unique().item()\r\n day = df.dia.unique().item()\r\n\r\n if day == 'weekend':\r\n day_str = 'Fin de semana tipo'\r\n elif day == 'weekday':\r\n day_str = 'Dia de semana tipo'\r\n else:\r\n day_str = day\r\n\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n s = f\"select nombre_linea from metadata_lineas\" +\\\r\n f\" where id_linea = {line_id};\"\r\n id_linea_str = pd.read_sql(s, conn_insumos)\r\n conn_insumos.close()\r\n\r\n if len(id_linea_str) > 0:\r\n id_linea_str = id_linea_str.nombre_linea.item()\r\n id_linea_str = id_linea_str + ' -'\r\n else:\r\n id_linea_str = ''\r\n\r\n print(\"Creando plot de servicios despachados por linea\", \"id linea:\", line_id)\r\n\r\n f, ax = plt.subplots(figsize=(8, 6))\r\n sns.barplot(\r\n data=df,\r\n x=\"hora\",\r\n y=\"servicios\",\r\n hue=\"id_linea\",\r\n ax=ax)\r\n\r\n ax.get_legend().remove()\r\n ax.set_xlabel(\"Hora\")\r\n ax.set_ylabel(\"Cantidad de servicios despachados\")\r\n\r\n f.suptitle(f\"Cantidad de servicios despachados por hora y dí­a\",\r\n fontdict={'size': 18,\r\n 'weight': 'bold'})\r\n ax.set_title(f\"{id_linea_str} id linea: {line_id} - Dia: {day_str}\",\r\n fontdict={\"fontsize\": 11})\r\n\r\n ax.spines.right.set_visible(False)\r\n ax.spines.top.set_visible(False)\r\n ax.spines.bottom.set_visible(False)\r\n ax.spines.left.set_visible(False)\r\n ax.spines.left.set_position(('outward', 10))\r\n ax.spines.bottom.set_position(('outward', 10))\r\n\r\n ax.grid(axis='y')\r\n\r\n for frm in ['png', 'pdf']:\r\n archivo = f'servicios_despachados_id_linea_{line_id}_{day}.{frm}'\r\n db_path = os.path.join(\"resultados\", frm, archivo)\r\n f.savefig(db_path, dpi=300)\r\n plt.close()\r\n\r\n\r\ndef plot_basic_kpi_wrapper():\r\n sns.set_style(\"whitegrid\")\r\n\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n\r\n q = \"\"\"\r\n select *\r\n from basic_kpi_by_line_hr\r\n where dia = 'weekday';\r\n \"\"\"\r\n kpi_data = pd.read_sql(q, conn_data)\r\n\r\n if len(kpi_data) > 0:\r\n kpi_data.groupby(['id_linea']).apply(\r\n plot_basic_kpi, standarize_supply_demand=False)\r\n\r\n conn_data.close()\r\n\r\n\r\ndef plot_basic_kpi(kpi_by_line_hr, standarize_supply_demand=False,\r\n *args, **kwargs):\r\n line_id = kpi_by_line_hr.id_linea.unique().item()\r\n day = kpi_by_line_hr.dia.unique().item()\r\n alias = leer_alias()\r\n\r\n if day == 'weekend':\r\n day_str = 'Fin de semana tipo'\r\n elif day == 'weekday':\r\n day_str = 'Dia de semana tipo'\r\n else:\r\n day_str = day\r\n\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n s = f\"select nombre_linea from metadata_lineas\" +\\\r\n f\" where id_linea = {line_id} AND nombre_linea IS NOT NULL;\"\r\n\r\n id_linea_str = pd.read_sql(s, conn_insumos)\r\n conn_insumos.close()\r\n\r\n if len(id_linea_str) > 0:\r\n id_linea_str = id_linea_str.nombre_linea.item()\r\n id_linea_str = id_linea_str + ' -'\r\n else:\r\n id_linea_str = ''\r\n\r\n # Create empty df with 0 - 23 hrs\r\n 
kpi_stats_line_plot = pd.DataFrame(\r\n {'id_linea': [line_id] * 24, 'hora': range(0, 24)})\r\n\r\n kpi_stats_line_plot = kpi_stats_line_plot\\\r\n .merge(kpi_by_line_hr.query(f\"id_linea == {line_id}\"),\r\n on=['id_linea', 'hora'],\r\n how='left')\r\n\r\n if standarize_supply_demand:\r\n supply_factor = kpi_stats_line_plot.of.max()\\\r\n / kpi_stats_line_plot.veh.max()\r\n demand_factor = kpi_stats_line_plot.of.max()\\\r\n / kpi_stats_line_plot.pax.max()\r\n kpi_stats_line_plot.veh = kpi_stats_line_plot.veh * supply_factor\r\n kpi_stats_line_plot.pax = kpi_stats_line_plot.pax * demand_factor\r\n note = \"\"\"\r\n Los indicadores de Oferta y Demanda se estandarizaron para que\r\n coincidan con máximo del eje de Factor de Ocupación\r\n \"\"\"\r\n ylabel_str = \"Factor de Ocupación (%)\"\r\n else:\r\n kpi_stats_line_plot.veh = kpi_stats_line_plot.veh / \\\r\n kpi_stats_line_plot.veh.sum() * 100\r\n kpi_stats_line_plot.pax = kpi_stats_line_plot.pax / \\\r\n kpi_stats_line_plot.pax.sum() * 100\r\n note = \"\"\"\r\n Oferta y Demanda expresan la distribución porcentual por\r\n hora de la sumatoria de veh-hr y de los pax-hr \r\n respectivamente \r\n \"\"\"\r\n ylabel_str = \"%\"\r\n missing_data = (kpi_stats_line_plot.pax.isna().all()) |\\\r\n (kpi_stats_line_plot.dmt.isna().all()) |\\\r\n (kpi_stats_line_plot.of.isna().all())\r\n\r\n if missing_data:\r\n print(\"No es posible crear plot de KPI basicos por linea\",\r\n \"id linea:\", line_id)\r\n\r\n else:\r\n print(\"Creando plot de KPI basicos por linea\", \"id linea:\", line_id)\r\n\r\n f, ax = plt.subplots(figsize=(8, 6))\r\n\r\n sns.barplot(data=kpi_stats_line_plot, x='hora', y='of',\r\n color='silver', ax=ax, label='Factor de ocupación')\r\n\r\n sns.lineplot(data=kpi_stats_line_plot, x=\"hora\", y=\"veh\", ax=ax,\r\n color='Purple', label='Oferta')\r\n sns.lineplot(data=kpi_stats_line_plot, x=\"hora\", y=\"pax\", ax=ax,\r\n color='Orange', label='Demanda')\r\n\r\n ax.set_xlabel(\"Hora\")\r\n ax.set_ylabel(ylabel_str)\r\n\r\n f.suptitle(f\"Indicadores de oferta y demanda estadarizados\",\r\n fontdict={'size': 18,\r\n 'weight': 'bold'})\r\n\r\n ax.set_title(f\"{id_linea_str} id linea: {line_id} - Dia: {day_str}\",\r\n fontdict={\"fontsize\": 11})\r\n # Add a footnote below and to the right side of the chart\r\n\r\n ax_note = ax.annotate(note,\r\n xy=(0, -.18),\r\n xycoords='axes fraction',\r\n ha='left',\r\n va=\"center\",\r\n fontsize=10)\r\n ax.spines.right.set_visible(False)\r\n ax.spines.top.set_visible(False)\r\n ax.spines.bottom.set_visible(False)\r\n ax.spines.left.set_visible(False)\r\n ax.spines.left.set_position(('outward', 10))\r\n ax.spines.bottom.set_position(('outward', 10))\r\n\r\n for frm in ['png', 'pdf']:\r\n archivo = f'{alias}_kpi_basicos_id_linea_{line_id}_{day}.{frm}'\r\n db_path = os.path.join(\"resultados\", frm, archivo)\r\n f.savefig(db_path, dpi=300, bbox_extra_artists=(\r\n ax_note,), bbox_inches='tight')\r\n plt.close()\r\n\r\n # add to dash\r\n kpi_stats_line_plot['nombre_linea'] = id_linea_str\r\n kpi_stats_line_plot['dia'] = day\r\n kpi_stats_line_plot = kpi_stats_line_plot\\\r\n .reindex(columns=[\r\n 'dia',\r\n 'id_linea',\r\n 'nombre_linea',\r\n 'hora',\r\n 'veh',\r\n 'pax',\r\n 'dmt',\r\n 'of',\r\n 'speed_kmh']\r\n )\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n\r\n query = f\"\"\"\r\n DELETE FROM basic_kpi_by_line_hr\r\n WHERE dia = \"{day}\"\r\n and id_linea = \"{line_id}\"\r\n \"\"\"\r\n conn_dash.execute(query)\r\n conn_dash.commit()\r\n\r\n kpi_stats_line_plot.to_sql(\r\n 
\"basic_kpi_by_line_hr\",\r\n conn_dash,\r\n if_exists=\"append\",\r\n index=False,\r\n )\r\n conn_dash.close()\r\n\r\n\r\ndef get_branch_geoms_from_line(id_linea):\r\n \"\"\"\r\n Takes a line id and returns a geoSeries with\r\n all branches' geoms\r\n \"\"\"\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n branch_geoms_query = f\"\"\"\r\n select * from branches_geoms bg \r\n where id_ramal in (\r\n select id_ramal from metadata_ramales mr \r\n where id_linea = {id_linea}\r\n )\r\n ;\r\n \"\"\"\r\n branch_geoms = pd.read_sql(branch_geoms_query, conn_insumos)\r\n branch_geoms = gpd.GeoSeries.from_wkt(\r\n branch_geoms.wkt.values,\r\n index=branch_geoms.id_ramal.values,\r\n crs='EPSG:4326')\r\n\r\n conn_insumos.close()\r\n\r\n if len(branch_geoms) == 0:\r\n branch_geoms = None\r\n\r\n return branch_geoms\r\n\r\n\r\ndef create_squared_polygon(min_x, min_y, max_x, max_y, epsg):\r\n\r\n width = max(max_x - min_x, max_y - min_y)\r\n center_x = (max_x + min_x) / 2\r\n center_y = (max_y + min_y) / 2\r\n\r\n square_bbox_min_x = center_x - width / 2\r\n square_bbox_min_y = center_y - width / 2\r\n square_bbox_max_x = center_x + width / 2\r\n square_bbox_max_y = center_y + width / 2\r\n\r\n square_bbox_coords = [\r\n (square_bbox_min_x, square_bbox_min_y),\r\n (square_bbox_max_x, square_bbox_min_y),\r\n (square_bbox_max_x, square_bbox_max_y),\r\n (square_bbox_min_x, square_bbox_max_y)\r\n ]\r\n\r\n p = Polygon(square_bbox_coords)\r\n s = gpd.GeoSeries([p], crs=f'EPSG:{epsg}')\r\n return s\r\n\r\n\r\ndef format_num(num, lpad=10):\r\n fnum = '{:,}'.format(num).replace(\r\n \".\", \"*\").replace(\",\", \".\").replace(\"*\", \",\")\r\n if lpad > 0:\r\n fnum = fnum.rjust(lpad, ' ')\r\n return fnum\r\n\r\n\r\ndef indicadores_dash():\r\n alias = leer_alias()\r\n\r\n configs = leer_configs_generales()\r\n\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n\r\n indicadores = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM indicadores\r\n \"\"\",\r\n conn_data,\r\n )\r\n\r\n hora_punta = indicadores[indicadores.detalle.str.contains('Hora punta')]\r\n indicadores = indicadores[~indicadores.detalle.str.contains('Hora punta')]\r\n\r\n indicadores['dia'] = pd.to_datetime(indicadores.dia)\r\n indicadores['dow'] = indicadores.dia.dt.dayofweek\r\n indicadores['mo'] = indicadores.dia.dt.month\r\n indicadores['yr'] = indicadores.dia.dt.year\r\n\r\n indicadores['desc_dia'] = indicadores['yr'].astype(str).str.zfill(\r\n 4) + '-' + indicadores['mo'].astype(str).str.zfill(2)\r\n indicadores['tipo_dia'] = 'Hábil'\r\n indicadores.loc[indicadores.dow >= 5, 'tipo_dia'] = 'Fin de semana'\r\n\r\n indicadores = indicadores.groupby(['desc_dia', 'tipo_dia', 'detalle'], as_index=False).agg({\r\n 'indicador': 'mean', 'porcentaje': 'mean'})\r\n indicadores.loc[indicadores.detalle == 'Cantidad de etapas con destinos validados',\r\n 'detalle'] = 'Transacciones válidas \\n(Etapas con destinos validados)'\r\n indicadores.loc[indicadores.detalle ==\r\n 'Cantidad total de viajes expandidos', 'detalle'] = 'Viajes'\r\n indicadores.loc[indicadores.detalle ==\r\n 'Cantidad total de etapas', 'detalle'] = 'Etapas'\r\n indicadores.loc[indicadores.detalle ==\r\n 'Cantidad total de usuarios', 'detalle'] = 'Usuarios'\r\n indicadores.loc[indicadores.detalle ==\r\n 'Cantidad de viajes cortos (<5kms)', 'detalle'] = 'Viajes cortos (<5kms)'\r\n indicadores.loc[indicadores.detalle == 'Cantidad de viajes con transferencia',\r\n 'detalle'] = 'Viajes con transferencia'\r\n\r\n conn_data.close()\r\n\r\n 
indicadores.loc[indicadores.detalle.isin(['Cantidad de transacciones totales',\r\n 'Cantidad de tarjetas únicas',\r\n 'Cantidad de transacciones limpias',\r\n ]), 'orden'] = 1\r\n\r\n indicadores.loc[indicadores.detalle.str.contains(\r\n 'Transacciones válidas'), 'orden'] = 1\r\n\r\n indicadores.loc[indicadores.detalle.isin(['Viajes',\r\n 'Etapas',\r\n 'Usuarios',\r\n 'Viajes cortos (<5kms)',\r\n 'Viajes con transferencia',\r\n 'Distancia de los viajes (promedio en kms)',\r\n 'Distancia de los viajes (mediana en kms)'\r\n ]), 'orden'] = 2\r\n\r\n indicadores.loc[indicadores.detalle.isin(['Viajes autobus',\r\n 'Viajes Multietapa',\r\n 'Viajes Multimodal',\r\n 'Viajes metro',\r\n 'Viajes tren'\r\n ]), 'orden'] = 3\r\n\r\n indicadores['Valor'] = indicadores.indicador.apply(format_num)\r\n indicadores['porcentaje'] = indicadores.porcentaje.apply(format_num)\r\n\r\n indicadores = indicadores[indicadores.orden.notna()]\r\n\r\n indicadores.loc[~(indicadores.detalle.str.contains('Distancia')), 'Valor'] = indicadores.loc[~(\r\n indicadores.detalle.str.contains('Distancia')), 'Valor'].str.split(',').str[0]\r\n\r\n indicadores = indicadores.drop(['indicador'], axis=1)\r\n indicadores = indicadores.rename(columns={'detalle': 'Indicador'})\r\n\r\n indicadores.loc[indicadores.Indicador.str.contains('Transacciones válidas'),\r\n 'Valor'] += ' ('+indicadores.loc[\r\n indicadores.Indicador.str.contains('Transacciones válidas'),\r\n 'porcentaje'].str.replace(' ', '')+'%)'\r\n\r\n indicadores.loc[indicadores.orden == 3,\r\n 'Valor'] += ' ('+indicadores.loc[indicadores.orden == 3,\r\n 'porcentaje'].str.replace(' ', '')+'%)'\r\n\r\n indicadores.loc[indicadores.Indicador == 'Viajes cortos (<5kms)',\r\n 'Valor'] += ' ('+indicadores.loc[\r\n indicadores.Indicador == 'Viajes cortos (<5kms)', 'porcentaje'].str.replace(' ', '')+'%)'\r\n indicadores.loc[indicadores.Indicador == 'Viajes con transferencia',\r\n 'Valor'] += ' ('+indicadores.loc[\r\n indicadores.Indicador == 'Viajes con transferencia', 'porcentaje'].str.replace(' ', '')+'%)'\r\n\r\n indicadores.loc[indicadores.orden == 1,\r\n 'Titulo'] = 'Información del dataset original'\r\n indicadores.loc[indicadores.orden == 2, 'Titulo'] = 'Información procesada'\r\n indicadores.loc[indicadores.orden == 3, 'Titulo'] = 'Partición modal'\r\n\r\n conn_dash = iniciar_conexion_db(tipo='dash')\r\n indicadores.to_sql(\"indicadores\", conn_dash,\r\n if_exists=\"replace\", index=False)\r\n conn_dash.close()\r\n\r\n\r\n@duracion\r\ndef create_visualizations():\r\n \"\"\"\r\n Esta funcion corre las diferentes funciones de visualizaciones\r\n \"\"\"\r\n\r\n pd.options.mode.chained_assignment = None\r\n\r\n # Leer informacion de viajes y distancias\r\n conn_data = iniciar_conexion_db(tipo='data')\r\n conn_insumos = iniciar_conexion_db(tipo='insumos')\r\n\r\n viajes = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM viajes\r\n where od_validado==1\r\n \"\"\",\r\n conn_data,\r\n )\r\n\r\n etapas = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM etapas\r\n where od_validado==1\r\n \"\"\",\r\n conn_data,\r\n )\r\n\r\n distancias = pd.read_sql_query(\r\n \"\"\"\r\n SELECT *\r\n FROM distancias\r\n \"\"\",\r\n conn_insumos,\r\n )\r\n\r\n conn_insumos.close()\r\n conn_data.close()\r\n\r\n # Agrego campo de distancias de los viajes\r\n viajes = viajes.merge(distancias,\r\n how='left',\r\n on=['h3_o', 'h3_d'])\r\n\r\n # Imputar anio, mes y tipo de dia\r\n viajes['yr'] = pd.to_datetime(viajes.dia).dt.year\r\n viajes['mo'] = pd.to_datetime(viajes.dia).dt.month\r\n viajes['dow'] = 
pd.to_datetime(viajes.dia).dt.day_of_week\r\n viajes.loc[viajes.dow >= 5, 'tipo_dia'] = 'Fin de semana'\r\n viajes.loc[viajes.dow < 5, 'tipo_dia'] = 'Dia habil'\r\n\r\n # Imputar anio, mes y tipo de dia\r\n etapas['yr'] = pd.to_datetime(etapas.dia).dt.year\r\n etapas['mo'] = pd.to_datetime(etapas.dia).dt.month\r\n etapas['dow'] = pd.to_datetime(etapas.dia).dt.day_of_week\r\n etapas.loc[etapas.dow >= 5, 'tipo_dia'] = 'Fin de semana'\r\n etapas.loc[etapas.dow < 5, 'tipo_dia'] = 'Dia habil'\r\n\r\n v_iter = viajes\\\r\n .groupby(['yr', 'mo', 'tipo_dia'], as_index=False)\\\r\n .factor_expansion_linea.sum()\\\r\n .iterrows()\r\n\r\n for _, i in v_iter:\r\n\r\n desc_dia = f'{i.yr}-{str(i.mo).zfill(2)} ({i.tipo_dia})'\r\n desc_dia_file = f'{i.yr}-{str(i.mo).zfill(2)}({i.tipo_dia})'\r\n\r\n viajes_dia = viajes[(viajes.yr == i.yr) & (\r\n viajes.mo == i.mo) & (viajes.tipo_dia == i.tipo_dia)]\r\n\r\n etapas_dia = etapas[(etapas.yr == i.yr) & (\r\n etapas.mo == i.mo) & (etapas.tipo_dia == i.tipo_dia)]\r\n\r\n # partición modal\r\n particion_modal(viajes_dia, etapas_dia,\r\n tipo_dia=i.tipo_dia, desc_dia=desc_dia)\r\n\r\n print('Imprimiendo tabla de matrices OD')\r\n # Impirmir tablas con matrices OD\r\n imprimir_matrices_od(viajes=viajes_dia,\r\n var_fex='factor_expansion_linea',\r\n title=f'Matriz OD {desc_dia}',\r\n savefile=f'{desc_dia_file}',\r\n desc_dia=f'{i.yr}-{str(i.mo).zfill(2)}',\r\n tipo_dia=i.tipo_dia,\r\n )\r\n\r\n print('Imprimiendo mapas de lí­neas de deseo')\r\n # Imprimir lineas de deseo\r\n imprime_lineas_deseo(df=viajes_dia,\r\n h3_o='',\r\n h3_d='',\r\n var_fex='factor_expansion_linea',\r\n title=f'Lí­neas de deseo {desc_dia}',\r\n savefile=f'{desc_dia_file}',\r\n desc_dia=f'{i.yr}-{str(i.mo).zfill(2)}',\r\n tipo_dia=i.tipo_dia)\r\n\r\n print('Imprimiendo gráficos')\r\n titulo = f'Cantidad de viajes en transporte público {desc_dia}'\r\n imprime_graficos_hora(viajes_dia,\r\n title=titulo,\r\n savefile=f'{desc_dia_file}_viajes',\r\n var_fex='factor_expansion_linea',\r\n desc_dia=f'{i.yr}-{str(i.mo).zfill(2)}',\r\n tipo_dia=i.tipo_dia)\r\n\r\n print('Imprimiendo mapas de burbujas')\r\n viajes_n = viajes_dia[(viajes_dia.id_viaje > 1)]\r\n imprime_burbujas(viajes_n,\r\n res=7,\r\n h3_o='h3_o',\r\n alpha=.4,\r\n cmap='rocket_r',\r\n var_fex='factor_expansion_linea',\r\n porc_viajes=100,\r\n title=f'Destinos de los viajes {desc_dia}',\r\n savefile=f'{desc_dia_file}_burb_destinos',\r\n show_fig=False,\r\n k_jenks=5)\r\n\r\n viajes_n = viajes_dia[(viajes_dia.id_viaje == 1)]\r\n imprime_burbujas(viajes_n,\r\n res=7,\r\n h3_o='h3_o',\r\n alpha=.4,\r\n cmap='flare',\r\n var_fex='factor_expansion_linea',\r\n porc_viajes=100,\r\n title=f'Hogares {desc_dia}',\r\n savefile=f'{desc_dia_file}_burb_hogares',\r\n show_fig=False,\r\n k_jenks=5)\r\n\r\n save_zones()\r\n\r\n print('Indicadores para dash')\r\n indicadores_dash()\r\n\r\n # plor dispatched services\r\n plot_dispatched_services_wrapper()\r\n\r\n # plot basic kpi if exists\r\n plot_basic_kpi_wrapper()\r\n","repo_name":"EL-BID/UrbanTrips","sub_path":"urbantrips/viz/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":100198,"program_lang":"python","lang":"es","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"28504235902","text":"# Your new class might look like:\nclass Weather(object):\n\n def __init__(self, fname):\n self.readHeader(fname)\n self.readData(fname)\n\n def readHeader(self, fname):\n \"Reads header from data file `fname` and populates instance dictionary: self.header.\"\n with open(fname) 
as f:\n self.header = {}\n\n i = 0\n while i < 3:\n line = f.readline()\n # Strip any white space from line\n line = line.strip()\n\n key, value = line.split(\":\")\n self.header[key] = value\n i += 1\n\n\n def readData(self, fname):\n \"Reads a data file `fname` and populates instance dictionary: self.data.\"\n with open(fname) as f:\n self.data = {}\n\n # Ignore the header\n for i in range(3):\n f.readline()\n\n # Read in variable names\n col_names = f.readline().strip().split(\",\")\n for col_name in col_names:\n self.data[col_name] = []\n i = 0\n\n for line in f.readlines():\n \n # Strip any white space from line\n line = line.strip()\n values = line.split(\",\")\n\n for (i, value) in enumerate(values):\n col_name = col_names[i]\n self.data[col_name].append(value)\n\n\n# Test it with:\nweather = Weather(\"../example_data/weather_meta.csv\")\nprint(weather.header)\nprint(weather.data)\n","repo_name":"ncasuk/ncas-isc","sub_path":"python-data/example_code/read_weather_class.py","file_name":"read_weather_class.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"71344662171","text":"\"\"\"\nPost related tests.\n\nThese will execute when you run \"manage.py test\".\n\"\"\"\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n\nimport logging\nfrom django.conf import settings\nfrom biostar.apps.users.models import User, Profile\nfrom biostar.apps.posts.models import Post, Subscription, Tag\nfrom biostar.apps.messages.models import Message\n\nfrom django.test import TestCase\n\nlogging.disable(logging.INFO)\n\n\nclass PostTest(TestCase):\n\n def test_tagging(self):\n \"Testing tagging.\"\n eq = self.assertEqual\n\n eq(0, Tag.objects.all().count() )\n\n # Create an admin user and a post.\n title = \"Hello Posts!\"\n email = \"john@this.edu\"\n jane = User.objects.create(email=email)\n html = \"Hello World!\"\n post = Post(title=title, author=jane, type=Post.FORUM, content=html)\n post.save()\n post.add_tags(\"t1,t2, t3\")\n\n eq(3, Tag.objects.all().count())\n\n post = Post(title=title, author=jane, type=Post.FORUM, content=html)\n post.save()\n post.add_tags(\"t1, t2, t3, t2, t1, t1\")\n\n t1 = Tag.objects.get(name=\"t1\")\n t3 = Tag.objects.get(name=\"t3\")\n\n eq(2, t1.count)\n eq(2, t3.count)\n\n post.add_tags(\"t2 t4\")\n\n t1 = Tag.objects.get(name=\"t1\")\n t3 = Tag.objects.get(name=\"t3\")\n\n eq(1, t1.count)\n eq(1, t3.count)\n\n def test_post_creation(self):\n \"Testing post creation.\"\n eq = self.assertEqual\n\n # Create an admin user and a post.\n title = \"Hello Posts!\"\n email = \"john@this.edu\"\n jane = User.objects.create(email=email)\n html = \"Hello World!\"\n post = Post(title=title, author=jane, type=Post.FORUM, content=html)\n post.save()\n\n # Get the object fresh.\n post = Post.objects.get(pk=post.id)\n\n eq(post.type, Post.FORUM)\n eq(post.root, post)\n eq(post.parent, post)\n\n # Subscriptions are automatically created\n sub = Subscription.objects.get(user=jane)\n eq(sub.user, jane)\n eq(sub.post, post)\n\n # A new post triggers a message to the author.\n email = \"jane@this.edu\"\n john = User.objects.create(email=email)\n answer = Post(author=john, parent=post, type=Post.ANSWER)\n answer.save()\n\n eq(answer.root, post)\n eq(answer.parent, post)\n eq(answer.type, Post.ANSWER)\n\n # Add comment. 
The parent will override the post type.\n email = \"bob@this.edu\"\n bob = User.objects.create(email=email)\n comment = Post(author=bob, type=Post.FORUM, parent=answer)\n comment.save()\n\n eq(comment.root, post)\n eq(comment.parent, answer)\n eq(comment.type, Post.COMMENT)\n\n # Everyone posting in a thread gets a subscription to the root post of the thread.\n subs = Subscription.objects.filter(post=post)\n eq(len(subs), 3)\n\nTEST_CONTENT_EMBEDDING = \"\"\"\n

Gist links may be formatted\n\nhttps://gist.github.com/ialbert/ae46c5f51d63cdf2d0d2\n\nor embedded:\n\nhttps://gist.github.com/ialbert/ae46c5f51d63cdf2d0d2\n\nVideo links may be formatted\n\nhttp://www.youtube.com/watch?v=_cDaX0xJPvI\n\nor embedded:\n\nhttp://www.youtube.com/watch?v=_cDaX0xJPvI\n\nInternal links are recognized:\n\nhttp://test.biostars.org/u/2/\n\nvs http://test.biostars.org/u/2/\n\nSimilarly\n\nhttp://test.biostars.org/p/2/\n\nversus http://test.biostars.org/p/2/
\n\"\"\"","repo_name":"Lohit13/biostar-webhook-test","sub_path":"biostar/apps/posts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21480828913","text":"# -*- coding: utf-8 -*-\n\"\"\"Data containers convenient for/used to interact with bandit members.\"\"\"\nimport pprint\n\nimport numpy\n\n\nclass SampleArm(object):\n\n \"\"\"An arm (name, win, loss, total, variance) sampled from the objective function we are modeling/optimizing.\n\n This class is a representation of a \"Sample Arm,\" which is defined by the four data members listed here.\n SampleArm is a convenient way of communicating data to the rest of the bandit library (via the\n HistoricalData container); it also provides a convenient grouping for interactive introspection.\n\n Users are not required to use SampleArm, iterables with the same data layout will suffice.\n\n :ivar win: (*float64 >= 0.0*) The amount won from playing this arm\n :ivar loss: (*float64 >= 0.0*) The amount loss from playing this arm\n :ivar total: (*int >= 0*) The number of times we have played this arm\n :ivar variance: (*float >= 0.0*) The variance of this arm, if there is no variance it is equal to None\n\n \"\"\"\n\n __slots__ = ('_win', '_loss', '_total', '_variance')\n\n def __init__(self, win=0.0, loss=0.0, total=0, variance=None):\n \"\"\"Allocate and construct a new instance with the specified data fields; see class docstring for input descriptions.\"\"\"\n self._win = win\n self._loss = loss\n self._total = total\n self._variance = variance\n self.validate()\n\n def __str__(self):\n \"\"\"Pretty print this object as a dict.\"\"\"\n return pprint.pformat(self.json_payload())\n\n def __add__(self, sample_arm_to_add):\n \"\"\"Overload Add operator to add ``sample_arm_to_add`` sampled arm results to this arm.\n\n :param sample_arm_to_add: arm samples to add to this arm\n :type sample_arm_to_add: a SampleArm object\n :return: new SampleArm that is a result of adding two arms\n :rtype: SampleArm\n :raise: ValueError when ``arm.variance`` or self.variance is not None.\n\n \"\"\"\n if self._variance is not None or sample_arm_to_add.variance is not None:\n raise ValueError('Cannot add arms when variance is not None! Please combine arms manually.')\n result = SampleArm(win=self._win + sample_arm_to_add.win, loss=self._loss + sample_arm_to_add.loss, total=self._total + sample_arm_to_add.total)\n result.validate()\n return result\n\n __radd__ = __add__\n\n def __iadd__(self, sample_arm_to_add):\n \"\"\"Overload in-place Add operator to add ``sample_arm_to_add`` sampled arm results to this arm in-place.\n\n :param sample_arm_to_add: arm samples to add to this arm\n :type sample_arm_to_add: a SampleArm object\n :return: this arm after adding ``sample_arm_to_add``\n :rtype: SampleArm\n :raise: ValueError when ``arm.variance`` or self.variance is not None.\n\n \"\"\"\n if self._variance is not None or sample_arm_to_add.variance is not None:\n raise ValueError('Cannot add arms when variance is not None! 
Please combine arms manually.')\n self._win += sample_arm_to_add.win\n self._loss += sample_arm_to_add.loss\n self._total += sample_arm_to_add.total\n self.validate()\n return self\n\n def json_payload(self):\n \"\"\"Convert the sample_arm into a dict to be consumed by json for a REST request.\"\"\"\n return {\n 'win': self.win,\n 'loss': self.loss,\n 'total': self.total,\n 'variance': self.variance,\n }\n\n def validate(self):\n \"\"\"Check this SampleArm passes basic validity checks: all values are finite.\n\n :raises ValueError: if any member data is non-finite or out of range\n\n \"\"\"\n # check that all values are finite\n if self.win < 0.0 or not numpy.isfinite(self.win):\n raise ValueError('win = {0} is non-finite or negative!'.format(self.win))\n if self.loss < 0.0 or not numpy.isfinite(self.loss):\n raise ValueError('loss = {0} is non-finite or negative!'.format(self.loss))\n if self.total < 0 or not numpy.isfinite(self.total):\n raise ValueError('total = {0} is non-finite or negative!'.format(self.total))\n if self.variance is not None and (self.variance < 0.0 or not numpy.isfinite(self.variance)):\n raise ValueError('variance = {0} is non-finite or negative!'.format(self.variance))\n if self.total == 0 and not (self.win == 0.0 and self.loss == 0.0):\n raise ValueError('win or loss is not 0 when total is 0!')\n if self.variance is None and self.win > self.total:\n raise ValueError('win cannot be greater than total when default variance computation is used! Please specify variance.')\n\n @property\n def win(self):\n \"\"\"Return the amount win, always greater than or equal to zero.\"\"\"\n return self._win\n\n @property\n def loss(self):\n \"\"\"Return the amount loss, always greater than or equal to zero.\"\"\"\n return self._loss\n\n @property\n def total(self):\n \"\"\"Return the total number of tries, always a non-negative integer.\"\"\"\n return self._total\n\n @property\n def variance(self):\n \"\"\"Return the variance of sampled tries, always greater than or equal to zero, if there is no variance it is equal to None.\"\"\"\n return self._variance\n\n\nclass BernoulliArm(SampleArm):\n\n \"\"\"A Bernoulli arm (name, win, loss, total, variance) sampled from the objective function we are modeling/optimizing.\n\n A Bernoulli arm has payoff 1 for a success and 0 for a failure.\n See more details on Bernoulli distribution at http://en.wikipedia.org/wiki/Bernoulli_distribution\n\n See superclass :class:`~moe.bandit.data_containers.SampleArm` for more details.\n\n \"\"\"\n\n def validate(self):\n \"\"\"Check this Bernoulli arm is a valid Bernoulli arm. Also check that this BernoulliArm passes basic validity checks: all values are finite.\n\n A Bernoulli arm has payoff 1 for a success and 0 for a failure.\n See more details on Bernoulli distribution at http://en.wikipedia.org/wiki/Bernoulli_distribution\n\n :raises ValueError: if any member data is non-finite or out of range or the arm is not a valid Bernoulli arm\n\n \"\"\"\n super(BernoulliArm, self).validate()\n if self.loss != 0.0:\n raise ValueError('loss = {0} is not zero! This is not a Bernoulli arm'.format(self.loss))\n if self.win > self.total:\n raise ValueError('win = {0} > total = {1}! 
This is not a Bernoulli arm'.format(self.win, self.total))\n\n\nclass HistoricalData(object):\n\n \"\"\"A data container for storing the historical data from an entire experiment in a layout convenient for this library.\n\n Users will likely find it most convenient to store experiment historical data of arms in tuples of\n (win, loss, total, variance); for example, these could be the columns of a database row, part of an ORM, etc.\n The SampleArm class (above) provides a convenient representation of this input format, but users are *not* required\n to use it.\n\n :ivar _arms_sampled: (*dict*) mapping of arm names to already-sampled arms\n\n \"\"\"\n\n __slots__ = ('_arms_sampled')\n\n def __init__(self, sample_arms=None, validate=True):\n \"\"\"Create a HistoricalData object tracking the state of an experiment (already-sampled arms).\n\n :param sample_arms: the already-sampled arms: names, wins, losses, and totals\n :type sample_arms: a dictionary of (arm name, SampleArm) key-value pairs\n :param validate: whether to sanity-check the input sample_arms\n :type validate: boolean\n\n \"\"\"\n if sample_arms is None:\n sample_arms = {}\n\n if validate:\n self.validate_sample_arms(sample_arms)\n\n self._arms_sampled = sample_arms\n\n def __str__(self, pretty_print=True):\n \"\"\"String representation of this HistoricalData object.\n\n pretty-print'ing produces output that is easily read by humans.\n Disabling it prints the member arrays to the screen in full precision; this is convenient for\n pasting into C++ or other debugging purposes.\n\n :param pretty_print: enable pretty-printing for formatted, human-readable output\n :type pretty_print: bool\n :return: string representation\n :rtype: string\n\n \"\"\"\n if pretty_print:\n return pprint.pformat(self.json_payload())\n else:\n return repr(self.json_payload())\n\n def json_payload(self):\n \"\"\"Construct a json serializeable and MOE REST recognizeable dictionary of the historical data.\"\"\"\n json_arms_sampled = {}\n for name, arm in self._arms_sampled.iteritems():\n json_arms_sampled[name] = arm.json_payload()\n return {'arms_sampled': json_arms_sampled}\n\n @staticmethod\n def validate_sample_arms(sample_arms):\n \"\"\"Check that sample_arms passes basic validity checks: all values are finite.\n\n :param sample_arms: already-sampled arms: names, wins, losses, and totals\n :type sample_arms: a dictionary of (arm name, SampleArm) key-value pairs\n :return: True if inputs are valid\n :rtype: boolean\n\n \"\"\"\n if sample_arms:\n for arm in sample_arms.itervalues():\n arm.validate()\n\n def append_sample_arms(self, sample_arms, validate=True):\n \"\"\"Append the contents of ``sample_arms`` to the data members of this class.\n\n This method first validates the arms and then updates the historical data.\n The result of combining two valid arms is always a valid arm.\n\n :param sample_arms: the already-sampled arms: wins, losses, and totals\n :type sample_arms: a dictionary of (arm name, SampleArm) key-value pairs\n :param validate: whether to sanity-check the input sample_arms\n :type validate: boolean\n\n \"\"\"\n if not sample_arms:\n return\n\n if validate:\n self.validate_sample_arms(sample_arms)\n\n self._update_historical_data(sample_arms)\n\n def _update_historical_data(self, sample_arms):\n \"\"\"Add arm sampled results from ``sample_arms`` into this object's data member.\n\n :param sample_arms: the already-sampled arms: wins, losses, and totals\n :type sample_arms: dictionary of (arm name, SampleArm) key-value pairs\n \"\"\"\n for 
name, arm in sample_arms.iteritems():\n if name in self._arms_sampled:\n self._arms_sampled[name] += arm\n else:\n self._arms_sampled[name] = arm\n\n @property\n def num_arms(self):\n \"\"\"Return the number of sampled arms.\"\"\"\n return len(self._arms_sampled)\n\n @property\n def arms_sampled(self):\n \"\"\"Return the arms_sampled, a dictionary of (arm name, SampleArm) key-value pairs.\"\"\"\n return self._arms_sampled\n","repo_name":"Yelp/MOE","sub_path":"moe/bandit/data_containers.py","file_name":"data_containers.py","file_ext":"py","file_size_in_byte":10891,"program_lang":"python","lang":"en","doc_type":"code","stars":1307,"dataset":"github-code","pt":"32"} +{"seq_id":"18845702306","text":"if __name__ == '__main__':\n import matplotlib.pyplot as plt\n from FEM.Elasticity2D import PlaneStrainSparse\n from FEM.Geometry import Delaunay\n from FEM.Utils.polygonal import giveCoordsCircle\n\n E = 21000000.0 # MPa\n v = 0.2 # m\n gamma = 23.54\n\n b = 200\n h = 100\n l = 20\n\n vertices = [[0.0, 0.0], [b, 0.0], [b, h], [b/2+2*l, h],\n [b/2+l, h+l], [b/2-l, h+l], [b/2-2*l, h], [0.0, h]]\n\n holes = []\n radi = 20\n cent = [b/2, h/2]\n vert, seg = giveCoordsCircle(cent, radi, n=50)\n hole = {'center': cent, 'regions': seg, 'vertices': vert}\n holes += [hole]\n\n fillets = [{'start_region': 2, 'end_region': 3, 'r': 20, 'n': 10},\n {'start_region': 3, 'end_region': 4, 'r': 20, 'n': 10},\n {'start_region': 4, 'end_region': 5, 'r': 20, 'n': 10},\n {'start_region': 5, 'end_region': 6, 'r': 20, 'n': 10}]\n\n params = Delaunay._strdelaunay(\n constrained=True, delaunay=True, a='7', o=2)\n geometria = Delaunay(vertices, params, nvn=2,\n holes_dict=holes, fillets=fillets, fast=True)\n cb = geometria.cbFromRegion(0, 0, 1)\n cb += geometria.cbFromRegion(0, 0, 2)\n cb += geometria.cbFromRegion(1, 0, 1)\n cb += geometria.cbFromRegion(7, 0, 1)\n geometria.setCbe(cb)\n O = PlaneStrainSparse(geometria, E, v, fy=lambda x: -gamma, verbose=True)\n O.solve()\n O.exportJSON(\"Examples/Mesh_tests/Example29.json\")\n plt.show()\n","repo_name":"ZibraMax/FEM","sub_path":"Examples/example29.py","file_name":"example29.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"6494136825","text":"from StringProcessing import StringProcessing\r\nfrom BuildVocabulary import BuildVocabulary as bv\r\nfrom TransformerEncoder import TransformerEncoder\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom nltk.tokenize.toktok import ToktokTokenizer\r\nfrom gensim.models import Word2Vec\r\nfrom Preprocess import Preprocess\r\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\r\nfrom Utils import Utils\r\nimport numpy as np\r\n#import os\r\n#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Text parameters #\r\n max_words = 50 #50\r\n train_path = \"./tass-corpus/es/augmented_train_unique.csv\" # train\r\n dev_path = \"./tass-corpus/es/dev.csv\"\r\n\r\n # Training Parameters #\r\n batch_size = 32\r\n epochs = 250\r\n path_models = \"./models/\"\r\n name_models = \"transformer_1646NO\"\r\n cats = {\"N\":0, \"NEU\":1, \"NONE\":2, \"P\":3}\r\n r_cats = {0:\"N\", 1:\"NEU\", 2:\"NONE\", 3:\"P\"}\r\n class_weight = {0:1., 1:2., 2:2., 3:1.3} # BEST\r\n tokenizer = ToktokTokenizer()\r\n w2v_path = \"./twitter87/twitter87.model\"\r\n w2v = Word2Vec.load(w2v_path)\r\n\r\n # Encoder Parameters # # MEJOR\r\n dropout_input = 
0.7 #0.7\r\n dropout_output = 0. #0.15 # 0.\r\n pe = False #False\r\n embedding_dims = w2v.vector_size\r\n n_encoders = 1 #2 #1\r\n attention_dims = 64 #32 #64\r\n n_heads = 6 #8\r\n dim_h = 128 #256\r\n final_h = False #False\r\n pool_mode = \"average\" #\"average\"\r\n\r\n output_encoder_dims = [embedding_dims for i in range(n_encoders)]\r\n attention_dims = [attention_dims for i in range(n_encoders)]\r\n n_heads = [n_heads for i in range(n_encoders)]\r\n\r\n ids_tr, x_tr, y_tr = StringProcessing.load_samples(train_path)\r\n ids_dv, x_dv, y_dv = StringProcessing.load_samples(dev_path)\r\n\r\n ry_tr = to_categorical([cats[c] for c in y_tr], 4)\r\n ry_dv = to_categorical([cats[c] for c in y_dv], 4)\r\n\r\n # Preprocess #\r\n x_tr = [Preprocess.preprocess(x, tokenizer) for x in x_tr]\r\n x_dv = [Preprocess.preprocess(x, tokenizer) for x in x_dv]\r\n\r\n # Represent #\r\n rx_tr = np.array(StringProcessing.represent_documents(x_tr, max_words, embedding_dims, w2v, word_delimiter = \" \"))\r\n rx_dv = np.array(StringProcessing.represent_documents(x_dv, max_words, embedding_dims, w2v, word_delimiter = \" \"))\r\n\r\n # Masks #\r\n masks_tr = np.array([((rx!=0).sum(axis=1)>0).astype(\"int\") for rx in rx_tr])\r\n masks_dv = np.array([((rx!=0).sum(axis=1)>0).astype(\"int\") for rx in rx_dv])\r\n\r\n # Positional Encodings #\r\n matrix_pos_encodings = Utils.precompute_sent_pos_encodings(max_words, embedding_dims)\r\n pe_tr = np.array([Utils.build_pe_sent_encodings(matrix_pos_encodings, m) for m in masks_tr])\r\n pe_dv = np.array([Utils.build_pe_sent_encodings(matrix_pos_encodings, m) for m in masks_dv])\r\n\r\n ht = TransformerEncoder(max_words = max_words,\r\n embedding_dims = embedding_dims,\r\n output_encoder_dims = output_encoder_dims,\r\n attention_dims = attention_dims,\r\n n_heads = n_heads,\r\n dropout_input = dropout_input,\r\n dropout_output = dropout_output,\r\n pe = pe,\r\n dim_h = dim_h,\r\n final_h = final_h,\r\n pool_mode = pool_mode)\r\n\r\n ht.build()\r\n print(ht.model.summary())\r\n ht.compile(ht.model)\r\n\r\n \"\"\"\r\n chkpath = path_models + \"/\" + name_models + \"-{epoch:05d}-{val_loss:.3f}-{val_acc:.3f}-{val_macro_f1}.hdf5\"\r\n\r\n checkpoint = ModelCheckpoint(chkpath, monitor='val_macro_f1',\r\n verbose=1, save_best_only=True,\r\n mode='max')\r\n callbacks = [checkpoint]\r\n ht.model.fit(x = [rx_tr, masks_tr, pe_tr], y = ry_tr,\r\n epochs = epochs,\r\n validation_data = ([rx_dv, masks_dv, pe_dv], ry_dv),\r\n verbose = 1,\r\n batch_size = batch_size, callbacks = callbacks)\r\n\r\n\r\n \"\"\"\r\n\r\n\r\n\r\n truths = y_dv\r\n best_mf1 = float(\"-inf\")\r\n for e in range(epochs):\r\n ht.model.fit(x = [rx_tr, masks_tr, pe_tr], y = ry_tr,\r\n epochs = 1,\r\n validation_data = ([rx_dv, masks_dv, pe_dv], ry_dv),\r\n verbose = 0,\r\n batch_size = batch_size, class_weight = class_weight)\r\n\r\n preds = ht.model.predict([rx_dv, masks_dv, pe_dv], batch_size = 256)\r\n preds = [r_cats[p.argmax()] for p in preds]\r\n acc = accuracy_score(truths, preds)\r\n mf1 = f1_score(truths, preds, average=\"macro\")\r\n if mf1 > best_mf1:\r\n best_mf1 = mf1\r\n acc = accuracy_score(truths, preds)\r\n mp = precision_score(truths, preds, average=\"macro\")\r\n mr = recall_score(truths, preds, average=\"macro\")\r\n print(\"BEST: %d\" % e)\r\n print(\"Acc: %f\" % acc)\r\n print(\"MF1: %f\" % mf1)\r\n print(\"MP: %f\" % mp)\r\n print(\"MR: %f\" % mr)\r\n ht.model.save_weights(path_models + \"/\" + name_models + \".hdf5\")\r\n print(\"\\n\\n\" + \"-\"*50 + 
\"\\n\\n\")\r\n","repo_name":"jogonba2/TE-TextClassification","sub_path":"SHT-Train.py","file_name":"SHT-Train.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73321876891","text":"import sys,os\n\nligand_editor = os.path.join(os.path.dirname(__file__), \"ligand_editor.py\")\nlocation_of_script = os.path.dirname(__file__)\ndef main():\n\n linker_name = sys.argv[1]\n smiles_string = sys.argv[2]\n\n print('creating 3D molecule from Smiles String')\n file_name = linker_name+'.sdf'\n command = 'obabel -:\"'+ smiles_string +'\" -O '+file_name+' --gen3D'\n print('using the command ', command)\n os.system(command)\n\n print('generating conformers')\n command = 'obabel '+file_name+' -O '+file_name+' --confab'\n print('using the command ', command)\n os.system(command)\n\n print('creating pymol script file')\n pymol_script_file = open('pymol_script.pml', 'w')\n pymol_script_file.write('load ' + file_name + ' \\n')\n pymol_script_file.write('multifilesave {name}-{state}.pdb, state=0')\n pymol_script_file.close()\n\n print('running pymol script file')\n command = 'pymol -c pymol_script.pml'\n os.system(command)\n\n print('deleting pymol script file')\n command = 'rm pymol_script.pml'\n os.system(command)\n\n print('generating linker_list.txt file')\n command = 'ls *.pdb > linker_list.txt'\n os.system(command)\n\n print('altering atom_names')\n command = '{} {} {}'.format('python', ligand_editor, 'linker_list.txt UNK')\n os.system(command)\n\n # printf '../alkylC6_conformers/%s\\n' edited * > linker_list.txt\n print('updating linker_list.txt file')\n command = 'ls edited*.pdb > linker_list.txt'\n os.system(command)\n\nmain()","repo_name":"jonsch1/bachelorproject","sub_path":"python_scripts/linker_preparation.py","file_name":"linker_preparation.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26682500827","text":"def _dictify_dn(dn):\n return dict(x.split('=') for x in dn.split('/') if '=' in x)\n\ndef user_dict_from_dn(dn):\n print(\"JAMNIK!!!\")\n d = _dictify_dn(dn)\n print(d)\n ret = dict()\n ret['username'] = d['serialNumber']\n ret['last_name'] = d['SN'].title()\n ret['first_name'] = d['GN'].title()\n ret['email'] = ''\n return ret","repo_name":"Adimus11/Cyber-Security","sub_path":"lista8/bank/main/ssl_login.py","file_name":"ssl_login.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25580022973","text":"import requests\nimport os\nimport pathlib\n\ndef DownloadFile(url, index):\n if not os.path.exists('audio_files'):\n os.makedirs('audio_files')\n extension = pathlib.Path(url).suffix\n local_filename = f\"audio_files/File {index}{extension}\"\n r = requests.get(url)\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n return None\n\nfile_name = input(\"Enter url filename (ex urls.txt): \")\nurl_list = open(file_name, \"r\").read().splitlines()\n\nindex = 1\nfor url in url_list:\n url = requests.head(url, allow_redirects=True)\n print(f\"Downloading File {index} from {url.url}\")\n DownloadFile(url.url, index)\n index+=1\nprint(\"Done!\")\n","repo_name":"kalix123/Useful-Python-Tools","sub_path":"Mass File 
Downloader/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9083127230","text":"from dataclasses import dataclass\nfrom typing import Optional\n\nimport flax\nimport flax.linen as nn\nimport jax\nimport jax.numpy as jnp\nimport optax\nimport tyro\n\n\ndef dropout(x: jnp.ndarray, rate: float, key: jax.random.KeyArray) -> jnp.ndarray:\n \"\"\"\n # nn.Dropout is a bit confusing to me... @Costa\n Functional dropout implementation. In contrast to the flax.linen module, this can\n be used inside of standard JAX function transforms.\n Note that we could also use the lifted transforms provided by Flax, but this\n is more general.\n taken from https://github.com/brentyi/minGPT-flax/blob/7927b564e04b929e4df219a9334d86de9486dfb0/mingpt/attention.py#L11\n \"\"\"\n keep_prob = 1.0 - rate\n mask = jax.random.bernoulli(key, p=keep_prob, shape=x.shape)\n out = jnp.where(mask, x / keep_prob, 0.0)\n assert out.shape == x.shape\n return out\n\n\nclass NewGELU(nn.Module):\n \"\"\"\n Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).\n Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415\n \"\"\"\n\n @nn.compact\n def __call__(self, x):\n return 0.5 * x * (1.0 + jnp.tanh(jnp.sqrt(2.0 / jnp.pi) * (x + 0.044715 * jnp.power(x, 3.0))))\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n It is possible to use torch.nn.MultiheadAttention here but I am including an\n explicit implementation here to show that there is nothing too scary here.\n \"\"\"\n\n embd_dim: int # alias: C\n n_head: int # alias: nh\n attn_pdrop: int\n resid_pdrop: int\n block_size: int # alias: T, sequence_length\n use_bias: bool\n deterministic: Optional[bool] = None\n dtype: Optional[str] = None\n\n @nn.compact\n def __call__(self, x: jnp.array, deterministic=None):\n deterministic = nn.merge_param(\"deterministic\", self.deterministic, deterministic)\n assert self.embd_dim % self.n_head == 0, \"embd_dim must be divisible by num_heads\"\n B, T, C = jnp.shape(x) # batch size, sequence length, embedding dimensionality (embd_dim\n head_dim = C // self.n_head # alias: hd\n bias = jnp.tril(jnp.ones((self.block_size, self.block_size))).reshape(1, 1, self.block_size, self.block_size)\n\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n c_attn = nn.Dense(3 * C, use_bias=self.use_bias, dtype=self.dtype)(\n x\n ) # (B, T, 3 * C), `c_attn` means `concatenated attention`\n q, k, v = jnp.split(c_attn, 3, axis=-1) # each has shape (B, T, C)\n q = q.reshape(B, T, self.n_head, head_dim).swapaxes(1, 2) # (B, nh, T, hd), nh: n_head, hd: head dimensionality\n k = k.reshape(B, T, self.n_head, head_dim).swapaxes(1, 2) # (B, nh, T, hd)\n v = v.reshape(B, T, self.n_head, head_dim).swapaxes(1, 2) # (B, nh, T, hd)\n attn = q @ k.swapaxes(-1, -2) / jnp.sqrt(head_dim) # (B, nh, T, T), attention scores\n attn = jnp.where(bias[:, :, :T, :T] == 0, float(\"-inf\"), attn) # (B, nh, T, T), mask out the future tokens\n attn = nn.softmax(attn, axis=-1) # (B, nh, T, T), attention weights (probabilities)\n attn = nn.Dropout(self.attn_pdrop)(attn, deterministic=deterministic)\n y = attn @ v # (B, nh, T, hd)\n y = y.swapaxes(1, 2) # (B, T, nh, hd)\n y = y.reshape(B, T, C) # (B, T, C)\n c_proj = nn.Dense(C, use_bias=self.use_bias, 
dtype=self.dtype)(y) # (B, T, C)\n x = nn.Dropout(rate=self.resid_pdrop)(c_proj, deterministic=deterministic)\n return x\n\n\nclass MLP(nn.Module):\n n_head: int # alias: nh\n attn_pdrop: int\n resid_pdrop: int\n block_size: int # alias: T, sequence_length\n use_bias: bool\n dtype: Optional[str] = None\n\n @nn.compact\n def __call__(self, x, deterministic=None):\n B, T, C = x.shape\n x = nn.Dense(4 * C, use_bias=self.use_bias, dtype=self.dtype, name=\"c_fc\")(x)\n x = nn.gelu(x, approximate=True)\n x = nn.Dense(C, use_bias=self.use_bias, dtype=self.dtype, name=\"c_proj\")(x)\n x = nn.Dropout(self.resid_pdrop)(x, deterministic)\n return x\n\n\nclass Block(nn.Module):\n embd_dim: int # alias: C\n n_head: int # alias: nh\n attn_pdrop: int\n resid_pdrop: int\n block_size: int # alias: T, sequence_length\n use_bias: bool\n dtype: Optional[str] = None\n\n def setup(self):\n self.ln_1 = nn.LayerNorm(epsilon=1e-5, use_bias=self.use_bias, dtype=self.dtype)\n self.attn = CausalSelfAttention(\n self.embd_dim, self.n_head, self.attn_pdrop, self.resid_pdrop, self.block_size, self.use_bias, dtype=self.dtype\n )\n self.ln_2 = nn.LayerNorm(epsilon=1e-5, use_bias=self.use_bias, dtype=self.dtype)\n self.mlp = MLP(self.n_head, self.attn_pdrop, self.resid_pdrop, self.block_size, self.use_bias, self.dtype)\n\n def __call__(self, x, deterministic=None):\n x = x + self.attn(self.ln_1(x), deterministic)\n x = x + self.mlp(self.ln_2(x), deterministic)\n return x\n\n\n@dataclass(frozen=True)\nclass GPTConfig:\n n_layer: int = 3\n n_head: int = 3\n embd_dim: int = 48\n # dropout hyperparameters\n embd_pdrop: int = 0.1\n resid_pdrop: int = 0.1\n attn_pdrop: int = 0.1\n use_bias: bool = True\n dtype: Optional[str] = None\n\n\nclass GPT(nn.Module):\n config: GPTConfig\n\n # these options must be filled in externally\n vocab_size: int = None\n block_size: int = None\n\n @nn.compact\n def __call__(self, idx, deterministic=None):\n _, T = jnp.shape(idx) # B, T\n assert T <= self.block_size, f\"Cannot forward sequence of length {T}, block size is only {self.block_size}\"\n pos = jnp.arange(0, T)[None] # shape (1, T)\n\n wte = nn.Embed(self.vocab_size, self.config.embd_dim, dtype=self.config.dtype, name=\"wte\")\n wpe = nn.Embed(self.block_size, self.config.embd_dim, dtype=self.config.dtype, name=\"wpe\")\n\n token_embed = wte(idx) # token embeddings of shape (B, T, embd_dim)\n pos_embed = wpe(pos) # position embeddings of shape (1, T, embd_dim)\n x = nn.Dropout(self.config.embd_pdrop)(token_embed + pos_embed, deterministic)\n\n for i in range(self.config.n_layer):\n x = Block(\n self.config.embd_dim,\n self.config.n_head,\n self.config.attn_pdrop,\n self.config.resid_pdrop,\n self.block_size,\n self.config.use_bias,\n self.config.dtype,\n name=str(i),\n )(x, deterministic=deterministic)\n\n x = nn.LayerNorm(1e-5, dtype=self.config.dtype, use_bias=self.config.use_bias, name=\"ln_f\")(x)\n logits = wte.attend(x)\n return logits\n\n\nGPTConfigPreset = tyro.extras.subcommand_type_from_defaults(\n {\n \"openai-gpt\": GPTConfig(n_layer=12, n_head=12, embd_dim=768), # 117M params\n # GPT-2 configs\n \"gpt2\": GPTConfig(n_layer=12, n_head=12, embd_dim=768), # 124M params\n \"gpt2-medium\": GPTConfig(n_layer=24, n_head=16, embd_dim=1024), # 350M params\n \"gpt2-large\": GPTConfig(n_layer=36, n_head=20, embd_dim=1280), # 774M params\n \"gpt2-xl\": GPTConfig(n_layer=48, n_head=25, embd_dim=1600), # 1558M params\n # Gophers\n \"gopher-44m\": GPTConfig(n_layer=8, n_head=16, embd_dim=512),\n # (there are a number more...)\n # I made 
these tiny models up\n \"gpt-small\": GPTConfig(n_layer=6, n_head=6, embd_dim=384, use_bias=False, dtype=\"bfloat16\"),\n \"gpt-mini\": GPTConfig(n_layer=6, n_head=6, embd_dim=192),\n \"gpt-micro\": GPTConfig(n_layer=4, n_head=4, embd_dim=128),\n \"gpt-nano\": GPTConfig(n_layer=3, n_head=3, embd_dim=48),\n }\n)\n\n\ndef param_decay_mask(params: flax.core.FrozenDict) -> flax.core.FrozenDict:\n \"\"\"pytree mask for non-bias parameters\"\"\"\n flat_params = flax.traverse_util.flatten_dict(params)\n flat_param_mask = {k: k[-1] not in (\"bias\", \"embedding\", \"scale\") for k in flat_params.keys()}\n param_mask = flax.traverse_util.unflatten_dict(flat_param_mask)\n return flax.core.frozen_dict.freeze(param_mask)\n\n\ndef generate(train_state, block_size, key, input_tokens, max_new_tokens, temperature=1.0, top_k=None):\n B, T = input_tokens.shape\n padding = jnp.zeros((B, max(block_size - T, max_new_tokens)), dtype=jnp.int32)\n tokens = jnp.concatenate([input_tokens, padding], axis=-1)\n indexes = jnp.arange(T, T + max_new_tokens)\n start_indexes = (indexes - block_size).clip(min=0)\n # print(\"B, T, max_new_tokens, tokens\", B, T, max_new_tokens, tokens.shape)\n # tokens index -> tokens None\n def scan_f(tokens, item):\n (i, start_i) = item\n # l: x y\n # t: a b - -\n # i: 0 1 2 3\n step_key = jax.random.fold_in(key, i)\n # if the sequence context is growing too long we must crop it at block_size\n # idx_cond = idx if idx.size(1) <= self.block_size else idx[:, -self.block_size:]\n # forward the model to get the logits for the index in the sequence\n logits = train_state.apply_fn(\n train_state.params,\n jax.lax.dynamic_slice(tokens, (0, start_i), (B, block_size)),\n deterministic=False,\n rngs={\"dropout\": step_key},\n ) # TODO: (0, 0) is going to be problematic\n # pluck the logits at the final step and scale by desired temperature\n logits = logits[:, i - 1, :] / temperature\n # optionally crop the logits to only the top k options\n # sample from the distribution\n if top_k is not None:\n top_logits, top_tokens = jax.lax.top_k(logits, min(top_k, logits.shape[-1]))\n token_idx = jax.random.categorical(step_key, top_logits, axis=-1)\n next_token = jnp.take_along_axis(top_tokens, token_idx[:, None], axis=-1).squeeze(-1)\n else:\n next_token = jax.random.categorical(step_key, logits, axis=-1)\n # logits = jnp.where(logits < v[:, -1:], float('-inf'), logits)\n # append sampled index to the running sequence and continue\n tokens = tokens.at[:, i].set(next_token)\n\n return tokens, None\n\n tokens, _ = jax.lax.scan(scan_f, tokens, (indexes, start_indexes))\n\n return tokens\n\n\nif __name__ == \"__main__\":\n block_size = 3\n embd_dim = 12\n n_head = 3\n key = jax.random.PRNGKey(0)\n key, params_key, dropout_key = jax.random.split(key=key, num=3)\n x = jax.random.normal(key, (1, block_size, embd_dim)) # B, T, C; or batch_size, sequence_length, embedding_dimensionality\n\n # CausalSelfAttention Demo\n attn_pdrop = 0.1\n resid_pdrop = 0.1\n attn = CausalSelfAttention(\n embd_dim=embd_dim,\n n_head=n_head,\n attn_pdrop=attn_pdrop,\n resid_pdrop=resid_pdrop,\n block_size=block_size,\n use_bias=False,\n )\n attn_params = attn.init(params_key, x, deterministic=True)\n attn_y = attn.apply(attn_params, x, deterministic=True)\n attn_y = attn.apply(attn_params, x, deterministic=False, rngs={\"dropout\": dropout_key})\n\n # Block Demo\n block = Block(\n embd_dim=embd_dim, n_head=n_head, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, block_size=block_size, use_bias=False\n )\n block_params = 
block.init(params_key, x, deterministic=True)\n block_y = block.apply(block_params, x, deterministic=True)\n\n # GPT Demo\n n_layer = 3\n vocab_size = 10\n # x = jax.random.randint(key, (1, block_size), minval=0, maxval=vocab_size) # B, T; or batch_size, sequence_length\n # y = jax.random.randint(key, (1,), minval=0, maxval=vocab_size) # B; or batch_size, sequence_length\n x = jnp.array([[0, 1, 1, 2, 2, 1, 0, 1, 1, 1, 2], [0, 1, 1, 2, 0, 2, 0, 0, 1, 1, 2], [0, 1, 2, 2, 1, 0, 0, 0, 1, 1, 2]])\n y = jnp.array(\n [\n [-1, -1, -1, -1, -1, 0, 1, 1, 1, 2, 2],\n [-1, -1, -1, -1, -1, 0, 0, 1, 1, 2, 2],\n [-1, -1, -1, -1, -1, 0, 0, 1, 1, 2, 2],\n ]\n )\n gpt = GPT(\n config=GPTConfig(\n n_layer=n_layer,\n n_head=n_head,\n embd_dim=embd_dim,\n ),\n vocab_size=3,\n block_size=11,\n )\n gpt_params = gpt.init(params_key, x, deterministic=True)\n\n def loss_fn(gpt_params, x, targets=None, deterministic=False):\n logits = gpt.apply(gpt_params, x, targets, deterministic=deterministic)\n # Costa: the following should be equivalent to `ignore_index=-1`\n # in F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)\n valid_targets = jnp.where(targets == -1, 0, targets) # remove the mask from the integer labels for cross entropy\n loss = optax.softmax_cross_entropy_with_integer_labels(\n logits.reshape(-1, jnp.shape(logits)[-1]), valid_targets.reshape(-1)\n )\n loss = loss.mean(where=targets.reshape(-1) != -1) # only calculate the mean for indices that are ignored\n return loss\n\n gpt_loss, (gpt_y) = loss_fn(gpt_params, x, y, deterministic=True)\n x = jnp.array([[0, 1, 1, 2, 2, 1], [0, 1, 1, 2, 0, 2], [0, 1, 2, 2, 1, 0]])\n logits = gpt.apply(gpt_params, x, deterministic=True)\n","repo_name":"vwxyzjn/cleanrlhf","sub_path":"cleanrlhf/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13199,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"43480536609","text":"#!/usr/bin/env python3\n'''\nThis script is intended to look at the events that construct best above horizon in allsky maps. I want to view the\npeak to sidelobe values and other parameters for the belowhorizon and abovehorizon maps and determine if there is\nan obvious cut for which sidelobed above horizon events can be discriminated. 
\n'''\n\nimport sys\nimport os\nimport inspect\nimport h5py\nimport copy\nfrom pprint import pprint\nimport textwrap\nimport pandas\n\nimport numpy\nimport scipy\nimport scipy.signal\n\nfrom beacon.tools.data_slicer import dataSlicer\nimport beacon.tools.get_plane_tracks as pt\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm, ticker\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.dates as mdates\nimport time\nfrom datetime import datetime\nimport pytz\nplt.ion()\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings(\"ignore\")\n\nraw_datapath = os.environ['BEACON_DATA']\n#processed_datapath = os.path.join(os.environ['BEACON_PROCESSED_DATA'],'backup_pre_all_map_run_12-5-2021')\nprocessed_datapath = os.environ['BEACON_PROCESSED_DATA']\nprint('SETTING processed_datapath TO: ', processed_datapath)\n\ndef maximizeAllFigures():\n '''\n Maximizes all matplotlib plots.\n '''\n for i in plt.get_fignums():\n plt.figure(i)\n fm = plt.get_current_fig_manager()\n fm.resize(*fm.window.maxsize())\n\n'''\n'impulsivity_h','impulsivity_v', 'cr_template_search_h', 'cr_template_search_v', 'std_h', 'std_v', 'p2p_h', 'p2p_v', 'snr_h', 'snr_v',\\\n'time_delay_0subtract1_h','time_delay_0subtract2_h','time_delay_0subtract3_h','time_delay_1subtract2_h','time_delay_1subtract3_h','time_delay_2subtract3_h',\\\n'time_delay_0subtract1_v','time_delay_0subtract2_v','time_delay_0subtract3_v','time_delay_1subtract2_v','time_delay_1subtract3_v','time_delay_2subtract3_v',\n'cw_present','cw_freq_Mhz','cw_linear_magnitude','cw_dbish','theta_best_h','theta_best_v','elevation_best_h','elevation_best_v','phi_best_h','phi_best_v',\\\n'calibrated_trigtime','triggered_beams','beam_power','hpol_peak_to_sidelobe','vpol_peak_to_sidelobe','hpol_max_possible_map_value','vpol_max_possible_map_value',\\\n'map_max_time_delay_0subtract1_h','map_max_time_delay_0subtract2_h','map_max_time_delay_0subtract3_h',\\\n'map_max_time_delay_1subtract2_h','map_max_time_delay_1subtract3_h','map_max_time_delay_2subtract3_h',\\\n'map_max_time_delay_0subtract1_v','map_max_time_delay_0subtract2_v','map_max_time_delay_0subtract3_v',\\\n'map_max_time_delay_1subtract2_v','map_max_time_delay_1subtract3_v','map_max_time_delay_2subtract3_v'\n'''\n\n#Include special conditions for certain events\nspecial_conditions = {}\n# Default Values\ninclude_baselines = numpy.array([0,1,2,3,4,5])\nappend_notches = None\n\nspecial_conditions['r5755e112418'] = {\n 'include_baselines':numpy.array([3,4,5]),\n 'append_notches':[[62,63.5],[67,69]]\n }\n\nspecial_conditions['r5966e45159'] = {\n 'append_notches':[[33,38]]\n }\n\n\nspecial_conditions['r5896e46823'] = {\n 'append_notches':[[33,38]]\n }\n\nspecial_conditions['r5889e70102'] = {\n 'append_notches':[[33,38]]\n }\nspecial_conditions['r5853e114664'] = {\n 'append_notches':[[62.5,70]]\n }\nspecial_conditions['r5978e120178'] = {\n 'append_notches':[[33,38]]\n }\nspecial_conditions['r5984e85580'] = {\n 'append_notches':[[33,38]]\n }\nspecial_conditions['r6019e31292'] = {\n 'append_notches':[[60,66]]\n }\nspecial_conditions['r6235e54175'] = {\n 'append_notches':[[43,47]]\n }\n\nspecial_conditions['r6243e68912'] = {\n 'append_notches':[[43,47]]\n }\n\nspecial_conditions['r6262e93320'] = {\n 'append_notches':[[48,54]]\n }\n\nspecial_conditions['r6263e33446'] = {\n 'append_notches':[[48,54]]\n }\nspecial_conditions['r6580e36332'] = {\n 
'append_notches':[[48,54]]\n }\n\nignore_airplanes = []#['a73278']\n\nif __name__ == '__main__':\n args = copy.copy(sys.argv)\n if len(sys.argv) == 2:\n if 'r' in sys.argv[1] and 'e' in sys.argv[1]:\n args = [sys.argv[0], int(sys.argv[1].replace('r','').split('e')[0]), int(sys.argv[1].replace('r','').split('e')[1])]\n\n if len(args) >= 3:\n run = int(args[1])\n eventid = int(args[2])\n if len(args) == 4:\n apply_additional_notches = bool(args[3])\n else:\n # plt.close('all')\n apply_additional_notches = True\n\n cmap = 'cool'#'coolwarm'\n impulsivity_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n time_delays_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n map_direction_dset_key = 'LPf_85.0-LPo_6-HPf_25.0-HPo_8-Phase_1-Hilb_0-upsample_16384-maxmethod_0-sinesubtract_1-deploy_calibration_september_2021_minimized_calibration.json-n_phi_3600-min_phi_neg180-max_phi_180-n_theta_480-min_theta_0-max_theta_120-scope_allsky'\n\n ds = dataSlicer([run], impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key, analysis_data_dir=processed_datapath)\n \n # Custom testing values\n if apply_additional_notches:\n event_key = 'r%ie%i'%(run,eventid)\n if event_key in list(special_conditions.keys()):\n if 'append_notches' in list(special_conditions[event_key].keys()):\n append_notches = special_conditions[event_key]['append_notches']\n if 'include_baselines' in list(special_conditions[event_key].keys()):\n include_baselines = special_conditions[event_key]['include_baselines']\n\n\n # ds.eventInspector({run:[eventid]}, show_all=True, include_time_delays=True,append_notches=append_notches)\n\n for conference_mode in [False]:\n ds.conference_mode = conference_mode\n ds.eventInspector({run:[eventid]}, show_all=False, include_time_delays=not ds.conference_mode,append_notches=append_notches,include_baselines=include_baselines, div128=ds.conference_mode)\n \n\n # ds.eventInspector({run:[eventid]}, show_all=False, include_time_delays=True,append_notches=append_notches)\n # ds.eventInspector({run:[eventid]}, show_all=False, include_time_delays=False,append_notches=append_notches)\n print('https://users.rcc.uchicago.edu/~cozzyd/monutau/#event&run=%i&entry=%i'%(run,eventid))\n\n\n suspected_airplanes = {}#{6027:[21206,21515,21718,21812,21822,21905,21989,21993,22000,22008,22098,22176,22191,22208,22220,22323,22504,22507,22509,22565,22566,22652,22655,22731,22742,22749,22845,22847,22903,22917,22932,23364,23419,23429,23440,23462,23471,23477,23514,23519,23527,23557,23558,23593,23621,23627,23636,23643,23653,23659,23677,23678,23687,23920,28991,29275,29365,29459,29475,29595,29935,29939,29945,29948,30344,44049,51866,51871,51970,51986,51989,52086,52090,52099,52106,52199,52861,52957,52973,52974,53074,53175]}\n if run in list(suspected_airplanes.keys()):\n if eventid in suspected_airplanes[run]:\n total_mean_corr_values, fig_averaged, ax_averaged = ds.cor.averagedMap(suspected_airplanes[run], 'hpol', plot_map=True, hilbert=False, max_method=None, mollweide=False, zenith_cut_ENU=None,zenith_cut_array_plane=None, center_dir='E', circle_zenith=None, circle_az=None, radius=1.0, time_delay_dict={})\n try_animated = False\n if try_animated:\n animatedMap(suspected_airplanes[run], 'hpol', '', include_baselines=[0,1,2,3,4,5], plane_zenith=None, plane_az=None, 
map_source_distance_m=None, radius=1.0, hilbert=False, max_method=None,center_dir='E',save=False,dpi=300,fps=3)\n ax_animated = ds.cor.axs[-1]\n else:\n ax_averaged = None\n else:\n ax_averaged = None\n pprint(ds.inspector_mpl['current_table'])\n\n if pandas.__version__ == '1.4.0' and True:\n from tools.airplane_traffic_loader import getFileNamesFromTimestamps, enu2Spherical, addDirectionInformationToDataFrame, readPickle, getDataFrames\n from tools.get_plane_tracks import plotAirplaneTrackerStatus\n # Needs to match the version of pandas which was used to store airplane data.\n ax_keys = ['fig1_map_h','fig1_map_v']\n if ds.show_all:\n ax_keys.append('fig1_map_all')\n try:\n print('Looking for airplanes...')\n time_window_s = 5*60\n plot_distance_cut_limit = 500\n min_approach_cut_km = 1e6\n origin = ds.cor.A0_latlonel_hpol\n force_fit_order = 3#None\n\n elevation_best_choice = ds.getDataFromParam({run:[eventid]}, 'elevation_best_choice')\n phi_best_choice = ds.getDataFromParam({run:[eventid]}, 'phi_best_choice')\n best_elevation = float(elevation_best_choice[run][numpy.array([eventid]) == eventid][0])\n best_phi = float(phi_best_choice[run][numpy.array([eventid]) == eventid][0])\n\n event_time = ds.cor.getEventTimes()[numpy.array([eventid])][0]\n start = event_time - time_window_s/2\n stop = event_time + time_window_s/2\n\n files = getFileNamesFromTimestamps(start, stop, verbose=True)\n df = getDataFrames(start, stop, query='up > 0 and azimuth >= -90 and azimuth <= 90 and zenith < 85')\n minimum_approach = 1e10\n minimum_approach_airplane = ''\n minimum_approach_rpt = None\n rpt_at_event_time = None\n minimum_rpt_at_event_time = None\n all_min_angular_distances = {}\n print(numpy.unique(df['icao24']))\n for index, icao24 in enumerate(numpy.unique(df['icao24'])):\n if icao24 in ignore_airplanes:\n continue\n color = plt.rcParams['axes.prop_cycle'].by_key()['color'][index%len(plt.rcParams['axes.prop_cycle'].by_key()['color'])]\n try:\n traj = df.query('icao24 == \"%s\" and distance < %f'%(icao24, plot_distance_cut_limit*1000))\n except Exception as e:\n print(e)\n continue\n if force_fit_order is not None:\n order = force_fit_order\n else:\n if len(traj) == 0:\n continue\n elif len(traj) < 40:\n order = 3\n elif len(traj) < 80:\n order = 5\n else:\n order = 7\n poly = pt.PlanePoly(traj['utc_timestamp'].to_numpy(),(traj['east'].to_numpy(),traj['north'].to_numpy(),traj['up'].to_numpy()),order=order,plot=False)\n if poly.valid == True:\n #Might fail for single data point, or similar\n t = numpy.arange(float(min(traj['utc_timestamp'])), float(max(traj['utc_timestamp'])))\n\n interpolated_airplane_locations = poly.poly(t)\n rpt = enu2Spherical(interpolated_airplane_locations)\n\n approach_angle = numpy.sqrt((rpt[:,1] - best_phi)**2 + (rpt[:,2] - (90.0 - best_elevation ))**2)\n # numpy.sqrt((minimum_approach_rpt[1] - best_phi)**2 + (minimum_approach_rpt[2] - (90.0 - best_elevation ))**2)\n\n\n minimum_approach_index = numpy.argmin(approach_angle)\n\n rpt_at_event_time = enu2Spherical(poly.poly(event_time))[0]\n\n all_min_angular_distances[icao24] = {}\n all_min_angular_distances[icao24]['angular distance'] = approach_angle[minimum_approach_index]\n all_min_angular_distances[icao24]['trigtime - t'] = event_time - t[minimum_approach_index]\n\n if approach_angle[minimum_approach_index] < minimum_approach:\n minimum_approach = approach_angle[minimum_approach_index]\n minimum_approach_t = t[minimum_approach_index]\n minimum_approach_rpt = rpt[minimum_approach_index,:]\n minimum_approach_airplane = 
icao24\n\n minimum_rpt_at_event_time = enu2Spherical(poly.poly(event_time))[0]\n\n\n print(minimum_approach , str(minimum_approach_rpt))\n\n\n rpe = numpy.copy(rpt)\n rpe[:,2] = 90.0 - rpe[:,2]\n\n for ax_key in ax_keys:\n ax = ds.inspector_mpl[ax_key]\n ax.plot(rpe[:,1], rpe[:,2], linestyle = '--', c=color, alpha=0.2)\n ax.scatter(rpt_at_event_time[1], 90.0 - rpt_at_event_time[2],marker='|',c=color)\n\n if ax_averaged is not None:\n ax_averaged.plot(rpe[:,1], rpe[:,2], linestyle = '--', c=color, alpha=0.2)\n ax_averaged.scatter(rpt_at_event_time[1], 90.0 - rpt_at_event_time[2],marker='|',c=color)\n if try_animated:\n ax_animated.plot(rpe[:,1], rpe[:,2], linestyle = '--', c=color, alpha=0.2)\n ax_animated.scatter(rpt_at_event_time[1], 90.0 - rpt_at_event_time[2],marker='|',c=color)\n\n\n\n for ax_key in ax_keys:\n ax = ds.inspector_mpl[ax_key]\n ax.scatter(traj['azimuth'], 90.0 - traj['zenith'], c=color)\n\n if ax_averaged is not None:\n ax_averaged.scatter(traj['azimuth'], 90.0 - traj['zenith'], c=color)\n if try_animated:\n ax_animated.scatter(traj['azimuth'], 90.0 - traj['zenith'], c=color)\n\n [print(icao24, ' ang = ', all_min_angular_distances[icao24]['angular distance'], ' dt = ', all_min_angular_distances[icao24]['trigtime - t']) for icao24 in list(all_min_angular_distances.keys())]\n\n print('Relative to\\nphi = %0.2f, eleveation = %0.2f'%(best_phi, best_elevation))\n print('minimum_approach = ',minimum_approach)\n print('minimum_approach_t = ',minimum_approach_t)\n print('minimum_approach_rpt = ',minimum_approach_rpt)\n print('minimum_approach_airplane = ',minimum_approach_airplane)\n print('minimum_rpt_at_event_time = ',minimum_rpt_at_event_time)\n\n for ax_key in ax_keys:\n if minimum_approach_rpt is not None:\n ax.scatter(minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2],\n marker='*',c='k',\n label='Minimum Angular Approach\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg\\nat triggertime - t = %0.2f\\nicao24 = %s'%(minimum_approach_rpt[0]/1000.0, minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2], event_time - minimum_approach_t, minimum_approach_airplane))\n if ax_averaged is not None:\n ax_averaged.scatter(minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2],\n marker='*',c='k',\n label='Minimum Angular Approach\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg\\nat triggertime - t = %0.2f\\nicao24 = %s'%(minimum_approach_rpt[0]/1000.0, minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2], event_time - minimum_approach_t, minimum_approach_airplane))\n if try_animated:\n ax_animated.scatter(minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2],\n marker='*',c='k',\n label='Minimum Angular Approach\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg\\nat triggertime - t = %0.2f\\nicao24 = %s'%(minimum_approach_rpt[0]/1000.0, minimum_approach_rpt[1], 90.0 - minimum_approach_rpt[2], event_time - minimum_approach_t, minimum_approach_airplane))\n\n if minimum_rpt_at_event_time is not None and rpt_at_event_time is not None:\n ax.scatter(minimum_rpt_at_event_time[1], 90.0 - minimum_rpt_at_event_time[2],\n marker='o',c='k',\n label='At Trig Time = %0.2f\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg'%(event_time, rpt_at_event_time[0]/1000.0, rpt_at_event_time[1], 90.0 - rpt_at_event_time[2]))\n if ax_averaged is not None:\n ax_averaged.scatter(minimum_rpt_at_event_time[1], 90.0 - minimum_rpt_at_event_time[2],\n marker='o',c='k',\n label='At Trig Time = %0.2f\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg'%(event_time, rpt_at_event_time[0]/1000.0, rpt_at_event_time[1], 90.0 - 
rpt_at_event_time[2]))\n if try_animated:\n ax_animated.scatter(minimum_rpt_at_event_time[1], 90.0 - minimum_rpt_at_event_time[2],\n marker='o',c='k',\n label='At Trig Time = %0.2f\\nr,phi,el = %0.2f km, %0.2f deg, %0.2f deg'%(event_time, rpt_at_event_time[0]/1000.0, rpt_at_event_time[1], 90.0 - rpt_at_event_time[2]))\n\n # ax.legend(loc='lower center', fontsize = 16)\n # for t in ax.get_legend().get_texts():\n # print(t)\n\n maximizeAllFigures()\n except Exception as e:\n print(e)\n\n # import pdb; pdb.set_trace()\n","repo_name":"djsouthall/beacon","sub_path":"tools/event_info.py","file_name":"event_info.py","file_ext":"py","file_size_in_byte":18981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43027772032","text":"from email.parser import HeaderParser\nfrom .parser import Parser\nfrom .utils import cleanup_text, decode_and_convert_to_unicode\n\n\nclass Hopper:\n\n def __init__(self):\n self.parser = Parser()\n\n def analyse(self, raw_headers):\n \"\"\"\n sample output:\n {\n 'To': u'robin@apple.com',\n 'From': u'Dhruv ',\n 'Cc': u'Shivam ',\n 'Bcc': u'Abhishek ',\n 'total_delay': 2,\n 'trail': [\n {\n 'from': '',\n 'protocol': 'HTTP',\n 'receivedBy': '10.31.102.130',\n 'timestamp': 1452574216,\n 'delay': 0\n },\n {\n 'from': '',\n 'protocol': 'SMTP',\n 'receivedBy': 'mail-vk0-x22b.google.com',\n 'timestamp': 1452574218,\n 'delay': 2\n },\n {\n 'from': 'mail-vk0-x22b.google.com',\n 'protocol': 'ESMTPS',\n 'receivedBy': 'mx.google.com',\n 'timestamp': 1452574218,\n 'delay': 0\n },\n {\n 'from': '',\n 'protocol': 'SMTP',\n 'receivedBy': '10.66.77.65',\n 'timestamp': 1452574218,\n 'delay': 0\n }\n ]\n }\n \"\"\"\n if raw_headers is None:\n return None\n raw_headers = raw_headers.strip()\n parser = HeaderParser()\n headers = parser.parsestr(raw_headers)#.encode('ascii', 'ignore'))\n received_headers = headers.get_all('Received')\n trail = self.__generate_trail(received_headers)\n analysis = {\n 'From': decode_and_convert_to_unicode(headers.get('From')),\n 'To': decode_and_convert_to_unicode(headers.get('To')),\n 'Cc': decode_and_convert_to_unicode(headers.get('Cc')),\n 'Bcc': decode_and_convert_to_unicode(headers.get('Bcc')),\n 'trail': trail,\n 'total_delay': sum([hop['delay'] for hop in trail]) if trail else 0\n }\n return analysis\n\n def __generate_trail(self, received):\n \"\"\"\n Takes a list of `received` headers and\n creates the email trail (structured information of hops in transit)\n \"\"\"\n if received is None:\n return None\n\n received = [cleanup_text(header) for header in received]\n trail = [self.__analyse_hop(header) for header in received]\n\n # sort in chronological order\n trail.reverse()\n trail = self.__set_delay_information(trail)\n return trail\n\n def __analyse_hop(self, header):\n \"\"\" Parses the details associated with the hop into a structured format \"\"\"\n return {\n \"from\": self.parser.extract_from_label(header),\n \"receivedBy\": self.parser.extract_received_by_label(header),\n \"protocol\": self.parser.extract_protocol(header),\n \"timestamp\": self.parser.extract_timestamp(header)\n }\n\n def __set_delay_information(self, hop_list):\n \"\"\" For each hop sets the calculated `delay` from previous hop | mutates list\"\"\"\n previous_timestamp = None\n for hop in hop_list:\n hop['delay'] = self.parser.calculate_delay(hop['timestamp'], previous_timestamp)\n previous_timestamp = hop['timestamp']\n return 
hop_list\n","repo_name":"MSAdministrator/hopper","sub_path":"hopper/hopper.py","file_name":"hopper.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"2078246174","text":"import torch\nimport numpy as np\nimport random\nfrom scipy.spatial.distance import pdist, squareform\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.animation import FuncAnimation\n\n\ndef draw_trace(trace_matrix):\n # color_vec = ['r', 'b', 'y', 'b', 'm', 'k']\n # entity_vec = ['entity1', 'entity2', 'entity3']\n font = {\n 'color': 'k',\n 'style': 'oblique',\n 'size': 20,\n 'weight': 'bold'\n }\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.view_init(elev=20, azim=15)\n ax.set_title(\"Entity Traces\", fontdict=font)\n # ax.set_xlim(0, 1000)\n # ax.set_ylim(0, 800)\n # ax.set_zlim(0, 600)\n\n for i in range(0, 6):\n x_trace = trace_matrix[:, i, 0].detach().numpy()\n y_trace = trace_matrix[:, i, 1].detach().numpy()\n z_trace = trace_matrix[:, i, 2].detach().numpy()\n\n # ax.scatter3D(x_trace, y_trace, z_trace) # 绘制散点图 cmap='Blues'\n ax.plot3D(x_trace, y_trace, z_trace)\n # ax.legend()\n\n plt.show()\n plt.close()\n\n\ndef get_obstacle_coordinate(center, radius):\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n x = radius * np.outer(np.cos(u), np.sin(v)) + center[0]\n y = radius * np.outer(np.sin(u), np.sin(v)) + center[1]\n z = radius * np.outer(np.ones(np.size(u)), np.cos(v)) + center[2]\n\n return x, y, z\n\n\ndef draw_dynamic_graph(graph_data, times, center, radius):\n\n font = {\n 'color': 'k',\n 'style': 'oblique',\n 'size': 20,\n 'weight': 'bold'\n }\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.view_init(elev=20, azim=15)\n ax.set_title(\"Entity Traces\", fontdict=font)\n\n for r_i in range(center.shape[0]):\n obs_x, obs_y, obs_z = get_obstacle_coordinate(center[r_i, :], radius)\n # surface plot rstride 值越大,图像越粗糙\n ax.plot_surface(obs_x, obs_y, obs_z, rstride=4, cstride=4, color='b')\n\n def update(t):\n for i in range(6):\n x_trace = graph_data[0:(t + 1), i, 0].detach().numpy()\n y_trace = graph_data[0:(t + 1), i, 1].detach().numpy()\n z_trace = graph_data[0:(t + 1), i, 2].detach().numpy()\n\n ax.plot3D(x_trace, y_trace, z_trace)\n # ax.legend()\n\n ani = FuncAnimation(fig, update, frames=times, interval=500, blit=False, repeat=False) # 创建动画效果\n plt.show()\n # ani.save('line.gif', writer='pillow')\n plt.close()\n\n\ndef init_position(uav_n):\n uav_pos = np.zeros([uav_n, 3])\n g_r = np.arange(0, 13, 0.1)\n uav_pos[:, 0] = np.array(random.sample(set(g_r), uav_n))\n uav_pos[:, 1] = np.array(random.sample(set(g_r), uav_n))\n uav_pos[:, 2] = np.array(random.sample(set(g_r), uav_n))\n\n return uav_pos\n\n\ndef create_obstacle(obs_n, ep):\n obs_p = np.zeros([obs_n, 3])\n g_x = np.arange(0, ep[0], 1)\n g_y = np.arange(0, ep[1], 1)\n g_z = np.arange(0, ep[2], 1)\n\n obs_p[:, 0] = np.array(random.sample(set(g_x), obs_n))\n obs_p[:, 1] = np.array(random.sample(set(g_y), obs_n))\n obs_p[:, 2] = np.array(random.sample(set(g_z), obs_n))\n\n return obs_p\n\n\ndef calculate_dis_obs(uav_pos, obs_pos):\n expand_mat = np.zeros([obs_pos.shape[0], 3])\n dis_obs = np.zeros([uav_pos.shape[0], obs_pos.shape[0]])\n for i in range(uav_pos.shape[0]):\n expand_mat[:] = uav_pos[i, :]\n diff_mat = expand_mat - obs_pos\n dis_obs[i] = 
np.sqrt(np.sum(diff_mat * diff_mat, axis=1))\n\n return dis_obs\n\n\ndef compute_unit_vector(vs, ve):\n v_direct = ve - vs\n modulus = np.sqrt(np.dot(v_direct, v_direct))\n\n if modulus == 0:\n normal_v = np.zeros(3)\n\n else:\n normal_v = v_direct / modulus\n\n return normal_v\n\n\ndef v_repulsion(pos, obj_pos, obj_dis, s_r):\n v_force = np.zeros([obj_dis.shape[0], 3])\n for i in range(obj_dis.shape[0]):\n force_mag = 0\n v_direct = compute_unit_vector(obj_pos[i, :], pos)\n if obj_dis[i] > 0:\n force_mag = (1 / obj_dis[i] - 1 / s_r) * ((1 / obj_dis[i]) ** 3)\n\n v_force[i] = v_direct * force_mag\n\n v_rep = np.sum(v_force, axis=0)\n\n return v_rep\n\n\ndef v_attraction(pos, ep):\n v_direct = compute_unit_vector(pos, ep)\n # force_mag = np.sqrt(np.dot(ep - pos, ep - pos)) * 0.01\n # v_att = v_direct * force_mag\n v_att = v_direct\n\n return v_att\n\n\ndef v_neighbor(pos, uav_pos, c_r, uav_dis, s_r):\n nei_direct = np.zeros(3)\n for i in range(uav_pos.shape[0]):\n if uav_dis[i] > s_r:\n v_direct = compute_unit_vector(pos, uav_pos[i, :])\n\n force_mag = 2 * uav_dis[i] - c_r * 0.001\n nei_direct = nei_direct + force_mag * v_direct\n\n nei_direct = nei_direct\n\n return nei_direct\n\n\nclass UAVSystem:\n def __init__(self, num, pos_mat, safety_r, comm_r, v_m):\n self.uav_num = num\n self.safety_range = safety_r\n self.position = pos_mat\n self.communication_range = comm_r\n self.velocity = np.zeros([num, 3])\n self.v_max = v_m\n self.start = pos_mat.copy()\n self.alpha = 10\n self.beta = 1000 # 10\n self.gamma = 0.003 # 0.00165\n\n def get_acceleration(self, pos, dis_uav, dis_obs, ep, obs_r, obs_pos, uav_start):\n dv = np.zeros([4, 3])\n\n uav_idx = np.where(dis_uav < self.safety_range)[0]\n obs_idx = np.where(dis_obs < (self.safety_range + obs_r))[0]\n comm_idx = np.where(dis_uav < self.communication_range)[0]\n\n if uav_idx.size > 0:\n uav_pos = self.position[uav_idx, :]\n uav_dis = dis_uav[uav_idx]\n dv[0, :] = self.alpha * v_repulsion(pos, uav_pos, uav_dis, self.safety_range)\n\n if obs_idx.size > 0:\n obs_pos_tmp = obs_pos[obs_idx, :]\n dis_obs_tmp = dis_obs[obs_idx]\n dv[1, :] = self.alpha * v_repulsion(pos, obs_pos_tmp, dis_obs_tmp, self.safety_range)\n\n curr_to_final = np.sqrt(np.dot(ep - pos, ep - pos))\n start_to_final = np.sqrt(np.dot(ep - uav_start, ep - uav_start))\n cff = curr_to_final / start_to_final\n # cff = 1\n\n if curr_to_final > eps:\n dv[2, :] = self.beta * v_attraction(pos, ep)\n\n if comm_idx.size > 0:\n uav_cmm_pos = self.position[comm_idx, :]\n uav_dis_comm = dis_uav[comm_idx]\n dv[3, :] = cff * self.gamma * v_neighbor(pos, uav_cmm_pos, self.communication_range,\n uav_dis_comm, self.safety_range)\n\n resultant_dv = np.sum(dv, axis=0)\n\n return resultant_dv\n\n def get_next_position(self, obs_pos, ep, obs_r):\n acceleration = np.zeros([self.uav_num, 3])\n\n dis_uav_uav_tmp = squareform(pdist(self.position))\n dis_uav_uav = dis_uav_uav_tmp + np.eye(self.uav_num) * 100.0\n dis_uav_obs = calculate_dis_obs(self.position, obs_pos)\n\n for i in range(self.uav_num):\n acceleration[i, :] = self.get_acceleration(self.position[i, :], dis_uav_uav[i, :], dis_uav_obs[i, :],\n ep, obs_r, obs_pos, self.start[i, :])\n\n velocity_tmp = self.velocity[i, :] + acceleration[i, :]\n v_length = np.sqrt(np.dot(velocity_tmp, velocity_tmp))\n if self.v_max < v_length:\n self.velocity[i, :] = self.v_max * velocity_tmp / v_length\n\n else:\n self.velocity[i, :] = velocity_tmp\n\n self.position = self.position + self.velocity\n\n return self.position\n\n\nif __name__ == '__main__':\n uav_num = 
32 # 32\n obstacle_num = 50 # 100\n step = 200 # 200\n\n eps = 5\n v_max = 2.5 # 2\n safety_range = 0.1\n obstacle_r = 5\n communication_range = 10\n\n start_point = np.array([0, 0, 0])\n end_point = np.array([135, 120, 110])\n\n all_position = torch.zeros([step, uav_num, 3])\n\n uav_position = init_position(uav_num)\n obs_position = create_obstacle(obstacle_num, end_point)\n\n us_obj = UAVSystem(uav_num, uav_position, safety_range, communication_range, v_max)\n\n all_position[0, :, :] = torch.from_numpy(uav_position)\n\n for it in range(step - 1):\n next_position = us_obj.get_next_position(obs_position, end_point, obstacle_r)\n all_position[it+1, :, :] = torch.from_numpy(next_position)\n\n draw_dynamic_graph(all_position, step, obs_position, obstacle_r)\n # torch.save(all_position, './trace_data/uav_trace_32_adap_neb_3')\n # torch.save(all_position, 'uav_trace_train_32')\n\n # print(all_position[:, 30, :])\n\n\n","repo_name":"zc1993cn/multi-uavs","sub_path":"robust_multi-uav/未命名文件夹/UAVs_movement.py","file_name":"UAVs_movement.py","file_ext":"py","file_size_in_byte":8624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40718312888","text":"\"\"\"\nThe model intent classifier module.\n\"\"\"\nfrom peque_nlu.intent_classifiers import IntentClassifier\nfrom peque_nlu.utils import IntentUtils\n\nfrom peque_nlu.intent_engines import LogisticIntentEngine\n\n\nclass ModelIntentClassifier(IntentClassifier, IntentUtils):\n \"\"\"\n The ModelIntentClassifier class.\n\n This class is used to create a model intent classifier.\n \"\"\"\n\n def __init__(\n self, language, intent_engine=None, feature_extractor=None, saver=None\n ):\n \"\"\"\n Initialize the ModelIntentClassifier.\n\n :param language: The language to use.\n :type language: str.\n\n :param intent_engine: The intent engine to use.\n :type intent_engine: IntentEngine.\n\n :param feature_extractor: The feature extractor to use.\n :type feature_extractor: FeatureExtractor.\n\n :param saver: The saver to use.\n :type saver: Saver.\n\n \"\"\"\n\n if intent_engine is None:\n self.intent_engine = LogisticIntentEngine(language)\n else:\n self.intent_engine = intent_engine\n\n self.feature_extractor = feature_extractor\n\n self.dataset = None\n self.categories = []\n\n self.saver = saver\n\n def save(self, path):\n \"\"\"\n Save the model.\n\n :param path: The path to save the model.\n :type path: str.\n \"\"\"\n\n if self.saver is None:\n raise ValueError(\"No saver was provided\")\n self.saver.save(self, path)\n\n @staticmethod\n def load(saver, path) -> \"ModelIntentClassifier\":\n \"\"\"\n Load the model.\n\n :param saver: The saver to use.\n :type saver: Saver.\n :param path: The path to load the model.\n :type path: str.\n\n :return: The loaded model.\n :rtype: ModelIntentClassifier.\n \"\"\"\n\n return saver.load(path)\n\n def fit(self, dataset_path):\n \"\"\"\n Fit the intent classifier.\n\n :param dataset_path: The path to the dataset.\n :type dataset_path: str.\n \"\"\"\n\n self.dataset, self.categories = self.load_dataset(dataset_path)\n\n if self.dataset is None:\n raise ValueError(\"You must load the dataset first\")\n\n text = self.dataset[\"text\"]\n intent = self.dataset[\"intent\"]\n\n if self.feature_extractor is not None:\n self.feature_extractor.fit(dataset_path, self.intent_engine.stopwords)\n\n self.intent_engine.fit(text, intent)\n\n def multiple_predict(self, texts, threshold=0.2):\n \"\"\"\n Predict the intent of multiple texts.\n\n :param texts: The texts to 
predict.\n :type texts: list.\n :param threshold: The threshold to apply.\n :type threshold: float or dict.\n\n :return: The predictions.\n :rtype: dict.\n\n example: multiple_predict([\"hello\", \"how are you\"]) ->\n [{\"text\": \"hello\", \"intent\": \"greet\", \"probability\": 0.9},\n {\"text\": \"how are you\", \"intent\": \"greet\", \"probability\": 0.7}]\n\n Can also return the features if a feature extractor is provided:\n example: multiple_predict([\"hello\", \"how are you\"]) ->\n [{\"text\": \"hello\", \"intent\": \"greet\", \"probability\": 0.9,\n \"features\": [{\"word\": \"hello\", \"entity\": \"greet\", \"similarities\": 1}]},\n {\"text\": \"how are you\", \"intent\": \"greet\", \"probability\": 0.7,\n \"features\": [{\"word\": \"how\", \"entity\": \"greet\", \"similarities\": 1},\n {\"word\": \"are\", \"entity\": \"greet\", \"similarities\": 1},\n {\"word\": \"you\", \"entity\": \"greet\", \"similarities\": 1}]}]\n\n \"\"\"\n intents, probabilities = self.intent_engine.predict(texts)\n results = []\n for intent, probability, text in zip(intents, probabilities, texts):\n results.append({\"text\": text, \"intent\": intent, \"probability\": probability})\n\n if self.feature_extractor is None:\n return results\n\n for result in results:\n result[\"features\"] = self.feature_extractor.get_features(\n result[\"text\"], threshold\n )\n\n return results\n\n def predict(self, text, threshold=0.2):\n \"\"\"\n Predict the intent of a text.\n\n :param text: The text to predict.\n :type text: str.\n :param threshold: The threshold to apply.\n :type threshold: float.\n\n :return: The prediction.\n :rtype: dict.\n \"\"\"\n return self.multiple_predict([text], threshold)[0]\n","repo_name":"HectorPulido/peque-nlu","sub_path":"peque_nlu/intent_classifiers/model_intent_classifier.py","file_name":"model_intent_classifier.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44239307689","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nfrom spider.pixiv.pixiv_api import AppPixivAPI, PixivAPI\n\nCONFIG = json.load(open('config.json'))\n_USERNAME = CONFIG.get('username')\n_PASSWORD = CONFIG.get('password')\n_TEST_WRITE = False\n\n## If a special network environment is meet, please configure requests as you need.\n## Otherwise, just keep it empty.\n_REQUESTS_KWARGS = {\n # 'proxies': {\n # 'https': 'http://127.0.0.1:1087',\n # },\n # 'verify': False, # PAPI use https, an easy way is disable requests SSL verify\n}\n\n\n# AppAPI start\ndef appapi_illust(aapi):\n json_result = aapi.illust_detail(59580629)\n print(json_result)\n illust = json_result.illust\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n json_result = aapi.illust_comments(59580629)\n print(json_result)\n\n json_result = aapi.ugoira_metadata(51815717)\n print(json_result)\n metadata = json_result.ugoira_metadata\n print(\">>> frames=%d %s\" % (len(metadata.frames), metadata.zip_urls.medium))\n\n\ndef appapi_recommend(aapi):\n json_result = aapi.illust_recommended(bookmark_illust_ids=[59580629])\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.illust_recommended(**next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\" > %s, origin url: %s\" % (illust.title, 
illust.image_urls['large']))\n\n json_result = aapi.illust_related(59580629)\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.illust_related(**next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\" > %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\ndef appapi_users(aapi):\n json_result = aapi.user_detail(275527)\n print(json_result)\n user = json_result.user\n print(\"%s(@%s) region=%s\" % (user.name, user.account, json_result.profile.region))\n\n json_result = aapi.user_illusts(275527)\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.user_illusts(**next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\" > %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n json_result = aapi.user_bookmarks_illust(2088434)\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n json_result = aapi.user_following(7314824)\n print(json_result)\n user_preview = json_result.user_previews[0]\n print(\">>> %s(@%s)\" % (user_preview.user.name, user_preview.user.account))\n\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.user_following(**next_qs)\n # print(json_result)\n user_preview = json_result.user_previews[0]\n print(\" > %s(@%s)\" % (user_preview.user.name, user_preview.user.account))\n\n json_result = aapi.user_follower(275527)\n print(json_result)\n\n json_result = aapi.user_mypixiv(275527)\n print(json_result)\n\n\ndef appapi_search(aapi):\n first_tag = None\n response = aapi.trending_tags_illust()\n for trend_tag in response.trend_tags[:10]:\n if not first_tag:\n first_tag = trend_tag.tag\n print(\"%s - %s(id=%s)\" % (trend_tag.tag, trend_tag.illust.title, trend_tag.illust.id))\n\n json_result = aapi.search_illust(first_tag, search_target='partial_match_for_tags')\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.search_illust(**next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\ndef appapi_ranking(aapi):\n json_result = aapi.illust_ranking('day_male')\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.illust_ranking(**next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # past-week ranking as of 2016-07-15\n json_result = aapi.illust_ranking('week', date='2016-07-15')\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\ndef appapi_auth_api(aapi):\n json_result = aapi.illust_follow(req_auth=True)\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % 
(illust.title, illust.image_urls['large']))\n\n # get next page\n next_qs = aapi.parse_next_url_options(json_result.next_url)\n json_result = aapi.illust_follow(req_auth=True, **next_qs)\n # print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n json_result = aapi.illust_recommended(req_auth=True)\n print(json_result)\n illust = json_result.illusts[0]\n print(\">>> %s, origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\n# PAPI start\ndef migrate_rev2_to_papi(api):\n print(\">>> new ranking_all(mode='daily', page=1, per_page=50)\")\n # rank_list = api.sapi.ranking(\"all\", 'day', 1)\n rank_list = api.ranking_all('daily', 1, 50)\n print(rank_list)\n\n # more fields about response: https://github.com/upbit/pixivpy/wiki/sniffer\n ranking = rank_list.response[0]\n for img in ranking.works:\n # print img.work\n print(\"[%s/%s(id=%s)] %s\" % (img.work.user.name, img.work.title, img.work.id, img.work.image_urls.px_480mw))\n\n\ndef papi_base(api):\n # PAPI.works\n json_result = api.works(46363414)\n print(json_result)\n illust = json_result.response[0]\n print(\">>> %s, origin url: %s\" % (illust.caption, illust.image_urls['large']))\n\n # PAPI.users\n json_result = api.users(1184799)\n print(json_result)\n user = json_result.response[0]\n print(user.profile.introduction)\n\n\ndef papi_me(api):\n # PAPI.me_feeds\n json_result = api.me_feeds(show_r18=0)\n print(json_result)\n # work = json_result.response[0].ref_user.works[0]\n # print(work.title)\n\n # PAPI.me_favorite_works\n json_result = api.me_favorite_works(publicity='private')\n print(json_result)\n illust = json_result.response[0].work\n print(\"[%s] %s: %s\" % (illust.user.name, illust.title, illust.image_urls.px_480mw))\n\n # PAPI.me_following_works (New -> Follow)\n json_result = api.me_following_works()\n print(json_result)\n illust = json_result.response[0]\n print(\">>> %s, origin url: %s\" % (illust.caption, illust.image_urls['large']))\n\n if _TEST_WRITE:\n # PAPI.me_favorite_works_add\n json_result = api.me_favorite_works_add(ref_work.id, publicity='private')\n print(json_result)\n favorite_id = json_result.response[0].id\n print(\">>> Add favorite illust_id=%s success! 
favorite_id=%s\" % (ref_work.id, favorite_id))\n\n # PAPI.me_favorite_works_delete\n # json_result = api.me_favorite_works_delete([favorite_id, ...], publicity='private')\n json_result = api.me_favorite_works_delete(favorite_id, publicity='private')\n print(json_result)\n\n\ndef papi_me_user(api):\n # PAPI.me_following\n json_result = api.me_following()\n print(json_result)\n user = json_result.response[0]\n print(user.name)\n\n if _TEST_WRITE:\n # PAPI.me_favorite_users_follow\n user_id = 1184799\n json_result = api.me_favorite_users_follow(user_id)\n print(json_result)\n user = json_result.response[0].target_user\n print(user.name)\n\n # PAPI.me_favorite_users_unfollow\n json_result = api.me_favorite_users_unfollow(user_id)\n print(json_result)\n\n\ndef papi_user(api):\n # PAPI.users_works\n json_result = api.users_works(1184799)\n print(json_result)\n illust = json_result.response[0]\n print(\">>> %s, origin url: %s\" % (illust.caption, illust.image_urls['large']))\n\n # PAPI.users_favorite_works\n json_result = api.users_favorite_works(1184799)\n print(json_result)\n illust = json_result.response[0].work\n print(\">>> %s origin url: %s\" % (illust.caption, illust.image_urls['large']))\n\n # PAPI.users_feeds\n json_result = api.users_feeds(1184799, show_r18=0)\n print(json_result)\n ref_work = json_result.response[0].ref_work\n print(ref_work.title)\n\n # PAPI.users_following\n json_result = api.users_following(4102577)\n print(json_result)\n user = json_result.response[0]\n print(user.name)\n\n\ndef papi_ranking(api):\n # PAPI.ranking\n json_result = api.ranking('illust', 'weekly', 1)\n print(json_result)\n illust = json_result.response[0].works[0].work\n print(\">>> %s origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n # PAPI.ranking(2015-05-01)\n json_result = api.ranking(ranking_type='all', mode='daily', page=1, date='2015-05-01')\n print(json_result)\n illust = json_result.response[0].works[0].work\n print(\">>> %s origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\ndef papi_search(api):\n # PAPI.search_works\n json_result = api.search_works(\"五航戦 姉妹\", page=1, mode='text')\n # json_result = api.search_works(\"水遊び\", page=1, mode='exact_tag')\n print(json_result)\n illust = json_result.response[0]\n print(\">>> %s origin url: %s\" % (illust.title, illust.image_urls['large']))\n\n\ndef papi_others(api):\n # PAPI.latest_works (New -> Everyone)\n json_result = api.latest_works()\n print(json_result)\n illust = json_result.response[0]\n print(\">>> %s url: %s\" % (illust.title, illust.image_urls.px_480mw))\n\n\ndef old_main():\n # public-api\n api = PixivAPI(**_REQUESTS_KWARGS)\n api.login(_USERNAME, _PASSWORD)\n\n migrate_rev2_to_papi(api)\n\n papi_base(api)\n papi_me(api)\n papi_me_user(api)\n papi_user(api)\n papi_ranking(api)\n papi_search(api)\n papi_others(api)\n\n\ndef to_json_str(response):\n return json.dumps(response, ensure_ascii=False, indent=4)\n\n\ndef main():\n # app-api\n pixiv_api = AppPixivAPI(**_REQUESTS_KWARGS)\n pixiv_api.login(_USERNAME, _PASSWORD)\n # response = pixiv_api.user_detail(14674239)\n # response = pixiv_api.illust_detail(61446920)\n response = pixiv_api.illust_ranking('day')\n print(to_json_str(response))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"youyouzh/PythonPractice","sub_path":"spider/pixiv/crawler/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":11022,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} 
+{"seq_id":"40508204974","text":"class Solution:\n def move_zeros(self, nums: list):\n n = len(nums)\n i, j = 0, 0\n\n while j < n:\n if nums[j] != 0:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n else:\n j += 1\n return nums\nif __name__ == '__main__':\n s = Solution()\n nums = [0, 1, 0, 3, 12]\n print(s.move_zeros(nums))","repo_name":"Yonoi/partner","sub_path":"msun/others/283.py","file_name":"283.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73161117852","text":"# grid_lines.py\n# takes 'raw' ais line segments and breaks them into gridded cells and attributes each new or existing line with a grid id.\n# to do this it also recalculates the speed and duration (based of proportion of line)\n\n# steps\n#\n\nimport logging\nfrom datetime import datetime\nimport psycopg2 as pg\n\nimport auth_class\n\n# establish database connection\nconn = pg.connect(host=auth_class.login.host,\n port=auth_class.login.port,\n dbname=auth_class.login.db,\n user=auth_class.login.user,\n password=auth_class.login.pw,\n options='-c search_path=dbo,' + str(auth_class.login.schem)) # sets schema to public\n\n\n\ndef main():\n global conn\n\n start_time = datetime.now()\n\n datelist = []\n datelist = datefinder(conn)\n print(datelist)\n\n # not going to loop through dates yet while i'm testing\n temper(conn) # might not even need this and do away with the temp table?? --> might be useful to keep for testing, though.\n\n the_intersector(conn)\n\n len_sog(conn)\n\n # select duplicate row id's and make a sub-id with decimal\n data_caboose(conn)\n\n # STILL NEED TO DUMP TEMP INTO FINAL TABLE\n data_finalizer(conn)\n\n # probably don't need this but this closes out the cursor (and saves changes)- probably just good practice\n conn.commit()\n conn.close()\n\n now = datetime.now()\n duration = (now - start_time)\n\n print(duration)\n\ndef the_intersector(conn):\n # moves the selected data from the main database into the temp database\n cursor = conn.cursor()\n\n sql = (\n 'INSERT INTO ' + auth_class.login.tempDb + '(segmentid, uid, mmsi, starttime, duration, isclassa, classais, classgen, name, isunique, lastchange, lenm, sogkt, inter, id1km, id2km, id4km, id8km) ' +\n 'SELECT l.segmentid, l.uid, l.mmsi, l.starttime, l.duration, l.isclassa, l.classais, l.classgen, l.name, l.isunique, l.lastchange, l.lenm, l.sogkt, ' +\n 'ST_INTERSECTION(l.geom, c.geom) AS inter, c.id_1km AS id1km, c.id_2km as id2km, c.id_4km as id4km, c.id_8km as id8km ' +\n 'FROM ' + auth_class.login.inputDb + ' AS l, ' + auth_class.login.gridDb + ' AS c WHERE ST_INTERSECTS(l.geom, c.geom)'\n )\n\n cursor.execute(sql)\n\ndef len_sog(conn):\n # recalculates the length of each segment (now that some are cut) and recalculates the duration accordingly based on sog and length\n cursor = conn.cursor()\n\n # recalculates the length--> a couple things to note. 
This calculates CARTESIAN by default AND the units based on the srid (currently 3005).\n # use: ST_Distance_Sphere to calculate spherical if desired\n sql = 'UPDATE ' + auth_class.login.tempDb + ' SET lenm = ST_LENGTH(inter)'\n cursor.execute(sql)\n\n # recalculates the duration of the segment based on the newly calculated length and the originally calculated speed over ground\n # the equation for this is: lenm [length in meters] / (sog[kts]*0.514) = duration [in seconds]\n sql = 'UPDATE ' + auth_class.login.tempDb + ' SET duration = (lenm / (sogkt*0.514))'\n cursor.execute(sql)\n\n\ndef temper(conn):\n # this functionality checks if a temp folder already exists and drops the existing one if it does -- regardless the script will create a blank temp folder\n exists = False\n try:\n cursor = conn.cursor()\n cursor.execute(\"select exists(select relname from pg_class where relname='\" + auth_class.login.tempDb + \"')\")\n # turns exists to True if a temp table already exists in the database\n exists = cursor.fetchone()[0]\n finally:\n pass\n\n # deletes existing temp table if exists is TRUE\n if exists == True:\n cursor = conn.cursor()\n sql = 'DROP TABLE ' + auth_class.login.tempDb\n cursor.execute(sql)\n else:\n pass\n\n # create temp table in database\n cursor = conn.cursor()\n sql = ('CREATE TABLE ' + auth_class.login.tempDb + ' ' +\n '(newid VARCHAR(50),' +\n 'segmentId BIGINT,' +\n 'uid BIGINT NOT NULL,' +\n 'mmsi INT NOT NULL,' +\n 'startTime TIMESTAMP WITHOUT TIME ZONE NOT NULL,' +\n 'duration INT NOT NULL,' +\n 'isClassA BOOL NOT NULL,' +\n 'classAIS SMALLINT NOT NULL,' +\n 'classGen SMALLINT NOT NULL,' +\n 'name VARCHAR(20),' +\n 'isUnique BOOL NOT NULL,' +\n 'lastChange TIMESTAMP WITHOUT TIME ZONE NOT NULL,' +\n 'lenM FLOAT,' +\n 'sogKt FLOAT,' +\n 'inter GEOMETRY(LineString,3005),' +\n 'id1km VARCHAR(50),' +\n 'id2km VARCHAR(50),' +\n 'id4km VARCHAR(50),' +\n 'id8km VARCHAR(50))'\n )\n\n cursor.execute(sql)\n\n\ndef datefinder(conn):\n # select dates that occur within the segments being worked on. This keeps the temp smaller and compartmentalizes the work a little bit\n cursor = conn.cursor()\n # sql = \"SELECT starttime FROM \" + auth_class.login.inputDb + \" WHERE segmentid IN ({})\".format(str(segList)[1:-1])\n sql = \"SELECT starttime FROM \" + auth_class.login.inputDb\n cursor.execute(sql)\n result = cursor.fetchall()\n\n # loop to convert datetime to date in a list\n datelist = []\n x = 0\n for i in result:\n datelist.append(result[x][0].strftime(\"'%Y-%m-%d'\"))\n x += 1\n\n # only keeps unique dates to prevent unnecessary loops\n datelist = set(datelist)\n\n return datelist\n\n\ndef data_caboose(conn):\n # this function adds the associated cwsid1km (smallest resolution) to the segment id which will result in a totally unique id. Downside is that it is varchar.. 
so might be worth redoing this later.\n cursor = conn.cursor()\n sql = \"UPDATE \" + auth_class.login.tempDb + \" SET newid = (segmentid || id1km)\"\n cursor.execute(sql)\n\ndef data_finalizer(conn):\n cursor = conn.cursor()\n # check if table exists and if not creates a new output table\n sql = ('CREATE TABLE IF NOT EXISTS ' + auth_class.login.outputDb +\n ' (newid VARCHAR(50),' +\n 'segmentId BIGINT,' +\n 'uid BIGINT NOT NULL,' +\n 'mmsi INT NOT NULL,' +\n 'startTime TIMESTAMP WITHOUT TIME ZONE NOT NULL,' +\n 'duration INT NOT NULL,' +\n 'isClassA BOOL NOT NULL,' +\n 'classAIS SMALLINT NOT NULL,' +\n 'classGen SMALLINT NOT NULL,' +\n 'name VARCHAR(20),' +\n 'isUnique BOOL NOT NULL,' +\n 'lastChange TIMESTAMP WITHOUT TIME ZONE NOT NULL,' +\n 'lenM FLOAT,' +\n 'sogKt FLOAT,' +\n 'inter GEOMETRY(LineString,3005),' +\n 'id1km VARCHAR(50),' +\n 'id2km VARCHAR(50),' +\n 'id4km VARCHAR(50),' +\n 'id8km VARCHAR(50),'\n 'UNIQUE(newid))'\n )\n cursor.execute(sql)\n\n # inserts data from the temporary table into the new main table -- does an upsert that skips on conflict with NEWID\n sql = (\"INSERT INTO \" + auth_class.login.outputDb + \" (newid,segmentid,uid,mmsi,starttime,duration,isclassa,classais,classgen,name,isunique,lastchange,lenm,sogkt,inter,id1km,id2km,id4km,id8km)\" +\n \" SELECT newid,segmentid,uid,mmsi,starttime,duration,isclassa,classais,classgen,name,isunique,lastchange,lenm,sogkt,inter,id1km,id2km,id4km,id8km FROM \" + auth_class.login.tempDb +\n \" ON CONFLICT (newid) DO NOTHING\")\n cursor.execute(sql)\n\nif __name__ == ('__main__'):\n main()\n\n","repo_name":"SpacecraftSI/posting_fishnet","sub_path":"grid_lines.py","file_name":"grid_lines.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30020801407","text":"\ndef grant_the_hint(txt):\n s=[]\n for k in range(-1, max([len(i) for i in txt.split()])):\n l=[]\n for i in txt.split():\n d=\"\"\n for j in range(len(i)):\n if j > k:\n d+='_'\n else:\n d+=i[j]\n l.append(d)\n s.append(\" \".join(l))\n return s\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"vudQZFD64nDWkKz8a_4.py","file_name":"vudQZFD64nDWkKz8a_4.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11102257464","text":"# insertion sort\n\ndef insertion_sort(arr): \n \n # Traverse through 1 to len(arr) \n for i in range(1, len(arr)): \n \n key = arr[i] \n \n # Move elements of arr[0..i-1], that are \n # greater than key, to one position ahead \n # of their current position \n position = i-1\n while position >= 0 and key < arr[position] : \n arr[position + 1] = arr[position] \n position -= 1\n arr[position + 1] = key \n\nnlist = [5,2,4,6,1,3]\ninsertion_sort(nlist)\nfor i in range(len(nlist)):\n\tprint(nlist[i])\n","repo_name":"kumarUjjawal/ds-and-algo-in-python","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21766985730","text":"from pathlib import Path\n\nimport computer_vision as code_under_test\nimport touca\nfrom roboflow.util.prediction import Prediction\n\ntouca.add_serializer(Prediction, lambda x: x.json())\n\n\ndef find_testcases():\n for file in sorted(Path(\"images\").glob(\"*.jpg\")):\n yield file.stem\n\n\n@touca.workflow(testcases=find_testcases)\ndef hard_hats(filename: 
str):\n input_file = Path(\"images\").joinpath(filename).with_suffix(\".jpg\")\n output_file = Path(\"out\").joinpath(filename).with_suffix(\".jpg\")\n\n with touca.scoped_timer(\"predict\"):\n outcome = code_under_test.model.predict(str(input_file))\n output_file.parent.mkdir(exist_ok=True)\n outcome.save(str(output_file))\n\n touca.check(\"outcome\", outcome)\n touca.check_file(\"input_file\", input_file)\n touca.check_file(\"output_file\", output_file)\n","repo_name":"trytouca/trytouca","sub_path":"examples/python/06_python_computer_vision/computer_vision_test.py","file_name":"computer_vision_test.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"32"} +{"seq_id":"31945520800","text":"\"\"\"Compile assets to minified bundles using Flask-Assets.\"\"\"\n\nfrom flask import current_app as app\nfrom flask_assets import Bundle\n\n\ndef compile_assets(assets):\n \"\"\"Configure and build bundles.\"\"\"\n # Shared bundle\n shared_js_bundle = Bundle(\n \"src/js/search.js\", filters=\"rjsmin\", output=\"dist/js/shared.min.js\"\n )\n # League-page bundle\n league_js_bundle = Bundle(\n \"league_bp/src/js/awards.js\",\n \"league_bp/src/js/compare/*.js\",\n filters=\"rjsmin\",\n output=\"dist/js/league.min.js\",\n )\n # Player-page bundle\n player_js_bundle = Bundle(\n \"players_bp/src/js/base.js\",\n \"players_bp/src/js/directory.js\",\n \"players_bp/src/js/player/*.js\",\n filters=\"rjsmin\",\n output=\"dist/js/players.min.js\",\n )\n # Team bundle\n team_js_bundle = Bundle(\n \"teams_bp/src/js/*.js\", filters=\"rjsmin\", output=\"dist/js/teams.min.js\"\n )\n # Game bundle\n game_js_bundle = Bundle(\n \"game_bp/src/js/*.js\", filters=\"rjsmin\", output=\"dist/js/game.min.js\"\n )\n # Register bundles\n assets.register(\"shared_js\", shared_js_bundle)\n assets.register(\"league_js\", league_js_bundle)\n assets.register(\"player_js\", player_js_bundle)\n assets.register(\"teams_js\", team_js_bundle)\n assets.register(\"game_js\", game_js_bundle)\n # Build\n if app.config[\"FLASK_ENV\"] == \"development\":\n shared_js_bundle.build()\n league_js_bundle.build()\n player_js_bundle.build()\n team_js_bundle.build()\n game_js_bundle.build()\n","repo_name":"ak-gupta/nbaspa-app","sub_path":"nbaspa_app/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14820154881","text":"'''\nfunctions to create nodes in python\nand saving the files to HDF5 tile.\n'''\n# program imports.\nfrom data_structs.types import node_dt\n\n# system imports.\nimport numpy as np\nimport logging\nimport h5py\nimport sys\n\n############## functions.##################\n\ndef create_lookup(nodes):\n\t''' creates lookup dictionary '''\n\t# loop over names.\n\tlookup = {}\n\t\n\tfor node in nodes:\n\t\tlookup[node['ctg_name']] = node['node_idx']\n\t\t\n\treturn lookup\n\t\n\ndef load_nodes(file_path):\n\t''' loads nodes from h5py file.'''\n\t\n\t'''\n\t# open the file read only.\n\tf = h5py.File(file_path, 'r')\n\t\n\t# load data into memory.\n\tdata = f['nodes'][:]\n\t'''\n\t\n\t# load files into memory.\n\tfin = open(file_path, \"rb\")\n\tlines = fin.readlines()\n\tfin.close()\n\t\n\t# create numpy array.\n\tdata = np.zeros(len(lines), dtype=node_dt)\n\t\n\t# populate it.\n\ti = 0\n\tfor line in lines:\n\t\tline = line.split(\"\\t\")\n\t\tdata[i]['node_idx'] = int(line[0])\n\t\tdata[i]['ctg_name'] = 
line[1]\n\t\tdata[i]['ctg_width'] = int(line[2])\n\t\tdata[i]['ctg_orien'] = int(line[3])\n\t\tdata[i]['ctg_order'] = int(line[4])\n\t\tdata[i]['invalid'] = int(line[5])\n\t\ti += 1\n\t\t\n\t# return data.\n\treturn data\n\t\t\n\ndef save_nodes(nodes, file_path):\n\t''' save node array to hdf5 file.'''\n\tlogging.error(\"not supported\")\n\tsys.exit(1)\n\t\n\t'''\n\t# open the file.\n\tf = h5py.File(file_path, 'w')\n\t\n\t# save the dataset.\n\tf.create_dataset('nodes', data=nodes)\n\t\n\t# close the file.\n\tf.close()\n\t'''\n","repo_name":"jim-bo/SINAH","sub_path":"SINAH/scripts/data_structs/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7184022377","text":"\nfrom price_tracker import db\n\ntrackers = db.Table('trackers',\n db.Column('uid', db.Integer(), db.ForeignKey('user.id')),\n db.Column('pid', db.Integer(), db.ForeignKey('product.id'))\n\n)\n\nclass User(db.Model) :\n id = db.Column(db.Integer, primary_key = True)\n username = db.Column(db.String(20), default='user')\n email = db.Column(db.String(50), unique=True, nullable=False)\n password = db.Column(db.String(100), nullable=False)\n\n prods_list = db.relationship('Product',secondary='trackers',\n backref=db.backref('users', lazy=True))\n\n\n def __repr__(self) -> str:\n return f'User : {self.email}'\n\n\nclass Product(db.Model) :\n\n id = db.Column(db.Integer, primary_key = True)\n img_id = db.Column(db.String(50), unique=True, nullable=False)\n url = db.Column(db.String(50), unique=True, nullable=False)\n title = db.Column(db.String(120), nullable=False)\n mrp = db.Column(db.Integer)\n current = db.Column(db.Integer)\n\n def __repr__(self) -> str:\n return f'{self.title}'\n\n\n\n\n\n","repo_name":"screddy1313/amazon-price-tracker","sub_path":"price_tracker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1693100050","text":"from urllib import request\n\n# without a proxy\n# url = 'http://httpbin.org/ip'\n# resp = request.urlopen(url)\n# print(resp.read())\n\n# with a proxy\nurl = 'http://httpbin.org/ip'\n# 1. build a handler with ProxyHandler, passing in the proxy\nhandler = request.ProxyHandler({\"HTTP\":\"117.87.178.236:9000\"})\n# 2. build an opener from the handler created above\nopener = request.build_opener(handler)\n# 3. use the opener to send a request\nresp = opener.open(url)\nprint(resp.read())\n","repo_name":"Mocha-Pudding/Scrapy-Redis_Demos","sub_path":"Demo_ProxyHandler/ProxyHandler.py","file_name":"ProxyHandler.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"zh","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"73414595612","text":"'''This code is modified from https://github.com/NVlabs/DeepInversion/blob/master/deepinversion.py'''\n\nimport os\nimport sys\nimport shutil\nimport numpy as np\nimport time, datetime\nimport torch\nimport random\nimport logging\nimport argparse\nimport torch.nn as nn\nimport torch.utils\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.utils.data.distributed\nimport torch.optim as optim\nimport torchvision.models as models\nimport collections\nimport random\n\n\nfrom utils import *\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom resnet import build_resnet\n\nclass BNFeatureHook():\n\n def __init__(self, module):\n self.hook = 
module.register_forward_hook(self.hook_fn)\n\n\n def hook_fn(self, module, input, output):\n\n nch = input[0].shape[1]\n mean = input[0].mean([0, 2, 3])\n var = input[0].permute(1, 0, 2, 3).contiguous().reshape([nch, -1]).var(1, unbiased=False)\n r_feature = torch.norm(module.running_var.data - var, 2) + torch.norm(\n module.running_mean.data - mean, 2)\n self.r_feature = r_feature\n\n def close(self):\n self.hook.remove()\n\ndef get_image_prior_losses(inputs_jit):\n\n diff1 = inputs_jit[:, :, :, :-1] - inputs_jit[:, :, :, 1:]\n diff2 = inputs_jit[:, :, :-1, :] - inputs_jit[:, :, 1:, :]\n diff3 = inputs_jit[:, :, 1:, :-1] - inputs_jit[:, :, :-1, 1:]\n diff4 = inputs_jit[:, :, :-1, :-1] - inputs_jit[:, :, 1:, 1:]\n\n loss_var_l2 = torch.norm(diff1) + torch.norm(diff2) + torch.norm(diff3) + torch.norm(diff4)\n loss_var_l1 = (diff1.abs() / 255.0).mean() + (diff2.abs() / 255.0).mean() + (\n diff3.abs() / 255.0).mean() + (diff4.abs() / 255.0).mean()\n loss_var_l1 = loss_var_l1 * 255.0\n\n return loss_var_l1, loss_var_l2\n\n\ndef get_images(gpu_id, num_generations, args, model_student, model_teacher, criterion, criterion_recur, hook_for_display, jitter=32, targets_in=None):\n print(\"get_images call\")\n\n model_student.eval()\n save_every = 100\n batch_size = args.batch_size\n\n kl_loss = nn.KLDivLoss(reduction='batchmean').cuda()\n best_cost = 1e4\n\n loss_r_feature_layers = []\n for module in model_teacher.modules():\n if isinstance(module, nn.BatchNorm2d):\n loss_r_feature_layers.append(BNFeatureHook(module))\n\n # setup target labels\n if targets_in == None:\n targets_all = torch.LongTensor(np.random.permutation(1000))\n else:\n targets_all = targets_in\n\n targets_new = None\n\n for kk in range(0, 1000, batch_size):\n\n targets = targets_all[kk:min(kk+batch_size,1000)].to('cuda')\n\n img_resize = 256\n img_crop = 224\n\n data_type = torch.float\n inputs = torch.randn((targets.shape[0], 3, img_resize, img_resize), requires_grad=True, device='cuda',\n dtype=data_type)\n pooling_function = nn.modules.pooling.AvgPool2d(kernel_size=2)\n\n skipfirst = False\n\n iteration = 0\n for lr_it, lower_res in enumerate([2, 1]):\n if lr_it==0:\n iterations_per_layer = 2000\n else:\n iterations_per_layer = 3000\n\n lim_0, lim_1 = jitter // lower_res, jitter // lower_res\n img_size = img_crop // lower_res\n\n optimizer = optim.Adam([inputs], lr=args.lr, betas=[0.5, 0.9], eps = 1e-8)\n\n lr_scheduler = lr_cosine_policy(args.lr, 100, iterations_per_layer)\n\n for iteration_loc in range(iterations_per_layer):\n iteration += 1\n # learning rate scheduling\n lr_scheduler(optimizer, iteration_loc, iteration_loc)\n\n # perform downsampling if needed\n if lower_res != 1:\n inputs_jit0 = pooling_function(inputs)\n else:\n inputs_jit0 = inputs\n\n # apply random jitter offsets\n off1 = random.randint(0, lim_0)\n off2 = random.randint(0, lim_1)\n inputs_jit = inputs_jit0[:, :, off1 : off1+img_size, off2 : off2+img_size]\n\n\n # Flipping\n flip = random.random() > 0.5\n inputs_jit = torch.flip(inputs_jit, dims=(3,))\n\n # forward pass\n optimizer.zero_grad()\n model_teacher.zero_grad()\n\n outputs = model_teacher(inputs_jit)\n\n # R_cross classification loss\n if targets_in == None:\n loss = criterion(outputs, targets)\n else:\n loss = criterion_recur(outputs, targets)\n\n # R_prior losses\n loss_var_l1, loss_var_l2 = get_image_prior_losses(inputs_jit)\n\n # R_feature loss\n\n rescale = [args.first_bn_multiplier] + [1. 
for _ in range(len(loss_r_feature_layers)-1)]\n loss_r_feature = sum([mod.r_feature * rescale[idx] for (idx, mod) in enumerate(loss_r_feature_layers)])\n\n\n # l2 loss on images\n loss_l2 = torch.norm(inputs_jit.reshape(batch_size, -1), dim=1).mean()\n\n # combining losses\n loss_aux = args.tv_l2 * loss_var_l2 + \\\n args.tv_l1 * loss_var_l1 + \\\n args.r_feature * loss_r_feature + \\\n args.l2_scale * loss_l2\n\n loss = args.main_loss_multiplier * loss + loss_aux\n\n if iteration % save_every==0:\n print(\"------------iteration {}----------\".format(iteration))\n print(\"total loss\", loss.item())\n print(\"loss_r_feature\", loss_r_feature.item())\n if targets_in == None:\n print(\"main criterion\", criterion(outputs, targets).item())\n if hook_for_display is not None:\n hook_for_display(inputs, targets)\n else:\n print(\"main criterion\", criterion_recur(outputs, targets).item())\n targets_max, indices = targets.max(1)\n if hook_for_display is not None:\n hook_for_display(inputs, indices)\n\n # do image update\n loss.backward()\n\n optimizer.step()\n\n # clip color outlayers\n inputs.data = clip(inputs.data)\n\n if best_cost > loss.item() or iteration == 1:\n best_inputs = inputs.data.clone()\n\n if args.store_best_images:\n best_inputs = denormalize(best_inputs)\n save_images(args, gpu_id, num_generations, best_inputs, targets)\n\n logits = model_teacher(inputs_jit)\n if targets_new == None:\n targets_new = logits.data\n else:\n targets_new = torch.cat((targets_new, logits.data), 0)\n\n # to reduce memory consumption by states of the optimizer we deallocate memory\n optimizer.state = collections.defaultdict(dict)\n\n torch.cuda.empty_cache()\n return targets_new\n\ndef save_images(args, gpu_id, num_generations, images, targets):\n # method to store generated images locally\n local_rank = torch.cuda.current_device()\n for id in range(images.shape[0]):\n if targets.ndimension() == 1:\n class_id = targets[id].item()\n else:\n class_id = targets[id].argmax().item()\n #if 0:\n if not os.path.exists(args.final_data_path):\n os.mkdir(args.final_data_path)\n store_path = args.final_data_path + '/new_class_' + str(class_id) + '/'\n if not os.path.exists(store_path):\n os.mkdir(store_path)\n\n place_to_store = store_path + 'ei_' + str(gpu_id) + str(num_generations) + '_' + str(id) + '.jpg'\n\n image_np = images[id].data.cpu().numpy().transpose((1, 2, 0))\n pil_image = Image.fromarray((image_np * 255).astype(np.uint8))\n pil_image.save(place_to_store)\n\ndef validate(input, target, model):\n\n def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n with torch.no_grad():\n output = model(input)\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n print(\"Verifier accuracy: \", prec1.item())\n\ndef main_syn(targets_new=None):\n\n parser = argparse.ArgumentParser(\"resnet18\")\n parser.add_argument('-s', '--worldsize', type=int, default=1, help='Number of processes participating in the job.')\n parser.add_argument('--local_rank', '--rank', type=int, default=0, help='Rank of the current process.')\n parser.add_argument('--jitter', default=32, type=int, help='random shift')\n parser.add_argument('--comment', default='', type=str, help='batch size')\n parser.add_argument('--arch_name', 
default='resnet50', type=str, help='model name from torchvision or resnet50v15')\n\n parser.add_argument('--fp16', action='store_true', help='use FP16 for optimization')\n parser.add_argument('--exp_name', type=str, default='test', help='where to store experimental data')\n\n parser.add_argument('--verifier', action='store_true', help='evaluate batch with another model')\n parser.add_argument('--verifier_arch', type=str, default='mobilenet_v2', help = \"arch name from torchvision models to act as a verifier\")\n\n parser.add_argument('--r_feature', type=float, default=0.05, help='coefficient for feature distribution regularization')\n parser.add_argument('--first_bn_multiplier', type=float, default=10., help='additional multiplier on first bn layer of R_feature')\n parser.add_argument('--tv_l1', type=float, default=0.0, help='coefficient for total variation L1 loss')\n parser.add_argument('--tv_l2', type=float, default=0.0001, help='coefficient for total variation L2 loss')\n parser.add_argument('--lr', type=float, default=0.1, help='learning rate for optimization')\n parser.add_argument('--l2_scale', type=float, default=0.00001, help='l2 loss on the image')\n parser.add_argument('--main_loss_multiplier', type=float, default=1.0, help='coefficient for the main loss in optimization')\n parser.add_argument('--batch_size', type=int, default=64, help='batch size')\n parser.add_argument('--epochs', type=int, default=256, help='num of training epochs')\n parser.add_argument('--final_data_path', type=str, default='../final_images', help='num of training epochs')\n parser.add_argument('--store_best_images', action='store_true', help='save best images as separate files')\n args = parser.parse_args()\n\n\n model_teacher = build_resnet(\"resnet50\", \"classic\", 1000)\n #model_teacher = models.__dict__[args.arch_name](pretrained=True)\n #model_teacher = model_teacher.cuda()\n model_teacher = nn.DataParallel(model_teacher).cuda()\n model_teacher.eval()\n\n checkpoint_tar = './models/checkpoint.pth.tar'\n checkpoint = torch.load(checkpoint_tar)\n model_teacher.load_state_dict(checkpoint['state_dict'])\n\n model_student = models.__dict__['resnet18'](pretrained=True)\n model_student = model_student.cuda()\n model_student.eval()\n\n model_verifier = models.__dict__[args.verifier_arch](pretrained=True)\n model_verifier = model_verifier.cuda()\n model_verifier.eval()\n\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n #criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)\n #criterion_smooth = criterion_smooth.cuda()\n criterion_recur = CrossEntropyRecursiveLabel()\n criterion_recur = criterion_recur.cuda()\n\n exp_name = \"generations/%s\"%args.exp_name\n hook_for_display = lambda x,y: validate(x, y, model_verifier)\n\n targets_new = get_images(0, i, args, model_student, model_teacher, criterion, criterion_recur, hook_for_display, targets_in=targets_new)\n\n return targets_new\n\nif __name__ == '__main__':\n i = 0\n targets_new = main_syn()\n for i in range(1,10):\n print('epoch:', i)\n targets_new = main_syn(targets_new)\n\n\n\n","repo_name":"liuzechun/Data-Free-NAS","sub_path":"Image_Synthesis/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12321,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"30074663277","text":"\"\"\"\r\n\n\nIn this challenge, establish which type of constrained writing is applied to a\nsentence. 
There are four possible types to detect:\n\n * **Pangram** : the sentence contains all the 26 letters of the English alphabet.\n * **Heterogram** : the sentence doesn't have multiple instances of its letters (as to say that every letter is unique).\n * **Tautogram** : every word of the sentence starts with the same letter.\n * **Transgram** : all words of the sentence share at least a common letter.\n\nGiven a string `txt` being a sentence, implement a function that returns the\nstrings `\"Pangram\"`, `\"Heterogram\"`, `\"Tautogram\"` or `\"Transgram\"`\naccording to the above definitions and following the same given order to\nestablish the result. If no constrained writing is detected, return the string\n`\"Sentence\"`.\n\n### Examples\n\n constraint(\"The quick brown fox jumps over the lazy dog.\") ➞ \"Pangram\"\n # The sentence contains every letter of the alphabet.\n # Repetitions are not considered.\n \n constraint(\"The big dwarf only jumps.\") ➞ \"Heterogram\"\n # The sentence has only unique characters.\n \n constraint(\"Todd told Tom to take the tiny turtles.\") ➞ \"Tautogram\"\n # Every word starts with the letter \"t\".\n \n constraint(\"A cannibal alligator has attacked an unaware vegan alligator.\") ➞ \"Transgram\"\n # Every word contains the letter \"a\".\n \n constraint(\"The unbearable lightness of coding...\") ➞ \"Sentence\"\n # No constraint is applied to the sentence.\n\n### Notes\n\n * Remember to respect the given order to establish the result: a **Pangram** has to be detected before a **Heterogram** , and a **Tautogram** has to be detected before a **Transgram**.\n * Sentences will contain letters (either uppercase or lowercase) and punctuation. Your function must be case-insensitive.\n\n\"\"\"\r\n\nimport re\nconstraint = lambda s: ['Pangram', 'Heterogram', 'Tautogram', 'Transgram', 'Sentence'][[\n is_pangram(s), is_heterogram(s), is_tautogram(s), is_transgram(s), True].index(True)]\nto_alpha = lambda w: re.sub(r'[^a-z]', '', w.lower())\nto_words = lambda t: re.sub(r'[^a-z ]', '', t.lower()).split(' ')\nis_pangram = lambda t: len(set(to_alpha(t))) == 26\nis_heterogram = lambda t: len(set(to_alpha(t))) == len(to_alpha(t))\nis_tautogram = lambda t: all(x[0] == to_words(t)[0][0] for x in to_words(t)[1:])\nis_transgram = lambda t: bool(set.intersection(*(set(w) for w in to_words(t))))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"JmyD5D4KnhzmMPEKz_14.py","file_name":"JmyD5D4KnhzmMPEKz_14.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70688166813","text":"from algosdk import account, mnemonic\nfrom algosdk.future import transaction\nfrom utilities import CommonFunctions\n\n\n# Declare application state storage (immutable)\nlocal_ints = 1\nlocal_bytes = 1\nglobal_ints = 5\nglobal_bytes = 5\nglobal_schema = transaction.StateSchema (global_ints, global_bytes)\nlocal_schema = transaction.StateSchema (local_ints, local_bytes)\n\n\n# Declare the approval program\napproval_program_source_initial =b\"\"\"#pragma version 6\ntxn ApplicationID\nint 0\n==\nbnz main_l6\ntxn OnCompletion\nint NoOp\n==\nbnz main_l3\nerr\nmain_l3:\nglobal GroupSize\nint 4\n==\ntxna ApplicationArgs 0\nbyte \"Start the transaction\"\n==\n&&\nbnz main_l5\nerr\nmain_l5:\nint 1\nreturn\nmain_l6:\nint 1\nreturn\n\"\"\"\n\n# Declare clear state program source\nclear_program_source = b\"\"\"#pragma version 6\nint 1\n\"\"\"\n\n\n# create application\ndef 
create_app(client, passphrase):\n print (\"Creating application...\")\n\n # declare accounts\n private_key = mnemonic.to_private_key (passphrase)\n address = account.address_from_private_key (private_key)\n\n # compile the smart contracts\n approval_program = CommonFunctions.compile_program (client, approval_program_source_initial)\n clear_program = CommonFunctions.compile_program (client, clear_program_source)\n\n on_complete = transaction.OnComplete.NoOpOC.real\n\n # get node suggested parameters\n params = client.suggested_params ()\n\n txn = transaction.ApplicationCreateTxn (address, params, on_complete,\n approval_program, clear_program,\n global_schema, local_schema)\n\n # sign transaction\n print (\"Signing Transaction!\")\n signed_txn = txn.sign (private_key)\n tx_id = signed_txn.transaction.get_txid ()\n\n try:\n # send the transaction to the network\n client.send_transactions ([signed_txn])\n\n # await confirmation\n CommonFunctions.wait_for_confirmation (client, tx_id)\n transaction_response = client.pending_transaction_info (tx_id)\n application_id = transaction_response['application-index']\n print (\"Created new app id: \", application_id)\n\n return application_id\n\n except Exception as error:\n print (error)\n\n\n# group transactions\ndef app_call(client, passphrase, index):\n print (f\"Calling {index} application...\")\n\n # declare sender\n private_key = mnemonic.to_private_key (passphrase)\n sender = account.address_from_private_key (private_key)\n\n # get node suggested parameters\n params = client.suggested_params ()\n\n # declare references\n seller = \"OLZBY2R7PFJN3DQ2JBLMT65O7IBZ4ARIFFUCRMZTVOCZ65JT53LD3UCIII\"\n commission = \"MO4CBXFLCK76E6VQJ3OLGS33ARGML2V2RGORIYNUSTJK4GVDKKL7LJDI3M\"\n asset_id = 89120887\n\n # declare amount\n payment_amount = 100_000\n req_list = [CommonFunctions.percentage (90, payment_amount),\n CommonFunctions.percentage (10, payment_amount)]\n\n # Transaction 1: Application Call\n app_args = [\"Start the transaction\"]\n txn_1 = transaction.ApplicationNoOpTxn(sender, params, index, app_args)\n\n # Transaction 2: Payment to owner\n txn_2 = transaction.PaymentTxn(sender, params, seller, req_list[0])\n\n # Transaction 3: Payment for commission\n txn_3 = transaction.PaymentTxn(sender, params, commission, req_list[1])\n\n # Transaction 4: Asset Optin\n txn_4 = transaction.AssetTransferTxn(sender=sender, sp=params, index=asset_id, receiver=sender, amt=0)\n\n print(\"Grouping transactions...\")\n # compute group id and put it into each transaction\n group_id = transaction.calculate_group_id([txn_1, txn_2, txn_3, txn_4])\n print(\"...computed groupId: \", group_id)\n txn_1.group = group_id\n txn_2.group = group_id\n txn_3.group = group_id\n txn_4.group = group_id\n\n print(\"Splitting unsigned transaction group...\")\n # sign transactions\n print(\"Signing transactions...\")\n stxn_1 = txn_1.sign(private_key)\n print(\"...account1 signed txn_1: \", stxn_1.get_txid())\n stxn_2 = txn_2.sign(private_key)\n print(\"...account2 signed txn_2: \", stxn_2.get_txid())\n stxn_3 = txn_3.sign(private_key)\n print(\"...account2 signed txn_3: \", stxn_3.get_txid())\n stxn_4 = txn_4.sign(private_key)\n print(\"...account2 signed txn_4: \", stxn_4.get_txid())\n\n # assemble transaction group\n print(\"Assembling transaction group...\")\n signedGroup = [stxn_1, stxn_2, stxn_3, stxn_4]\n\n # send transactions\n print(\"Sending transaction group...\")\n tx_id = client.send_transactions(signedGroup)\n\n # wait for confirmation\n\n confirmed_txn = 
CommonFunctions.wait_for_confirmation(client, tx_id)\n print(\"txID: {}\".format(tx_id), \" confirmed in round: {}\".format(\n confirmed_txn.get(\"confirmed-round\", 0)))\n\n return tx_id\n\n\nif __name__ == \"__main__\":\n\n # Connect to algorand client\n algod_client = CommonFunctions.algo_conn()\n\n # Define account\n account_mnemonics = \"program shy second strike ghost panel account fence welcome visa cattle sad cake proud reward lab abuse rail scare note alarm just cereal above cook\"\n\n # Create application\n app_id = create_app(algod_client, account_mnemonics)\n\n # Call Application\n grp_tx_id = app_call(algod_client, account_mnemonics, app_id)\n","repo_name":"algorand-guy/different_transactions","sub_path":"transactions/transactions_with_smartContract.py","file_name":"transactions_with_smartContract.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15384662140","text":"#-*- coding:utf-8 -*-\r\nimport zipfile \r\n\r\ndef main(imei):\r\n #now_dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n f = zipfile.ZipFile('/home/xiaoliujun/obd/1.zip', 'w' ,zipfile.ZIP_DEFLATED) \r\n f.write('/home/xiaoliujun/obd/20150517-100953-obd-data.xls') \r\n #f.write(var.excel_file_name[:-9] + '-bug.xls') \r\n #f.write(var.excel_file_name) \r\n f.close()\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n imei = \"869269016686512\"\r\n main(imei)\r\n","repo_name":"hhxxss0722/double","sub_path":"dmn_last_edition_v23_5910_CW_nocursor_10.23.3.661/dmn_last_edition_v23_5910_CW_nocursor/mainTest.py","file_name":"mainTest.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70637756573","text":"import time\nimport numpy as np\n\nfrom src.displacement.movement import stop, rotate_time, move, advance_time\nfrom src.displacement.planning import update_path\nfrom src.kalman.kalmann_filter import KalmanHandler\nfrom src.local_avoidance.obstacle import ObstacleAvoidance\nfrom src.path_planning.localization import Localization\nfrom src.path_planning.occupancy import display_occupancy, full_path_to_points\nfrom src.plots.covariance import plot_cov\nfrom src.thymio.Thymio import Thymio\nfrom src.vision.camera import Camera\n\n# State of the thymio\nFORWARD = 0\nTURN = 1\nSTOP = 2\n\n\nclass EventHandler:\n \"\"\"\n This class manages all the different scenarios of the robot until it reaches the goal.\n \"\"\"\n\n def __init__(self, thymio: Thymio, interval_camera=1, interval_odometry=0.1, interval_sleep=0.08,\n obstacle_threshold=4100, epsilon_theta=20, epsilon_r=2):\n \"\"\"\n Constructor of the class EventHandler.\n\n :param thymio: class thymio, reference to the robot\n :param interval_camera: time constant necessary to access to kalman odometry and measurement\n :param interval_odometry: time constant necessary to access to kalman odometry\n :param interval_sleep: time sleep constant before function loop calls\n :param obstacle_threshold: condition to go into local avoidance\n :param epsilon_theta: the tolerated angle deviation\n :param epsilon_r: the tolerated distance deviation\n \"\"\"\n self.thymio: Thymio = thymio\n self.interval_camera = interval_camera\n self.interval_odometry = interval_odometry\n self.interval_sleep = interval_sleep\n self.obstacle_threshold = obstacle_threshold\n self.case_size_cm = 2.5 # [cm]\n self.camera = Camera()\n self.camera.open_camera()\n self.final_occupancy_grid, 
self.goal = Localization(self.camera).localize()\n self.kalman_handler = KalmanHandler(self.thymio, self.camera)\n self.kalman_position = self.kalman_handler.get_camera()\n self.epsilon_theta = epsilon_theta # [degrees]\n self.epsilon_r = epsilon_r # [cm]\n self.path, self.full_path = display_occupancy(self.final_occupancy_grid,\n (self.kalman_position[0], self.kalman_position[1]),\n self.goal)\n # self.kalman_handler.start_recording()\n self.kalman_handler.start_timer()\n self.camera_timer = time.time()\n self.odometry_timer = time.time()\n # self.state = STOP\n self.__global_handler()\n\n def __global_handler(self):\n \"\"\"\n Function called in loop until the goal is reached. Kalman, global displacement, local avoidance happens here.\n \"\"\"\n # odometry and measurement kalman\n if time.time() - self.camera_timer >= self.interval_camera:\n print(\"before kalman position\", self.kalman_position)\n self.kalman_position = self.kalman_handler.get_kalman(True)\n print(\"after kalman position\", self.kalman_position)\n self.camera_timer = time.time()\n self.odometry_timer = time.time()\n\n # odometry kalman\n if time.time() - self.odometry_timer >= self.interval_odometry:\n self.kalman_position = self.kalman_handler.get_kalman(False)\n self.odometry_timer = time.time()\n\n # get orientation and displacement needed to reach next point of the path\n delta_r, delta_theta = update_path(self.path, self.kalman_position[0], self.kalman_position[1],\n self.kalman_position[2],\n self.case_size_cm)\n\n # Apply rotation\n if abs(delta_theta) > self.epsilon_theta:\n if abs(delta_r) < self.epsilon_r:\n print(\"Arrived to goal (from rotating)\")\n stop(self.thymio)\n self.path = np.delete(self.path, 0, 1) # removes the step done from the non-concatenated lists\n left_dir, right_dir, turn_time = rotate_time(delta_theta)\n left_dir = left_dir * 0.5\n right_dir = right_dir * 0.5\n if abs(delta_theta) < 20: # turn less quickly near epsilon_theta\n left_dir = left_dir * 0.5\n right_dir = right_dir * 0.5\n move(self.thymio, left_dir, right_dir)\n\n # Apply displacement\n elif abs(delta_r) > self.epsilon_r:\n left_dir, right_dir, distance_time = advance_time(delta_r)\n left_dir = left_dir * 0.5\n right_dir = right_dir * 0.5\n move(self.thymio, left_dir, right_dir)\n\n # check if local avoidance needed\n sensor_values = self.kalman_handler.sensor_handler.sensor_raw()\n if np.amax(sensor_values[\"sensor\"][0:4]).astype(int) >= self.obstacle_threshold:\n stop(self.thymio)\n self.__local_handler()\n self.camera_timer = time.time()\n self.odometry_timer = time.time()\n else:\n stop(self.thymio)\n print(\"REMOVE POINTS\", self.path[0][0], self.path[1][0])\n self.path = np.delete(self.path, 0, 1) # removes the step done from the non-concatenated lists\n\n # if there still exist a path, iterates once more\n if len(self.path[0]):\n time.sleep(self.interval_sleep)\n self.__global_handler()\n\n # no more path, go back to main\n else:\n self.camera.close_camera()\n stop(self.thymio)\n with open('cov_all.txt', 'w') as f:\n for item in self.kalman_handler.kalman.cov_all:\n f.write(\"%s,\" % item)\n f.close()\n with open('pos_all.txt', 'w') as f:\n for item in self.kalman_handler.kalman.pos_all:\n f.write(\"%s,\" % item)\n f.close()\n # plot at the end the covariances\n plot_cov(self.kalman_handler.kalman.pos_all, self.kalman_handler.kalman.cov_all)\n\n def __local_handler(self):\n \"\"\"\n Local avoidance handler that updates the path after done avoiding.\n \"\"\"\n obstacle = ObstacleAvoidance(self.thymio, 
self.kalman_handler, self.full_path, self.final_occupancy_grid)\n self.full_path = obstacle.full_path\n self.kalman_position = obstacle.kalman_position\n if len(self.full_path[0]) < 2:\n self.full_path = np.array([[self.goal[0]], [self.goal[1]]])\n return\n self.path = full_path_to_points(self.full_path) # concatenated path\n","repo_name":"KookaS/basics-mobile-robotics","sub_path":"src/displacement/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34522882340","text":"from modules.entity_readers.helpers import *\n\ndef read_LightProbe(f):\n\t'''\n\tLight_Probe (0x1c = 28) 01111110\n\t325 entities\n\tNode structure:\n\t- node_flags can be 00000000, 00000005, 00000200, 00000400, 00001000, 00008000.\n\t- node_string is only provided for #247 (:the_sky_probe:).\n\t- node_group_id is sometime provided.\n\t- node_ids can point to Door-152, Door-41, Door-37, and Door-219, but is most often not provided. -> opening the Door changes the light?\n\t- node_float is never provided (-1.0).\n\t- node_ints are never provided (light probes are not displayed).\n\t- node_final_floats is always (0.75, 0.0, 0.0, 0.0).\n\t'''\n\treturn (\n\t\tread_byte_array(f, 4),\t\t\t# 0: Always all 0.\n\t\tread_id(f),\t\t\t\t\t\t# 1: Group-203, Obelisk-1-3 (town, treehouse, symmetry island),\n\t\t\t\t\t\t\t\t\t\t# Inanimate-13-3399-4085-12946-17030 (loc_coloredLights_frontWindows, loc_quarry_lift_platform, loc_coloredLights_upperWindowsFrame, loc_coloredLights_slidingDoorNew, loc_quarryDelta_water),\n\t\t\t\t\t\t\t\t\t\t# Multipanel-7 (fade), Machine_Panel-185-436 (symmetry_translucent_hub, symmetry_translucent5).\n\t\tread_id(f),\t\t\t\t\t\t# 2: Marker-68 (#7), Marker-93 (#33,197,213,217,222,282), Marker-124 (#16), Marker-140 (#21), Marker-238 (#60), Marker-258 (#119), Marker-1023 (#187), Marker-1034 (#309)\n\t\tread_byte_array(f, 4),\t\t\t# 3: Always all 0.\n\t\tread_byte(f),\t\t\t\t\t# 4: Boolean.\n\t\tread_byte(f),\t\t\t\t\t# 5: Always 0.\n\t\tread_float32(f),\t\t\t\t# 6: 16.0 or 32.0.\n\t\tread_byte_array(f, 2),\t\t\t# 7: Booleans.\n\t\tread_int(f),\t\t\t\t\t# 8: 10, 16, 24, 32, 64, 128, or 256. Maybe a dimension of the probe bitmap?\n\t\tread_array(f, 5, read_float32),\t# 9: \n\t\tread_optional_string(f),\t\t# 10: Name of a texture probe file. 
Rarely provided.\n\t\tread_int(f),\t\t\t\t\t# 11: Always 0.\n\t)\n","repo_name":"ClementSparrow/TheWitnessExplorer","sub_path":"modules/entity_readers/LightProbe.py","file_name":"LightProbe.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29838088541","text":"# 只出现一次的数字 III\n# 给定一个整数数组 nums,其中恰好有两个元素只出现一次,其余所有元素均出现两次。 找出只出现一次的那两个元素。\n#\n# 示例 :\n# 输入: [1,2,1,3,2,5]\n# 输出: [3,5]\n#\n# 注意:\n# 结果输出的顺序并不重要,对于上面的例子, [5, 3] 也是正确答案。\n# 你的算法应该具有线性时间复杂度。你能否仅使用常数空间复杂度来实现?\n\n\nclass Solution(object):\n \"\"\"\n 法一:HashMap\n 时间复杂度:O(n),空间复杂度:O(n)\n \"\"\"\n def singleNumber1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n res = []\n hash = {}\n for i in nums:\n hash[i] = hash.get(i, 0) + 1 # 返回指定键的值,如果值不在字典中返回default值\n for k, v in hash.items():\n if v == 1:\n res.append(k)\n return res\n\n \"\"\"\n 法二:异或\n 时间复杂度:O(n),空间复杂度:O(1)\n \"\"\"\n def singleNumber2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # 求全部数异或结果\n total_xor = 0\n for i in nums:\n total_xor ^= i\n\n # 求异或结果中非零的最低位\n index = 0\n while total_xor & 1 == 0:\n total_xor = total_xor >> 1\n index += 1\n\n # 将原数组分成两个子数组,且刚好每个子数组中各自包含一个只出现一次的数字\n nums1, nums2 = 0, 0\n for i in nums:\n if (i >> index) & 1 == 1:\n nums1 ^= i\n else:\n nums2 ^= i\n\n return [nums1, nums2]\n\n def singleNumber(self, nums):\n return self.singleNumber2(nums)\n\nif __name__ == '__main__':\n s = Solution()\n nums = [1,2,1,3,2,5]\n print(s.singleNumber(nums))\n","repo_name":"EricaEmmm/CodePython","sub_path":"LeetCode/L260_异或_只出现一次的数字III_两个只出现一次的数字.py","file_name":"L260_异或_只出现一次的数字III_两个只出现一次的数字.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34476321213","text":"\r\n\r\nservice_classifier = {\r\n\t\r\n\t\"java\" : {\r\n\t\t\t\"jboss\" : [\r\n\t\t\t\t\t\"isource\",\r\n\t\t\t\t\t\"icost\",\r\n\t\t\t\t\t\"spm\",\r\n\t\t\t\t\t\"imanage\",\r\n\t\t\t\t\t\"autoclass\",\r\n\t\t\t\t\t],\r\n\t\t\t\"tomcat\" : [\r\n\t\t\t\t\"ssobridge\",\r\n\t\t\t\t\"eproc\",\r\n\t\t\t\t\"icontract\",\r\n\t\t\t\t\"cns\",\r\n\t\t\t\t\"zytrack\",\r\n\t\t\t\t\"dashboard\",\r\n\t\t\t\t\"oneview\",\r\n\t\t\t\t\"crms\",\r\n\t\t\t\t\"supplier_portal\",\r\n\t\t\t\t\"sim\",\r\n\t\t\t\t\"tms\",\r\n\t\t\t\t\"rainbow\",\r\n\t\t\t\t\"fieldlibrary\",\r\n\t\t\t\t\"flexiform\",\r\n\t\t\t\t\"zcs\",\r\n\t\t\t\t\"inotify\",\r\n\t\t\t\t\"irequest\",\r\n\t\t\t\t\"iconsole\",\r\n\t\t\t\t\"imonitor\",\r\n\t\t\t\t\"zsn\",\r\n\t\t\t\t\"quicksearch\",\r\n\t\t\t\t\"isave\",\r\n\t\t\t\t],\r\n\t\t\t\"middleware\" : [\"h2\"],\r\n\t\t\t\"zookeeper\" : None,\r\n\t\t\t\"integration_platform\" : None,\r\n\r\n\t\t\t\"extra_tags\" : [\"jmx\",\"app_server\"]\r\n\t\t},\r\n\t\t\t\r\n\r\n\r\n\r\n}","repo_name":"MeghaRay0109/icinga-conf-gen","sub_path":"icinga-package/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11453146439","text":"AR = '/opt/local/bin/ar'\nARCH_ST = ['-arch']\nARFLAGS = 'rcs'\nBINDIR = '/usr/local/bin'\nBOOST_VERSION = '1_53'\nCC = ['/usr/bin/gcc']\nCCLNK_SRC_F = []\nCCLNK_TGT_F = ['-o']\nCC_NAME = 'gcc'\nCC_SRC_F = []\nCC_TGT_F = ['-c', '-o']\nCC_VERSION = ('4', '2', '1')\nCFLAGS_MACBUNDLE = ['-fPIC']\nCFLAGS_cshlib = ['-fPIC']\nCOMPILER_CC = 'gcc'\nCOMPILER_CXX = 'g++'\nCPPPATH_ST = '-I%s'\nCXX = 
['/usr/bin/g++']\nCXXFLAGS = ['-O0', '-Wall', '-Wno-unused-variable', '-g3', '-Qunused-arguments']\nCXXFLAGS_MACBUNDLE = ['-fPIC']\nCXXFLAGS_cxxshlib = ['-fPIC']\nCXXLNK_SRC_F = []\nCXXLNK_TGT_F = ['-o']\nCXX_NAME = 'gcc'\nCXX_SRC_F = []\nCXX_TGT_F = ['-c', '-o']\nDEFINES = []\nDEFINES_ST = '-D%s'\nDEST_BINFMT = 'mac-o'\nDEST_CPU = 'x86_64'\nDEST_OS = 'darwin'\nFRAMEWORKPATH_ST = '-F%s'\nFRAMEWORK_OSX_APPKIT = ['AppKit']\nFRAMEWORK_OSX_FOUNDATION = ['Foundation']\nFRAMEWORK_OSX_SECURITY = ['Security']\nFRAMEWORK_ST = ['-framework']\nINCLUDES_BOOST = '/opt/local/include'\nINCLUDES_LOG4CXX = ['/opt/local/include']\nLIBDIR = '/usr/local/lib'\nLIBPATH_BOOST = ['/opt/local/lib']\nLIBPATH_LOG4CXX = ['/opt/local/lib']\nLIBPATH_ST = '-L%s'\nLIB_BOOST = ['boost_system-mt', 'boost_test_exec_monitor-mt', 'boost_iostreams-mt', 'boost_filesystem-mt', 'boost_thread-mt', 'boost_date_time-mt']\nLIB_LOG4CXX = ['log4cxx']\nLIB_ST = '-l%s'\nLINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']\nLINKFLAGS_cshlib = ['-dynamiclib', '-Wl,-compatibility_version,1', '-Wl,-current_version,1']\nLINKFLAGS_cstlib = []\nLINKFLAGS_cxxshlib = ['-dynamiclib', '-Wl,-compatibility_version,1', '-Wl,-current_version,1']\nLINKFLAGS_cxxstlib = []\nLINK_CC = ['/usr/bin/gcc']\nLINK_CXX = ['/usr/bin/g++']\nPKGCONFIG = '/opt/local/bin/pkg-config'\nPREFIX = '/usr/local'\nRPATH_ST = '-Wl,-rpath,%s'\nSHLIB_MARKER = []\nSONAME_ST = []\nSTLIBPATH_ST = '-L%s'\nSTLIB_MARKER = []\nSTLIB_ST = '-l%s'\ncfg_files = ['/Users/yuyingdi/Develop/ndn-security/build/config.h']\ncprogram_PATTERN = '%s'\ncshlib_PATTERN = 'lib%s.dylib'\ncstlib_PATTERN = 'lib%s.a'\ncxxprogram_PATTERN = '%s'\ncxxshlib_PATTERN = 'lib%s.dylib'\ncxxstlib_PATTERN = 'lib%s.a'\ndefine_key = []\nmacbundle_PATTERN = '%s.bundle'\n","repo_name":"bruinfish/ndn-security","sub_path":"build/c4che/_cache.py","file_name":"_cache.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2888269827","text":"#!/usr/bin/python\n# coding:utf-8\nfrom argparse import Action, SUPPRESS\nimport os\nfrom cqh_file_watcher import __version__\nimport argparse\nfrom queue import Queue\nimport re\nimport json\nimport sys\nimport logging\nimport time\n\nfrom pyinotify import WatchManager, Notifier, ProcessEvent, IN_DELETE, IN_CREATE, IN_MODIFY, IN_MOVED_TO, IN_MOVED_FROM\n# https://codereview.stackexchange.com/questions/6567/redirecting-subprocesses-output-stdout-and-stderr-to-the-logging-module\n\nfrom cqh_file_watcher.command_caller import CommandCaller\nfrom cqh_file_watcher import util\n\n\nclass EventHandler(ProcessEvent):\n def __init__(self, logger, command_list, directory):\n super().__init__()\n self.logger = logger\n self.queue = Queue(maxsize=1)\n self.check_set = dict()\n self.command_callder = CommandCaller(self.queue, logger)\n self.command_list = command_list\n self.directory = directory.rstrip(\"/\")\n\n def start(self):\n self.logger.info(\"begin call command_caller\")\n self.command_callder.start()\n\n def stop(self):\n self.queue.put((True, time.time(), None), True)\n\n def process_IN_CREATE(self, event):\n self.handle_event(event)\n\n def process_IN_DELETE(self, event):\n self.handle_event(event)\n\n def process_IN_MODIFY(self, event):\n self.handle_event(event)\n\n def process_IN_MOVE(self, event):\n self.handle_event(event)\n\n def process_IN_MOVED_TO(self, event):\n self.handle_event(event)\n\n def process_IN_MOVED_FROM(self, event):\n self.handle_event(event)\n\n def 
handle_event(self, event):\n send_data_list = []\n\n def should_execute_cmd(command, relative_path):\n pattern, ignore_pattern = command.get(\"pattern\"), command.get(\"ignore_pattern\")\n if not ignore_pattern:\n ignore_pattern = []\n else:\n if not isinstance(ignore_pattern, (list, tuple)):\n ignore_pattern = [ignore_pattern]\n\n def not_in_ignore_patttern(ignore_pattern_list):\n for _ignore_pattern in ignore_pattern_list:\n _ignore_pattern = re.compile(_ignore_pattern)\n if _ignore_pattern.match(relative_path):\n return False\n return True\n if not pattern:\n return not_in_ignore_patttern(ignore_pattern)\n pattern = re.compile(pattern)\n if pattern.match(relative_path):\n return not_in_ignore_patttern(ignore_pattern)\n\n for command_d in self.command_list:\n pattern = command_d.get(\"pattern\")\n # generated_by_dict_unpack: command_d\n command = command_d[\"command\"]\n directory = command_d.get(\"directory\") or self.directory\n relative_path = event.pathname[len(self.directory) + 1:]\n logger.debug(\"relative_path:{}\".format(relative_path))\n should_execute = should_execute_cmd(command_d, relative_path)\n # if not pattern:\n # should_execute = True\n # else:\n # if not pattern.endswith(\"$\"):\n # pattern += \"$\"\n # pattern = re.compile(pattern)\n # if pattern.match(relative_path):\n # should_execute = True\n if should_execute:\n # push command data\n now =time.time()\n if command not in self.check_set or self.check_set[command] < now:\n self.check_set[command] = now + command_d['delay']\n queue_data = dict(\n pattern=pattern,\n relative_path=relative_path,\n path=event.path,\n command=command,\n directory=directory,\n ts = time.time() + command_d['delay']\n )\n send_data_list.append(queue_data)\n\n if send_data_list:\n for item in send_data_list:\n if self.queue.full():\n self.logger.info(\"queue is full \")\n continue\n self.queue.put([False, item['ts'], item ])\n\n\n_dir = os.path.dirname(\n os.path.abspath(__file__)\n)\nfrom cqh_file_watcher.conf import doc\ndoc_content = doc\n\n\nlogger = logging.getLogger('cqh_file_watcher')\nif not logger.handlers:\n stream_handler = logging.StreamHandler(stream=sys.stdout)\n stream_handler.setFormatter(logging.Formatter('[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s',\n datefmt='%y%m%d %H:%M:%S'))\n logger.addHandler(stream_handler)\n\n\nclass DocAction(Action):\n\n def __init__(self,\n option_strings,\n dest=SUPPRESS,\n default=SUPPRESS,\n help=None):\n super(DocAction, self).__init__(\n option_strings=option_strings,\n dest=dest,\n default=default,\n nargs=0,\n help=help)\n\n def __call__(self, parser, namespace, values, option_string=None):\n # parser.print_help()\n print(self.default)\n parser.exit()\n\n\nparser = argparse.ArgumentParser('cqh_file_watcher',\n description='watch directory changes and run commands',\n )\n\nparser.register(\"action\", \"doc\", DocAction)\n\n\nlevel_choices = logging._nameToLevel\nlevel_choices = [e.lower() for e in level_choices]\n\nparser.add_argument(\"--level\", dest='level', type=str, default=\"info\", choices=level_choices)\nparser.add_argument(\"--conf\", dest='conf', help=\"conf path\", required=True)\nparser.add_argument(\"--doc\", default=doc_content, action='doc')\n\n\ndef main(argv=None):\n if argv is not None:\n convert_args = parser.parse_args(argv)\n else:\n convert_args = parser.parse_args()\n _inner_run(convert_args.level, convert_args.conf)\n\n\ndef _inner_run(level, conf):\n \"\"\"Simple program that greets NAME for a total of COUNT times.\"\"\"\n 
logger.setLevel(getattr(logging, level.upper()))\n if not os.path.exists(conf):\n logger.error(\"conf not exitst {}\".format(conf))\n return\n logger.debug(\"version:{}\".format(__version__))\n content_d = json.loads(open(conf, \"r\", encoding='utf-8').read())\n\n current_dir = os.getcwd()\n logger.info(\"current_dir:{}\".format(current_dir))\n env = os.environ.copy()\n env['DIRECTORY'] = current_dir\n logger.debug('env:{}'.format(env))\n\n content_d['directory'] = util.string_replace(content_d['directory'], env)\n for ele in content_d['command_list']:\n ele['command'] = util.string_replace(ele['command'], env)\n if \"delay\" not in ele:\n # 命令的延时时间\n ele['delay'] = 0.5\n logger.debug(\"content:{}\".format(json.dumps(\n content_d, ensure_ascii=False, indent=2)))\n # generated_by_dict_unpack: content_d\n directory, command_list = content_d[\"directory\"], content_d[\"command_list\"]\n monitor(directory, command_list)\n\n\ndef monitor(path, command_list):\n wm = WatchManager()\n \"\"\"\n IN_DELETE | IN_CREATE | IN_MODIFY | 监听不到ansible copy的事件?\n 试一试 IN_MOVED_TO?\n \"\"\"\n mask = IN_DELETE | IN_CREATE | IN_MODIFY | IN_MOVED_FROM | IN_MOVED_TO\n handler = EventHandler(logger, command_list=command_list,\n directory=path)\n notifier = Notifier(wm, handler)\n handler.start()\n wm.add_watch(path, mask, auto_add=True, rec=True)\n logger.info(\"now start monitor %s\" % path)\n while 1:\n try:\n notifier.process_events()\n if notifier.check_events():\n notifier.read_events()\n except KeyboardInterrupt:\n notifier.stop()\n handler.stop()\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chen19901225/cqh_file_watcher","sub_path":"cqh_file_watcher/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16216479988","text":"from collections import Counter\n\ndef checkio(data: list) -> list:\n\n delete_lst = [] # список удаляемых элементов массива \n dictionary = Counter(data) # посчитаем количество повторений элемента с помощью Counter \n couples = dictionary.items() # создадим пары элемент - повторение элемента\n\n for couple in couples: # пройдемся по каждой из них\n if couple[1] == 1: # если элемент уникальный то...\n delete_lst.append(couple[0]) # ... добавим его в массив элементов удаления \n \n for elem in delete_lst: # удаляем все элементы из списка\n data.remove(elem)\n\n return data\n\n\nif __name__ == \"__main__\":\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert list(checkio([1, 2, 3, 1, 3])) == [1, 3, 1, 3], \"1st example\"\n assert list(checkio([1, 2, 3, 4, 5])) == [], \"2nd example\"\n assert list(checkio([5, 5, 5, 5, 5])) == [5, 5, 5, 5, 5], \"3rd example\"\n assert list(checkio([10, 9, 10, 10, 9, 8])) == [10, 9, 10, 10, 9], \"4th example\"\n print(\"It is all good. 
Let's check it now\")","repo_name":"PontificSalivan/Checkio","sub_path":"Home tasks/non_unique_elements.py","file_name":"non_unique_elements.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30094082387","text":"\"\"\"\r\n\n\nWrite a function that returns `True` if every consecutive sequence of **ones**\nis followed by a consecutive sequence of **zeroes** of the same length.\n\n### Examples\n\n same_length(\"110011100010\") ➞ True\n \n same_length(\"101010110\") ➞ False\n \n same_length(\"111100001100\") ➞ True\n \n same_length(\"111\") ➞ False\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef same_length(txt):\n ones = [i for i in txt.split('0') if i != '']\n zeroes = [i for i in txt.split('1') if i != '']\n if len(ones) != len(zeroes):\n return False\n for i in range(len(ones)):\n if len(ones[i]) != len(zeroes[i]): \n return False\n return True\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"TZXG9RfcZ7T3o43QF_3.py","file_name":"TZXG9RfcZ7T3o43QF_3.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41456007639","text":"import synapse.datamodel as s_datamodel\n\nfrom synapse.tests.common import *\n\nclass DataModelTest(SynTest):\n\n def test_datamodel_types(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'bar', ptype='int')\n model.addTufoProp('foo', 'baz', ptype='str')\n model.addTufoProp('foo', 'faz', ptype='syn:tag')\n model.addTufoProp('foo', 'zip', ptype='str:lwr')\n\n self.eq(model.getPropRepr('foo:bar', 10), '10')\n self.eq(model.getPropRepr('foo:baz', 'woot'), 'woot')\n self.eq(model.getPropRepr('foo:faz', 'woot.toow'), 'woot.toow')\n self.eq(model.getPropRepr('foo:zip', 'woot'), 'woot')\n self.eq(model.getPropRepr('foo:nonexistent', 'stillwoot'), 'stillwoot')\n\n self.eq(model.getPropType('foo:bar').name, 'int')\n\n self.eq(model.getPropNorm('foo:bar', 10)[0], 10)\n self.eq(model.getPropNorm('foo:baz', 'woot')[0], 'woot')\n self.eq(model.getPropNorm('foo:faz', 'WOOT.toow')[0], 'woot.toow')\n self.eq(model.getPropNorm('foo:zip', 'WOOT')[0], 'woot')\n\n self.eq(model.getPropParse('foo:bar', '10')[0], 10)\n self.eq(model.getPropParse('foo:baz', 'woot')[0], 'woot')\n self.eq(model.getPropParse('foo:faz', 'WOOT.toow')[0], 'woot.toow')\n self.eq(model.getPropParse('foo:zip', 'WOOT')[0], 'woot')\n self.eq(model.getPropParse('foo:nonexistent', 'stillwoot'), 'stillwoot')\n\n def test_datamodel_glob(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'bar:*', ptype='str:lwr', glob=1)\n self.eq(model.getPropNorm('foo:bar:baz', 'Woot')[0], 'woot')\n\n def test_datamodel_fail_notype(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n self.raises(NoSuchType, model.addTufoProp, 'foo', 'bar', ptype='hehe')\n\n def test_datamodel_fail_noprop(self):\n model = s_datamodel.DataModel()\n\n self.raises(NoSuchForm, model.addTufoProp, 'foo', 'bar')\n\n model.addTufoForm('foo')\n self.raises(DupPropName, model.addTufoForm, 'foo')\n\n model.addTufoProp('foo', 'bar')\n self.raises(DupPropName, model.addTufoProp, 'foo', 'bar')\n\n def test_datamodel_cortex(self):\n core = s_cortex.openurl('ram:///')\n\n core.addTufoForm('foo', ptype='str')\n core.addTufoProp('foo', 'bar', ptype='int', defval=10)\n\n core.formTufoByProp('foo', 'hehe')\n core.formTufoByProp('foo', 'haha')\n\n 
core.formTufoByProp('foo', 'blah', bar=99)\n\n tufo0 = core.formTufoByProp('foo', 'hehe')\n self.eq(tufo0[1].get('foo:bar'), 10)\n\n core.setTufoProp(tufo0, 'bar', 30)\n self.eq(tufo0[1].get('foo:bar'), 30)\n\n tufo1 = core.formTufoByProp('foo', 'hehe')\n self.eq(tufo0[0], tufo1[0])\n\n tufos = core.getTufosByProp('foo')\n self.len(3, tufos)\n\n tufos = core.getTufosByProp('foo:bar', valu=30, limit=20)\n self.len(1, tufos)\n\n tufos = core.getTufosByProp('foo:bar', valu=99, limit=20)\n self.len(1, tufos)\n\n def test_datamodel_subs(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'bar', ptype='int')\n\n subs = model.getSubProps('foo')\n\n self.len(1, subs)\n self.eq(subs[0][0], 'foo:bar')\n\n model.addTufoProp('foo', 'baz', ptype='int', defval=20)\n\n defs = model.getSubPropDefs('foo')\n self.eq(defs.get('foo:baz'), 20)\n\n def test_datamodel_bool(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'bar', ptype='bool', defval=0)\n\n self.eq(model.getPropRepr('foo:bar', 1), 'True')\n self.eq(model.getPropRepr('foo:bar', 0), 'False')\n\n self.eq(model.getPropNorm('foo:bar', True)[0], 1)\n self.eq(model.getPropNorm('foo:bar', False)[0], 0)\n\n self.eq(model.getPropParse('foo:bar', '1')[0], 1)\n self.eq(model.getPropParse('foo:bar', '0')[0], 0)\n\n self.raises(BadTypeValu, model.getPropParse, 'foo:bar', 'asdf')\n\n def test_datamodel_hash(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n\n model.addTufoProp('foo', 'md5', ptype='hash:md5')\n model.addTufoProp('foo', 'sha1', ptype='hash:sha1')\n model.addTufoProp('foo', 'sha256', ptype='hash:sha256')\n\n fakemd5 = 'AA' * 16\n fakesha1 = 'AA' * 20\n fakesha256 = 'AA' * 32\n\n self.eq(model.getPropNorm('foo:md5', fakemd5)[0], fakemd5.lower())\n self.eq(model.getPropNorm('foo:sha1', fakesha1)[0], fakesha1.lower())\n self.eq(model.getPropNorm('foo:sha256', fakesha256)[0], fakesha256.lower())\n\n self.raises(BadTypeValu, model.getPropNorm, 'foo:md5', 'asdf')\n self.raises(BadTypeValu, model.getPropNorm, 'foo:sha1', 'asdf')\n self.raises(BadTypeValu, model.getPropNorm, 'foo:sha256', 'asdf')\n\n self.eq(model.getPropParse('foo:md5', fakemd5)[0], fakemd5.lower())\n self.eq(model.getPropParse('foo:sha1', fakesha1)[0], fakesha1.lower())\n self.eq(model.getPropParse('foo:sha256', fakesha256)[0], fakesha256.lower())\n\n self.raises(BadTypeValu, model.getPropParse, 'foo:md5', 'asdf')\n self.raises(BadTypeValu, model.getPropParse, 'foo:sha1', 'asdf')\n self.raises(BadTypeValu, model.getPropParse, 'foo:sha256', 'asdf')\n\n def test_datamodel_parsetypes(self):\n\n class Woot:\n @s_datamodel.parsetypes('int', 'str:lwr')\n def getFooBar(self, size, flag):\n return {'size': size, 'flag': flag}\n\n @s_datamodel.parsetypes('int', flag='str:lwr')\n def getBazFaz(self, size, flag=None):\n return {'size': size, 'flag': flag}\n\n woot = Woot()\n\n ret = woot.getFooBar('30', 'ASDF')\n\n self.eq(ret.get('size'), 30)\n self.eq(ret.get('flag'), 'asdf')\n\n ret = woot.getBazFaz('10')\n\n self.eq(ret.get('size'), 10)\n self.eq(ret.get('flag'), None)\n\n ret = woot.getBazFaz('10', flag='ASDF')\n self.eq(ret.get('size'), 10)\n self.eq(ret.get('flag'), 'asdf')\n\n def test_datamodel_inet(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'addr', ptype='inet:ipv4')\n model.addTufoProp('foo', 'serv', ptype='inet:srv4')\n model.addTufoProp('foo', 'port', ptype='inet:port')\n\n self.eq(model.getPropNorm('foo:port', 20)[0], 
20)\n self.eq(model.getPropParse('foo:port', '0x10')[0], 16)\n\n self.eq(model.getPropRepr('foo:addr', 0x01020304), '1.2.3.4')\n self.eq(model.getPropNorm('foo:addr', 0x01020304)[0], 0x01020304)\n self.eq(model.getPropParse('foo:addr', '1.2.3.4')[0], 0x01020304)\n\n self.eq(model.getPropRepr('foo:serv', 0x010203040010), '1.2.3.4:16')\n self.eq(model.getPropNorm('foo:serv', 0x010203040010)[0], 0x010203040010)\n self.eq(model.getPropParse('foo:serv', '1.2.3.4:255')[0], 0x0102030400ff)\n\n self.raises(BadTypeValu, model.getPropNorm, 'foo:port', 0xffffff)\n self.raises(BadTypeValu, model.getPropParse, 'foo:port', '999999')\n\n def test_datamodel_time(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'meow', ptype='time:epoch')\n\n jan1_2016 = 1451606400\n self.eq(model.getPropNorm('foo:meow', jan1_2016)[0], jan1_2016)\n self.eq(model.getPropRepr('foo:meow', jan1_2016), '2016/01/01 00:00:00')\n self.eq(model.getPropParse('foo:meow', '2016/01/01 00:00:00')[0], jan1_2016)\n\n def test_datamodel_badprop(self):\n model = s_datamodel.DataModel()\n\n self.raises(BadPropName, model.addTufoForm, 'foo.bar')\n\n model.addTufoForm('foo:bar')\n self.raises(BadPropName, model.addTufoProp, 'foo:bar', 'b*z')\n\n def test_datatype_syn_prop(self):\n model = s_datamodel.DataModel()\n\n self.raises(BadTypeValu, model.getTypeNorm, 'syn:prop', 'asdf qwer')\n self.raises(BadTypeValu, model.getTypeNorm, 'syn:prop', 'foo::bar')\n\n self.eq(model.getTypeNorm('syn:prop', 'BAR')[0], 'bar')\n self.eq(model.getTypeParse('syn:prop', 'BAR')[0], 'bar')\n self.eq(model.getTypeNorm('syn:prop', 'foo:BAR')[0], 'foo:bar')\n self.eq(model.getTypeParse('syn:prop', 'foo:BAR')[0], 'foo:bar')\n\n def test_datatype_syn_tag(self):\n model = s_datamodel.DataModel()\n\n self.raises(BadTypeValu, model.getTypeNorm, 'syn:tag', 'asdf qwer')\n self.raises(BadTypeValu, model.getTypeNorm, 'syn:tag', 'foo..bar')\n\n self.eq(model.getTypeNorm('syn:tag', 'BAR')[0], 'bar')\n self.eq(model.getTypeParse('syn:tag', 'BAR')[0], 'bar')\n self.eq(model.getTypeNorm('syn:tag', 'foo.BAR')[0], 'foo.bar')\n self.eq(model.getTypeParse('syn:tag', 'foo.BAR')[0], 'foo.bar')\n\n def test_datamodel_forms(self):\n model = s_datamodel.DataModel(load=False)\n forms = model.getTufoForms()\n self.isinstance(forms, list)\n self.notin('syn:prop', forms)\n self.notin('inet:ipv4', forms)\n\n model = s_datamodel.DataModel()\n forms = model.getTufoForms()\n self.isin('syn:prop', forms)\n self.isin('inet:ipv4', forms)\n\n def test_datamodel_getPropInfo(self):\n model = s_datamodel.DataModel()\n\n model.addType('foo:bar', subof='str', doc='foo bar doc')\n model.addType('foo:baz', subof='foo:bar')\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'meow', ptype='foo:baz')\n model.addTufoProp('foo', 'bark', doc='lala')\n model.addTufoProp('foo', 'meow:purr', ptype='foo:baz', title='purr', doc='The sound a purr makes')\n\n self.eq(model.getPropInfo('foo:meow', 'req'), False)\n self.eq(model.getPropInfo('foo:meow', 'base'), 'meow')\n self.eq(model.getPropInfo('foo:meow', 'relname'), 'meow')\n self.eq(model.getPropInfo('foo:meow', 'defval'), None)\n self.eq(model.getPropInfo('foo:meow', 'title'), '')\n self.eq(model.getPropInfo('foo:meow', 'doc'), 'foo bar doc')\n\n self.eq(model.getPropInfo('foo:bark', 'doc'), 'lala')\n self.eq(model.getPropInfo('foo:bark', 'title'), '')\n self.eq(model.getPropInfo('foo:bark', 'base'), 'bark')\n self.eq(model.getPropInfo('foo:bark', 'relname'), 'bark')\n self.eq(model.getPropInfo('foo:meow', 
'defval'), None)\n self.eq(model.getPropInfo('foo:meow', 'req'), False)\n\n self.eq(model.getPropInfo('foo:meow:purr', 'req'), False)\n self.eq(model.getPropInfo('foo:meow:purr', 'base'), 'purr')\n self.eq(model.getPropInfo('foo:meow:purr', 'relname'), 'meow:purr')\n self.eq(model.getPropInfo('foo:meow:purr', 'defval'), None)\n self.eq(model.getPropInfo('foo:meow:purr', 'title'), 'purr')\n self.eq(model.getPropInfo('foo:meow:purr', 'doc'), 'The sound a purr makes')\n\n self.eq(model.getPropInfo('foo:nonexistent', 'doc'), None)\n\n def test_datamodel_getPropDef(self):\n model = s_datamodel.DataModel()\n\n model.addTufoForm('foo')\n model.addTufoProp('foo', 'meow', ptype='int')\n\n self.eq(model.getPropDef('foo:meow'),\n ('foo:meow', {'title': '',\n 'req': False,\n 'form': 'foo',\n 'relname': 'meow',\n 'base': 'meow',\n 'defval': None,\n 'ptype': 'int',\n 'doc': 'The base integer type',\n 'univ': False,\n }\n )\n )\n self.eq(model.getPropDef('foo:meow:nonexistent'), None)\n self.eq(model.getPropDef('foo:meow:nonexistent', glob=False), None)\n\n def test_datamodel_typefns(self):\n self.eq(s_datamodel.getTypeRepr('str', 'haha'), 'haha')\n self.eq(s_datamodel.getTypeRepr('inet:ipv4', 0x01020304), '1.2.3.4')\n\n self.eq(s_datamodel.getTypeNorm('str', 'haha'), ('haha', {}))\n self.eq(s_datamodel.getTypeNorm('inet:ipv4', 0x01020304), (16909060, {}))\n self.eq(s_datamodel.getTypeNorm('inet:ipv4', '1.2.3.4'), (16909060, {}))\n\n self.raises(BadTypeValu, s_datamodel.getTypeNorm, 'inet:ipv4', 'hahaha')\n\n self.eq(s_datamodel.getTypeParse('str', 'haha'), ('haha', {}))\n self.eq(s_datamodel.getTypeParse('inet:ipv4', '1.2.3.4'), (16909060, {}))\n\n def test_datamodel_filepath(self):\n model = s_datamodel.DataModel()\n prop = 'file:path'\n\n data = (\n ('/', ('/', {'dir': '', 'depth': 0}), '/'),\n ('//', ('/', {'dir': '', 'depth': 0}), '//'),\n ('////////////', ('/', {'dir': '', 'depth': 0}), '////////////'),\n ('weirD', ('weird', {'base': 'weird', 'dir': '', 'depth': 1}), 'weirD'),\n\n ('foo1', ('foo1', {'base': 'foo1', 'dir': '', 'depth': 1}), 'foo1'),\n ('/foo2', ('/foo2', {'base': 'foo2', 'dir': '', 'depth': 1}), '/foo2'),\n ('/foo/bar3', ('/foo/bar3', {'base': 'bar3', 'dir': '/foo', 'depth': 2}), '/foo/bar3'),\n ('/foo/bar4 ', ('/foo/bar4 ', {'base': 'bar4 ', 'dir': '/foo', 'depth': 2}), '/foo/bar4 '), # These are valid filepaths\n ('/foo/bar5/', ('/foo/bar5', {'base': 'bar5', 'dir': '/foo', 'depth': 2}), '/foo/bar5/'),\n\n ('C:\\\\', ('c:', {'base': 'c:', 'depth': 1, 'dir': ''}), 'C:\\\\'),\n ('C:\\\\Program Files\\\\Foo.bAr.BAZ.exe',\n ('c:/program files/foo.bar.baz.exe', {'base': 'foo.bar.baz.exe', 'dir': 'c:/program files', 'depth': 3, 'ext': 'exe'}), 'C:\\\\Program Files\\\\Foo.bAr.BAZ.exe')\n )\n\n for valu, expected, expected_repr in data:\n\n self.eq(expected, model.getTypeNorm(prop, valu))\n self.eq(expected, model.getTypeParse(prop, valu))\n self.eq(expected_repr, model.getTypeRepr(prop, valu))\n\n def test_datamodel_filebase(self):\n model = s_datamodel.DataModel()\n prop = 'file:base'\n\n data = (\n ('my_COOL_file', ('my_cool_file', {}), 'my_COOL_file'),\n ('my file', ('my file', {}), 'my file'),\n ('!@#$%^&.jpeg', ('!@#$%^&.jpeg', {}), '!@#$%^&.jpeg'),\n )\n\n for valu, expected, expected_repr in data:\n\n self.eq(expected, model.getTypeNorm(prop, valu))\n self.eq(expected, model.getTypeParse(prop, valu))\n self.eq(expected_repr, model.getTypeRepr(prop, valu))\n\n bads = (None, [], {}, 1, '/teehee', 'hoho/haha')\n for bad in bads:\n self.raises(BadTypeValu, model.getTypeNorm, prop, 
bad)\n self.raises(BadTypeValu, model.getTypeParse, prop, bad)\n\n def test_datamodel_formbase(self):\n\n modl = s_datamodel.DataModel()\n modl.addTufoForm('foo:bar')\n modl.addTufoProp('foo:bar', 'baz')\n\n form, base = modl.getPropFormBase('foo:bar:baz')\n\n self.eq(form, 'foo:bar')\n self.eq(base, 'baz')\n\n self.raises(NoSuchProp, modl.getPropFormBase, 'newp:newp')\n\n def test_datamodel_reqpropnorm(self):\n with self.getRamCore() as core:\n v, _ = core.reqPropNorm('strform:foo', '1')\n self.eq(v, '1')\n self.raises(NoSuchProp, core.reqPropNorm, 'strform:beepbeep', '1')\n\n def test_datamodel_istufoform(self):\n modl = s_datamodel.DataModel()\n self.true(modl.isTufoForm('file:bytes'))\n self.false(modl.isTufoForm('file:bytes:size'))\n self.false(modl.isTufoForm('node:ndef'))\n\n self.none(modl.reqTufoForm('file:bytes'))\n self.raises(NoSuchForm, modl.reqTufoForm, 'file:bytes:size')\n\n def test_datamodel_cast_json(self):\n modl = s_datamodel.DataModel()\n self.eq(modl.getTypeCast('make:json', 1), '1')\n self.eq(modl.getTypeCast('make:json', 'hehe'), '\"hehe\"')\n self.eq(modl.getTypeCast('make:json', '\"hehe\"'), '\"\\\\\"hehe\\\\\"\"')\n self.eq(modl.getTypeCast('make:json', {\"z\": 1, 'yo': 'dawg', }), '{\"yo\":\"dawg\",\"z\":1}')\n\n def test_datamodel_cast_int10(self):\n modl = s_datamodel.DataModel()\n self.eq(modl.getTypeCast('int:2:str10', 1), '1')\n self.eq(modl.getTypeCast('int:2:str10', 100), '100')\n self.eq(modl.getTypeCast('int:2:str10', 0x11), '17')\n self.eq(modl.getTypeCast('int:2:str10', 'hehe'), 'hehe')\n\n def test_datamodel_type_hook(self):\n defs = []\n modl = s_datamodel.DataModel()\n modl.addType('gronk', subof='guid')\n modl.addPropDef('foo:bar', ptype='gronk')\n modl.addPropTypeHook('gronk', defs.append)\n\n self.len(1, defs)\n self.eq(defs[0][0], 'foo:bar')\n\n modl.addPropDef('foo:baz', ptype='gronk')\n\n self.len(2, defs)\n self.eq(defs[1][0], 'foo:baz')\n\n def test_datamodel_istufoprop(self):\n modl = s_datamodel.DataModel()\n\n # Types are not props\n self.false(modl.isTufoProp('str:lwr'))\n\n # Prop does not yet exist\n self.false(modl.isTufoProp('foo:meow'))\n modl.addTufoForm('foo')\n modl.addTufoProp('foo', 'meow', ptype='str:lwr')\n # Forms are props\n self.true(modl.isTufoProp('foo'))\n # And props are props!\n self.true(modl.isTufoProp('foo:meow'))\n","repo_name":"larrycameron80/synapse","sub_path":"synapse/tests/test_datamodel.py","file_name":"test_datamodel.py","file_ext":"py","file_size_in_byte":17318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"40705895765","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\np = np.genfromtxt('P.txt')\r\nbl = np.zeros(p.shape)\r\nplt.plot(bl,'r')\r\nplt.plot(p)\r\n#plt.plot(pd,'g')\r\nplt.ylabel('CTE')\r\nplt.xlabel('t [steps]')\r\nplt.title('P controller')\r\nplt.ylim((-7, 7))\r\n\r\nplt.figure()\r\npd = np.genfromtxt('PD.txt')\r\nbl = np.zeros(pd.shape)\r\nplt.plot(bl,'r')\r\nplt.plot(pd)\r\nplt.ylabel('CTE')\r\nplt.xlabel('t [steps]')\r\nplt.title('PD controller')\r\nplt.ylim((-7, 7))\r\n\r\n#data = np.genfromtxt('data.txt', delimiter='\\t')[1:,:]\r\n#\r\n#\r\n#Kp = data[:,0]\r\n#Kd = data[:,1]\r\n#Ki = data[:,2]\r\n#error = data[:,3]\r\n#errorn = 1-(error - min(error))/(max(error) - min(error))\r\n#errorn = errorn**5\r\n#\r\n#plt.scatter(Kp, Kd, c=error, s=100*errorn, cmap='plasma', 
alpha=0.5)\r\n#plt.colorbar()\r\n#plt.xlabel(\"K_p\")\r\n#plt.ylabel(\"K_d\")\r\n","repo_name":"janulicny/sdcnd-pid-control","sub_path":"images/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34673371700","text":"\"\"\"\nTHe FFT class and associated helpers.\n\"\"\"\n\nimport numpy\n\nfrom scipy.fftpack import fft\nfrom matplotlib import pyplot\n\nfrom . import music_facts\n\n\nclass FFT(numpy.ndarray):\n \"\"\"\n Represents the Fast Fourier Transform corresponding to a WavData object.\n \"\"\"\n def __new__(cls, wav_data):\n ret = numpy.abs(fft(wav_data.normalize()))\n ret = ret.view(cls)\n\n ret.wav_data = wav_data\n ret.scaling_factor = ret.wav_data.sample_rate / len(ret)\n return ret\n\n def x_val_in_hz(self, x_val):\n \"\"\"\n Convert abstract FFT indexes to Hz, using the `scaling_factor`.\n\n Source: https://dsp.stackexchange.com/questions/46167/getting-frequencies-corresponding-to-peaks-in-fft-plot-matlab # pylint: disable=line-too-long\n \"\"\"\n return x_val * self.scaling_factor # pylint: disable=no-member\n\n def hz_to_x_val(self, hz):\n \"\"\"\n Opposite of the above; same logic.\n \"\"\"\n return hz / self.scaling_factor # pylint: disable=no-member\n\n def single_note(self):\n \"\"\"\n Pick a single note to represent this audio snippet\n \"\"\"\n loudest = self.argmax()\n freq = self.x_val_in_hz(loudest)\n return music_facts.freq_to_note(freq)\n\n def plot(self, numbered_notes=True):\n \"\"\"\n Plot this frequency spectrum\n \"\"\"\n plot = FFTPlot(self)\n plot.plot()\n\n\n\nclass FFTPlot(object):\n \"\"\"\n Plots an FFT.\n \"\"\"\n def __init__(self, _fft):\n self.fft = _fft\n\n self.plottable_start = int(self.fft.hz_to_x_val(music_facts.LOWEST_PIANO_FREQ))\n assert self.plottable_start >= 1, self.plottable_start\n assert isinstance(self.plottable_start, int), type(self.plottable_start)\n\n self.plottable_end = int(self.fft.hz_to_x_val(music_facts.HIGHEST_PIANO_FREQ))\n assert self.plottable_end <= len(self.fft), self.plottable_end\n assert isinstance(self.plottable_end, int)\n\n self.x_vals = [\n self.fft.x_val_in_hz(i) for i in range(self.plottable_start, self.plottable_end)\n ]\n\n self.y_vals = [\n self.fft[i] for i in range(self.plottable_start, self.plottable_end)\n ]\n\n assert len(self.x_vals) == len(self.y_vals), \"len(x_vals): {}, len(y_vals): {}\".format(\n len(self.x_vals), len(self.y_vals)\n )\n\n def plot(self, numbered_notes=True):\n if numbered_notes:\n x_vals = [music_facts.freq_to_note(x) for x in self.x_vals]\n else:\n x_vals = self.x_vals\n\n pyplot.plot(x_vals, self.y_vals, linewidth=0.5)\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n from musical_decoder import * # pylint: disable=wildcard-import,unused-wildcard-import\n # data = read_sample_wav(\"waltz2.wav\")\n data = read_sample_wav(\"single-note-me2.wav\")\n data = read_sample_wav(\"single-piano-note.wav\")\n fft = FFT(data)\n fft.plot()\n","repo_name":"playfulpachyderm/polyphonic-pagination","sub_path":"musical_decoder/FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35903395629","text":"from datetime import datetime\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom datasets import mnist, mnist_m\nfrom models.ganin import GaninModel\nfrom trainer import train, test\nfrom utils import transform, helper\n\n# 
Random Seed\nhelper.set_random_seed(seed=12345)\n\n# Device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparameters\nconfig = dict(epochs=2,\n batch_size=64,\n learning_rate=2e-4,\n classes=10,\n img_size=28,\n experiment='minst-minist_m')\n\n\ndef main():\n\n model = GaninModel().to(device)\n\n # transforms\n transform_m = transform.get_transform(dataset=\"mnist\")\n transform_mm = transform.get_transform(dataset=\"mnist_m\")\n\n # dataloaders\n loaders_args = dict(\n batch_size=config[\"batch_size\"],\n shuffle=True,\n num_workers=1,\n pin_memory=True,\n )\n\n trainloader_m = mnist.fetch(data_dir=\"data/mnist/processed/train.pt\",\n transform=transform_m,\n **loaders_args)\n\n # fetching testloader_m for symmetry but it is not needed in the code\n testloader_m = mnist.fetch(data_dir=\"data/mnist/processed/test.pt\",\n transform=transform_m,\n **loaders_args)\n\n trainloader_mm = mnist_m.fetch(data_dir=\"data/mnist_m/processed/train.pt\",\n transform=transform_mm,\n **loaders_args)\n\n testloader_mm = mnist_m.fetch(data_dir=\"data/mnist_m/processed/test.pt\",\n transform=transform_mm,\n **loaders_args)\n\n # criterion\n criterion_l = nn.CrossEntropyLoss()\n criterion_d = nn.BCEWithLogitsLoss()\n optimizer = optim.Adam(model.parameters(), lr=config[\"learning_rate\"])\n\n start_time = datetime.now()\n for epoch in range(config[\"epochs\"]):\n\n alpha = helper.get_alpha(epoch, config[\"epochs\"])\n print(\"alpha: \", alpha)\n\n train(model, epoch, config, criterion_l, criterion_d, optimizer, alpha,\n trainloader_m, trainloader_mm, testloader_mm, device)\n\n test(model, testloader_mm, criterion_l, optimizer, device)\n\n end_time = datetime.now()\n duration = end_time - start_time\n print(f\"Training Time for {config['epochs']} epochs: {duration}\")\n\n return model\n\n\nif __name__ == \"__main__\":\n model = main()\n","repo_name":"nisyad/pytorch-unsupervised-domain-adatation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"74735365852","text":"import pandas as pd \r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ndados = pd.read_csv('iris.data')\r\n#print(dados)\r\n#################67% a 80% ---- tipico 75%\r\ndados_embaralhados = dados.sample(frac=1)\r\n\r\ndados_treinamento = dados_embaralhados.iloc[:100,:-1].values\r\ndados_treinamento_rotulo = dados_embaralhados.iloc[:100,-1].values\r\n\r\ndados_teste = dados_embaralhados.iloc[100:,:-1].values\r\ndados_teste_rotulo = dados_embaralhados.iloc[100:,-1].values\r\n\r\nvetor=[]\r\n\r\nfor i in range(1,20): \r\n classificador = KNeighborsClassifier(n_neighbors=i)\r\n classificador = classificador.fit(dados_treinamento,dados_treinamento_rotulo)\r\n\r\n dadoEstimado = classificador.predict(dados_teste)\r\n #print(dadoEstimado,dados_teste_rotulo)\r\n\r\n print(\"Acurácia:\",100*sum(dadoEstimado==dados_teste_rotulo)/len(dadoEstimado),\"%\")\r\n print(\"N_neighbors:\",i)\r\n vetor.append(100*sum(dadoEstimado==dados_teste_rotulo)/len(dadoEstimado))\r\n\r\nplt.plot(vetor)\r\nplt.show()","repo_name":"gabrielbastoos/IntroMachineLearning","sub_path":"Exp2/codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28028654836","text":"\"\"\"Author: Sagar Shelke\r\nThis is a program to tune Xg 
boost using Hyperopt tuning library and Tree of Parzen Estimators (TPE) algorithm\r\nThis program can be easily generalized to tune any machine/deep learning algorithm\r\n\r\nWe are saving results on test data if error on validation data goes below particular threshold \"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport xgboost as xgb\r\nimport sys\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\nfrom hyperopt import hp, fmin, tpe, STATUS_OK, Trials\r\n\r\ndef objective(space):\r\n\r\n space[\"colsample_bytree\"] = float(space[\"colsample_bytree\"])\r\n space['max_depth'] = int(space['max_depth'])\r\n space['learning_rate'] = float(space['learning_rate'])\r\n space['subsample'] = float(space['subsample'])\r\n space['min_child_weight'] = float(space['min_child_weight'])\r\n\r\n print(\"Reading Data\")\r\n\r\n train = pd.read_csv(\"./nyc/train_final_feat.csv\")\r\n test = pd.read_csv(\"./nyc/test_final_feat.csv\")\r\n\r\n do_not_use_for_train = [\"id\", \"pickup_datetime\", \"trip_duration\", \"pickup_date\", \"dropoff_datetime\", \"avg_speed_h\",\r\n \"avg_speed_m\", \"avg_speed\"]\r\n\r\n feature_names = [f for f in train.columns if f not in do_not_use_for_train]\r\n\r\n y = np.log(train[\"trip_duration\"].values + 1)\r\n Xtr, Xv, ytr, yv = train_test_split(train[feature_names].values, y, test_size=0.2, random_state=1987)\r\n\r\n d_train = xgb.DMatrix(train[feature_names].values)\r\n dtrain = xgb.DMatrix(Xtr, label=ytr)\r\n dvalid = xgb.DMatrix(Xv, label=yv)\r\n dtest = xgb.DMatrix(test[feature_names].values)\r\n\r\n watchlist = [(dtrain, \"train\"), (dvalid, \"valid\")]\r\n\r\n print(\"Building Model\")\r\n\r\n model = xgb.train(space, d_train, 1000, early_stopping_rounds=150, maximize=False, verbose_eval=10)\r\n rmsle = round(model.best_score, 5)\r\n print(\"Modeling RSMLE {}\".format(rmsle))\r\n print(space)\r\n\r\n if rmsle < 0.36500:\r\n\r\n ytest = model.predict(dtest)\r\n if test.shape[0] == ytest.shape[0]:\r\n print(\"Test is successful\")\r\n else:\r\n print(\"Oops! 
There is some problem with dimention\")\r\n test[\"trip_duration\"] = np.exp(ytest) - 1\r\n test[[\"id\", \"trip_duration\"]].to_csv(\"./results/\"+str(rmsle)+\"_submission.csv.gz\", index=False, compression=\"gzip\")\r\n\r\n print(\"-------------------------------------\")\r\n\r\n return {'loss': rmsle, 'status': STATUS_OK}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n trial = 0\r\n\r\n space = {\r\n 'learning_rate': hp.quniform('eta', 0.005, 0.05, 0.005),\r\n 'max_depth': hp.quniform('max_depth', 3, 20, 1),\r\n 'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),\r\n 'subsample': hp.quniform('subsample', 0.5, 1, 0.05),\r\n 'gamma': hp.quniform('gamma', 0.5, 1, 0.01),\r\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.4, 1, 0.05),\r\n }\r\n\r\n trials = Trials()\r\n print(\"optimizing function\")\r\n sys.stdout = open(\"tune_xg_boost_without_snow.txt\", \"wt\")\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=300,\r\n trials=trials)\r\n print(best)\r\n","repo_name":"kaggle3/nyc_taxi_challenge_kaggle","sub_path":"tune_xg_boost.py","file_name":"tune_xg_boost.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32538628134","text":"import torch\r\nimport torch.nn.functional as F\r\n\r\ndef rot90(tensor, k, reverse=False): # B,C,D,W,H\r\n k = k if not reverse else -k\r\n return torch.rot90(tensor.detach(), k=k, dims=(3, 4))\r\n\r\ndef trans1(tensor,intensity):\r\n tensor=tensor/2+0.5\r\n tensor=torch.clip(tensor*intensity,0,1)\r\n return tensor*2-1\r\n\r\ndef trans2(tensor,intensity):\r\n tensor=tensor/2+0.5\r\n tensor=torch.clip(tensor+intensity,0,1)\r\n return tensor*2-1\r\n\r\nclass Merger:\r\n def __init__(self,type: str = 'mean',n: int = 1,):\r\n if type not in ['mean', 'gmean', 'sum', 'max', 'min', 'tsharpen']:\r\n raise ValueError('Not correct merge type `{}`.'.format(type))\r\n self.output = None\r\n self.type = type\r\n self.n = n\r\n\r\n def append(self, x):\r\n\r\n if self.type == 'tsharpen':\r\n x = x ** 0.5\r\n\r\n if self.output is None:\r\n self.output = x\r\n elif self.type in ['mean', 'sum', 'tsharpen']:\r\n self.output = self.output + x\r\n elif self.type == 'gmean':\r\n self.output = self.output * x\r\n elif self.type == 'max':\r\n self.output = F.max(self.output, x)\r\n elif self.type == 'min':\r\n self.output = F.min(self.output, x)\r\n\r\n @property\r\n def result(self):\r\n if self.type in ['sum', 'max', 'min']:\r\n result = self.output\r\n elif self.type in ['mean', 'tsharpen']:\r\n result = self.output / self.n\r\n elif self.type in ['gmean']:\r\n result = self.output ** (1 / self.n)\r\n else:\r\n raise ValueError('Not correct merge type `{}`.'.format(self.type))\r\n return result\r\n\r\nclass TestTimeAug(object):\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self, model, input):\r\n merger = Merger(type='mean', n=5)\r\n with torch.no_grad():\r\n merger.append(model(input)[0])\r\n merger.append(model(input.detach().flip(3))[0].flip(3))\r\n merger.append(model(trans1(input.detach(),1.1))[0])\r\n merger.append(model(trans1(input.detach(),0.9))[0])\r\n merger.append(model(trans2(input.detach(),0.1))[0])\r\n return merger.result\r\n\r\nimport random\r\n\r\nclass PatchInferencer(object):\r\n def __init__(self, n_class=5, batch_size=2, size=[32, 256, 256], stride=[16, 128, 128], TTA=False):\r\n self.n_class = n_class\r\n self.size = size\r\n self.stride = stride\r\n self.batch_size=batch_size\r\n assert 
len(self.size) == len(self.stride)\r\n if TTA:\r\n self.tta = TestTimeAug()\r\n\r\n def __call__(self, model,input, softmax=True):\r\n model.eval()\r\n ned_pad=False\r\n b, c, d, w, h = input.size()\r\n if d= 2 ):\n continue\n prediction = np.dot( data_in[ i ], classifier[ 0 ] ) > 0\n if ( data_out[ i ] != prediction ):\n sign = 1 if data_out[ i ] > 0 else -1\n classifier[ 0 ] = classifier[ 0 ] + ( data_in[ i ] * sign )\n def score_classifier( classifier, data_in, data_out ):\n import random\n count = 0\n iterations = 0\n for i in range( len( data_out ) ):\n if ( data_out[ i ] >= 2 ):\n continue\n iterations += 1\n prediction = np.dot( data_in[ i ], classifier[ 0 ] ) > 0\n if ( data_out[ i ] == prediction ):\n count += 1\n return float( count ) / iterations\n\nelif ( classifier == 1 ):\n import sklearn\n from sklearn import svm\n\n def get_classifier( data_in, data_out ):\n return svm.LinearSVC( C=1.0 )\n def train_classifier( classifier, data_in, data_out ):\n classifier.fit( data_in, data_out )\n def score_classifier( classifier, data_in, data_out ):\n return classifier.score( data_in, data_out )\n \nelif ( classifier == 2 ):\n import random\n \n def get_classifier( data_in, data_out ):\n return 0\n def train_classifier( classifier, data_in, data_out ):\n return\n def score_classifier( classifier, data_in, data_out ):\n count = 0\n for i in data_out:\n if i == random.randint(0, 9):\n count += 1\n return float( count ) / len( data_out )\n\n\n# In[23]:\n\nfor i in range( 10 ):\n print( crossvalidation( 10, data, labels ) )\n\n\n\n","repo_name":"hexecute/eecs-149-PIOng","sub_path":"svm/SVM PoC.py","file_name":"SVM PoC.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15097953958","text":"\n\nclass TreeNode(object):\n\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\n\nclass Solution(object):\n\n\tdef __init__(self):\n\t\tself.result = 0\n\n\tdef findTilt(self,root):\t\t\n\t\tself.do_find_tilt(root)\n\t\treturn self.result\n\n\n\tdef do_find_tilt(self, root):\n\t\tif root == None: \n\t\t\treturn 0\n\t\ttl = self.do_find_tilt(root.left)\n\t\ttr = self.do_find_tilt(root.right)\n\t\tself.result += abs(tl - tr) \n\t\treturn tl + tr + root.val\n\n\n\ndef build_1_2_3_4_5_tree():\n\tlf4 = TreeNode(4)\n\tlf5 = TreeNode(5)\n\tlf2 = TreeNode(2)\n\tlf2.left = lf4\n\tlf3 = TreeNode(3)\n\tlf3.right = lf5\n\tlf1 = TreeNode(1)\n\tlf1.left = lf2\n\tlf1.right = lf3\n\treturn lf1\n\n\n\n\nif __name__ == '__main__':\t\n\tleaf1 = TreeNode(1)\n\tleaf2 = TreeNode(2)\n\tleaf3 = TreeNode(3)\n\tleaf1.left = leaf2\n\tleaf1.right = leaf3\n\tsol = Solution()\n\tprint(sol.findTilt(build_1_2_3_4_5_tree()))","repo_name":"dreamsql/PythonDemos","sub_path":"algorithm/tilt.py","file_name":"tilt.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32494092496","text":"import requests\r\nimport bs4\r\nimport re\r\nfrom xml.dom import minidom\r\n#\r\n# Specify a callsign\r\nsearchValue = input(\"Enter search value: \")\r\n#\r\n# Retrieve XML record from FCC API\r\nrooturl = 'http://data.fcc.gov/api/license-view/basicSearch/getLicenses?searchValue='\r\nurl = rooturl + searchValue\r\nprint(\"Retrieving general XML record from FCC database...\")\r\ntry:\r\n fccResp = requests.get(url, timeout = 5)\r\nexcept:\r\n if fccResp.status_code != 200:\r\n print(f\"FCC API Error: {fccResp}\")\r\n 
exit()\r\n#\r\n# Parse XML record for details\r\nprint(\"Parsing XML record...\")\r\nfccRecord = minidom.parseString(fccResp.text)\r\ntry:\r\n fccRecordTree = fccRecord.getElementsByTagName('License')[0]\r\nexcept IndexError:\r\n errorTree = fccRecord.getElementsByTagName('Errors')[0]\r\n errorElement = errorTree.childNodes.item(0)\r\n errorCode = errorElement.getAttribute('code')\r\n errorMsg = errorElement.getAttribute('msg')\r\n print(f\"\\nSearch Error Code: {errorCode}\")\r\n print(f\"{errorMsg}\\n\")\r\n exit()\r\nfccLicenseTree = fccRecordTree.childNodes\r\ntry:\r\n name = fccLicenseTree[0].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n name = ''\r\ntry:\r\n frn = fccLicenseTree[1].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n frn = ''\r\ntry:\r\n callSign = fccLicenseTree[2].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n callSign = ''\r\ntry:\r\n categoryDesc = fccLicenseTree[3].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n categoryDesc = ''\r\ntry:\r\n serviceDesc = fccLicenseTree[4].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n serviceDesc = ''\r\ntry:\r\n statusDesc = fccLicenseTree[5].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n statusDesc = ''\r\ntry:\r\n expDate = fccLicenseTree[6].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n expDate = ''\r\ntry:\r\n licenseID = fccLicenseTree[7].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n licenseID = ''\r\ntry:\r\n webpage = fccLicenseTree[8].childNodes.item(0).nodeValue\r\nexcept AttributeError:\r\n webpage = ''\r\n#\r\n# Retrieve address and license class from detailed FCC record from url in XML\r\nif webpage == '':\r\n print(\"Detailed webpage not available...\")\r\n exit()\r\n#\r\nprint(\"Retrieving detailed HTML page from FCC database...\")\r\ntry:\r\n detailsResp = requests.get(webpage, timeout = 5)\r\nexcept:\r\n if detailsResp.status_code != 200:\r\n print(f\"ULS Database Error: {detailsResp}\")\r\n exit()\r\n#\r\nprint(\"Parsing HTML document...\")\r\nsoup = bs4.BeautifulSoup(detailsResp.text, 'html5lib')\r\nlicNameAddrStyle = 'body > table:nth-child(4) > tbody > tr > td:nth-child(2) > div > table:nth-child(2) > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(3) > td:nth-child(1)'\r\nlicTypeStyle = 'body > table:nth-child(4) > tbody > tr > td:nth-child(2) > div > table:nth-child(2) > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(1) > td:nth-child(4)'\r\nlicClassStyle = 'body > table:nth-child(4) > tbody > tr > td:nth-child(2) > div > table:nth-child(2) > tbody > tr:nth-child(6) > td > table > tbody > tr:nth-child(1) > td:nth-child(2)'\r\nlicFonEmailStyle = 'body > table:nth-child(4) > tbody > tr > td:nth-child(2) > div > table:nth-child(2) > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(3) > td:nth-child(2) > p'\r\n#\r\nlicNameAddr = soup.select(licNameAddrStyle)[0].text.lstrip().split('\\n')\r\nlicAddr = licNameAddr[1]\r\nlicAddr2 = licNameAddr[2].split(', ')\r\nif len(licAddr2) == 2:\r\n licCity = licAddr2[0]\r\n licState = licAddr2[1]\r\nelse:\r\n licCity = ''\r\n licState = ''\r\nlicZip = licNameAddr[3]\r\nif len(licNameAddr) == 6:\r\n licAttn = licNameAddr[5]\r\nelse:\r\n licAttn = ''\r\n#\r\nlicType = soup.select(licTypeStyle)[0].text.strip()\r\n#\r\nlicClass = soup.select(licClassStyle)[0].text.strip()\r\n#\r\nfonRegEx = '\\(\\d{3}\\)\\d{3}-\\d{4}'\r\nlicFonEmail = soup.select(licFonEmailStyle)[0].text.strip().split(':')\r\nif len(licFonEmail) > 1:\r\n fonObj = 
re.search(fonRegEx,licFonEmail[1].strip())\r\n licFon = fonObj.group()\r\nelse:\r\n licFon = ''\r\nif len(licFonEmail) > 2:\r\n fonObj = re.search(fonRegEx,licFonEmail[2].strip())\r\n licFax = fonObj.group()\r\nelse:\r\n licFax = ''\r\nif len(licFonEmail) == 4:\r\n licEmail = licFonEmail[3]\r\nelse:\r\n licEmail = ''\r\n#\r\n#print(name,frn,callSign,categoryDesc,serviceDesc,statusDesc,expDate,licenseID,webpage)\r\n#print(licAddr,licCity,licState,licZip,licAttn,licClass,licFon,licFax,licEmail)\r\n#\r\nprint(f\"\\nName : {name}\")\r\nprint(f\"Address : {licAddr}\")\r\nprint(f\"City, State ZIP: {licCity}, {licState} {licZip}\")\r\nprint(f\" {licAttn}\")\r\nprint(f\"Phone : {licFon}\")\r\nprint(f\"Fax : {licFax}\")\r\nprint(f\"Email : {licEmail}\\n\")\r\nprint(f\"FRN : {frn}\")\r\nprint(f\"Callsign : {callSign}\")\r\nprint(f\"Type : {licType}\")\r\nprint(f\"Class : {licClass}\")\r\nprint(f\"Category : {categoryDesc}\")\r\nprint(f\"Service : {serviceDesc}\")\r\nprint(f\"Expiration Date: {expDate}\")\r\n#\r\n","repo_name":"cchipman21804/QRZ_XML_database","sub_path":"fcc_api.py","file_name":"fcc_api.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17620843000","text":"# Chapter 6 - Python Comprehensions:\n\"\"\"\nThe Python language has a couple of methods for creating lists and dictionaries that are known as Comprehensions.\nThere is a third type of Comprehensions for creating a Python set.\n\"\"\"\n#List Comprehensions:\n\"\"\"\nList Comprehensions in Python are very handy. They can also be a little hard to understand .\nA list Comprehensions is basically a one line for loop that produces a Python list data structure.\n\"\"\"\nx = [i for i in range(5)]\nprint(x)\n\nx = ['1', '2', '3', '4', '5']\ny = [int(i) for i in x]\nprint(y)\n\nmyStringList = ['a', 'b', 'C']\nmyString = [s.strip() for s in myStringList]\n\nvec = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nvecList = [num for elem in vec for num in elem]\nprint(vecList)\n\n\n# Dictionary Comprehensions:\nprint( {i: str(i) for i in range(5)} )\n\nmy_dict = {1:\"dog\", 2:\"cat\", 3:\"hamster\"}\nprint({value:key for key, value in my_dict.items()})\n\n# Set Comprehensions\n\nmy_list = [1, 2, 2, 3, 4, 5, 5, 7, 8]\nmy_set = set(my_list)\nprint(my_set)\n\n\nmy_list = [1, 2, 2, 3, 4, 5, 5, 7, 8]\nmy_set = {x for x in my_list}\nprint(my_set)\n","repo_name":"WalleEve/python101","sub_path":"Chapter_6_Python_Comprehensions.py","file_name":"Chapter_6_Python_Comprehensions.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25142080398","text":"# encoding: utf-8\n\nimport torch\nfrom torch import nn\nimport collections\nfrom .backbones.resnet import ResNet, Bottleneck\nfrom .backbones.senet import SENet, SEResNetBottleneck, SEBottleneck, SEResNeXtBottleneck\nfrom .backbones.resnet_ibn_a import resnet50_ibn_a\nfrom .backbones.resnet_nl import ResNetNL\nfrom .layer import CrossEntropyLabelSmooth, TripletLoss, WeightedRegularizedTriplet, CenterLoss, GeneralizedMeanPooling, GeneralizedMeanPoolingP\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n elif 
classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n\n\nclass Baseline(nn.Module):\n in_planes = 2048\n\n def __init__(self, num_classes, last_stride, model_path, model_name, gem_pool, pretrain_choice):\n super(Baseline, self).__init__()\n if model_name == 'resnet50':\n self.base = ResNet(last_stride=last_stride,\n block=Bottleneck,\n layers=[3, 4, 6, 3])\n elif model_name == 'resnet50_nl':\n self.base = ResNetNL(last_stride=last_stride,\n block=Bottleneck,\n layers=[3, 4, 6, 3],\n non_layers=[0, 2, 3, 0])\n elif model_name == 'resnet101':\n self.base = ResNet(last_stride=last_stride,\n block=Bottleneck,\n layers=[3, 4, 23, 3])\n elif model_name == 'resnet152':\n self.base = ResNet(last_stride=last_stride,\n block=Bottleneck,\n layers=[3, 8, 36, 3])\n\n elif model_name == 'se_resnet50':\n self.base = SENet(block=SEResNetBottleneck,\n layers=[3, 4, 6, 3],\n groups=1,\n reduction=16,\n dropout_p=None,\n inplanes=64,\n input_3x3=False,\n downsample_kernel_size=1,\n downsample_padding=0,\n last_stride=last_stride)\n elif model_name == 'se_resnet101':\n self.base = SENet(block=SEResNetBottleneck,\n layers=[3, 4, 23, 3],\n groups=1,\n reduction=16,\n dropout_p=None,\n inplanes=64,\n input_3x3=False,\n downsample_kernel_size=1,\n downsample_padding=0,\n last_stride=last_stride)\n elif model_name == 'se_resnet152':\n self.base = SENet(block=SEResNetBottleneck,\n layers=[3, 8, 36, 3],\n groups=1,\n reduction=16,\n dropout_p=None,\n inplanes=64,\n input_3x3=False,\n downsample_kernel_size=1,\n downsample_padding=0,\n last_stride=last_stride)\n elif model_name == 'se_resnext50':\n self.base = SENet(block=SEResNeXtBottleneck,\n layers=[3, 4, 6, 3],\n groups=32,\n reduction=16,\n dropout_p=None,\n inplanes=64,\n input_3x3=False,\n downsample_kernel_size=1,\n downsample_padding=0,\n last_stride=last_stride)\n elif model_name == 'se_resnext101':\n self.base = SENet(block=SEResNeXtBottleneck,\n layers=[3, 4, 23, 3],\n groups=32,\n reduction=16,\n dropout_p=None,\n inplanes=64,\n input_3x3=False,\n downsample_kernel_size=1,\n downsample_padding=0,\n last_stride=last_stride)\n elif model_name == 'senet154':\n self.base = SENet(block=SEBottleneck,\n layers=[3, 8, 36, 3],\n groups=64,\n reduction=16,\n dropout_p=0.2,\n last_stride=last_stride)\n elif model_name == 'resnet50_ibn_a':\n self.base = resnet50_ibn_a(last_stride)\n\n if pretrain_choice == 'imagenet':\n self.base.load_param(model_path)\n print('Loading pretrained ImageNet model......')\n\n self.num_classes = num_classes\n\n if gem_pool == 'on':\n print(\"Generalized Mean Pooling\")\n self.global_pool = GeneralizedMeanPoolingP()\n else:\n print(\"Global Adaptive Pooling\")\n self.global_pool = nn.AdaptiveAvgPool2d(1)\n\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n\n self.bottleneck.apply(weights_init_kaiming)\n self.classifier.apply(weights_init_classifier)\n\n def forward(self, x):\n x = self.base(x)\n\n global_feat = self.global_pool(x) # (b, 2048, 1, 1)\n global_feat = global_feat.view(global_feat.shape[0], -1) # flatten to (bs, 2048)\n\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n\n if not self.training:\n return feat\n\n 
cls_score = self.classifier(feat)\n return cls_score, global_feat\n\n def load_param(self, trained_path):\n param_dict = torch.load(trained_path)\n if not isinstance(param_dict, collections.OrderedDict):\n param_dict = param_dict.state_dict()\n for i in param_dict:\n if 'classifier' in i:\n continue\n self.state_dict()[i].copy_(param_dict[i])\n\n def get_optimizer(self, cfg, criterion):\n optimizer = {}\n params = []\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n for key, value in self.named_parameters():\n if not value.requires_grad:\n continue\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n if cfg.SOLVER.OPTIMIZER_NAME == 'SGD':\n optimizer['model'] = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)(params, momentum=cfg.SOLVER.MOMENTUM)\n else:\n optimizer['model'] = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)(params)\n if cfg.MODEL.CENTER_LOSS == 'on':\n optimizer['center'] = torch.optim.SGD(criterion['center'].parameters(), lr=cfg.SOLVER.CENTER_LR)\n return optimizer\n\n def get_creterion(self, cfg, num_classes):\n criterion = {}\n criterion['xent'] = CrossEntropyLabelSmooth(num_classes=num_classes) # new add by luo\n\n print(\"Weighted Regularized Triplet:\", cfg.MODEL.WEIGHT_REGULARIZED_TRIPLET)\n if cfg.MODEL.WEIGHT_REGULARIZED_TRIPLET == 'on':\n criterion['triplet'] = WeightedRegularizedTriplet()\n else:\n criterion['triplet'] = TripletLoss(cfg.SOLVER.MARGIN) # triplet loss\n\n if cfg.MODEL.CENTER_LOSS == 'on':\n criterion['center'] = CenterLoss(num_classes=num_classes, feat_dim=cfg.MODEL.CENTER_FEAT_DIM,\n use_gpu=True)\n\n def criterion_total(score, feat, target):\n loss = criterion['xent'](score, target) + criterion['triplet'](feat, target)[0]\n if cfg.MODEL.CENTER_LOSS == 'on':\n loss = loss + cfg.SOLVER.CENTER_LOSS_WEIGHT * criterion['center'](feat, target)\n return loss\n\n criterion['total'] = criterion_total\n\n return criterion\n\n","repo_name":"mangye16/ReID-Survey","sub_path":"modeling/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","stars":577,"dataset":"github-code","pt":"32"} +{"seq_id":"15862817714","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport sys\nfrom optparse import OptionParser\nfrom datetime import datetime\nfrom collorg.controller.controller import Controller\n\nctrl = Controller()\ndb = ctrl.db\ntable = db.table\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-f\", \"--function\", dest=\"function\",\n help=\"function to which the role is attached (long name)\")\n parser.add_option(\"-t\", \"--type\", dest=\"data_type\",\n help=\"Type to which the function is attached (FQTN)\")\n parser.add_option(\"-l\", \"--list_functions\", dest=\"list_functions\",\n action=\"store_true\",\n help=\"list the available functions\", default=True)\n (options, args) = parser.parse_args()\n function = table('collorg.actor.function')\n function.data_type_.value = options.data_type\n if options.function:\n function.long_name_.value = options.function\n function.get()\n datas = db.table(function.data_type_.value)\n for data in datas:\n print(\"{}: {}\".format(data.cog_oid_, data.cog_label()))\n data_oid = raw_input('Data oid? ')\n if not data_oid.strip():\n sys.exit()\n data = db.get_elt_by_oid(data_oid)\n pseudo = raw_input('Pseudo? 
')\n user = table('collorg.actor.user', pseudo_=pseudo)\n user.get()\n access = user._rev_access_\n access._data_ = data\n access.granted()\n if access.is_empty():\n access.insert()\n access = access.get()\n role = function._rev_role_\n role._access_ = access\n role.insert()\n else:\n for fct in function:\n print(fct.long_name_)\n if not options.function:\n parser.print_help()\n","repo_name":"joel-m/collorg","sub_path":"tools/access/role/grant.py","file_name":"grant.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15736791114","text":"#question 7 last page\nrows=int(input(\"Rows in list: \"))\ncolumns=int(input(\"Columns in list: \"))\nList1 = []\nList2= []\n#print(List1)\n#print(List2)\n\nfor i in range(0,rows):\n\n tempList = []\n\n for j in range(0,columns):\n\n tempMem = int(input(\"Enter the elements of List1: \" ))\n\n tempList.append(tempMem)\n\n List1.append(tempList)\n\nfor i in range(0,rows):\n\n tempList = []\n\n for j in range(0,columns):\n\n tempMem = int(input(\"Enter the elements of List2: \" ))\n\n tempList.append(tempMem)\n\n List2.append(tempList)\n\n#print(List1)\n#print(List2)\n\n\n\n\n\n\ndef intersection(List1,List2):\n new_list1 = []\n for i in List1:\n for j in i:\n new_list1.append(j)\n print(new_list1)\n new_list2 = []\n for i in List2:\n for j in i:\n new_list2.append(j)\n print(new_list2)\n inter_List=list(set(new_list1) & set(new_list2))\n print(inter_List)\n return len(inter_List)\n\n\nprint(intersection(List1,List2))","repo_name":"PrenciousCoder/Jaydeep-assignment","sub_path":"Question67.py","file_name":"Question67.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12389772072","text":"# screen settings\r\nWIDTH, HEIGHT = 600, 450\r\nFPS = 90\r\n\r\n# color settings\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\n\r\n# player setting\r\nPLAYER_COLOR = (0, 157, 255)\r\nPLAYER_SIZE = 50\r\nPLAYER_SPEED = 8\r\nENEMY_SPEED = 4\r\n","repo_name":"dharunvs/Save_Earth","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9786901166","text":"import cv2\nimport numpy as np \n\ncap = cv2.VideoCapture(0)\n\n#Using a pre-trained algorithm to find faces\nface_detect = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\nwhile True:\n ret, frame = cap.read()\n\n #Changing the comments to grayscale. 
Required for processing\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_detect.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (1, 255, 1), 3)\n roi_gray = gray[y:y+w, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n\n cv2.imshow(\"Face detection\", frame)\n\n if cv2.waitKey(1) == ord('c'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Pro-tonn/OCV","sub_path":"scripts/OCVfacedetection.py","file_name":"OCVfacedetection.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30766256895","text":"# -*- coding: utf-8 -*-\n\nimport imageio\nimport cv2\nimport os\n\n\ndef print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='*'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + '-' * (length - filled_length)\n print('\\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\n\n # Print New Line on Complete\n if iteration == total:\n print()\n\n\ndef generate_gif(filename, array, fps=30):\n array /= array.max()\n array *= 255.0\n array = array.astype('uint8')\n imageio.mimwrite(filename, array, fps=30)\n\n\ndef enlarge_image(reduced_image, image_width, image_height):\n return cv2.resize(reduced_image, (image_width, image_height), fx=0, fy=0, interpolation=cv2.INTER_NEAREST)\n\n\ndef create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n","repo_name":"rcrespocano/functional-diversity-rgc","sub_path":"diversityrgc/io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9942106099","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Max-Heinrich Laves\n# Institute of Medical Technology and Intelligent Systems\n# Hamburg University of Technology, Germany\n# 2021\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom pathlib import Path\nimport itertools\nimport numpy as np\nfrom models import get_net\nimport torch\nimport torch.multiprocessing as mp\nimport torch.optim\nimport torch.autograd as autograd\nfrom torch.distributions import constraints, transform_to\nimport gpytorch\nfrom skimage.feature import peak_local_max\nfrom utils.denoising_utils import get_noisy_image_gaussian\nfrom utils.bayesian_utils import gaussian_nll\nfrom utils.common_utils import crop_image, get_image, pil_to_np, np_to_pil, plot_image_grid,\\\n get_noise, get_params, np_to_torch, peak_signal_noise_ratio, structural_similarity\nimport time\nfrom tqdm import tqdm\nfrom BayTorch.freq_to_bayes import MeanFieldVI\nimport seaborn as sns\n\nsns.set()\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n\ndef run(\n img: int=0,\n num_iter: int=5000,\n lr: float=3e-4,\n beta: float=4e-6, # lambda in the paper\n tau: float=0.01,\n input_depth: int=16,\n device: 
torch.device=torch.device('cpu'),\n index: int=0,\n seed: int=42,\n show_every: int=100,\n plot: bool=True,\n save: bool=True,\n save_path: str='../logs',\n ):\n\n timestamp = str(time.time())\n Path(f'{save_path}/{timestamp}').mkdir(parents=True, exist_ok=False)\n\n with open(f'{save_path}/{timestamp}/locals.txt', 'w') as f:\n for key, val in locals().items():\n print(key, '=', val, file=f)\n\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n torch.backends.cudnn.benchmark = True\n\n imsize = (256, 256)\n\n # denoising\n if img == 0:\n fname = 'data/NORMAL-4951060-8.png'\n imsize = (256, 256)\n elif img == 1:\n fname = 'data/BACTERIA-1351146-0006.png'\n imsize = (256, 256)\n elif img == 2:\n fname = 'data/081_HC.png'\n imsize = (256, 256)\n elif img == 3:\n fname = 'data/CNV-9997680-30.png'\n imsize = (256, 256)\n elif img == 4:\n fname = 'data/VIRUS-9815549-0001.png'\n imsize = (256, 256)\n else:\n assert False\n\n if fname == 'data/NORMAL-4951060-8.jpeg':\n\n # Add Gaussian noise to simulate speckle\n img_pil = crop_image(get_image(fname, imsize)[0], d=32)\n img_np = pil_to_np(img_pil)\n p_sigma = 0.1\n img_noisy_pil, img_noisy_np = get_noisy_image_gaussian(img_np, p_sigma)\n\n elif fname in ['data/BACTERIA-1351146-0006.png', 'data/VIRUS-9815549-0001.png']:\n\n # Add Poisson noise to simulate low dose X-ray\n img_pil = crop_image(get_image(fname, imsize)[0], d=32)\n img_np = pil_to_np(img_pil)\n #img_noisy_pil, img_noisy_np = get_noisy_image_poisson(img_np, p_lambda)\n # for lam > 20, poisson can be approximated with Gaussian\n p_sigma = 0.1\n img_noisy_pil, img_noisy_np = get_noisy_image_gaussian(img_np, p_sigma)\n\n elif fname == 'data/081_HC.png':\n\n # Add Gaussian noise to simulate speckle\n img_pil = crop_image(get_image(fname, imsize)[0], d=32)\n img_np = pil_to_np(img_pil)\n p_sigma = 0.1\n img_noisy_pil, img_noisy_np = get_noisy_image_gaussian(img_np, p_sigma)\n\n elif fname == 'data/CNV-9997680-30.png':\n\n # Add Gaussian noise to simulate speckle\n img_pil = crop_image(get_image(fname, imsize)[0], d=32)\n img_np = pil_to_np(img_pil)\n p_sigma = 0.1\n img_noisy_pil, img_noisy_np = get_noisy_image_gaussian(img_np, p_sigma)\n\n else:\n assert False\n\n if plot:\n q = plot_image_grid([img_np, img_noisy_np], 4, 6)\n out_pil = np_to_pil(q)\n out_pil.save(f'{save_path}/{timestamp}/input.png', 'PNG')\n\n INPUT = 'noise'\n pad = 'reflection'\n OPT_OVER = 'net' # 'net,input'\n\n reg_noise_std = 1./10.\n LR = lr\n \n num_iter += 1\n\n exp_weight = 0.99\n\n mse = torch.nn.MSELoss()\n\n img_torch = np_to_torch(img_np).to(device)\n img_noisy_torch = np_to_torch(img_noisy_np).to(device)\n\n MSE_NOISY = {}\n MSE_GT = {}\n UNCERTS_EPI = {}\n UNCERTS_ALE = {}\n PSNRS = {}\n SSIMS = {}\n\n figsize = 4\n\n NET_TYPE = 'skip'\n\n skip_n33d = [16, 32, 64, 128, 128]\n skip_n33u = [16, 32, 64, 128, 128]\n skip_n11 = 4\n num_scales = 5\n upsample_mode = 'bilinear'\n\n ## MFVI\n weight_decay = 0\n\n dropout_mode_down = 'None'\n dropout_p_down = 0.0\n dropout_mode_up = 'None'\n dropout_p_up = dropout_p_down\n dropout_mode_skip = 'None'\n dropout_p_skip = dropout_p_down\n dropout_mode_output = 'None'\n dropout_p_output = dropout_p_down\n\n net_input = get_noise(input_depth, INPUT, (img_pil.size[1], img_pil.size[0])).to(device).detach()\n\n net_input_saved = net_input.detach().clone()\n noise = net_input.detach().clone()\n\n out_avg = None\n\n mc_iter = 25\n mc_ring_buffer_epi = torch.zeros((mc_iter,) + imsize) # saves the last mc_iter reconstructions\n mc_ring_buffer_ale = torch.zeros((mc_iter,) + 
imsize) # saves the last mc_iter reconstructions\n\n net = get_net(input_depth, NET_TYPE, pad,\n skip_n33d=skip_n33d,\n skip_n33u=skip_n33u,\n skip_n11=skip_n11,\n num_scales=num_scales,\n n_channels=2,\n upsample_mode=upsample_mode,\n dropout_mode_down=dropout_mode_down,\n dropout_p_down=dropout_p_down,\n dropout_mode_up=dropout_mode_up,\n dropout_p_up=dropout_p_up,\n dropout_mode_skip=dropout_mode_skip,\n dropout_p_skip=dropout_p_skip,\n dropout_mode_output=dropout_mode_output,\n dropout_p_output=dropout_p_output).to(device)\n \n prior = {'mu': 0.0,\n 'sigma': np.sqrt(tau)*1.0}\n\n net = MeanFieldVI(net,\n prior=prior,\n beta=beta,\n replace_layers='all',\n device=device,\n reparam='')\n\n mse_noisy = np.zeros((num_iter))\n mse_gt = np.zeros((num_iter))\n uncerts_epi = np.zeros((num_iter//show_every+1, 1)+imsize)\n uncerts_ale = np.zeros((num_iter//show_every+1, 1)+imsize)\n psnrs = np.zeros((num_iter, 3))\n ssims = np.zeros((num_iter, 3))\n\n img_mean = 0\n sample_count = 0\n psnr_noisy_last = 0\n\n parameters = get_params(OPT_OVER, net, net_input)\n optimizer = torch.optim.AdamW(parameters, lr=LR, weight_decay=weight_decay)\n \n pbar = tqdm(range(num_iter), miniters=num_iter//show_every, position=index)\n for i in pbar:\n optimizer.zero_grad()\n \n if reg_noise_std > 0:\n net_input = net_input_saved + (noise.normal_() * reg_noise_std)\n\n out = net(net_input)\n\n nll = gaussian_nll(out[:,:1], out[:,1:], img_noisy_torch)\n kl = net.kl()\n loss = nll + beta*kl\n loss.backward()\n optimizer.step()\n\n out[:,1:] = torch.exp(-out[:,1:]) # aleatoric uncertainty\n\n # Smoothing\n if out_avg is None:\n out_avg = out.detach()\n else:\n out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)\n\n with torch.no_grad():\n mse_noisy[i] = mse(out_avg[:,:1], img_noisy_torch).item()\n mse_gt[i] = mse(out_avg[:,:1], img_torch).item()\n\n _out = out.detach()[:,:1].clip(0, 1)\n _out_avg = out_avg.detach()[:,:1].clip(0, 1)\n _out_ale = out.detach()[:,1:].clip(0, 1)\n\n mc_ring_buffer_epi[i % mc_iter] = _out[0]\n mc_ring_buffer_ale[i % mc_iter] = _out_ale[0]\n\n psnr_noisy = peak_signal_noise_ratio(img_noisy_torch, _out)\n psnr_gt = peak_signal_noise_ratio(img_torch, _out)\n psnr_gt_sm = peak_signal_noise_ratio(img_torch, _out_avg)\n ssim_noisy = structural_similarity(img_noisy_torch, _out)\n ssim_gt = structural_similarity(img_torch, _out)\n ssim_gt_sm = structural_similarity(img_torch, _out_avg)\n\n psnrs[i] = [psnr_noisy, psnr_gt, psnr_gt_sm]\n ssims[i] = [ssim_noisy, ssim_gt, ssim_gt_sm] \n\n if i % show_every == 0:\n pbar.set_description(f'MSE: {mse_noisy[i].item():.4f} | PSNR_noisy: {psnr_noisy:7.4f} \\\n| PSRN_gt: {psnr_gt:7.4f} PSNR_gt_sm: {psnr_gt_sm:7.4f}')\n\n _out_var = torch.var(mc_ring_buffer_epi, dim=0)\n _out_ale = torch.mean(mc_ring_buffer_ale, dim=0)\n uncerts_epi[i//show_every] = _out_var.cpu().numpy()\n uncerts_ale[i//show_every] = _out_ale.cpu().numpy()\n \n if plot:\n fig, ax0 = plt.subplots()\n ax0.plot(range(len(mse_noisy[:i])), mse_noisy[:i])\n ax0.plot(range(len(mse_gt[:i])), mse_gt[:i])\n ax0.set_title('MSE MFVI')\n ax0.set_xlabel('iteration')\n ax0.set_ylabel('mse')\n ax0.set_ylim(0, 0.03)\n ax0.grid(True)\n\n ax1 = ax0.twinx()\n ax1.plot(range(len(psnrs[:i])), psnrs[:i,2], 'g')\n ax1.set_ylabel('psnr gt sm')\n\n fig.tight_layout()\n fig.savefig(f'{save_path}/{timestamp}/loss_mfvi.png')\n plt.close('all')\n\n\n MSE_NOISY['mfvi'] = mse_noisy\n MSE_GT['mfvi'] = mse_gt\n UNCERTS_EPI['mfvi'] = uncerts_epi\n UNCERTS_ALE['mfvi'] = uncerts_ale\n PSNRS['mfvi'] = psnrs\n 
SSIMS['mfvi'] = ssims\n\n ## END\n\n file = open(f'{save_path}/{timestamp}/locals.txt', 'a')\n \n if plot: \n fig, ax = plt.subplots(1, 1)\n for key, loss in MSE_NOISY.items():\n ax.plot(range(len(loss)), loss, label=key)\n ax.set_title('MSE noisy')\n ax.set_xlabel('iteration')\n ax.set_ylabel('mse loss')\n ax.set_ylim(0, 0.03)\n ax.grid(True)\n ax.legend()\n plt.tight_layout()\n plt.savefig(f'{save_path}/{timestamp}/mse_noisy.png')\n \n fig, ax = plt.subplots(1, 1)\n for key, loss in MSE_GT.items():\n ax.plot(range(len(loss)), loss, label=key)\n ax.set_title('MSE GT')\n ax.set_xlabel('iteration')\n ax.set_ylabel('mse loss')\n ax.set_ylim(0, 0.01)\n ax.grid(True)\n ax.legend()\n plt.tight_layout()\n plt.savefig(f'{save_path}/{timestamp}/mse_gt.png')\n\n fig, axs = plt.subplots(1, 3, constrained_layout=True)\n labels = [\"psnr_noisy\", \"psnr_gt\", \"psnr_gt_sm\"]\n for key, psnr in PSNRS.items():\n psnr = np.array(psnr)\n print(f\"{key} PSNR_max: {np.max(psnr)}\", file=file)\n for i in range(psnr.shape[1]):\n axs[i].plot(range(psnr.shape[0]), psnr[:,i], label=key)\n axs[i].set_title(labels[i])\n axs[i].set_xlabel('iteration')\n axs[i].set_ylabel('psnr')\n axs[i].legend()\n plt.savefig(f'{save_path}/{timestamp}/psnrs.png')\n\n fig, axs = plt.subplots(1, 3, constrained_layout=True)\n labels = [\"ssim_noisy\", \"ssim_gt\", \"ssim_gt_sm\"]\n for key, ssim in SSIMS.items():\n ssim = np.array(ssim)\n print(f\"{key} SSIM_max: {np.max(ssim)}\", file=file)\n for i in range(ssim.shape[1]):\n axs[i].plot(range(ssim.shape[0]), ssim[:,i], label=key)\n axs[i].set_title(labels[i])\n axs[i].set_xlabel('iteration')\n axs[i].set_ylabel('ssim')\n axs[i].legend()\n plt.savefig(f'{save_path}/{timestamp}/ssims.png')\n\n file.close()\n\n # save stuff for plotting\n if save:\n np.savez(f\"{save_path}/{timestamp}/save.npz\",\n noisy_img=img_noisy_np, mse_noisy=MSE_NOISY, mse_gt=MSE_GT,\n uncerts=UNCERTS_EPI, uncerts_ale=UNCERTS_ALE, psnrs=PSNRS)\n\n plt.close('all')\n\n return PSNRS['mfvi'][-1,2]\n\n\n# We will use the simplest form of GP model, exact inference\nclass ExactGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super().__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.ConstantMean(\n prior=gpytorch.priors.NormalPrior(15., 4.)\n )\n self.covar_module = gpytorch.kernels.ScaleKernel(\n gpytorch.kernels.RBFKernel()\n )\n\n self.covar_module.base_kernel.lengthscale = 3e-1\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\n\ndef train_gp(X_train, Y_train, iter_max=1000):\n # initialize likelihood and model\n likelihood = gpytorch.likelihoods.GaussianLikelihood(\n noise_prior=gpytorch.priors.GammaPrior(concentration=0.01, rate=100.0)\n ).double()\n # likelihood = gpytorch.likelihoods.FixedNoiseGaussianLikelihood(\n # noise=torch.ones(X_train.shape[0])*1e-4\n # ).double().to(device)\n gp = ExactGPModel(\n X_train,\n Y_train,\n likelihood).double().to(device)\n gp.train()\n likelihood.train()\n\n # Use the adam optimizer\n optimizer_gp = torch.optim.Adam(gp.parameters(), lr=0.05)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp)\n\n for i in range(iter_max):\n # Zero gradients from previous iteration\n optimizer_gp.zero_grad()\n # Output from model\n output = gp(X_train)\n # Calc loss and backprop gradients\n loss = -mll(output, Y_train)\n loss.backward()\n if i % 100 == 0:\n print(f'Iter {i + 1:4d}/{iter_max} - Loss: 
{loss.item():.4f} \\\nlengthscale: {gp.covar_module.base_kernel.lengthscale.item():.3f} \\\nnoise: {gp.likelihood.noise[0].item():.4f}')\n optimizer_gp.step()\n\n gp.eval()\n likelihood.eval()\n return gp, likelihood\n\n\ndef expected_improvement(gp: gpytorch.models.ExactGP,\n X: torch.Tensor,\n X_train: torch.Tensor\n ) -> torch.Tensor:\n '''\n Computes the EI at points for the parameter space based on\n cost samples using a Gaussian process surrogate model.\n Args:\n model: surrogate GP model\n params_space: Parameter space at which EI shall be computed (m x d).\n params_samples: already evaluated parameters (n x d)\n Returns:\n Expected improvements for paramter space.\n '''\n pred = gp(X)\n pred_sample = gp(X_train)\n\n mu, sigma = pred.mean, pred.variance.clamp_min(1e-9).sqrt()\n mu_sample = pred_sample.mean\n\n sigma = sigma.reshape(-1, 1)\n\n imp = mu - mu_sample.max()\n u = imp.reshape(-1, 1) / sigma\n normal = torch.distributions.Normal(torch.zeros_like(u), torch.ones_like(u))\n ucdf = normal.cdf(u)\n updf = torch.exp(normal.log_prob(u))\n ei = sigma * (updf + u * ucdf)\n\n return ei.clamp_min(0)\n\n\ndef upper_confidence_bound(gp, X, kappa=2):\n pred = gp(X)\n return pred.mean + kappa * pred.variance.sqrt()\n\n\ndef acquisition_fun(gp, X, X_train, acq_fn, *args):\n assert acq_fn in ['ei', 'ucb']\n gp.eval()\n if acq_fn == 'ei':\n return expected_improvement(gp, X, X_train)\n elif acq_fn == 'ucb':\n return upper_confidence_bound(gp, X, *args)\n\n\ndef find_candidates(gp, X_, samples, acq_fn='ei'):\n with torch.no_grad():\n acq = acquisition_fun(gp, X_, samples, acq_fn)\n\n acq = acq.cpu().numpy().reshape(100, 100)\n peaks = peak_local_max(acq, min_distance=5, threshold_rel=0.1, num_peaks=4)\n global_max = np.array(np.unravel_index(np.argmax(acq, axis=None), acq.shape)).reshape(1, -1)\n peaks = np.append(peaks, global_max, axis=0)\n peaks = np.unique(peaks, axis=0)\n peaks = np.ravel_multi_index(peaks.transpose(), acq.shape)\n\n X_init = X_[peaks]\n\n constraint = constraints.interval(0, 1)\n candidates = []\n expected_improvement = []\n\n for i in range(len(X_init[:4])):\n unconstrained_X_init = transform_to(constraint).inv(X_init[i].unsqueeze(0))\n unconstrained_X = unconstrained_X_init.clone().detach().requires_grad_(True)\n minimizer = torch.optim.LBFGS([unconstrained_X], line_search_fn='strong_wolfe')\n\n def closure():\n minimizer.zero_grad()\n x = transform_to(constraint)(unconstrained_X)\n y = -acquisition_fun(gp, x, samples, acq_fn)\n autograd.backward(unconstrained_X, autograd.grad(y, unconstrained_X))\n return y\n\n minimizer.step(closure)\n X = transform_to(constraint)(unconstrained_X)\n\n expected_improvement.append(acquisition_fun(gp, X, samples, acq_fn).item())\n candidates.append(X.detach().cpu())\n\n return candidates, expected_improvement, acq\n\n\ndef normalize_X(X_unnorm, beta_logbounds, tau_logbounds):\n X_norm = X_unnorm.clone().log10()\n X_norm[:, 0] -= beta_logbounds[0]\n X_norm[:, 0] /= (beta_logbounds[1]-beta_logbounds[0])\n\n X_norm[:, 1] -= tau_logbounds[0]\n X_norm[:, 1] /= (tau_logbounds[1]-tau_logbounds[0])\n\n return X_norm\n\n\ndef unnormalize_X(X_norm, beta_logbounds, tau_logbounds):\n X_unnorm = X_norm.clone()\n X_unnorm[:, 0] *= (beta_logbounds[1]-beta_logbounds[0])\n X_unnorm[:, 0] += beta_logbounds[0]\n\n X_unnorm[:, 1] *= (tau_logbounds[1]-tau_logbounds[0])\n X_unnorm[:, 1] += tau_logbounds[0]\n\n return torch.pow(10, X_unnorm)\n\n\ndef f(idx, queue, candidate, device):\n res = run(beta=candidate[0], tau=candidate[1], index=idx, 
device=device,\n img=1, seed=1, num_iter=50000, lr=2e-3, input_depth=16, save=True, save_path='/opt/laves/bo_logs')\n queue.put((candidate, res))\n\n\nif __name__ == '__main__':\n mp.set_start_method('spawn')\n\n bo_out_path = '/opt/laves/bo_results'\n Path(bo_out_path).mkdir(parents=True, exist_ok=True)\n\n device = torch.device(\"cuda:0\")\n device_list = [\n torch.device(\"cuda:0\"),\n torch.device(\"cuda:1\")\n ]\n\n X = []\n Y = []\n beta_logbounds = [-8, -4]\n tau_logbounds = [-4, 0]\n X_lr = torch.logspace(*beta_logbounds, 100, dtype=torch.double).to(device)\n X_wd = torch.logspace(*tau_logbounds, 100, dtype=torch.double).to(device)\n XX_lr, XX_wd = torch.meshgrid(X_lr, X_wd)\n X_ = torch.stack([XX_lr.reshape(-1), XX_wd.reshape(-1)]).transpose(1, 0)\n\n candidates = np.array([\n [4e-8, 0.01], # beta, tau\n [4e-8, 0.1],\n [4e-6, 0.01],\n [4e-6, 0.1]\n ])\n\n for runs_num in range(100):\n\n plt.close('all')\n\n queue = mp.Queue()\n processes = []\n for i, (candidate, dev) in enumerate(zip(candidates, itertools.cycle(device_list))):\n p = mp.Process(target=f, args=(i, queue, candidate, dev))\n p.start()\n processes.append(p)\n\n for p in processes:\n p.join()\n\n y_run = []\n candidates_run = []\n while not queue.empty():\n candidate, res = queue.get()\n candidates_run.append(candidate)\n y_run.append(res)\n\n print()\n print(\"beta tau psnr\")\n for c, y in zip(candidates_run, y_run):\n print(f\"{c[0]:.6f} {c[1]:.6f} {y:.6f}\")\n\n X += candidates_run\n Y += y_run\n\n X_train = torch.stack([\n torch.DoubleTensor(np.array(X)[:, 0]),\n torch.DoubleTensor(np.array(X)[:, 1])\n ]).transpose(1, 0).to(device)\n X_train = normalize_X(X_train, beta_logbounds, tau_logbounds)\n\n Y_train = torch.DoubleTensor(np.array(Y)).to(device)\n\n gp, likelihood = train_gp(X_train, Y_train)\n\n with torch.no_grad():\n X_test = torch.stack([\n X_[:, 0],\n X_[:, 1]\n ]).transpose(1, 0)\n X_test = normalize_X(X_test, beta_logbounds, tau_logbounds)\n candidates, exp_imp, acq = find_candidates(gp, X_test, X_train)\n\n candidates = torch.cat(candidates).cpu()\n candidates = torch.unique(candidates, dim=0)\n candidates = unnormalize_X(candidates, beta_logbounds, tau_logbounds).numpy()\n\n pred = gp(X_test)\n # acq = upper_confidence_bound(gp, X_test)\n\n fig1, ax1 = plt.subplots()\n ln11 = ax1.contourf(XX_lr.cpu().numpy(), XX_wd.cpu().numpy(),\n pred.mean.cpu().reshape(100, 100).numpy())\n ln12 = ax1.plot(np.array(X)[:, 0], np.array(X)[:, 1], 'g.', label='observed')\n ax1.set_title(f\"{runs_num} mean acc\")\n fig1.colorbar(ln11, ax=ax1)\n ax1.set_xlabel('beta')\n ax1.set_ylabel('tau')\n #ax1.set_xlim(np.power(10, beta_logbounds))\n #ax1.set_ylim(np.power(10, tau_logbounds))\n ax1.loglog()\n fig1.tight_layout()\n fig1.savefig(f'{bo_out_path}/{runs_num}_fig1.pdf', bbox_inches='tight')\n fig1.show()\n\n fig2, ax2 = plt.subplots()\n ln21 = ax2.contourf(XX_lr.cpu().numpy(), XX_wd.cpu().numpy(),\n (pred.confidence_region()[1].detach().cpu().reshape(100, 100) \\\n - pred.confidence_region()[0].detach().cpu().reshape(100, 100)).numpy(),\n )\n ln22 = ax2.plot(np.array(X)[:, 0], np.array(X)[:, 1], 'g.', label='observed')\n ax2.set_title(f\"{runs_num} uncertainty\")\n fig2.colorbar(ln21, ax=ax2)\n ax2.set_xlabel('beta')\n ax2.set_ylabel('tau')\n #ax2.set_xlim(np.power(10, beta_logbounds))\n #ax2.set_ylim(np.power(10, tau_logbounds))\n ax2.loglog()\n fig2.tight_layout()\n fig2.savefig(f'{bo_out_path}/{runs_num}_fig2.pdf', bbox_inches='tight')\n fig2.show()\n\n fig3, ax3 = plt.subplots()\n ln31 = 
ax3.contourf(XX_lr.cpu().numpy(), XX_wd.cpu().numpy(),\n acq.reshape(100, 100))\n ln32 = ax3.plot(candidates[:, 0], candidates[:, 1], 'g.', label='candidates')\n ax3.set_title(f\"{runs_num} acq_fun\")\n ax3.set_xlabel('beta')\n ax3.set_ylabel('tau')\n #ax3.set_xlim(np.power(10, beta_logbounds))\n #ax3.set_ylim(np.power(10, tau_logbounds))\n ax3.loglog()\n fig3.colorbar(ln31, ax=ax3)\n fig3.tight_layout()\n fig3.savefig(f'{bo_out_path}/{runs_num}_fig3.pdf', bbox_inches='tight')\n fig3.show()\n\n fig4, ax4 = plt.subplots(subplot_kw={\"projection\": \"3d\"})\n ln41 = ax4.plot_surface(XX_lr.log10().cpu().numpy(),\n XX_wd.log10().cpu().numpy(),\n acq.reshape(100, 100),\n cmap=cm.jet,\n linewidth=0, antialiased=False)\n ax4.plot(np.log10(candidates[:, 0]), np.log10(candidates[:, 1]), exp_imp, 'gx')\n ax4.set_title(f\"{runs_num} acq_fun\")\n fig4.tight_layout()\n fig4.savefig(f'{bo_out_path}/{runs_num}_fig4.pdf', bbox_inches='tight')\n\n fig4.show()\n\n np.savez(\n f\"{bo_out_path}/{runs_num}_fig_data.npz\",\n XX_lr=XX_lr.cpu().numpy(), XX_wd=XX_wd.cpu().numpy(),\n pred=pred.mean.cpu().reshape(100, 100).numpy(),\n observed_X=np.array(X),\n observed_Y=np.array(Y),\n expected_improvement=np.array(exp_imp),\n confidence=pred.confidence_region()[1].detach().cpu().reshape(100, 100) \\\n - pred.confidence_region()[0].detach().cpu().reshape(100, 100),\n acq=acq.reshape(100, 100),\n candidates=candidates\n )\n","repo_name":"mlaves/optimal-posterior-temperature","sub_path":"bayesian_optimization_bayesian_denoising.py","file_name":"bayesian_optimization_bayesian_denoising.py","file_ext":"py","file_size_in_byte":23580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42508862963","text":"import requests\nimport random\n\nclass Traffic:\n keywords=[\n \"trafik\",\"trafik yoğunluğu\",\"trafik seviyesi\",\n \"trafik nasıl\",\"trafik ne durumda\",\"yollar ne durumda\",\n \"trafik durumu nedir\",\"istanbulda trafik nasıl\"\n ]\n def __init__(self):\n pass\n def get_traffic_density(self):\n ENDPOINT=\"https://api.ibb.gov.tr/tkmservices/api/TrafficData/v1/TrafficIndexHistory/1/5M\"\n\n try:\n res = requests.get(url=ENDPOINT)\n res.raise_for_status()\n\n except requests.exceptions.RequestException:\n return None\n else:\n t_density = res.json()\n if len(t_density)==0:\n return None\n else:\n t_density=t_density[0].get(\"TrafficIndex\")\n\n\n return self.__create_str(t_density)\n\n def __create_str(self,val):\n if val <=30:\n val_eval=\"düşük\"\n elif val>30 and val<=44:\n val_eval=\"orta\"\n else:\n val_eval=\"yüksek\"\n\n sentences=[\n f\"Şu anda trafik yoğunluğu yüzde {val} ile {val_eval} seviyede.\",\n f\"İstanbul'da trafik yüzde {val} ile {val_eval} seviyede.\",\n f\"Trafik yüzde {val} ile {val_eval} seviyede.\"\n ]\n\n return random.choice(sentences)\n\n","repo_name":"kkelesyusuf/sesliAsistan","sub_path":"Skills/Traffic.py","file_name":"Traffic.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2486112223","text":"#!/usr/bin/env python3\n#This Program pomPDDWF(PreOpenMarketPreviousDayDataWithFormat)\nimport pandas as pd\nfrom datetime import date,timedelta\nfrom nsepy import get_history\n#reads the nse file and formats to specific data type along with rename\npom = pd.read_csv(\"pom.csv\",na_values=[\"-\"],header=0,names=['symbol','prev_close','open_price','chng', '%Chng',\n 'final_price', 'final_quantity', 'value', 'cap','52w_h', '52w_l'], 
thousands = \",\")\n\n#calculates total traded percentage in preopenmarket i.e (value/cap)*100\nfor ind, row in pom.iterrows():\n pom.loc[ind,\"%traded\"] = (row[\"value\"]/row[\"cap\"])*100\n\n#removes excess data\npom = pom.drop(['prev_close','final_price','chng'],axis=1)\n\n#create empty dataframe\nhist = pd.DataFrame()\nmonth = date.today().month\ndt = int(input(\"enter the preopen market date \"))\nweekday = date(2020,month,dt).weekday()\nif weekday>0 and dt>1:\n dt = dt-1\nelif weekday==0 and dt>3:\n dt = dt-3\nelif weekday == 0 and dt==3:\n dt = (date(2020,month,1)-timedelta(days=1)).day\nelif weekday == 0 and dt==2:\n dt = (date(2020,month,1)-timedelta(days=2)).day\nelif weekday == 0 and dt==1:\n dt = (date(2020,month,1)-timedelta(days=3)).day\n month = month-1\nelif weekday>0 and dt==1:\n dt = (date(2020,month,1)-timedelta(days=1)).day\n month = month-1\nprint(\"fetching data for date:\",dt,\"/\", month)\n#get the history data of all nifty50 stocks for particular date(yyyy,mm,dd) enter previous date in both start and end\nfor ind, row in pom.iterrows():\n name = row['symbol']\n hist = hist.append(get_history(symbol = name,start = date(2020,month,dt),end = date(2020,month,dt)),ignore_index = True)\n print(row)\n\n#delete the repeated values in history data\nhist = hist.rename(columns={'High': 'prevHigh', 'Low': 'prevLow','Close' : 'prevClose','Open':'prevOpen'})\nhist = hist.drop(['Symbol','Series','Prev Close','Deliverable Volume','Last'],axis=1)\n\n#concats the preopen\npom = pd.concat([pom,hist],axis=1)\n\n#Logic to compute before market open\n\n#saves file to csv\npom.to_csv(\"pom.csv\",index=False)\n\n#checks the commonly traded stocks in 4 groups prints the data\npom = pom.sort_values(by=\"%Chng\",ascending = False).reset_index(drop=True)\nfinal_list = list(pom.loc[0:9,\"symbol\"])\npom = pom.sort_values(by=\"final_quantity\",ascending = False).reset_index(drop=True)\nfinal_list = final_list + list(pom.loc[0:9,\"symbol\"])\npom = pom.sort_values(by=\"%traded\",ascending = False).reset_index(drop=True)\nfinal_list = final_list + list(pom.loc[0:9,\"symbol\"])\npom = pom.sort_values(by=\"value\",ascending = False).reset_index(drop=True)\nfinal_list = final_list + list(pom.loc[0:9,\"symbol\"])\nfrom collections import Counter\nsymbols = Counter(final_list) \ntop_10 = symbols.most_common(10)\n\nprint(\"\\n\")\nprint(top_10)\nprint(\"\\n\")\ntemp = input(\"press any key to exit()\")\nexit()","repo_name":"GaganDeepak/PreOpenMarketAnalysis","sub_path":"1.MorningExec.py","file_name":"1.MorningExec.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18005139002","text":"from util import is_pandigital\n\nproducts = set()\n\nfor i in range(100):\n for j in range(10000):\n k = i*j\n if is_pandigital(''.join(map(str, (i,j,k)))):\n print(i,j,k)\n products.add(k)\n\nprint(sum(products))\n","repo_name":"indraastra/puzzles","sub_path":"euler/prob032.py","file_name":"prob032.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71991831771","text":"import torch\nimport torch.nn as nn\n\nimport time\nfrom sklearn.metrics import r2_score\nfrom utils import *\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport json\nimport global_var\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--config\", type=str, help='configuration 
file')\nparser.add_argument(\"--output\", type=str, help='output path')\nargs = parser.parse_args()\n\nconfig_filename = args.config\noutput_path = args.output\n\n\nwith open(config_filename, 'r') as f:\n config = json.loads(f.read())\n\nseed = config['seed']\nsetup_seed(seed)\n\nglobal_var._init()\nglobal_var.set_value('config',config)\n\nfrom model import STHGNN\nfrom model_NO_N import STHGNN_N\nfrom model_NO_S import STHGNN_S\nfrom model_NO_T import STHGNN_T\nfrom model_NO_M import STHGNN_noM\nfrom model_NO_F import STHGNN_noF\nfrom model_NO_EMF import STHGNN_c_g\n\n\nfrom data_load import *\n\n\ndef train(model, train_data_loader, optimizer):\n model.train()\n # loss_fn = nn.MSELoss()\n loss_fn = nn.HuberLoss(reduction='mean', delta=config['DELTA'])\n st = time.time()\n output_list = []\n label_list = []\n for i, input_data in enumerate(train_data_loader):\n optimizer.zero_grad()\n input_data = input_data.to(device)\n\n label = input_data.y\n out = model(input_data)\n\n loss = loss_fn(out, label)\n\n pred = np.floor(out.cpu().reshape(263).detach().numpy())\n y_true = label.cpu().reshape(263).detach().numpy()\n output_list.append(pred)\n label_list.append(y_true)\n\n loss.backward()\n optimizer.step()\n\n torch.cuda.empty_cache()\n if i%50 == 0:\n print(f'{time.time()-st:.2f}: loss: {loss}. {i}/{len(train_data_loader)}')\n\n output = np.array(output_list)\n label = np.array(label_list)\n\n train_mse_loss = mse_np(output, label)\n train_rmse_loss = rmse_np(output, label)\n train_mae_loss = mae_np(output, label)\n train_mape_loss = mape_np(output, label)\n train_r2_loss = r2_score(output.reshape((-1, 1)), label.reshape((-1, 1)))\n\n return train_mse_loss, train_rmse_loss, train_mae_loss, train_mape_loss, train_r2_loss\n\n\ndef test(model, test_data_loader):\n model.eval()\n output_list = []\n label_list = []\n with torch.no_grad():\n for i, input_data in enumerate(test_data_loader):\n input_data = input_data.to(device)\n label = input_data.y\n out = model(input_data)\n\n pred = np.floor(out.cpu().reshape(263).numpy())\n y_true = label.cpu().reshape(263).numpy()\n\n output_list.append(pred)\n label_list.append(y_true)\n\n torch.cuda.empty_cache()\n if i % 50 == 0:\n print(f'testing... 
{i}/{len(test_data_loader)}')\n\n output = np.array(output_list)\n label = np.array(label_list)\n\n test_mse_loss = mse_np(output,label)\n test_rmse_loss = rmse_np(output,label)\n test_mae_loss = mae_np(output,label)\n test_mape_loss = mape_np(output,label)\n test_r2_loss = r2_score(np.array(output_list).reshape((-1,1)), np.array(label_list).reshape((-1,1)))\n\n return test_mse_loss, test_rmse_loss, test_mae_loss, test_mape_loss, test_r2_loss\n\n\nbest_rmse_list = []\nbest_mae_list = []\nbest_mape_list = []\nfor r in range(config['REPEAT_TIMES']):\n plt.cla()\n path = output_path + 'times_{}'.format(r)\n if not os.path.exists(path):\n os.mkdir(path)\n\n # setup_seed(20+r)\n train_data_loader, test_data_loader = load_data(config['CRIME_LABEL_DATA_PATH'], config['CRIME_DATA_PATH'],\n config['A311_DATA_PATH'],config['POI_DATA_PATH'],\n config['TAXI_DATA_PATH'], config['BIKE_DATA_PATH'],\n config['GEO_DATA_PATH'],config['SIMI_DATA_PATH'], config)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(f\"Using {device} device\")\n\n if 'WITHOUT_N' in config and config['WITHOUT_N'] == 1:\n model = STHGNN_N(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n # elif 'WITHOUT_E' in config and config['WITHOUT_E'] == 1:\n # model = STHGNN_E(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n # out_channels=1, num_layers=config['NUM_LAYERS'])\n elif 'WITHOUT_S' in config and config['WITHOUT_S'] == 1:\n model = STHGNN_S(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n elif 'WITHOUT_T' in config and config['WITHOUT_T'] == 1:\n model = STHGNN_T(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n elif 'WITHOUT_M' in config and config['WITHOUT_M'] == 1:\n model = STHGNN_noM(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n elif 'WITHOUT_F' in config and config['WITHOUT_F'] == 1:\n model = STHGNN_noF(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n elif 'CRIME_DATA' in config:\n if config['CRIME_DATA']==1 and config['POI_DATA']==0 and config['311_DATA']==0 and config['GEO_DATA']==1 \\\n and config['TAXI_DATA']==0 and config['BIKE_DATA']==0 and config['SIMI_DATA']==0:\n model = STHGNN_c_g(time_series_length=config['TIME_SERIES_LENGTH'],\n hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n elif config['CRIME_DATA']==1 and config['POI_DATA']==1 and config['311_DATA']==1 and config['GEO_DATA']==1 \\\n and config['TAXI_DATA']==1 and config['BIKE_DATA']==1 and config['SIMI_DATA']==1:\n model = STHGNN(time_series_length=config['TIME_SERIES_LENGTH'],\n hidden_channels=config['HIDDEN_CHANNELS'],\n out_channels=1, num_layers=config['NUM_LAYERS'])\n else:\n print('invalid config!')\n break\n # model = STHGNN(time_series_length=config['TIME_SERIES_LENGTH'], hidden_channels=config['HIDDEN_CHANNELS'],\n # out_channels=1, num_layers=config['NUM_LAYERS'])\n model = model.to(device)\n # model = torch.load('/home/zh/temp/new_20nodefeatures_threshold5/model_10_epoch.pth')\n\n optimizer = torch.optim.Adam(model.parameters(), 
lr=config['LEARNING_RATE'], weight_decay=config['WEIGHT_DECAY'])\n\n train_mse_loss_list = []\n train_rmse_loss_list = []\n train_mae_loss_list = []\n train_mape_loss_list = []\n train_r2_loss_list = []\n\n test_mse_loss_list = []\n test_rmse_loss_list = []\n test_mae_loss_list = []\n test_mape_loss_list = []\n test_r2_loss_list = []\n\n best = []\n min_rmse = 1000000000.0\n for epoch in range(0, config['EPOCH']):\n start_time = time.time()\n train_mse_loss, train_rmse_loss, train_mae_loss, train_mape_loss, train_r2_loss = train(model, train_data_loader, optimizer)\n test_mse_loss, test_rmse_loss, test_mae_loss, test_mape_loss, test_r2_loss = test(model, test_data_loader)\n\n if test_rmse_loss < min_rmse:\n min_rmse = test_rmse_loss\n best = [test_mse_loss, test_rmse_loss, test_mae_loss, test_mape_loss, test_r2_loss, epoch]\n\n train_mse_loss_list.append(train_mse_loss)\n train_rmse_loss_list.append(train_rmse_loss)\n train_mae_loss_list.append(train_mae_loss)\n train_mape_loss_list.append(train_mape_loss)\n train_r2_loss_list.append(train_r2_loss)\n\n test_mse_loss_list.append(test_mse_loss)\n test_rmse_loss_list.append(test_rmse_loss)\n test_mae_loss_list.append(test_mae_loss)\n test_mape_loss_list.append(test_mape_loss)\n test_r2_loss_list.append(test_r2_loss)\n\n print(f'{time.time()-start_time:.2f}: Epoch: {epoch:03d}, Train loss: '\n f' MAE {train_mae_loss:.4f}; MAPE {train_mape_loss:.4f}; R2 {train_r2_loss:.4f}')\n print(f'{time.time()-start_time:.2f}: Epoch: {epoch:03d}, Test loss: '\n f' MAE {test_mae_loss:.4f}; MAPE {test_mape_loss:.4f}; R2 {test_r2_loss:.4f}')\n print(f'{time.time() - start_time:.2f}:Best Epoch: {best[5]:03d}, '\n f' MAE {best[2]:.4f}; MAPE {best[3]:.4f}; R2 {best[4]:.4f}')\n if epoch%10==0:\n torch.save(model, path + '/' +'model_{}_epoch.pth'.format(epoch))\n\n plt.plot(train_rmse_loss_list, label=\"train rmse\")\n plt.plot(test_rmse_loss_list, label=\"test rmse\")\n plt.legend()\n plt.savefig(path + '/' + 'figure.jpg')\n plt.show()\n\n best_mae_list.append(best[2])\n best_mape_list.append(best[3])\n print('best tesing results: MAE: {:.2f}\\ntesting: RMSE: {:.2f}\\ntesting: MAPE: {:.2f}\\n'.format(best[2], best[1], best[3]))\n\n\nsave_variable(best_mae_list,output_path + 'best_mae_list')\nsave_variable(best_mape_list,output_path + 'best_mape_list')\nprint('best MAE list:')\nprint(best_mae_list)\nprint('best MAPE list:')\nprint(best_mape_list)\nprint('final results mean: MAE: {:.2f}\\ntesting: MAPE: {:.2f}\\n'.format(np.mean(best_mae_list),np.mean(best_mape_list)))\nprint('final results range: MAE: {:.2f}\\ntesting: MAPE: {:.2f}\\n'\n .format((np.max(best_mae_list)-np.min(best_mae_list)),\n (np.max(best_mape_list)-np.min(best_mape_list))))","repo_name":"ZJUDataIntelligence/HDM-GNN","sub_path":"model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"32"} +{"seq_id":"12817955602","text":"from django.urls import path\r\n\r\nfrom .views import HomeView, ArticleCreateView, ArticleListView\r\n\r\napp_name = 'notes'\r\n\r\nurlpatterns = [\r\n path('', HomeView.as_view(), name='home'),\r\n path('article/add/', ArticleCreateView.as_view(), name='add_article'),\r\n path('article/list/', ArticleListView.as_view(), 
name='articles_list'),\r\n]\r\n","repo_name":"Capwell/test_task_simbirsoft","sub_path":"app/apps/notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8717604828","text":"import asyncio\nimport logging\nimport state\nimport subprocess\nimport ws_server\n\npid = ''\n\nasync def start_ps_monitor_async():\n global pid\n logging.info(\"Starting ps monitor as an infinite loop\")\n sm = state.Machine()\n while True:\n logging.info(\"Looping ps monitor\")\n if sm.has_state(state.StreamingState):\n logging.info(\"Testing ps\")\n ps = subprocess.getoutput('./pffmpeggrep.sh').split()\n logging.debug(ps)\n if not ps:\n sm.on_event({'name': 'streaming-died'})\n else:\n pid = ps[1]\n await asyncio.sleep(5)\n \n","repo_name":"vicmortelmans/raspberry-pi-broadcaster","sub_path":"ps_monitor.py","file_name":"ps_monitor.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23215421699","text":"import pandas as pd\nimport numpy as np\nimport re\nimport sys\n# from keras.models import Sequential\n# from keras.layers import Dense\n# from keras.layers import Flatten, GRU, Dropout, TimeDistributed, Activation\n# from keras.layers.embeddings import Embedding\n# from keras.preprocessing import sequence\n# from keras.preprocessing import text\n# from keras.utils import to_categorical\n# from keras.callbacks import ModelCheckpoint\n# from keras.callbacks import EarlyStopping\n# from collections import Counter\nfrom gensim.models import Word2Vec\n\n\nfile_train_l = open(\"training_label.txt\", \"r\", encoding = 'utf-8')\nfile_train_n = open(\"training_nolabel.txt\", \"r\", encoding = 'utf-8')\n\ntemp = []\nlabels = []\ntrain_l = []\nfor line in file_train_l:\n line = line.strip(\"\\n\")\n temp = line.split(\" +++$+++ \")\n labels.append(temp[0])\n temp = temp[1].split(\" \")\n train_l.append(temp)\n\n\n \nfor line in file_train_n:\n line = line.strip(\"\\n\")\n temp = line.split(\" \")\n train_l.append(temp)\n\nlabels = np.array(labels)\ntrain_l = np.array(train_l)\n# test = np.array(test)\n\nmodel = Word2Vec(train_l, size=72, window=4, min_count=20, workers=4, negative = 5, batch_words=10000, iter = 10)\nmodel.save(\"dict_gen\")","repo_name":"icewolf00/ML2017FALL","sub_path":"hw4/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19162094643","text":"# Web scrap data for over 200 movies date created:05/03/2020\nimport bs4\nfrom urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup \n\nmy_url = 'https://www.imdb.com/search/title/?release_date=2019&sort=num_votes,desc&page=1'\n\n#open up connection, grabbing the page\n\nuClient = uReq(my_url)\n\npage_html = uClient.read()\n\n#close connection\nuClient.close()\n\n#html parser\npage_soup = soup(page_html, \"html.parser\")\n\n#grabs items\ncontainers = page_soup.findAll('div',class_ = 'lister-item mode-advanced')\n\nfilename = \"imdbrating.csv\"\n\nf = open(filename,\"w\")\n\nheaders = \"Movie, Year_Released, Genre, Rating, Votes \\n\"\n\nf.write(headers)\n\n\nfor container in containers:\n #name of the movie\n name = container.h3.a.text.strip()\n \n #Year released \n year = container.h3.find('span',class_ = 'lister-item-year text-muted unbold').text\n \n #Genre\n genre = 
container.p.find('span', class_ = 'genre').text.strip()\n\n #rating\n imdb = float(container.strong.text)\n\n #votes for each movie\n vote = container.find('span', attrs = {'name':'nv'})['data-value']\n \n f.write(name + \",\" + year + \",\" + genre.replace(\",\",\"|\") + \",\" + str(imdb) + \",\" + str(vote) + \"\\n\")\n\nf.close() \nprint(\"File loaded - Please check CSV file\") ","repo_name":"garrethblouws/BISandBox","sub_path":"Web Scraper/imbmovielist.py","file_name":"imbmovielist.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27803676510","text":"\"\"\"\n\"\"\"\nimport os\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\n\n# Gather data\n\nimport load_data\nfrom my_palette import (atlas_palette, color_palette,\n datasets_palette)\n\n# Covariance estimator used in connectomes\ncovariance_estimator = 'LedoitWolf'\n\ndataset_paths = dict()\ndataset_names = ['COBRE', 'ADNI', 'ADNIDOD', 'ACPI']\natlases = ['ICA', 'DictLearn', 'KMeans', 'BASC']\n\nextensions = {'ICA': 'scores_ica.csv',\n 'DictLearn': 'scores_dictlearn.csv',\n 'KMeans': 'scores_kmeans.csv',\n 'BASC': 'scores_basc.csv'\n }\n\nbase_path = os.path.join('../prediction_scores', covariance_estimator)\n\nfor dataset in dataset_names:\n paths = []\n for atlas in atlases:\n if (dataset == 'ADNIDOD') and (atlas == 'ICA'):\n extension = 'scores_ica_120.csv'\n else:\n extension = extensions[atlas]\n atlas_path = os.path.join(atlas, 'region_extraction',\n extension)\n path = os.path.join(base_path, dataset, atlas_path)\n if os.path.exists(path):\n paths.append(path)\n dataset_paths[dataset] = paths\n\ndata_list = []\nfor name in dataset_names:\n this_data_paths = dataset_paths[name]\n this_data = load_data._pandas_data_frame_list_of_paths_concat(this_data_paths)\n # Add dataset name to this_data\n data_list.append(this_data)\n\ndata = pd.concat(data_list)\ndata = data.drop('Unnamed: 0', axis=1)\nthis_data = data[(data['classifier'] == 'svc_l2') &\n (data['measure'] == 'tangent')]\n\nsns.set(color_codes=True)\nsns.set_style(\"whitegrid\", {'axes.edgecolor': '.6', 'grid.color': '.6'})\nsns.set_palette('dark')\n\n\ndef demean(group):\n return group - group.mean()\n\nthis_data.pop('Unnamed: 0.1')\nthis_data.pop('smoothing_fwhm')\nthis_data.pop('reduction_n_components')\nthis_data.pop('min_region_size_in_mm3')\nthis_data.pop('atlas_type')\nthis_data.pop('covariance_estimator')\nthis_data.pop('connectome_regress')\nthis_data.pop('compcor_10')\nthis_data.pop('version')\nthis_data.pop('multi_pca_reduction')\nthis_data.pop('motion_regress')\n\n\nplt.close('all')\natlas_names = {'ica': 'ICA',\n 'dictlearn': 'DictLearn',\n 'kmeans': 'KMeans',\n 'basc': 'BASC'}\n\nwilcoxon_tests = dict()\ncolumns = ['atlas', 'dataset', 'pvalues', '-log10pvalues',\n 'dimensionality_fixed', 'dimensionality_varied']\n\nfor column_name in columns:\n wilcoxon_tests.setdefault(column_name, [])\n\ncomparison_fixed_options = {'ica': 80,\n 'dictlearn': 80,\n 'kmeans': 120,\n 'basc': 122}\n\natlases = this_data['atlas'].unique()\n\nfor atlas in atlases:\n data2 = this_data[this_data['atlas'] == atlas]\n for dataset in data2['dataset'].unique():\n d_dataset = data2[(data2['dataset'] == dataset)]\n fixed_dim = comparison_fixed_options[atlas]\n for dim in d_dataset['dimensionality'].unique():\n if dim != fixed_dim:\n wilcoxon_tests['atlas'].append(atlas)\n 
wilcoxon_tests['dataset'].append(dataset)\n wilcoxon_tests['dimensionality_fixed'].append(fixed_dim)\n wilcoxon_tests['dimensionality_varied'].append(dim)\n data_fixed = d_dataset[(d_dataset['dimensionality'] == fixed_dim)]\n data_vary = d_dataset[(d_dataset['dimensionality'] == dim)]\n _, p = stats.wilcoxon(data_fixed['scores'], data_vary['scores'],\n correction=True)\n wilcoxon_tests['pvalues'].append(p)\n wilcoxon_tests['-log10pvalues'].append(-np.log10(p))\n\ndata_pvalues = pd.DataFrame(wilcoxon_tests)\n\nncols = len(this_data['atlas'].unique())\nfig, axes = plt.subplots(nrows=2, ncols=ncols, figsize=(10, 7), sharey=False,\n squeeze=True, sharex=False)\n# axes = axes.reshape(-1)\npalette = sns.color_palette(n_colors=len(this_data['dataset'].unique()))\n\nfor i, ax in enumerate(axes):\n if i == 0:\n axx = ax.reshape(-1)\n for ii, (ax1, atlas) in enumerate(zip(axx, atlases)):\n each_atlas_pvalues = data_pvalues[(data_pvalues['atlas'] == atlas)]\n sns.boxplot(data=each_atlas_pvalues, y='pvalues',\n x='dimensionality_varied',\n ax=ax1, color='.8', fliersize=0)\n sns.stripplot(data=each_atlas_pvalues, y='pvalues',\n x='dimensionality_varied',\n ax=ax1, hue='dataset', size=4)\n if ii == 0:\n ax1.set_ylabel('P-values (Wilcoxon tests)', size=15)\n else:\n ax1.set_ylabel('')\n\n #if ii == 0:\n # ax1.legend(scatterpoints=1, frameon=True, fontsize=15,\n # markerscale=1, borderaxespad=0,\n # handletextpad=.2, loc='upper left')\n #else:\n ax1.legend().remove()\n\n ax1.set_xlabel('')\n name = each_atlas_pvalues['dimensionality_fixed'].unique()[0]\n plt.text(.5, 1, atlas_names[atlas] + ':' + str(name) + 'vs Rest',\n transform=ax1.transAxes, size=15, ha='center')\n y_ticklabels = ax1.get_yticks()\n for x in (1, 3, 5):\n ax1.axvspan(x - .5, x + .5, color='.9', zorder=-1)\n ax1.set_yticklabels(y_ticklabels)\n else:\n axx2 = ax.reshape(-1)\n for ii, (ax2, atlas) in enumerate(zip(axx2, atlases)):\n each_atlas_neg_pvalues = data_pvalues[(data_pvalues['atlas'] == atlas)]\n sns.boxplot(data=each_atlas_neg_pvalues, y='-log10pvalues',\n x='dimensionality_varied',\n ax=ax2, color='.8', fliersize=0)\n sns.stripplot(data=each_atlas_neg_pvalues, y='-log10pvalues',\n x='dimensionality_varied',\n ax=ax2, hue='dataset', size=4)\n if ii == 0:\n ax2.set_ylabel('$-\\log_{10}(P) $', size=15)\n else:\n ax2.set_ylabel('')\n\n if ii == 0:\n ax2.legend(scatterpoints=1, frameon=True, fontsize=15,\n markerscale=1, borderaxespad=0,\n handletextpad=.2, loc='lower left',\n ncol=2, bbox_to_anchor=(-0.3, -0.23),\n columnspacing=0.5)\n else:\n ax2.legend().remove()\n\n ax2.set_xlabel('')\n ax2.set_xticklabels('')\n y_ticklabels2 = ax2.get_yticks()\n for x2 in (1, 3, 5):\n ax2.axvspan(x2 - .5, x2 + .5, color='.9', zorder=-1)\n ax2.set_yticklabels(y_ticklabels2)\n\nxlabel_name = 'Detailed one-to-one comparison on dimensionality of each atlas'\nplt.text(.6, .007, xlabel_name, transform=fig.transFigure,\n size=15, ha='center')\nplt.tight_layout(rect=[0, .05, 1, 1])\n\nplt.savefig('one_to_one_dim_comparison.pdf')\n","repo_name":"KamalakerDadi/Data-Processing","sub_path":"rs_study/experiments/plotting_scripts_for_paper/boxplot_comparison_one_vs_rest_in_optimal_dimensionaliy.py","file_name":"boxplot_comparison_one_vs_rest_in_optimal_dimensionaliy.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"2995326540","text":"from core.engine.Structure3D.structure_3D import Structure_3D\nfrom core.MDK.LayerStack.layer_stack import Layer, 
LayerStack\nfrom core.engine.Structure3D.structure_3D import Structure_3D\nfrom core.engine.InputParser.input_script import script_translator\nfrom core.engine.InputParser.input_script_up import script_translator as script_translator_up\n\n\ndef generateLayout(layout_script, bondwire_setup, layer_stack_file, constraint_file, i_v_constraint):\n \n layer_stack = LayerStack()\n layer_stack.import_layer_stack_from_csv(layer_stack_file)\n if bondwire_setup!='None':\n \n all_layers,via_connecting_layers,cs_type_map= script_translator(input_script=layout_script, bond_wire_info=bondwire_setup,layer_stack_info=layer_stack,dbunit=1000)\n else:\n \n all_layers,via_connecting_layers,cs_type_map= script_translator_up(input_script=layout_script, bond_wire_info=bondwire_setup, layer_stack_info=layer_stack,dbunit=1000)\n \n\n #all_layers,via_connecting_layers,cs_type_map= script_translator(input_script=layout_script, bond_wire_info=bondwire_setup, layer_stack_info=layer_stack, flexible=True)\n\n layer = all_layers[0]\n\n\n # Generate constraints file\n structure_3D = Structure_3D()\n\n structure_3D.layers=all_layers\n structure_3D.cs_type_map=cs_type_map\n structure_3D.via_connection_raw_info = via_connecting_layers\n if len(via_connecting_layers)>0:\n structure_3D.assign_via_connected_layer_info(info=via_connecting_layers)\n\n structure_3D.update_constraint_table(rel_cons=i_v_constraint)\n structure_3D.read_constraint_table(rel_cons=i_v_constraint, mode=0, constraint_file=constraint_file)\n\n device_dict = dict()\n lead_list = []\n\n for layer in structure_3D.layers:\n for comp in layer.all_components:\n if comp.layout_component_id.startswith(\"D\") and comp.layout_component_id not in device_dict:\n connections = []\n for key in comp.conn_dict.keys():\n connections.append(key)\n device_dict[comp.layout_component_id] = connections\n if comp.layout_component_id.startswith(\"L\") and comp.layout_component_id not in lead_list:\n lead_list.append(comp.layout_component_id)\n\n\n return [device_dict, lead_list]\n\n '''\n input_info = [layer.input_rects, layer.size, layer.origin]\n layer.new_engine.init_layout(input_format=input_info,islands=layer.new_engine.islands,all_cs_types=layer.all_cs_types,all_colors=layer.colors,bondwires=layer.bondwires)\n\n\n layer.plot_layout(fig_data=layer.new_engine.init_data[0], fig_dir=\"/nethome/jgm019/testcases\", name=\"sample_name\") # plots initial layout\n '''\n","repo_name":"e3da/PowerSynth2-gui","sub_path":"core/generateLayout.py","file_name":"generateLayout.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41562693128","text":"from unittest.mock import MagicMock\nimport math\n\nimport pytest\n\nfrom Nodes import HydroponicsBay\nfrom tests.testHelpers import setupForIdealState, createEngineFromConfig\n\n\n@pytest.mark.parametrize(\"resources_received, resources_provided, resources_left_over, effectiveness, oxygen_not_dumped\",\n [({\"water\": 10}, {\"oxygen\": 0}, {\"water\": 0, \"energy\": 0}, 1, 0),\n ({\"energy\": 10}, {\"oxygen\": 0}, {\"energy\": 0, \"water\": 0}, 1, 0),\n ({\"water\": 10, \"energy\": 10}, {\"oxygen\": 1875.0}, {\"water\": 0, \"energy\": 0}, 1, 0),\n ({\"water\": 10, \"energy\": 5}, {\"oxygen\": 1875.0}, {\"water\": 0, \"energy\": 0}, 1, 0),\n ({\"water\": 7.5, \"energy\": 10}, {\"oxygen\": 1406.25, \"plants\": 7.5}, {\"water\": 0, \"energy\": 0, \"plants\": 0}, 1, 0),\n ({\"water\": 7.5, \"energy\": 10, \"animal_waste\": 1}, {\"oxygen\": 2812.5, 
\"plants\": 15}, {\"water\": 0, \"energy\": 0, \"plants\": 0}, 1, 0), # Giving it 1 animal waste should double the output\n ({\"water\": 7.5, \"energy\": 10, \"animal_waste\": 0.5}, {\"oxygen\": 2109.375, \"plants\": 11.25}, {\"water\": 0, \"energy\": 0, \"plants\": 0}, 1, 0), # Giving it 1 animal waste should double the output\n ({\"water\": 7.5, \"energy\": 10}, {\"oxygen\": 703.125, \"plants\": 3.75}, {\"water\": 0, \"energy\": 0}, 0.5, 0),\n ({\"water\": 7.5, \"energy\": 10}, {\"oxygen\": 702.125, \"plants\": 2.75}, {\"water\": 1, \"energy\": 0}, 0.5, 1)]) # Since we couldn't dump 1 oxygen (and it doesn't store it), it keeps some water & energy in reserve.\ndef test_update(resources_received, resources_provided, resources_left_over, effectiveness, oxygen_not_dumped):\n hydroponics = HydroponicsBay.HydroponicsBay(\"omg\")\n\n hydroponics._resources_received_this_sub_tick = resources_received\n hydroponics._provideResourceToOutgoingConnections = MagicMock(return_value = oxygen_not_dumped)\n hydroponics._getAllReservedResources = MagicMock()\n hydroponics._getHealthEffectivenessFactor = MagicMock(return_value=effectiveness)\n hydroponics._temperature = hydroponics._optimal_temperature\n hydroponics.ensureSaneValues()\n hydroponics.update()\n\n resources_provided_this_tick = hydroponics.getResourcesProvidedThisTick()\n\n for key in resources_provided:\n assert math.isclose(resources_provided_this_tick[key], resources_provided[key]), \"%s doesn't match %s: %s\" % (key, resources_provided_this_tick[key], resources_provided[key])\n\n for key in resources_left_over:\n assert math.isclose(hydroponics._resources_left_over[key], resources_left_over[key]), \"%s doesn't match %s: %s\" % (key, hydroponics._resources_left_over[key], resources_left_over[key])\n\n\n@pytest.mark.parametrize(\"config_file\", [\"HydroponicsSetup.json\", \"HydroponicsSetupWithAnimalWaste.json\"])\ndef test_temperatureRemainTheSame(config_file):\n # Not quite a unit test; But create a simple setup.\n # Since the hydroponics does not create energy, it should just stay the same temperature\n engine = createEngineFromConfig(config_file)\n hydroponics = engine.getNodeById(\"hydroponics\")\n temperature_before = hydroponics.temperature\n engine.doTick()\n engine.doTick()\n\n assert math.isclose(temperature_before, hydroponics.temperature)\n\n\n@pytest.mark.parametrize(\"config_file\", [\"HydroponicsSetup.json\", \"HydroponicsSetupWithAnimalWaste.json\"])\ndef test_temperatureRemainTheSameOptimalTemperature(config_file):\n # Not quite a unit test; But create a simple setup.\n # Since the hydroponics does not create energy, it should just stay the same temperature.\n # For this test, we set the temp of all nodes at the perfect hydroponics temp (so that it actually creates resources!)\n engine = setupForIdealState(config_file, \"hydroponics\")\n hydroponics = engine.getNodeById(\"hydroponics\")\n temperature_before = hydroponics.temperature\n\n for _ in range(0, 10):\n engine.doTick()\n assert math.isclose(temperature_before, hydroponics.temperature)\n\n\n@pytest.mark.parametrize(\"performance\", [0.5, 1.2, 1])\n@pytest.mark.parametrize(\"sub_ticks\", [1, 10, 30])\n@pytest.mark.parametrize('ticks', [1, 10, 20])\n@pytest.mark.parametrize(\"config_file, plants_created_per_tick\", [(\"HydroponicsSetup.json\", 20), (\"HydroponicsSetupWithAnimalWaste.json\", 40)])\ndef test_plantsProduced(config_file, plants_created_per_tick, sub_ticks, ticks, performance):\n engine = setupForIdealState(config_file, \"hydroponics\")\n engine._sub_ticks 
= sub_ticks\n hydroponics = engine.getNodeById(\"hydroponics\")\n hydroponics._min_performance = performance\n hydroponics._max_performance = performance\n\n hydroponics._setPerformance(performance)\n hydroponics.target_performance = performance\n for _ in range(ticks):\n engine.doTick()\n\n assert math.isclose(engine.getNodeById(\"plant_storage\").amount_stored, plants_created_per_tick * ticks * performance)","repo_name":"FrivolousEngineering/ScifiBaseControlServer","sub_path":"tests/Nodes/test_HydroponicsBay.py","file_name":"test_HydroponicsBay.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27780252082","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : Jun/16/2020\n# @Author : zhx\n\"\"\"\n\nimport torch\nimport numpy as np\n\n\ndef softmax_torch(x):\n rpt = [1 for _ in range(len(x.size()))]\n rpt[1] = x.size(1)\n x_max = x.max(1, keepdim=True)[0].repeat(*rpt)\n e_x = torch.exp(x - x_max)\n return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)\n\n\ndef softmax_numpy(x):\n x_max = np.max(x, axis=1, keepdims=True).repeat(x.shape[1], axis=1)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x, axis=1, keepdims=True).repeat(x.shape[1], axis=1)\n\n\nif __name__ == '__main__':\n x = np.arange(0, 10).reshape(2, 5)\n y = softmax_numpy(x)\n print(y)\n # print(np.sum(y, axis=1))\n","repo_name":"hexiangzeng/galanet","sub_path":"utilites/softmax_helper.py","file_name":"softmax_helper.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28050672404","text":"import sys\nimport subprocess\nimport os\nimport mosaik\nimport time\nimport mosaik.scheduler\nimport mosaik.util\n\n\ndef test_integration_server(sim_config_data_client):\n # sim_config_data_client is a fixture created by conftest.py and contains\n # information shared across test cases\n\n sim_config, end, addr = sim_config_data_client\n\n # start Java Simulator as Server in a separate process,\n # which listens on addr_server_test\n if sys.platform == 'win32':\n proc = subprocess.Popen(['examplesim.bat', addr, 'server'],\n cwd=os.path.dirname(os.path.realpath(__file__))\n .replace('\\\\tests', ''))\n else:\n proc = subprocess.Popen(['./examplesim.sh', addr, 'server'])\n\n # wait for the Java Server\n time.sleep(2)\n\n # Create World\n print(\"Create World\")\n world = mosaik.World(sim_config)\n\n # Start simulatorsfs\n examplesim = world.start('ExampleSim', eid_prefix='Model_')\n examplectrl = world.start('ExampleCtrl')\n collector = world.start('Collector', step_size=60)\n\n # Instantiate models\n models = [examplesim.ExampleModel(init_val=i) for i in range(-2, 3, 2)]\n agents = examplectrl.Agent.create(len(models))\n monitor = collector.Monitor()\n\n # Connect entities\n for model, agent in zip(models, agents):\n world.connect(model, agent, ('val', 'val_in'), async_requests=True)\n\n mosaik.util.connect_many_to_one(world, models, monitor, 'val', 'delta')\n\n # Run simulation. 
The collector will test for error free execution.\n world.run(until=end)\n","repo_name":"mschvarc/SGTMP","sub_path":"mosaik-java-api/src/test/java/test_integration_server.py","file_name":"test_integration_server.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3359888699","text":"#!/usr/bin/python3\n\"\"\"Divides all elements of a matrix.\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"\n Divide all elements of a matrix by a given number.\n\n Args:\n matrix (list of lists): The matrix to be divided.\n div (int or float): The number to divide by.\n\n Returns:\n list of lists: A new matrix with elements divided by 'div', \\\n rounded to 2 decimal places.\n\n Raises:\n TypeError: If 'matrix' is not a list of lists of integers or floats.\n TypeError: If each row of 'matrix' does not have the same size.\n TypeError: If 'div' is not a number (integer or float).\n ZeroDivisionError: If 'div' is equal to 0.\n \"\"\"\n\n if not isinstance(matrix, list) or not \\\n all(isinstance(row, list) for row in matrix):\n raise TypeError(\"matrix must be a matrix (list of lists) \\\n of integers/floats\")\n\n row_lengths = [len(row) for row in matrix]\n if len(set(row_lengths)) != 1:\n raise TypeError(\"Each row of the matrix must have the same size\")\n\n if not isinstance(div, (int, float)):\n raise TypeError(\"div must be a number (integer or float)\")\n\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n # Divide each element by 'div' and round to 2 decimal places\n new_matrix = [[round(x / div, 2) for x in row] for row in matrix]\n\n return new_matrix\n","repo_name":"Josephorokpo/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31810348116","text":"import serial\nimport sys\nfrom datetime import datetime, date\n\nourSerial = serial.Serial()\nourSerial.port = '/dev/ttySC1'\nourSerial.baudrate = 4800\nourSerial.bytesize = serial.EIGHTBITS\nourSerial.parity = serial.PARITY_NONE\nourSerial.stopbits = serial.STOPBITS_ONE\nourSerial.xonxoff = False\nourSerial.rtscts = False\nourSerial.dsrdtr = False\n\nourSerial.open()\nourSerial.flushInput()\nourSerial.flushOutput()\n\nfor l in ourSerial:\n if l.rstrip() == 'S0030000FC': #S0 record identifies beginning of transmission\n now = datetime.now()\n today = date.today()\n filename = today.strftime(\"%d%m%Y\") + \"_\" + now.strftime(\"%H-%M-%S\") + '.s'\n f = open (filename, 'w')\n count = 1\n sys.stdout.write('Writing from Motorola ECB to file \"' + filename + '\"... ')\n sys.stdout.flush()\n f.write(l.rstrip() + '\\r\\n') # S-format expects a carriage return\n count += 1\n if l.rstrip() == 'S9030000FC': #S9 record identifies end of transmission\n f.flush()\n f.close()\n sys.stdout.write('done. 
' + str(count + 1) + ' lines written.\\n')\n sys.stdout.flush()\n","repo_name":"podstawek/MEX68KECB","sub_path":"receiveFilesFromECB.py","file_name":"receiveFilesFromECB.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36338834090","text":"from gnuradio import gr\nimport numpy as np\nimport pmt\n\nfrom .eseo_line_decoder import reflect_bytes as reflect\n\n\nclass reflect_bytes(gr.basic_block):\n \"\"\"docstring for block reflect_bytes\"\"\"\n def __init__(self):\n gr.basic_block.__init__(\n self,\n name='reflect_bytes',\n in_sig=[],\n out_sig=[])\n self.message_port_register_in(pmt.intern('in'))\n self.set_msg_handler(pmt.intern('in'), self.handle_msg)\n self.message_port_register_out(pmt.intern('out'))\n\n def handle_msg(self, msg_pmt):\n msg = pmt.cdr(msg_pmt)\n if not pmt.is_u8vector(msg):\n print('[ERROR] Received invalid message type. Expected u8vector')\n return\n packet = np.array(pmt.u8vector_elements(msg), dtype='uint8')\n packet = np.unpackbits(packet)\n packet = reflect(packet)\n packet = np.packbits(packet)\n\n self.message_port_pub(\n pmt.intern('out'),\n pmt.cons(pmt.PMT_NIL, pmt.init_u8vector(len(packet), packet)))\n","repo_name":"daniestevez/gr-satellites","sub_path":"python/reflect_bytes.py","file_name":"reflect_bytes.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":700,"dataset":"github-code","pt":"32"} +{"seq_id":"1745302364","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom PIL import Image\nfrom models.cifar100.resnet20 import ClientModel as res20\nimport tqdm\nimport inversefed\nfrom statistics import mean \nimport os\nimport torchvision\nimport datetime\nimport time\nimport cv2\n\n\nDEVICE = 'cuda'\n\ndef evaluate(net, dataloader, print_tqdm = True):\n # Define loss function\n criterion = nn.CrossEntropyLoss() # for classification, we use Cross Entropy\n \n with torch.no_grad():\n net = net.to(DEVICE) # this will bring the network to GPU if DEVICE is cuda\n net.train(False) # Set Network to evaluation mode\n running_corrects = 0\n #iterable = tqdm(dataloader) if print_tqdm else dataloader\n iterable = dataloader\n losses = []\n for images, labels in iterable:\n images = images.to(DEVICE, dtype=torch.float)\n labels = labels.to(DEVICE)\n # Forward Pass\n outputs = net(images)\n loss = criterion(outputs, labels)\n losses.append(loss.item())\n # Get predictions\n _, preds = torch.max(outputs.data, 1)\n # Update Corrects\n running_corrects += torch.sum(preds == labels.data).data.item()\n # Calculate Accuracy\n accuracy = running_corrects / float(len(dataloader.dataset))\n\n return accuracy, mean(losses)\n\n\n\nstart_time = time.time()\nnum_images = 1\ntrained_model = True\ntarget_id = 20\nimage_path = 'images/'\ncheckpoint_epochs = 161\n\nsetup = inversefed.utils.system_startup()\ndefs = inversefed.training_strategy('conservative')\n\nloss_fn, trainloader, validloader = inversefed.construct_dataloaders('CIFAR100', defs)\n\nmodel = res20(lr=0.1, num_classes=100, device='cuda')\nmodel.to(**setup)\n\nif trained_model:\n checkpoint = torch.load(f'./checkpoint/resnet20_{checkpoint_epochs}')\n model.load_state_dict(checkpoint['state_dict'])\n\nmodel.eval();\n\naccuracy = evaluate(model, validloader)[0]\nprint('\\nTest Accuracy: {}'.format(accuracy))\n\n\ndm = torch.as_tensor(inversefed.consts.cifar100_mean, **setup)[:, None, 
None]\nds = torch.as_tensor(inversefed.consts.cifar100_std, **setup)[:, None, None]\n\n\n\nif num_images == 1:\n if target_id == -1: # demo image\n\n ground_truth = torch.as_tensor(\n np.array(Image.open(\"auto.jpg\").resize((32, 32), Image.BICUBIC)) / 255, **setup\n )\n ground_truth = ground_truth.permute(2, 0, 1).sub(dm).div(ds).unsqueeze(0).contiguous()\n \n labels = torch.as_tensor((1,), device=setup[\"device\"])\n target_id = -1\n else:\n #If the target is None take a random image else take id img\n\n if target_id is None:\n target_id = np.random.randint(len(validloader.dataset))\n else:\n target_id = target_id\n ground_truth, labels = validloader.dataset[target_id]\n\n ground_truth, labels = (\n ground_truth.unsqueeze(0).to(**setup),\n torch.as_tensor((labels,), device=setup[\"device\"]),\n )\nelse:\n ground_truth, labels = [], []\n idx = 25 # choosen randomly ... just whatever you want\n while len(labels) < num_images:\n img, label = validloader.dataset[idx]\n idx += 1\n if label not in labels:\n labels.append(torch.as_tensor((label,), device=setup['device']))\n ground_truth.append(img.to(**setup))\n ground_truth = torch.stack(ground_truth)\n labels = torch.cat(labels)\n\n\n\nmodel.zero_grad()\ntarget_loss, _, _ = loss_fn(model(ground_truth), labels)\ninput_gradient = torch.autograd.grad(target_loss, model.parameters())\ninput_gradient = [grad.detach() for grad in input_gradient]\n\nconfig = dict(signed=False,\n boxed=True,\n cost_fn='sim',\n indices='def',\n weights='equal',\n lr=0.13,\n optim='adam',\n restarts=2,\n max_iterations=8000,\n total_variation=1e-2,\n init='randn',\n filter='none',\n lr_decay=True,\n scoring_choice='loss')\n\nrec_machine = inversefed.GradientReconstructor(model, (dm, ds), config, num_images=num_images)\n\n#wandb.init(entity = \"aml-2022\", project=\"imageReconstruction\")\noutput, stats = rec_machine.reconstruct(input_gradient, labels, img_shape=(3, 32, 32))\n\ntest_mse = (output.detach() - ground_truth).pow(2).mean()\nfeat_mse = (model(output.detach())- model(ground_truth)).pow(2).mean() \ntest_psnr = inversefed.metrics.psnr(output, ground_truth, factor=1/ds)\n\n\nos.makedirs(image_path, exist_ok=True)\n\noutput_denormalized = torch.clamp(output * ds + dm, 0, 1)\ngt_denormalized = torch.clamp(ground_truth * ds + dm, 0, 1)\n\n\n\n\n\nif num_images == 1:\n rec_filename = f\"res20_{target_id}.png\"\n torchvision.utils.save_image(output_denormalized, os.path.join(image_path, rec_filename))\n\n gt_filename = f\"groundTruth-{target_id}.png\"\n torchvision.utils.save_image(gt_denormalized, os.path.join(image_path, gt_filename))\n\n fig, ax = plt.subplots(1, 2)\n ax = ax.ravel()\n ax[0].imshow(gt_denormalized[0].cpu().permute(1, 2, 0))\n ax[1].imshow(output_denormalized[0].cpu().permute(1, 2, 0))\n ax[1].set_title(f\"loss: {round(stats['opt'],2)} | PSNR: {round(test_psnr, 2)} \\n MSE: {test_mse:2.2f} | FMSE: {feat_mse:2.2e} \", fontsize = 10)\n\n fig.suptitle(f'resnet20 with {checkpoint_epochs} epochs, img {target_id}', fontsize=13)\n fig.savefig('single_comparison.png')\n\n\nelse:\n fig_gt, ax_gt = plt.subplots(1, num_images, figsize=(7, 7), sharey = True)\n ax_gt = ax_gt.ravel()\n\n fig_rec, ax_rec = plt.subplots(1, num_images, figsize=(7, 7), sharey = True)\n ax_rec = ax_rec.ravel()\n\n for idx, img in enumerate(output_denormalized):\n rec_filename = f\"res20_{labels[idx]}.png\"\n torchvision.utils.save_image(img, os.path.join(image_path, rec_filename))\n\n gt_filename = f\"groundTruth-{labels[idx]}.png\"\n torchvision.utils.save_image(gt_denormalized[idx], 
os.path.join(image_path, gt_filename))\n\n ax_rec[idx].imshow(img.cpu().permute(1, 2, 0))\n ax_rec[idx].axis(\"off\")\n\n ax_gt[idx].imshow(gt_denormalized[idx].cpu().permute(1, 2, 0))\n ax_gt[idx].axis(\"off\")\n \n fig_gt.savefig('gt_mlt_comparison.png')\n fig_rec.savefig('rec_mlt_comparison.png')\n\n\n\n\nprint(f\"Rec. loss: {stats['opt']:2.4f} | MSE: {test_mse:2.4f} | PSNR: {test_psnr:4.2f} | FMSE: {feat_mse:2.4e} |\")\n\n # Print final timestamp\nprint(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\"))\nprint(\"---------------------------------------------------\")\nprint(f\"Finished computations with time: {str(datetime.timedelta(seconds=time.time() - start_time))}\")\nprint(\"-------------Job finished.-------------------------\")","repo_name":"FlavioPatti/fedsam","sub_path":"recovery.py","file_name":"recovery.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14324479245","text":"import os\nimport tempfile\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.images import ImageFile\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom PIL import Image\n\nfrom photasm.photos.models import Album, Photo\n\n\nLOGIN_URL = reverse('django.contrib.auth.views.login')\n\n\ndef get_login_url(original_url):\n \"\"\"Gets the redirected login URL for the given original URL.\"\"\"\n return \"%s?next=%s\" % (LOGIN_URL, original_url)\n\n\nclass PhotoUploadTest(TestCase):\n\n def runTest(self):\n user = User.objects.create(username=\"Adam\")\n album = Album.objects.create(owner=user, name=\"Test\")\n\n photo_upload_url = reverse('photasm.photos.views.photo_upload',\n kwargs={'album_id': album.id})\n login_url = get_login_url(photo_upload_url)\n\n response = self.client.get(photo_upload_url)\n self.assertRedirects(response, login_url)\n\n\nclass PhotoTest(TestCase):\n\n def setUp(self):\n # Create an image.\n image_fd, image_path = tempfile.mkstemp(suffix='.jpg')\n os.close(image_fd)\n Image.new('RGB', (1, 1)).save(image_path, 'JPEG')\n\n # Create a Photo object.\n user = User.objects.create(username=\"Adam\")\n album = Album.objects.create(owner=user, name=\"Test\")\n self.photo = Photo()\n self.photo.owner = user\n image = open(image_path)\n self.photo.image = ImageFile(image)\n self.photo.album = album\n self.photo.is_jpeg = True\n self.photo.save()\n image.close()\n os.remove(image_path)\n\n def tearDown(self):\n Photo.objects.all().delete()\n Album.objects.all().delete()\n User.objects.all().delete()\n\n def test_photo_edit(self):\n photo_edit = 'photasm.photos.views.photo_edit'\n photo_edit_url = reverse(photo_edit, args=[self.photo.id])\n login_url = get_login_url(photo_edit_url)\n\n response = self.client.get(photo_edit_url)\n self.assertRedirects(response, login_url)\n\n def test_home(self):\n home_url = reverse('photasm.photos.views.home')\n login_url = get_login_url(home_url)\n\n response = self.client.get(home_url)\n self.assertRedirects(response, login_url)\n\n def test_new_album(self):\n home_url = reverse('new_album')\n login_url = get_login_url(home_url)\n\n response = self.client.get(home_url)\n self.assertRedirects(response, login_url)\n","repo_name":"a1russell/photasm","sub_path":"photos/tests/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70932331291","text":"import torch.nn as 
nn\nfrom .reconstruction import AutoEncoder as AE\nfrom .base import BaseModel\n\n\nclass DUAD(BaseModel):\n def __init__(self, r=10, p0=.35, p=.30, **kwargs):\n self.p0 = p0\n self.p = p\n self.r = r\n self.latent_dim = kwargs.get('ae_latent_dim', 1)\n self.name = \"DUAD\"\n self.ae = None\n super(DUAD, self).__init__(**kwargs)\n self.cosim = nn.CosineSimilarity()\n\n def resolve_params(self, dataset_name: str):\n enc_layers = [\n (self.in_features, 60, nn.Tanh()),\n (60, 30, nn.Tanh()),\n (30, 10, nn.Tanh()),\n (10, self.latent_dim, None)\n ]\n dec_layers = [\n (self.latent_dim, 10, nn.Tanh()),\n (10, 30, nn.Tanh()),\n (30, 60, nn.Tanh()),\n (60, self.in_features, None)\n ]\n self.ae = AE(enc_layers, dec_layers).to(self.device)\n\n def encode(self, x):\n return self.ae.encoder(x)\n\n def decode(self, code):\n return self.ae.decoder(code)\n\n def forward(self, x):\n code = self.ae.encoder(x)\n x_prime = self.ae.decoder(code)\n h_x = self.cosim(x, x_prime)\n return code, x_prime, h_x\n\n def get_params(self) -> dict:\n return {\n \"duad_p\": self.p,\n \"duad_p0\": self.p0,\n \"duad_r\": self.r\n }\n","repo_name":"intrudetection/robevalanodetect","sub_path":"src/model/DUAD.py","file_name":"DUAD.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"7647955739","text":"import numpy as np\nimport cv2\nimport time\nfrom datetime import datetime\n\n# in seconds; actual length dependent on camera and video FPS\nVID_LENGTH = 10\n# size of detected motion; experiment\nMIN_AREA = 250\n# codec\nFOURCC = cv2.VideoWriter_fourcc(*'XVID')\n\ncap = cv2.VideoCapture(0)\n\nconcluded = False\nfirstFrame = None\nstartTime = None\n\nwhile True:\n\tret, frame = cap.read()\n\tgrey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tblur = cv2.GaussianBlur(grey, (21, 21), 0)\n\t\n\tif firstFrame is None:\n\t\tfirstFrame = blur\n\t\n\t# motion detection\n\tframeDelta = cv2.absdiff(firstFrame, blur)\n\tthresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n\tthresh = cv2.dilate(thresh, None, iterations=2)\n\tdiff = cv2.subtract(firstFrame, blur)\n\t# win fix, experiment\n\t_, cnts, _= cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\n\tfor c in cnts:\n\t\tif cv2.contourArea(c) >= MIN_AREA:\n\t\t\t(x,y,w,h) = cv2.boundingRect(c)\n\t\t\tcv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)\n\t\n\tcv2.imshow(\"Video Feed\", frame)\n\t\n\tif (len(cnts) > 0) and (startTime == None):\n\t\tstartTime = time.time()\n\t\tprint(\"detected motion, recording video\")\n\t\tcapname = \"{}.avi\".format(str(datetime.now().isoformat()))\n\t\tcapname = capname.replace(\":\",\"-\")\n\t\tvid = cv2.VideoWriter(capname, FOURCC, 5, (640, 480))\n\t\n\tif startTime != None:\n\t\tif (time.time() - startTime)< VID_LENGTH:\n\t\t\tvid.write(frame)\n\t\telif concluded == False:\n\t\t\tvid.release()\n\t\t\tprint(\"finished recording\")\n\t\t\tconcluded = True\n\t\t\tstartTime = None\n\t\n\tk=cv2.waitKey(10)& 0xff\n\tif k == 27:\n\t\tbreak\n\nif vid != None:\n\tvid.release()\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"vatavazz/cv","sub_path":"motion/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32303581137","text":"#!/usr/bin/python3.7\n# UTF8\n# Date:Thu 19 Mar 2020 22:24:29 CET\n# Author: Nicolas Flandrois\n# Create a sudoku solver in python, using 
recursion.\n\nimport numpy as np\n\n# Please Change this matrix according to the sudoku to solve.\n\n# Exemple With only 1 solution\ngrid = [\n [5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]\n]\n\n# Exemple With 2 solutions\n# grid = [\n# [5, 3, 0, 0, 7, 0, 0, 0, 0],\n# [6, 0, 0, 1, 9, 5, 0, 0, 0],\n# [0, 9, 8, 0, 0, 0, 0, 6, 0],\n# [8, 0, 0, 0, 6, 0, 0, 0, 3],\n# [4, 0, 0, 8, 0, 3, 0, 0, 1],\n# [7, 0, 0, 0, 2, 0, 0, 0, 6],\n# [0, 6, 0, 0, 0, 0, 2, 8, 0],\n# [0, 0, 0, 4, 1, 9, 0, 0, 5],\n# [0, 0, 0, 0, 8, 0, 0, 7, 9]\n# ]\n\nprint('Initial Grid:\\n', np.matrix(grid))\n\n\ndef possible(y, x, n):\n global grid\n\n for i in range(0, 9):\n if grid[y][i] == n:\n return False\n\n for i in range(0, 9):\n if grid[i][x] == n:\n return False\n\n x0 = (x // 3) * 3\n y0 = (y // 3) * 3\n\n for i in range(0, 3):\n for j in range(0, 3):\n if grid[y0 + i][x0 + j] == n:\n return False\n\n return True\n\n\ndef solve():\n global grid\n\n for y in range(9):\n for x in range(9):\n if grid[y][x] == 0:\n for n in range(1, 10):\n if possible(y, x, n):\n grid[y][x] = n\n solve()\n grid[y][x] = 0 # Backtracking\n return\n print('\\nA possible Solution is:\\n', np.matrix(grid))\n input(\"\\nMore?\")\n # If the grid is open to multiple solution, this input() command will\n # relaunch the solve() function to find other possible solutions.\n # If only 1 solution exist to your grid, then the script stop.\n\n\nsolve()\n","repo_name":"NicolasFlandrois/My-Mini-Py-Scripts-Training","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71330845852","text":"def Collatz(s,n):\n if s == 1:\n return n\n elif s == 0:\n return 0\n if s % 2 == 1:\n return Collatz(3*s + 1, n = n+1)\n else:\n return Collatz(s/2, n = n+1)\n\ntesty = int(input())\nresult = 0\nfor i in range(testy):\n print(Collatz(int(input()),result))\n\n\n","repo_name":"cynarski/Spoj-Python","sub_path":"PTCLTZ - Problem Collatza.py","file_name":"PTCLTZ - Problem Collatza.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30102450977","text":"\"\"\"\r\n\n\nCreate a function to rotate a two-dimensional matrix of `N * N` integer\nelements `num` times, where if `num` is positive, the rotation is\n**clockwise** , and if not, **counterclockwise**.\n\n### Examples\n\n rotate_transform([\n [2, 4],\n [0, 0]\n ], 1) ➞ [\n [0, 2],\n [0, 4]\n ]\n rotate_transform([\n [2, 4],\n [0, 0]\n ], -1) ➞ [\n [4, 0],\n [2, 0]\n ]\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\nlst = [\n [1, 4, 0, 0],\n [2, 8, 0, 0],\n [0, 0, 3, 5],\n [0, 0, 7, 1]\n ]\nnum = -1\ndef rotate_transform(lst,num):\n def clockwise(lst):\n c = []\n for i in range(len(lst[0])):\n col = []\n for j in (lst):\n col.append(j[i])\n c.append(col)\n for j in range(len(c)):\n c[j].reverse()\n return c\n def counterclock(lst):\n c = []\n for i in range(len(lst[0])):\n col = []\n for j in (lst):\n col.append(j[i])\n c.append(col)\n c.reverse()\n return c\n if num<0:\n for i in range(-1*num):\n lst = counterclock(lst)\n return lst\n if num>0:\n for i in range(num):\n lst = clockwise(lst)\n return lst 
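\n# with num = -1 set above, this applies one counterclockwise rotation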
\nrotate_transform(lst,num)\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"YxnrZQwKyrzgcMvT4_23.py","file_name":"YxnrZQwKyrzgcMvT4_23.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27873576159","text":"\"\"\"\ncolor 1:12:30 1:23:00\nOpenCV Course - Full Tutorial with Python\n https://www.youtube.com/watch?v=oXlwWbU8l2o\n https://github.com/jasmcaus/opencv-course/blob/master/Section%20%232%20-%20Advanced/colour_spaces.py\n \"\"\"\nimport cv2 as cv\n\nimport matplotlib.pyplot as plt\n\n\nBASE_FOLDER = 'C:/Users/rockman/Pictures/Saved Pictures/'\nmimg = \"bz.JPG\"\npath = BASE_FOLDER + mimg\n\nimg = cv.imread(path)\ncv.imshow('Original', img)\n\n# Resizing\n\nresized = cv.resize(img, (500,500), interpolation=cv.INTER_CUBIC)\ncv.imshow('Resized', resized)\nimg = resized\n\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray', gray)\n\n# BGR to HSV (hue, saturation, value\nhsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\ncv.imshow('HSV', hsv)\n\n# BGR to L*a*b\n# L* for perceptual lightness\n# a* and b* for the 4 unique colors red, green, blue, and yellow\nlab = cv.cvtColor(img, cv.COLOR_BGR2LAB)\ncv.imshow('LAB', lab)\n''' \n\n'''\n# BGR to RGB\nrgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)\ncv.imshow('RGB', rgb)\n\nplt.imshow(rgb)\nplt.show()\n\n# HSV to BGR\nlab_bgr = cv.cvtColor(lab, cv.COLOR_LAB2BGR)\ncv.imshow('LAB --> BGR', lab_bgr)\n\ncv.waitKey(0)","repo_name":"danizalm05/python01","sub_path":"opencv/murtazaNew/basic/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72076028250","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport docker\nfrom util.Logger import Logger\n\n\ndef startService(serviceInstance):\n # The callback for when the client receives a CONNACK response from the server.\n logger = Logger()\n service = serviceInstance.getName()\n nodes = serviceInstance.getSelectedNodes()\n client = docker.from_env()\n\n for node in nodes:\n n = client.nodes.list(filters={'name': node})[0]\n config = {'Availability': 'active',\n 'Name': node,\n 'Labels': {service: 'true'}\n }\n if node == 'node01':\n config['Role'] = 'manager'\n else:\n config['Role'] = 'worker'\n n.update(config)\n n.reload()\n\n client.services.create(\"service\", name=service, networks=[\"swarm_net\"],\n mounts=[\"/home/pi/video/tracking/container:/data:rw\"], mode=\"global\",\n constraints=[\"node.labels.\"+service+\"==true\"])\n\ndef stopService(serviceInstance):\n # The callback for when the client receives a CONNACK response from the server.\n logger = Logger()\n service = serviceInstance.getName()\n client = docker.from_env()\n client.services.get(service).remove()","repo_name":"cdsnlab/AIoTVirt","sub_path":"src/master/ClusterManager.py","file_name":"ClusterManager.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"11815005243","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom glob import glob\r\n\r\n# ----- Fonte do LaTeX -----\r\nplt.rcParams.update({\r\n 'font.family': 'serif',\r\n 'mathtext.fontset': 'cm'\r\n})\r\n\r\nfilelist_hom = glob('files/metropolis*T1_hom.dat')\r\nfilelist_rand = glob('files/metropolis*T1_rand.dat')\r\n\r\nfor hom, rand in zip(filelist_hom, filelist_rand):\r\n # ----- Leitura dos dados -----\r\n 
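# columns: MC step, energy per spin, magnetization\r\n 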
mcs_h, en_h, mag_h = np.loadtxt(hom,unpack=True)\r\n mcs_rand, en_rand, mag_rand = np.loadtxt(rand,unpack=True)\r\n L = int(hom.split('L')[1].split('_')[0])\r\n # ----- Gráficos -----\r\n fig, axes = plt.subplots(1,2)\r\n fig.set_size_inches(16,6)\r\n ax = axes[0] # esquerda: energia\r\n ax.plot(mcs_h, en_h, label=r'$m(0)=1$', color='darkblue')\r\n ax.plot(mcs_rand, en_rand, label='Estado inicial aleatório', color='darkred')\r\n ax.set_xlim(0,300)\r\n ax.set_ylabel(r'$E(t)/N$')\r\n ax.legend()\r\n ax = axes[1] # direita: magnetização\r\n ax.plot(mcs_h, np.abs(mag_h), label=r'$m(0)=1$', color='darkblue')\r\n ax.plot(mcs_rand, np.abs(mag_rand), label='Estado inicial aleatório', color='darkred')\r\n ax.set_xlim(0,300)\r\n ax.set_ylabel(r'$|m(t)|$')\r\n ax.legend()\r\n fig.suptitle(f'Energia e magnetização do modelo de Ising com T=1 e L={L}')\r\n fig.supxlabel(r'$t$ (MCS)')\r\n plt.savefig(f'grafico_ex15_L{L}_T1.png', dpi=1000)\r\n plt.show()","repo_name":"Vini-BM/monte-carlo","sub_path":"ising-model/ex15_graficos.py","file_name":"ex15_graficos.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40945362249","text":"\"\"\"\nYou are given an m x n grid where each cell can have one of three values:\n\n 0 representing an empty cell,\n 1 representing a fresh orange, or\n 2 representing a rotten orange.\n\nEvery minute, any fresh orange that is 4-directionally adjacent to a rotten orange becomes rotten.\n\nReturn the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1.\n\nExample 1:\n\nInput: grid = [[2,1,1],[1,1,0],[0,1,1]]\nOutput: 4\n\nExample 2:\n\nInput: grid = [[2,1,1],[0,1,1],[1,0,1]]\nOutput: -1\nExplanation: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 4-directionally.\n\nExample 3:\n\nInput: grid = [[0,2]]\nOutput: 0\nExplanation: Since there are already no fresh oranges at minute 0, the answer is just 0.\n\nConstraints:\n\n m == grid.length\n n == grid[i].length\n 1 <= m, n <= 10\n grid[i][j] is 0, 1, or 2.\n\n\n---!SECTION TIMING\n Start Time = 14/11/22 00:37\n End Time = 14/11/22 02:49\n\n\"\"\"\n\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n # grid size\n rows = len(grid)\n columns = len(grid[0])\n\n # rotten_indexes will save all the positions of rotten oranges\n # this will work as a Queue, we first add all the initial ones, and every minute\n # we will loop over the list, and for every one that we check\n # we will add the indexes of the new rottened neighbours and pop out the old one.\n # first to get in, first to get out\n # eventually, there will be no elements in this queue because there will not be new oranges to rot.\n rotten_indexes = []\n\n # we will mantain the count of clean oranges. If this reach to 0 we will reach the end.\n clean_counter = 0\n \n # I need indexes so I'm using range() to loop all the cells of the grid\n for i in range(rows):\n for j in range(columns):\n # If the cell is rotten, add it to the Queue\n if grid[i][j] == 2:\n rotten_indexes.append([i,j])\n # If it's a clean orange, counter up\n if grid[i][j] == 1:\n clean_counter += 1\n \n # If there's not initial rotten oranges we have two options\n if not rotten_indexes:\n # There are clean oranges that will never rot, hence we should return -1\n if clean_counter:\n return -1\n # There are no clean oranges neither. 
The initial position is a completed position.\n else:\n return 0\n \n # We declare variable\n minutes = 0\n while True:\n for i,j in rotten_indexes:\n if i > 0 and grid[i-1][j] == 1:\n new_indexes.append([i-1,j])\n grid[i-1][j] = 2\n if i < rows-1 and grid[i+1][j] == 1:\n new_indexes.append([i+1,j])\n grid[i+1][j] = 2\n if j > 0 and grid[i][j-1] == 1:\n new_indexes.append([i,j-1])\n grid[i][j-1] = 2\n if j < columns-1 and grid[i][j+1] == 1:\n new_indexes.append([i,j+1])\n grid[i][j+1] = 2\n if not new_indexes:\n for i in range(rows):\n for j in range(columns):\n if grid[i][j] == 1:\n return -1\n return minutes\n \n rotten_indexes = new_indexes\n new_indexes = []\n minutes +=1\n else:\n # Imposible situation\n return -1","repo_name":"santiagoNieva/Exercises","sub_path":"LeetCode/994_rotten_oranges.py","file_name":"994_rotten_oranges.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"47739882612","text":"import pandas as pd\r\nimport json\r\nimport os\r\nimport datetime\r\n\r\n\r\ndef start_converting(filename, destination):\r\n xls = pd.ExcelFile(filename)\r\n\r\n xls.sheet_names\r\n\r\n print(xls.sheet_names)\r\n for sheet_name in xls.sheet_names:\r\n excel_df = None\r\n try:\r\n excel_df = pd.read_excel(filename, skiprows=0, sheet_name=sheet_name)\r\n except Exception as e:\r\n print(e)\r\n return False\r\n\r\n data = []\r\n\r\n for k, v in excel_df.iterrows():\r\n data.append(v.to_dict())\r\n\r\n try:\r\n with open(destination + \"\\\\\" + sheet_name + \".json\", 'a+', ) as f:\r\n json.dump(data, f, indent=4, )\r\n except Exception as e:\r\n print(e)\r\n return False\r\n finally:\r\n xls.close()\r\n\r\n return True\r\n\r\n\r\ndef run():\r\n global source_filename\r\n print(\"converting from excel to json...\")\r\n destination_directory = \"\"\r\n source_directory = \"\"\r\n\r\n is_valid_destination = False\r\n is_valid_source = False\r\n\r\n while not is_valid_source:\r\n source_filename = input(\"Enter the filename to be converted.\\n\")\r\n if source_filename.endswith('.xls'):\r\n pass\r\n elif source_filename.endswith('.xlsx'):\r\n pass\r\n else:\r\n print(\"{} is not a valid excel file.\".format(source_filename))\r\n print(\"Please input valid excel file.\")\r\n continue\r\n\r\n if not os.path.exists(source_filename):\r\n print(\"Invalid filename.\\nPlease try again.\")\r\n continue\r\n\r\n is_valid_source = True\r\n\r\n destination_directory = \"\\\\\".join(source_filename.split(\"\\\\\")[:-1])\r\n\r\n success = start_converting(filename=source_filename,\r\n destination=destination_directory)\r\n\r\n if success:\r\n print(\"done conversion\")\r\n else:\r\n print(\"conversion failed.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n run()\r\n","repo_name":"adriane-macer/tools","sub_path":"xls_to_json_converter.py","file_name":"xls_to_json_converter.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8759081850","text":"# -*- coding: utf-8 -*-\n\"\"\"Pressure convergence WorkChain\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom aiida.engine import WorkChain, ToContext, while_, calcfunction, workfunction\nfrom aiida.orm import Code, Float, Str, StructureData\nfrom aiida.plugins import CalculationFactory, DataFactory\n\nfrom common_wf import generate_scf_input_params\nfrom create_rescale import rescale\n\nDict = DataFactory('dict')\nKpointsData 
= DataFactory('array.kpoints')\nPwCalculation = CalculationFactory('quantumespresso.pw')\n\n\ndef get_energy_first_derivative(stress):\n \"\"\"Return the energy first derivative from the stress.\n\n :param stress: the stress tensor in GPa\n :return: first derivative of the energy in eV/Å^3\n \"\"\"\n from numpy import trace\n\n GPa_to_eV_over_ang3 = 1. / 160.21766208\n\n # Get the pressure (GPa)\n pressure = trace(stress) / 3.\n\n # Pressure is -dE/dV; moreover p in kbar, we need to convert it to eV/Å^3 to be consistent\n dE = -pressure * GPa_to_eV_over_ang3\n\n return dE\n\n\ndef get_volume_energy_and_derivative(output_parameters):\n \"\"\"Return the volume, energy and energy derivative for given `PwCalculation` output parameters.\n\n Volume is in Å^3, energy in eV and energy derivative eV/Å^3\n\n :param output_parameters: the `output_parameters` `Dict` result node of a `PwCalculation`\n :return: tuple with volume, energy and energy derivative\n \"\"\"\n V = output_parameters.dict.volume\n E = output_parameters.dict.energy\n dE = get_energy_first_derivative(output_parameters.dict.stress)\n\n return V, E, dE\n\n\ndef get_energy_second_derivative(output_parameters_one, output_parameters_two):\n \"\"\"Return second derivative of the energy with respect to the volume given the results of two `PwCalculations`.\n\n The derivate is computed using the finite differences method.\n\n :param output_parameters_one: the `output_parameters` `Dict` result node of the first `PwCalculation`\n :param output_parameters_two: the `output_parameters` `Dict` result node of the second `PwCalculation`\n :return: the second derivative of the energy with respect to the volume\n \"\"\"\n dE1 = get_energy_first_derivative(output_parameters_one.dict.stress)\n dE2 = get_energy_first_derivative(output_parameters_two.dict.stress)\n V1 = output_parameters_one.dict.volume\n V2 = output_parameters_two.dict.volume\n\n return (dE2 - dE1) / (V2 - V1)\n\n\ndef get_parabola_coefficients(V, E, dE, ddE):\n \"\"\"Return coefficients of a parabola fit to E = a*V^2 + b*V + c for the given volume and energy.\n\n :param V: volume in Å^3\n :param E: energy in eV\n :param dE: the first derivative of the energy with respect to the volume\n :param ddE: the second derivative of the energy with respect to the volume\n :return: coefficients a, b and c\n \"\"\"\n a = ddE / 2.\n b = dE - ddE * V\n c = E - V * dE + V**2 * ddE / 2.\n\n return a, b, c\n\n\n@workfunction\ndef get_structure(structure, step_data=None):\n \"\"\"Return a scaled version of the given structure, where the new volume is determined by the given step data.\"\"\"\n initial_volume = structure.get_cell_volume()\n\n if step_data is None:\n new_volume = initial_volume + 4. # In Å^3\n else:\n # Minimum of a parabola\n new_volume = -step_data.dict.b / 2. / step_data.dict.a\n\n scale_factor = (new_volume / initial_volume)**(1. 
/ 3.)\n scaled_structure = rescale(structure, Float(scale_factor))\n return scaled_structure\n\n\n@calcfunction\ndef get_step_data(parameters_first, parameters_second=None):\n \"\"\"Generate a dictionary with the step parameters from the output parameters of a completed `PwCalculation`.\"\"\"\n\n # If the parameters of the second calculation are not passed, this is for the first step and we only return\n # a dictionary with the volume, energy and derivative\n if parameters_second is None:\n V, E, dE = get_volume_energy_and_derivative(parameters_first)\n return Dict(dict={'V': V, 'E': E, 'dE': dE})\n\n # Otherwise, this is an iteration step and we return the difference between the steps\n V, E, dE = get_volume_energy_and_derivative(parameters_first)\n ddE = get_energy_second_derivative(parameters_first, parameters_second)\n a, b, c = get_parabola_coefficients(V, E, dE, ddE)\n\n return Dict(dict={\n 'V': V,\n 'E': E,\n 'dE': dE,\n 'ddE': ddE,\n 'a': a,\n 'b': b,\n 'c': c\n })\n\n\n@calcfunction\ndef bundle_step_data(step0, **kwargs):\n \"\"\"Bundle step data into Dict.\"\"\"\n steps = [step.get_dict() for step in kwargs.values()]\n print((step0, type(step0)))\n print((steps, type(steps)))\n return Dict(dict={'step0': step0.get_dict(), 'steps': steps})\n\n\nclass PressureConvergence(WorkChain):\n \"\"\"Relax a structure using Newton's algorithm on the first derivative of the energy (minus the pressure).\"\"\"\n\n @classmethod\n def define(cls, spec):\n \"\"\"Define spec of WorkChain.\"\"\"\n # yapf: disable\n # pylint: disable=bad-continuation\n super(PressureConvergence, cls).define(spec)\n spec.input('code', valid_type=Code,\n help='Code setup to run `pw.x` to use for the calculations.')\n spec.input('structure', valid_type=StructureData,\n help='The structure to minimize.')\n spec.input('pseudo_family', valid_type=Str,\n help='Family of pseudopotentials to use for the calculations.')\n spec.input('volume_tolerance', valid_type=Float,\n help='Stop if the volume difference of two consecutive calculations is less than this threshold.')\n spec.output('steps', valid_type=Dict,\n help='The data of all the steps in the minimization process containing info about energy and volume')\n spec.output('structure', valid_type=StructureData,\n help='Final relaxed structure.')\n spec.outline(\n cls.setup,\n cls.put_step0_in_ctx,\n cls.move_next_step,\n while_(cls.not_converged)(\n cls.move_next_step,\n ),\n cls.finish\n )\n\n def setup(self):\n \"\"\"Launch the first calculation for the input structure, and a second calculation for a shifted volume.\"\"\"\n scaled_structure = get_structure(self.inputs.structure)\n self.ctx.last_structure = scaled_structure\n\n inputs0 = generate_scf_input_params(self.inputs.structure,\n self.inputs.code,\n self.inputs.pseudo_family)\n inputs1 = generate_scf_input_params(scaled_structure, self.inputs.code,\n self.inputs.pseudo_family)\n\n # Run two `PwCalculations`\n future0 = self.submit(PwCalculation, **inputs0)\n future1 = self.submit(PwCalculation, **inputs1)\n\n # Wait for them to complete before going to the next step\n return ToContext(r0=future0, r1=future1)\n\n def put_step0_in_ctx(self):\n \"\"\"Store the outputs of the very first step in a specific dictionary.\"\"\"\n self.ctx.step0 = get_step_data(self.ctx.r0.outputs.output_parameters)\n\n # Prepare the list containing the steps: step 1 will be stored here by move_next_step\n self.ctx.steps = []\n\n def move_next_step(self):\n \"\"\"Main part of the algorithm.\n\n Compare the results of two consecutive 
calculations and use Newton's algorithm on the pressure by fitting the\n results with a parabola and setting the next volume to calculate to the parabola minimum.\n\n The oldest calculation gets replaced by the most recent and a new calculation is launched that will replace\n the most recent.\n \"\"\"\n # Computer the new Volume using Newton's algorithm and create the new corresponding structure by scaling it\n new_step_data = get_step_data(self.ctx.r0.outputs.output_parameters,\n self.ctx.r1.outputs.output_parameters)\n scaled_structure = get_structure(self.inputs.structure, new_step_data)\n self.ctx.steps.append(new_step_data)\n\n # Replace the older step with the latest and set the current structure\n self.ctx.r0 = self.ctx.r1\n self.ctx.last_structure = scaled_structure\n\n inputs = generate_scf_input_params(scaled_structure, self.inputs.code,\n self.inputs.pseudo_family)\n future = self.submit(PwCalculation, **inputs)\n\n return ToContext(r1=future)\n\n def not_converged(self):\n \"\"\"Return True if the worflow is not converged yet (i.e. the volume changed significantly).\"\"\"\n r0_out = self.ctx.r0.outputs.output_parameters\n r1_out = self.ctx.r1.outputs.output_parameters\n\n return abs(r1_out.dict.volume -\n r0_out.dict.volume) > self.inputs.volume_tolerance\n\n def finish(self):\n \"\"\"Attach the result nodes as outputs.\"\"\"\n steps = {\n 'step{}'.format(index + 1): step\n for index, step in enumerate(self.ctx.steps)\n }\n bundled_steps = bundle_step_data(step0=self.ctx.step0, **steps)\n\n self.out('steps', bundled_steps)\n self.out('structure', self.ctx.last_structure)\n","repo_name":"unkcpz/aiida-tutorials-zh_CN","sub_path":"docs/pages/2019_MARVEL_Psik_MaX/scripts/pressure_convergence.py","file_name":"pressure_convergence.py","file_ext":"py","file_size_in_byte":9265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12598793227","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup as bs\n\nquery=input('입력하실 태그? 
')\nurl = 'https://search.naver.com/search.naver?where=image&sm=tab_jum&query='+'합정동오레노라멘'\n\nresponse=requests.get(url)\n\n\nsoup = bs(response.text, \"lxml\")\nimgs = soup.find(class_='_img')\n\nn=1\nimgUrl = imgs['data-source']\n\nprint(imgUrl)\n\n##\n##for i in imgs:\n## imgUrl = i['data-source']\n## print(imgUrl)\n#### with requests.get(imgUrl) as f:\n#### with open('./img/' + query + str(n)+'.jpg','wb') as h: # w - write b - binary\n#### img = f.read()\n#### h.write(img)\n## n += 1\n","repo_name":"parkgeonhu/sikugeon","sub_path":"naver/naver_image_search.py","file_name":"naver_image_search.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4769427179","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\nUser = get_user_model()\n\n\nclass Post(models.Model):\n text = models.TextField(max_length=200)\n pub_date = models.DateTimeField(auto_now_add=True)\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts',)\n\n group = models.ForeignKey(\n 'Group',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='posts')\n\n # Поле для картинки (необязательное)\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n blank=True\n )\n\n class Meta:\n ordering = ['-pub_date']\n\n def __str__(self):\n return self.text[:15]\n\n\nclass Group(models.Model):\n title = models.CharField(max_length=200)\n slug = models.SlugField(unique=True)\n description = models.TextField(max_length=300)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(\n Post,\n on_delete=models.CASCADE,\n related_name='comments',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='comments',\n )\n text = models.TextField(max_length=200)\n created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-created']\n\n def __str__(self):\n return self.text[:15]\n\n\nclass Follow(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='follower',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='following',\n )\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'author'],\n name='unique_follow'),\n\n models.CheckConstraint(\n check=~models.Q(author=models.F(\"user\")),\n name=\"prevent_self_follow\"), ]\n\n def __str__(self):\n return f'{self.user} following {self.author}'\n","repo_name":"lambazta/hw05_final","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12942497310","text":"import nltk\n\nclass PosTags:\n\n def tag(self, t, as_dicts=True):\n '''\n With a list of tokens, mark their part of speech and return\n a list dicts (no native tuple type in dataframes it seems).\n '''\n pos = nltk.pos_tag(t)\n if as_dicts:\n return self.to_dicts(pos)\n else:\n return pos\n\n\n def to_dicts(self, pos):\n '''\n With a list of POS tag tuples, convert the tuples to dicts\n because Spark can't store tuples.\n '''\n retval = []\n for p in pos:\n retval.append({\"word\": p[0], \"tag\": p[1]})\n return retval\n\n\nif __name__ == \"__main__\":\n from tokens import Tokens\n import sys\n fn = sys.argv[1]\n\n t = Tokens()\n pos_tags = PosTags() \n with open(fn) as f:\n for l in f:\n tokens = 
t.tokenize(l)\n pos = pos_tags.tag(tokens)\n s = \"\"\n for p in pos:\n s = s + p[\"word\"] + \" (\" + p[\"tag\"] + \") | \"\n print(s + \"\\n\")\n","repo_name":"mjcollin/2016spr","sub_path":"notebook/lib/pos_tags.py","file_name":"pos_tags.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23120921996","text":"import numpy as np\nimport scipy.io as sio\nfrom sklearn import svm\nfrom sklearn import metrics\nimport time\n\n# 读取mnist数据集\nmnist = sio.loadmat('mnist.mat')\ndataMat = mnist['data']\nlabelMat = mnist['label']\n\n# 读取5个划分的索引\nindex = sio.loadmat('index.mat')\ntrain_index = index['train_index']\ntest_index = index['test_index']\n\n\ndef svm_baseline(i):\n start_time = time.time()\n # 设定分类器参数\n clf = svm.SVC(C=100, kernel='rbf', gamma=0.03,decision_function_shape='ovo')\n # 使用第i个训练集的数据和样本进行模型训练\n clf.fit(dataMat[train_index[i]],labelMat[0][train_index[i]])\n # 第i个测试集的预测结果\n predict_result=clf.predict(dataMat[test_index[i]])\n # print(predict_result)\n predictions = [int(a) for a in predict_result]\n # print(predictions)\n # num_correct = sum(int(a == y) for a, y in zip(predictions, labelMat[0][test_index[i]]))\n # print(\"%s of %s test values are correct.\" % (num_correct, len(labelMat[0][test_index[i]])))\n score = clf.score(dataMat[test_index[i]],labelMat[0][test_index[i]])\n print(\"Score: {:.6f}.\".format(score))\n print(\"Error rate is {:.6f}.\".format((1 - score)))\n f_measure=metrics.f1_score(labelMat[0][test_index[i]],predict_result,average='micro')\n print(\"F-measure: {:.6f}.\".format(f_measure))\n end_time = time.time()\n print(\"Testing test set {} spent {:.2f}s.\".format(i+1,end_time-start_time))\n print(\"---------------------------------------------------------\")\n return score\n\n\nif __name__ == \"__main__\":\n start_time=time.time()\n # score_list 用于保存5次测试的精度\n score_list=[]\n # 分别对5个训练集和测试集进行训练和测试\n for i in range(0,5):\n score=svm_baseline(i)\n score_list.append(score)\n\n # print(score_list)\n end_time=time.time()\n avgAccuracy = np.mean(score_list)\n print(\"Average accuracy is: {:.6f}.\".format(avgAccuracy))\n avgStd=np.std(score_list)\n print(\"Standard deviation is: {:.6f}.\".format(avgStd))\n print(\"Testing all 5 test sets spent {:.2f}s.\".format(end_time - start_time))\n print(\"---------------------------------------------------------\")\n\n","repo_name":"Qinyimin/machine_learning_test","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17431703807","text":"from datetime import datetime\nfrom datetime import timezone\nfrom struct import unpack\nimport functools\nimport sys\nimport warnings\n\n\ndef must_be(expected):\n def outer(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n result = func(self, *args, **kwargs)\n\n if result != expected:\n msg = \"%s must be %s: %s\" % (func.__name__, expected, result)\n warnings.warn(msg)\n\n return result\n\n return inner\n\n return outer\n\n\ndef uuid(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n binary = func(self, *args, **kwargs)\n\n # UUID variants\n # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/49e490b8-f972-45d6-a3a4-99f924998d97\n # Also see Java implementation (mslinks)\n # https://github.com/DmitriiShamrikov/mslinks/blob/master/src/mslinks/data/GUID.java#L51\n d1, d2, d3 = 
unpack(\"HHI\", binary[8:16])\n\n uuid = \"%08X-%04X-%04X-%04X-%04X%08X\" % (d1, d2, d3, d4, d51, d52)\n\n return uuid\n\n return inner\n\n\ndef _quad_to_hex(quad):\n # An implemetation is based on\n # https://metadataconsulting.blogspot.com/2019/12/CSharp-Convert-a-GUID-to-a-Darwin-Descriptor-and-back.html\n base_85 = \"!$%&'()*+,-.0123456789=?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{}~\"\n i = 5\n ddec = 0\n while i >= 1:\n char = quad[i - 1]\n b85 = base_85.find(char)\n ddec = ddec + b85\n if i > 1:\n ddec = ddec * 85\n i -= 1\n\n return f\"{ddec:08X}\"\n\n\ndef packed_uuid(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n text = func(self, *args, **kwargs)\n\n if text is None:\n return None\n\n # An implemetation is based on\n # https://metadataconsulting.blogspot.com/2019/12/CSharp-Convert-a-GUID-to-a-Darwin-Descriptor-and-back.html\n quad1 = _quad_to_hex(text[0:5])\n quad2 = _quad_to_hex(text[5:10])\n quad3 = _quad_to_hex(text[10:15])\n quad4 = _quad_to_hex(text[15:20])\n quads = quad1 + quad2 + quad3 + quad4\n\n d1 = quads[:8]\n d2 = quads[12:16]\n d3 = quads[8:12]\n d41 = quads[22:24]\n d42 = quads[20:22]\n d51 = quads[18:20]\n d52 = quads[16:18]\n d53 = quads[30:32]\n d54 = quads[28:30]\n d55 = quads[26:28]\n d56 = quads[24:26]\n\n uuid = \"%s-%s-%s-%s%s-%s%s%s%s%s%s\" % (\n d1,\n d2,\n d3,\n d41,\n d42,\n d51,\n d52,\n d53,\n d54,\n d55,\n d56,\n )\n\n return uuid\n\n return inner\n\n\ndef filetime(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n binary = func(self, *args, **kwargs)\n\n try:\n nanosec = unpack(\"> 9) + 1980,\n ((dos & 0x000001E0) >> 5),\n ((dos & 0x0000001F) >> 0),\n ((dos & 0xF8000000) >> 27),\n ((dos & 0x07E00000) >> 21),\n ((dos & 0x001F0000) >> 16) * 2,\n )\n\n return datetime(*ymdhms, tzinfo=timezone.utc)\n except ValueError:\n if sys.version_info < (3, 8, 0):\n # HACK for older versions for bytes.hex()\n # https://docs.python.org/3.9/library/stdtypes.html?highlight=hex#bytes.hex\n iterator = iter(binary.hex())\n invalid_date = \" \".join(a + b for a, b in zip(iterator, iterator))\n else:\n invalid_date = binary.hex(\" \")\n msg = \"Invalid dostime: %s\" % invalid_date\n warnings.warn(msg)\n return None\n\n return inner\n","repo_name":"yeom0331/lnk","sub_path":"venv/Lib/site-packages/LnkParse3/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73429114652","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nimport django.contrib.auth.views\n\nimport endagaweb.views\n\nimport endagaweb.stats_app\n\nimport rest_framework.authtoken.views\n\n\nurlpatterns = [\n # API v1.\n url(r'^api-token-auth/',\n rest_framework.authtoken.views.obtain_auth_token),\n url(r'^api/v1/register/(.*)/(.*)/',\n endagaweb.views.api.Register.as_view()),\n url(r'^api/v1/register/', endagaweb.views.api.Register.as_view()),\n url(r'^api/v1/fetch/(.*)/', endagaweb.views.api.GetNumber.as_view()),\n url(r'^api/v1/fetch/', endagaweb.views.api.GetNumber.as_view()),\n url(r'^api/v1/send/', endagaweb.views.api.SendSMS.as_view()),\n url(r'^api/v1/inbound/', endagaweb.views.api.InboundSMS.as_view()),\n url(r'^api/v1/receipt/',\n endagaweb.views.api.InboundReceipt.as_view()),\n url(r'^api/v1/checkin', endagaweb.views.api.Checkin.as_view()),\n url(r'^api/v1/bts/sslconf', endagaweb.views.api.SSLConfig.as_view()),\n 
url(r'^api/v1/bts/register',\n endagaweb.views.api.BTSRegistration.as_view()),\n url(r'^api/v1/bts/logfile',\n endagaweb.views.api.BTSLogfile.as_view()),\n # API v2.\n # /numbers/ -- POST to start the number-deactivation process.\n url(r'^api/v2/numbers/(?P[0-9]+)$',\n endagaweb.views.api_v2.Number.as_view(), name='number'),\n # /towers/ -- DELETE to start the bts deregistration process.\n url(r'^api/v2/towers/(?P[A-Za-z0-9-]+)$',\n endagaweb.views.api_v2.Tower.as_view(), name='apiv2_tower'),\n # /subscribers/ -- DELETE to start the sub-deactivation process.\n url(r'^api/v2/subscribers/(?P[^/]+)$',\n endagaweb.views.api_v2.Subscriber.as_view(), name='v2_subscribers'),\n\n # Routes for the new stats API, not to be confused with /stats (below).\n # Passes the infrastructure level in the URL (global, network, etc) and the\n # level id as a query param.\n url(r'^api/v1/stats/(.*)',\n endagaweb.stats_app.views.StatsAPIView.as_view()),\n\n # the internal API.\n url(r'^internal/api/v1/number/',\n endagaweb.views.internalapi.NumberLookup.as_view()),\n url(r'^internal/api/v1/uuid/',\n endagaweb.views.internalapi.UUIDLookup.as_view()),\n url(r'^internal/api/v1/auth/',\n endagaweb.views.internalapi.NumberAuth.as_view()),\n url(r'^internal/api/v1/voice/',\n endagaweb.views.internalapi.BillVoice.as_view()),\n\n # Our homepage.\n url(r'^$', endagaweb.views.static.LandingIndexView.as_view()),\n\n # ELB testing endpoint\n url(r'^django-status', endagaweb.views.static.TestView.as_view()),\n\n # Notification emails and phone nnumbers\n url(r'^account/notify_emails/update', endagaweb.views.user.update_notify_emails),\n url(r'^account/notify_numbers/update', endagaweb.views.user.update_notify_numbers),\n\n # Auth.\n url(r'^login/$', endagaweb.views.user.loginview, name='endagaweb-login'),\n url(r'^auth/', endagaweb.views.user.auth_and_login),\n url(r'^account/password/change', endagaweb.views.user.change_password),\n url(r'^account/update', endagaweb.views.user.update_contact),\n url(r'^account/', endagaweb.views.dashboard.dashboard_view),\n url(r'^logout/$', django.contrib.auth.views.logout, {'next_page': '/'}),\n\n # Dashboard.\n url(r'^dashboard/card', endagaweb.views.dashboard.addcard),\n url(r'^addmoney/', endagaweb.views.dashboard.addmoney),\n url(r'^dashboard/billing', endagaweb.views.dashboard.billing_view),\n url(r'^dashboard/profile', endagaweb.views.dashboard.profile_view),\n # Tower views in the dashboard.\n # /towers -- GET a list of towers or POST here to add one\n # /towers/ -- GET details on one tower\n # /towers//monitor -- GET related TimeseriesStats\n # /towers//edit -- GET details on one tower or POST to change them\n # /towers//deregister -- GET a UI for deregistering\n # /towers//tower_events -- GET related tower events\n url(r'^dashboard/towers$',\n endagaweb.views.towers.TowerList.as_view(),\n name='tower-list'),\n url(r'^dashboard/towers/(?P[A-Za-z0-9-]+)$',\n endagaweb.views.towers.TowerInfo.as_view(),\n name='tower-info'),\n url(r'^dashboard/towers/(?P[A-Za-z0-9-]+)/monitor$',\n endagaweb.views.towers.TowerMonitor.as_view(),\n name='tower-monitor'),\n url(r'^dashboard/towers/(?P[A-Za-z0-9-]+)/edit$',\n endagaweb.views.towers.TowerEdit.as_view(),\n name='tower-edit'),\n url(r'^dashboard/towers/(?P[A-Za-z0-9-]+)/deregister$',\n endagaweb.views.towers.TowerDeregister.as_view(),\n name='tower-deregister'),\n url(r'^dashboard/towers/(?P[A-Za-z0-9-]+)/tower_events$',\n endagaweb.views.towers.TowerEvents.as_view(),\n name='tower-events'),\n # Subscriber views in the dashboard.\n 
url(r'^dashboard/subscribers$',\n endagaweb.views.dashboard.subscriber_list_view),\n url(r'^dashboard/subscribers/(?P[^/]+)$',\n endagaweb.views.dashboard.SubscriberInfo.as_view(),\n name='subscriber-info'),\n url(r'^dashboard/subscribers/(?P[^/]+)/activity$',\n endagaweb.views.dashboard.SubscriberActivity.as_view(),\n name='subscriber-activity'),\n url(r'^dashboard/subscribers/(?P[^/]+)/send-sms$',\n endagaweb.views.dashboard.SubscriberSendSMS.as_view(),\n name='subscriber-send-sms'),\n url(r'^dashboard/subscribers/(?P[^/]+)/adjust-credit$',\n endagaweb.views.dashboard.SubscriberAdjustCredit.as_view(),\n name='subscriber-adjust-credit'),\n url(r'^dashboard/subscribers/(?P[^/]+)/edit$',\n endagaweb.views.dashboard.SubscriberEdit.as_view(),\n name='subscriber-edit'),\n # Network views in the dashboard.\n # /network -- GET basic network info\n # /network/prices -- GET pricing data for the network or POST to change it\n # /network/edit -- GET details on the network or POST to change them\n url(r'^dashboard/network$',\n endagaweb.views.network.NetworkInfo.as_view(),\n name='network-info'),\n url(r'^dashboard/network/prices$',\n endagaweb.views.network.NetworkPrices.as_view(),\n name='network-prices'),\n url(r'^dashboard/network/denominations$',\n endagaweb.views.network.NetworkDenomination.as_view(),\n name='network-denominations'),\n url(r'^dashboard/network/inactive-subscribers$',\n endagaweb.views.network.NetworkInactiveSubscribers.as_view(),\n name='network-inactive-subscribers'),\n url(r'^dashboard/network/edit$',\n endagaweb.views.network.NetworkEdit.as_view(),\n name='network-edit'),\n url(r'^dashboard/network/select/(?P[0-9]+)$',\n endagaweb.views.network.NetworkSelectView.as_view()),\n # The activity table.\n url(r'^dashboard/activity',\n endagaweb.views.dashboard.ActivityView.as_view(),\n name='network-activity'),\n\n # Raise a server error on-demand to test the 500 template.\n url(r'^insta-five-hundred$',\n endagaweb.views.static.InstaFiveHundred.as_view()),\n\n # OAuth login TODO(omar): setup OAuth provider\n url(r'^staff-login/', endagaweb.views.user.staff_login_view),\n url(r'^accounts/', include('allauth.urls')),\n]\n\n\nif 'django.contrib.admin' in settings.INSTALLED_APPS:\n # Only show the all-numbers table, the all-towers table and the margin\n # analysis page in staff-mode.\n urlpatterns += [\n url(r'^dashboard/staff/all-numbers$',\n endagaweb.views.staff.Numbers.as_view()),\n url(r'^dashboard/staff/all-towers$',\n endagaweb.views.staff.Towers.as_view()),\n url(r'^dashboard/staff/margin-analysis$',\n endagaweb.views.staff.MarginAnalysis.as_view(),\n name='margin-analysis'),\n url(r'^dashboard/staff/tower-monitoring$',\n endagaweb.views.staff.TowerMonitoring.as_view(),\n name='tower-monitoring'),\n url(r'^dashboard/staff/tower-monitoring\\?tower=(?P[0-9-]+)$',\n endagaweb.views.staff.TowerMonitoring.as_view(),\n name='tower-monitoring'),\n url(r'^dashboard/staff/network-earnings$',\n endagaweb.views.staff.NetworkEarnings.as_view(),\n name='network-earnings'),\n ]\n\n\nurlpatterns += [\n # The dashboard 'home'.\n url(r'^dashboard', endagaweb.views.dashboard.dashboard_view),\n\n # Old stats.\n url(r'^stats/numbers', endagaweb.views.stats.numbers),\n url(r'^stats/totals', endagaweb.views.stats.totals),\n\n # Debug.\n url(r'^debug', endagaweb.views.debug.debug_view),\n]\n\n\n# Only use django admin outside of prod.\nif 'django.contrib.admin' in settings.INSTALLED_APPS:\n # Register any apps that have admin functionality and add new URLs.\n admin.autodiscover()\n urlpatterns += 
[\n url(r'^django-admin/', include(admin.site.urls)),\n ]\n\n\n# We only install the loginas app in the staff version of the site and we hide\n# the ghosting routes in other versions.\nif 'loginas' in settings.INSTALLED_APPS:\n urlpatterns += url(r'^django-admin/', include('loginas.urls')),\n\nif 'DatabaseStorage' in settings.DEFAULT_FILE_STORAGE:\n urlpatterns += [\n url(r'^file/(?P.+)$',\n endagaweb.views.file_upload.file_view, name='file-upload')\n ]\n","repo_name":"facebookarchive/CommunityCellularManager","sub_path":"cloud/endagaweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"32"} +{"seq_id":"21295982780","text":"def find_differences(list1, list2):\n \"\"\"\n Takes two lists as input and returns a new list containing the elements that are different between them.\n \"\"\"\n # create an empty list to store the differences\n differences = []\n \n # iterate over the elements in list1\n for element in list1:\n # if the element is not in list2, add it to the differences list\n if element not in list2:\n differences.append(element)\n \n # iterate over the elements in list2\n # for element in list2:\n # # if the element is not in list1, add it to the differences list\n # if element not in list1:\n # differences.append(element)\n \n # return the differences list\n return differences","repo_name":"zak123/DiscordChallongeBot","sub_path":"formatting_helper.py","file_name":"formatting_helper.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25456996006","text":"from django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib.auth import login\nfrom django.http import HttpResponseForbidden\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom .models import ShippingAddress, Order, OrderDetail, Movie\nfrom django.contrib.auth.models import User\nfrom .forms import UserRegistrationForm\nimport requests\nimport json\nimport os\nimport multiprocessing\nmultiprocessing.set_start_method(\"fork\")\n\n\n# Super Buff Multiprocessing code by TBD\ndef home(request):\n response = requests.get(f\"{os.environ['MOVIE_DB_ROOT']}genre/movie/list?api_key={os.environ['MOVIE_DB_KEY']}\")\n genres = response.json()['genres']\n image_url = os.environ['MOVIE_DB_IMAGE_URL']\n multi_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1)\n genres = multi_pool.map(get_movies_from_genre, [genre for genre in genres])\n multi_pool.close()\n return render(request, 'home.html', {'genres': genres, 'image_url': image_url})\n\n\ndef get_movies_from_genre(genre):\n genre['movies'] = requests.get(\n f\"{os.environ['MOVIE_DB_ROOT']}discover/movie?api_key={os.environ['MOVIE_DB_KEY']}&with_genres={genre['id']}&language=en-US\").json()['results']\n return genre\n\n\ndef userprofile(request, user_id):\n if request.user.id != user_id:\n return HttpResponseForbidden('You cannot view what is not yours')\n addresses = ShippingAddress.objects.filter(user=request.user)\n user = User.objects.get(id=user_id)\n return render(request, 'users/index.html', {'addresses': addresses, 'user': user})\n\n\nclass ShippingAddressCreate(LoginRequiredMixin, CreateView):\n model = ShippingAddress\n fields = ['name', 'address', 'city', 'zip_code', 
'state', 'country']\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\n def get_form(self, form_class=None):\n form = super(ShippingAddressCreate, self).get_form(form_class)\n for visible in form.visible_fields():\n if visible.field.label == 'State':\n visible.field.widget.attrs.update({'class': 'form-select tbd-bg-secondary text-white'})\n else:\n visible.field.widget.attrs.update({'class': 'form-control tbd-bg-secondary'})\n return form\n\n\nclass ShippingAddressUpdate(LoginRequiredMixin, UpdateView):\n model = ShippingAddress\n fields = ['name', 'address', 'city', 'zip_code', 'state', 'country']\n\n def get_form(self, form_class=None):\n form = super(ShippingAddressUpdate, self).get_form(form_class)\n for visible in form.visible_fields():\n if visible.field.label == 'State':\n visible.field.widget.attrs.update({'class': 'form-select tbd-bg-secondary text-white'})\n else:\n visible.field.widget.attrs.update({'class': 'form-control tbd-bg-secondary text-white'})\n return form\n\n\nclass ShippingAddressDelete(LoginRequiredMixin, DeleteView):\n model = ShippingAddress\n\n def get_success_url(self):\n return reverse('userprofile', kwargs={'user_id': self.request.user.pk})\n\n\ndef signup(request):\n error_message = ''\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('home')\n else:\n error_message = 'Invalid sign up - try again'\n\n form = UserRegistrationForm()\n context = {'form': form, 'error_message': error_message}\n return render(request, 'registration/signup.html', context)\n\n\ndef search(request):\n query = request.GET.get('search')\n movies = ''\n image_url = ''\n if query:\n movies = requests.get(f\"{os.environ['MOVIE_DB_ROOT']}search/movie?api_key={os.environ['MOVIE_DB_KEY']}&query={query}\").json()['results']\n image_url = os.environ['MOVIE_DB_IMAGE_URL']\n return render(request, 'movies/search.html', {'movies': movies, 'image_url': image_url})\n\n\ndef movie_detail(request, movie_id):\n image_url = os.environ['MOVIE_DB_IMAGE_URL']\n movie = requests.get(f\"{os.environ['MOVIE_DB_ROOT']}movie/{movie_id}?api_key={os.environ['MOVIE_DB_KEY']}&append_to_response=release_dates\").json()\n credits = requests.get(f\"{os.environ['MOVIE_DB_ROOT']}movie/{movie_id}/credits?api_key={os.environ['MOVIE_DB_KEY']}\").json()['cast'][:10]\n cert = get_certification(movie)\n try:\n if Movie.objects.get(api_id=movie_id):\n pass\n except:\n Movie.objects.create(api_id=movie['id'], name=movie['original_title'], price=2.99)\n return render(request, 'movies/detail.html', {'movie': movie, 'credits': credits, 'image_url': image_url, 'certification': cert})\n\n\ndef get_certification(movie):\n for m in movie['release_dates']['results']:\n if m['iso_3166_1'] == \"US\":\n return m['release_dates'][0]['certification']\n\n\ndef cart(request):\n try:\n open_order = Order.objects.get(user=request.user.id, checkout_status=False)\n if open_order:\n order_total = open_order.order_total()\n open_order = open_order.order_detail_list()\n except:\n open_order = ''\n order_total = ''\n return render(request, 'cart/index.html', {'open_order': open_order, 'order_total': order_total})\n\n\ndef checkout(request):\n current_order = Order.objects.get(user=request.user.id, checkout_status=False)\n addresses = ShippingAddress.objects.filter(user=request.user)\n return render(request, 'cart/checkout.html', {'addresses': addresses, 'current_order': 
current_order})\n\n\ndef confirm_order(request, order_id):\n current_order = Order.objects.get(id=order_id)\n current_order.checkout_status = True\n current_order.save()\n return render(request, 'cart/confirm_order.html', {'current_order': current_order})\n\n\n@login_required\ndef add_to_cart(request):\n addresses = ShippingAddress.objects.filter(user=request.user)\n if len(addresses) == 0:\n message = 'Please create an address to add movie to cart'\n return render(request, 'users/index.html', {'message': message})\n movie_id = request.POST.get('movie_id')\n selected_movie = Movie.objects.get(api_id=movie_id)\n current_order = ''\n try:\n current_order = Order.objects.get(user=request.user, checkout_status=False)\n except:\n pass\n if current_order:\n try:\n order_to_update = OrderDetail.objects.filter(order=current_order, movie=selected_movie)\n q = order_to_update[0].quantity + 1\n order_to_update.update(quantity=q)\n order_to_update[0].set_order_price()\n except:\n OrderDetail.objects.create(\n order=current_order, movie=selected_movie, quantity=1, price=2.99)\n else:\n addresses = ShippingAddress.objects.filter(user=request.user)\n print('Reaching Line 143')\n default_address = addresses[0]\n new_order = Order.objects.create(user=request.user, ship_address=default_address)\n OrderDetail.objects.create(order=new_order, movie=selected_movie, quantity=1, price=2.99)\n return redirect('cart')\n\n\ndef user_orders(request, user_id):\n orders = Order.objects.filter(user=user_id)\n return render(request, 'users/order_history.html', {'orders': orders})\n","repo_name":"roger-davila/flixnet","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"2934535226","text":"import tensorflow as tf\nimport cv2\nimport numpy as np\nimport os\nfrom Models.architectures import *\n\n\n\n\nclass Discriminator(object):\n def __init__(self):\n self.input = tf.placeholder(tf.float32, shape=[None, 70, 70, 3], name='features')\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n\n # Ck block\n self.C64 = Ck(self.input, 64, name=\"c64\", with_batch_norm=False)\n self.C128 = Ck(self.C64, 128, name=\"c128\")\n self.C256 = Ck(self.C128, 256, name=\"c256\")\n self.C512 = Ck(self.C128, 512, name=\"c512\")\n\n\n def step(self, session, images):\n outputs = session.run([self.C512, self.C256, self.C128, self.C64], {self.input.name: images})\n return outputs[0], outputs[1]\n\n\ndef readImages():\n folderName = r\"D:\\data\\hackathon\\profile2manga\\Manga\"\n folder_images = [folderName + \"/\"+ f for f in os.listdir(folderName)]\n n = 5\n image_list = []\n for im_name in folder_images[:n]:\n im = cv2.imread(im_name)\n image_list.append(im)\n image_array = np.array(image_list)\n return image_array\n\n\ndef create_model(sess, model_path):\n \"\"\"\n Creates the model object with its graph, if there is an existing checkpoint restore the models variables,\n otherwise initialize them.\n :param sess: tensorflow session\n :return: the model\n \"\"\"\n model = Discriminator()\n ckpt = tf.train.get_checkpoint_state(model_path)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n sess.run(tf.global_variables_initializer())\n return model\n\ndef testNet():\n 
imagesArray = readImages()\n with tf.Session() as sess:\n testmodel = create_model(sess, \" \")\n res_output, ck_output = testmodel.step(sess, imagesArray)\n print(\"done\")\n\nif __name__ == \"__main__\":\n testNet()","repo_name":"amitaydr/Hackaton2018","sub_path":"Models/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39711483549","text":"import pytorch_lightning as pl\nfrom omegaconf import DictConfig\nimport torch.nn as nn\nfrom utils.losses import l2_loss, GANLoss\nimport torch\nfrom data import seq_collate\nfrom torch.utils.data import DataLoader\nfrom utils.utils import re_im\nfrom pytorch_lightning import Trainer\nfrom utils.visualize import visualize_probabilities\n# load optimizer\nfrom utils.radam import RAdam\n\n\ndef pretrain_func(generator, train_dset, val_dset, cfg, logger = None):\n\tpretrain = Pretrain(generator, train_dset, val_dset, cfg)\n\n\tStopCriterium = StopPretrain(cfg.pretrain)\n\tcallbacks = [StopCriterium]\n\tif cfg.pretraining.batch_size_scheduler:\n\t\tfrom utils.batchsizescheduler import BatchSizeScheduler\n\t\tcallbacks.append(BatchSizeScheduler(bs = cfg.pretraining.batch_size_scheduler,\n\t\t\t\t\t\t\t\t\t\t\tmax_bs = cfg.pretraining.batch_size ))\n\n\n\tpreTrainer = Trainer(logger,\n\t\t\t\t\t\t callbacks = callbacks,\n\t\t\t\t\t\t checkpoint_callback=False,\n\t\t\t\t\t\t num_sanity_val_steps=0,\n\t\t\t\t\t\t progress_bar_refresh_rate=10,\n\t\t\t\t\t\t **cfg.trainer)\n\n\tpreTrainer.fit(pretrain)\n\n\n\nclass StopPretrain(pl.callbacks.base.Callback):\n\tdef __init__(self, min_loss):\n\t\tself.min_loss = min_loss \n\t\tself.monitor_metric = \"val_loss\"\n\tdef on_validation_epoch_end(self, trainer, pl_module):\n\n\t\tif trainer.callback_metrics[self.monitor_metric] < self.min_loss:\n\t\t\ttrainer.should_stop = True\n\n\n\nclass Pretrain(pl.LightningModule):\n\tdef __init__(self, generator, train_dset, val_dset, cfg: DictConfig = None, loss_fns = None):\n\t\tsuper().__init__()\n\n\n\t\tself.cfg = cfg\n\t\tself.generator = generator\n\n\t\tself.generator.gen()\n\t\t# init loss functions\n\t\tself.loss_fns = loss_fns if loss_fns else {'L2': l2_loss, # L2 loss\n\t\t\t\t\t\t\t\t\t\t\t\t 'ADV': GANLoss(cfg.gan_mode), # adversarial Loss\n\t\t\t\t\t\t\t\t\t\t\t\t 'G': l2_loss, # goal achievement loss\n\t\t\t\t\t\t\t\t\t\t\t\t 'GCE': nn.CrossEntropyLoss()} # Goal Cross Entropy loss\n\t\t# init loss weights\n\n\t\tself.loss_weights = {'L2': cfg.w_L2,\n\t\t\t\t\t\t\t 'ADV': cfg.w_ADV, # adversarial Loss\n\t\t\t\t\t\t\t 'G': cfg.w_G, # goal achievement loss\n\t\t\t\t\t\t\t 'GCE': cfg.w_GCE} # Goal Cross Entropy loss\n\t\tself.train_dset = train_dset\n\t\tself.val_dset = val_dset\n\n\t\tself.plot_val = True\n\n\t\tif self.cfg.pretraining.batch_size_scheduler:\n\t\t\tself.batch_size = self.cfg.pretraining.batch_size_scheduler\n\t\telse: self.batch_size = self.cfg.batch_size\n\tdef train_dataloader(self):\n\t\t# REQUIRED\n\t\treturn DataLoader(\n\t\t\tself.train_dset,\n\t\t\tbatch_size=self.batch_size,\n\t\t\tshuffle=True,\n\t\t\tnum_workers=self.cfg.num_workers,\n\t\t\tcollate_fn=seq_collate\n\t\t)\n\n\tdef val_dataloader(self):\n\t\t# OPTIONAL\n\n\t\treturn DataLoader(\n\t\t\tself.val_dset,\n\t\t\tbatch_size=self.cfg.pretraining.batch_size,\n\t\t\tshuffle=True,\n\t\t\tnum_workers=self.cfg.num_workers,\n\t\t\tcollate_fn=seq_collate\n\t\t)\n\n\tdef training_step(self, batch, batch_idx):\n\t\t# gives a single float 
value\n\t\t# init loss and loss dict\n\t\ttqdm_dict = {}\n\t\ttotal_loss = 0.\n\n\n\n\t\tbatch_size = batch[\"size\"].item()\n\n\t\tgenerator_out = self.generator(batch)\n\n\n\t\n\t\ttarget_reshaped = batch[\"prob_mask\"].view(batch_size, -1)\n\t\toutput_reshaped = generator_out[\"y_scores\"].view(batch_size, -1)\n\n\t\t_, targets = target_reshaped.max(dim=1)\n\t\t\n\t\tloss_gce = self.loss_fns[\"GCE\"](output_reshaped, targets)\n\n\t\ttotal_loss += loss_gce\n\t\ttqdm_dict[\"GCE_pretrain\"] = loss_gce\n\t\n\t\tfor key, loss in tqdm_dict.items():\n\t\t\tself.logger.experiment.add_scalar('pre/{}'.format(key), loss, self.global_step)\n\n\t\treturn {\"loss\": total_loss}\n\n\tdef validation_step(self, batch, batch_idx):\n\n\t\tself.generator.test()\n\n\t\t# init loss and loss dict\n\t\ttqdm_dict = {}\n\t\ttotal_loss = 0.\n\n\t\tbatch_size = batch[\"size\"].item()\n\n\t\tgenerator_out = self.generator(batch)\n\n\t\tif self.plot_val:\n\t\t\tself.plot_val = False\n\t\t\tself.visualize_results(batch, generator_out)\n\n\t\ttarget_reshaped = batch[\"prob_mask\"][:batch_size].view(batch_size, -1)\n\t\toutput_reshaped = generator_out[\"y_scores\"][:batch_size].view(batch_size, -1)\n\n\t\t_, targets = target_reshaped.max(dim=1)\n\n\t\tloss_gce = self.loss_weights[\"GCE\"] * self.loss_fns[\"GCE\"](output_reshaped, targets)\n\n\t\ttotal_loss += loss_gce\n\t\ttqdm_dict[\"GCE_pretrain\"] = loss_gce\n\n\n\t\t# include early stopping when loss below threshold\n\n\n\t\treturn {\"loss\": total_loss}\n\n\n\tdef visualize_results(self, batch, out):\n\n\t\ty = out[\"y_map\"]\n\t\ty_softmax = out[\"y_softmax\"]\n\n\n\t\timage = visualize_probabilities(y_softmax = y_softmax,\n\t\t\t\t\t\t\t\t\t\ty = y,\n\t\t\t\t\t\t\t\t\t\tglobal_patch = re_im(batch[\"global_patch\"][0]).cpu().numpy(),\n\t\t\t\t\t\t\t\t\t\tprobability_mask = batch[\"prob_mask\"][0][0].cpu().numpy(),\n\t\t\t\t\t\t\t\t\t\tgrid_size_in_global = self.val_dset.grid_size_in_global\n\t\t\t\t\t\t\t\t\t\t)\n\t\tself.logger.experiment.add_image(f'Map', image, self.current_epoch)\n\n\n\tdef validation_epoch_end(self, outputs):\n\n\t\tGCE_loss = torch.stack([x['loss'] for x in outputs]).mean()\n\n\t\tself.logger.experiment.add_scalar('pre/GCE_val', GCE_loss, self.current_epoch)\n\t\tself.plot_val = True\n\t\n\t\treturn {'val_loss': GCE_loss}\n\n\tdef configure_optimizers(self):\n\t\topt_g = RAdam(self.generator.parameters(), lr=self.cfg.lr_pretrain)\n\n\t\t\n\t\tif self.cfg.lr_scheduler_pretrain:\n\t\t\tlr_scheduler_pretrain = [getattr(torch.optim.lr_scheduler, self.cfg.lr_scheduler_pretrain)(opt_g)]\n\n\t\telse:\n\t\t\tlr_scheduler_pretrain = []\n\n\t\treturn [opt_g], lr_scheduler_pretrain\n","repo_name":"dendorferpatrick/GoalGAN","sub_path":"model/pretrain_pl.py","file_name":"pretrain_pl.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"32"} +{"seq_id":"37609116560","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Import Splinter and BeautifulSoup\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nimport urllib3\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd \n\n\n# In[2]:\n\n\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# In[3]:\n\n\n# Visit the mars nasa news site\nurl = 'https://redplanetscience.com'\nbrowser.visit(url)\n# Optional delay for loading the 
page\nbrowser.is_element_present_by_css('div.list_text', wait_time=1)\n\n\n# In[4]:\n\n\nhtml = browser.html\nnews_soup = soup(html, 'html.parser')\nslide_elem = news_soup.select_one('div.list_text')\n\n\n# In[5]:\n\n\nslide_elem.find('div', class_='content_title')\n\n\n# In[6]:\n\n\n# Use the parent element to find the first `a` tag and save it as `news_title`\nnews_title = slide_elem.find('div', class_='content_title').get_text()\nnews_title\n\n\n# In[7]:\n\n\n# Use the parent element to find the paragraph text\nnews_p = slide_elem.find('div', class_='article_teaser_body').get_text()\nnews_p\n\n\n# ### Featured Images\n\n# In[8]:\n\n\n# Visit URL\nurl = 'https://spaceimages-mars.com'\nbrowser.visit(url)\n\n\n# In[9]:\n\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_tag('button')[1]\nfull_image_elem.click()\n\n\n# In[10]:\n\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\n\n\n# In[11]:\n\n\n# Find the relative image url\nimg_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\nimg_url_rel\n\n\n# In[12]:\n\n\n# Use the base URL to create an absolute URL\nimg_url = f'https://spaceimages-mars.com/{img_url_rel}'\nimg_url\n\n\n# In[13]:\n\n\ndf = pd.read_html('https://galaxyfacts-mars.com')[0]\ndf.columns=['description', 'Mars', 'Earth']\ndf.set_index('description', inplace=True)\ndf\n\n\n# In[14]:\n\n\ndf.to_html()\n\n\n# ### D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles\n\n# In[15]:\n\n\n# 1. Use browser to visit the URL \nurl = 'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/index.html'\n\nbrowser.visit(url)\n\n\n# In[16]:\n\n\n# 2. Create a list to hold the images and titles.\nhemisphere_image_urls = []\n\n# 3. Write code to retrieve the image urls and titles for each hemisphere.\n\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\nhttp = urllib3.PoolManager()\n\n# Add try/except for error handling\ntry:\n # Find the relative image url\n img_url_rel = img_soup.find_all('div', class_=\"item\")\nexcept AttributeError as e:\n print(e)\n \nfor i in img_url_rel:\n # Find the relative image url\n href = i.find('a')\n link = f'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/{href.get(\"href\")}'\n text = http.request('GET', link).data.decode(\"utf-8\")\n img_pic = soup(text, 'html.parser')\n img_pic_title = img_pic.find('h2', class_='title')\n img_pic_jpg = img_pic.find('img', class_='wide-image')\n hemisphere_image_urls.append({\n 'img_url': f'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/{img_pic_jpg.get(\"src\")}',\n 'title': img_pic_title.text\n })\n \n\n\n# In[17]:\n\n\n# 4. Print the list that holds the dictionary of each image url and title.\nhemisphere_image_urls\n\n\n# In[18]:\n\n\n# 5. 
Quit the browser\nbrowser.quit()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"daniethecreator/Mission-to-Mars","sub_path":"Mission_to_Mars_Challenge/Mission_to_Mars_Challenge.py","file_name":"Mission_to_Mars_Challenge.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73414205850","text":"import json \nfrom urllib.request import urlopen \nimport requests\n\nimport customtkinter\nfrom tkinter import *\nimport tkintermapview\n\n\nimport pytz\nfrom datetime import datetime\nfrom time import strftime\n\n\ncustomtkinter.set_appearance_mode('dark')\ncustomtkinter.set_default_color_theme('dark-blue')\n\nmy_label = None\ntimelabel = None\n# User Entry For Location\ndef return_key(event):\n entered_zip = enter_zip.get()\n zipcode = str(entered_zip)\n window.focus_set()\n if my_label is not None:\n my_label.destroy()\n my_frame.destroy()\n addPoint.destroy()\n if timelabel is not None:\n timelabel.destroy()\n\n\n# API OPEN WEATHER APP \n response = urlopen(\"https://api.openweathermap.org/data/2.5/weather?zip=\" + zipcode + \"&appid=310f40723630e5becce6757a5dacd2c6&units=imperial\") \n string = response.read().decode('utf-8')\n info = json.loads(string)\n\n name = str(info['name'])\n temp = round(float(info['main']['temp']))\n lat = float(info['coord']['lat'])\n lon = float(info['coord']['lon'])\n\n# API DATE AND TIME cle\n url = \"https://timezone.abstractapi.com/v1/current_time\"\n locations = name \n querystring = {\"location\": locations,\"api_key\":\"27da52e5747d4f85a92c79956984d475\"}\n response2 = requests.request(\"GET\", url, params=querystring)\n\n string2 = response2.content.decode('utf-8') \n info2 = json.loads(string2) \n timezone = str(info2['timezone_location']) \n\n # Displays real time\n def update_time():\n global current_time\n global timelabel\n time_zone = pytz.timezone(timezone)\n current_time = datetime.now(time_zone).strftime('%I:%M %p')\n timelabel = customtkinter.CTkLabel(window,\n padx= 4,\n pady= 4,\n height= 40,\n width= 400,\n text= (f'The Local Time Is {current_time}'),\n \n )\n timelabel.after(1000, update_time)\n timelabel.place(relx=0.30,rely=0.27) \n update_time() \n\n# Checks if given city is real\n def validate():\n if info == json.loads(string):\n check = True\n outcast = (f\"The Current Weather In {name} is {temp}° Fahrenheit\")\n if check is True:\n bg.configure(image=PhotoNight) \n label = customtkinter.CTkLabel(window,\n text= outcast,\n padx= 2,\n pady= 2,\n height= 40,\n width= 400, \n \n )\n label.place(relx=0.30,rely=0.32)\n \n def check_box():\n global my_label\n global my_frame\n global addPoint\n if check_var.get() == \"on\":\n \n def add_checkmark():\n map_widget.set_address(name,marker= True)\n\n my_frame = customtkinter.CTkFrame(window,\n border_width=2,\n border_color='black',\n corner_radius=0,\n width=700,\n height=380)\n my_frame.place(relx=0.16,rely=0.44)\n\n addPoint = customtkinter.CTkButton(window,\n text='Mark Location',\n width=30,\n height=20,\n command= add_checkmark\n )\n addPoint.place(relx=.17, rely=0.50)\n\n my_label = LabelFrame(window,height=450,\n width=350,\n borderwidth=0)\n my_label.place(relx= 0.28, rely= 0.45)\n\n map_widget = tkintermapview.TkinterMapView(my_label, width=450, height=350, corner_radius= 0, )\n map_widget.set_position(lat,lon)\n map_widget.set_zoom(13)\n map_widget.pack()\n\n else:\n if my_label:\n my_label.destroy()\n my_frame.destroy()\n addPoint.destroy()\n\n check_var = customtkinter.StringVar(value='off')\n 
checkBox = customtkinter.CTkCheckBox(window, \n command= check_box,\n variable= check_var,\n text= 'Open Map',\n onvalue='on',\n offvalue='off',\n checkmark_color='white',\n hover_color='grey',\n corner_radius=20,\n border_color='grey',\n border_width=1, \n )\n checkBox.place(relx=0.45, rely=0.40)\n \n validate()\n\n#Main Window \nwindow = customtkinter.CTk()\nwindow.geometry('1005x650')\nwindow.title(\"Weather App\")\n\n#Images\nPhotoBg = PhotoImage(file = '/Users/matton/Downloads/background 2.png')\nPhotoNight = PhotoImage(file = '/Users/matton/Downloads/weather 2.png')\n\n#Labels\nbg = Label(window, image=PhotoBg,)\nbg.pack()\n\n#Entry \nenter_zip = customtkinter.CTkEntry(window, \n placeholder_text= 'Enter Zip Code',\n width= 500,\n height=50,\n justify='center',\n corner_radius=5,\n border_width=2,\n border_color='black',\n fg_color='grey10'\n )\nenter_zip.place(relx=0.5, rely=0.2, anchor=customtkinter.CENTER)\nenter_zip.bind('' , return_key)\n\nwindow.mainloop()","repo_name":"kaimatton/WeatherUI","sub_path":"WeatherUI.py","file_name":"WeatherUI.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4827964359","text":"import logging\n\nfrom barman.command_wrappers import Command, full_command_quote\nfrom barman.exceptions import FsOperationFailed\n\n_logger = logging.getLogger(__name__)\n\n\ndef _str(cmd_out):\n \"\"\"\n Make a string from the output of a CommandWrapper execution.\n If input is None returns a literal 'None' string\n\n :param cmd_out: String or ByteString to convert\n :return str: a string\n \"\"\"\n if hasattr(cmd_out, 'decode') and callable(cmd_out.decode):\n return cmd_out.decode('utf-8', 'replace')\n else:\n return str(cmd_out)\n\n\nclass UnixLocalCommand(object):\n \"\"\"\n This class is a wrapper for local calls for file system operations\n \"\"\"\n\n def __init__(self, path=None):\n # initialize a shell\n self.internal_cmd = Command(cmd='sh', args=['-c'], path=path)\n\n def cmd(self, cmd_name, args=[]):\n \"\"\"\n Execute a command string, escaping it, if necessary\n \"\"\"\n return self.internal_cmd(full_command_quote(cmd_name, args))\n\n def get_last_output(self):\n \"\"\"\n Return the output and the error strings from the last executed command\n\n :rtype: tuple[str,str]\n \"\"\"\n return _str(self.internal_cmd.out), _str(self.internal_cmd.err)\n\n def create_dir_if_not_exists(self, dir_path):\n \"\"\"\n This method recursively creates a directory if not exists\n\n If the path exists and is not a directory raise an exception.\n\n :param str dir_path: full path for the directory\n \"\"\"\n _logger.debug('Create directory %s if it does not exists' % dir_path)\n exists = self.exists(dir_path)\n if exists:\n is_dir = self.cmd('test', args=['-d', dir_path])\n if is_dir != 0:\n raise FsOperationFailed(\n 'A file with the same name already exists')\n else:\n return False\n else:\n # Make parent directories if needed\n mkdir_ret = self.cmd('mkdir', args=['-p', dir_path])\n if mkdir_ret == 0:\n return True\n else:\n raise FsOperationFailed('mkdir execution failed')\n\n def delete_if_exists(self, path):\n \"\"\"\n This method check for the existence of a path.\n If it exists, then is removed using a rm -fr command,\n and returns True.\n If the command fails an exception is raised.\n If the path does not exists returns False\n\n :param path the full path for the directory\n \"\"\"\n _logger.debug('Delete path %s if exists' % path)\n exists = self.exists(path, False)\n if 
exists:\n rm_ret = self.cmd('rm', args=['-fr', path])\n if rm_ret == 0:\n return True\n else:\n raise FsOperationFailed('rm execution failed')\n else:\n return False\n\n def check_directory_exists(self, dir_path):\n \"\"\"\n Check for the existence of a directory in path.\n if the directory exists returns true.\n if the directory does not exists returns false.\n if exists a file and is not a directory raises an exception\n\n :param dir_path full path for the directory\n \"\"\"\n _logger.debug('Check if directory %s exists' % dir_path)\n exists = self.exists(dir_path)\n if exists:\n is_dir = self.cmd('test', args=['-d', dir_path])\n if is_dir != 0:\n raise FsOperationFailed(\n 'A file with the same name exists, but is not a directory')\n else:\n return True\n else:\n return False\n\n def check_write_permission(self, dir_path):\n \"\"\"\n check write permission for barman on a given path.\n Creates a hidden file using touch, then remove the file.\n returns true if the file is written and removed without problems\n raise exception if the creation fails.\n raise exception if the removal fails.\n\n :param dir_path full dir_path for the directory to check\n \"\"\"\n _logger.debug('Check if directory %s is writable' % dir_path)\n exists = self.exists(dir_path)\n if exists:\n is_dir = self.cmd('test', args=['-d', dir_path])\n if is_dir == 0:\n can_write = self.cmd(\n 'touch', args=[\"%s/.barman_write_check\" % dir_path])\n if can_write == 0:\n can_remove = self.cmd(\n 'rm', args=[\"%s/.barman_write_check\" % dir_path])\n if can_remove == 0:\n return True\n else:\n raise FsOperationFailed('Unable to remove file')\n else:\n raise FsOperationFailed(\n 'Unable to create write check file')\n else:\n raise FsOperationFailed('%s is not a directory' % dir_path)\n else:\n raise FsOperationFailed('%s does not exists' % dir_path)\n\n def create_symbolic_link(self, src, dst):\n \"\"\"\n Create a symlink pointing to src named dst.\n Check src exists, if so, checks that destination\n does not exists. if src is an invalid folder, raises an exception.\n if dst already exists, raises an exception. if ln -s command fails\n raises an exception\n\n :param src full path to the source of the symlink\n :param dst full path for the destination of the symlink\n \"\"\"\n _logger.debug('Create symbolic link %s -> %s' % (dst, src))\n exists = self.exists(src)\n if exists:\n exists_dst = self.exists(dst)\n if not exists_dst:\n link = self.cmd('ln', args=['-s', src, dst])\n if link == 0:\n return True\n else:\n raise FsOperationFailed('ln command failed')\n else:\n raise FsOperationFailed('ln destination already exists')\n else:\n raise FsOperationFailed('ln source does not exists')\n\n def get_system_info(self):\n \"\"\"\n Gather important system information for 'barman diagnose' command\n \"\"\"\n result = {}\n # self.internal_cmd.out can be None. 
The str() call will ensure it\n # will be translated to a literal 'None'\n release = ''\n if self.cmd(\"lsb_release\", args=['-a']) == 0:\n release = _str(self.internal_cmd.out).rstrip()\n elif self.exists('/etc/lsb-release'):\n self.cmd('cat', args=['/etc/lsb-release'])\n release = \"Ubuntu Linux %s\" % _str(self.internal_cmd.out).rstrip()\n elif self.exists('/etc/debian_version'):\n self.cmd('cat', args=['/etc/debian_version'])\n release = \"Debian GNU/Linux %s\" % _str(\n self.internal_cmd.out).rstrip()\n elif self.exists('/etc/redhat-release'):\n self.cmd('cat', args=['/etc/redhat-release'])\n release = \"RedHat Linux %s\" % _str(self.internal_cmd.out).rstrip()\n elif self.cmd('sw_vers') == 0:\n release = _str(self.internal_cmd.out).rstrip()\n result['release'] = release\n\n self.cmd('uname', args=['-a'])\n result['kernel_ver'] = _str(self.internal_cmd.out).rstrip()\n self.cmd('python', args=['--version', '2>&1'])\n result['python_ver'] = _str(self.internal_cmd.out).rstrip()\n self.cmd('rsync', args=['--version', '2>&1'])\n try:\n result['rsync_ver'] = _str(self.internal_cmd.out).splitlines(\n True)[0].rstrip()\n except IndexError:\n result['rsync_ver'] = ''\n self.cmd('ssh', args=['-V', '2>&1'])\n result['ssh_ver'] = _str(self.internal_cmd.out).rstrip()\n return result\n\n def get_file_content(self, path):\n \"\"\"\n Retrieve the content of a file\n If the file doesn't exist or isn't readable, it raises an exception.\n\n :param str path: full path to the file to read\n \"\"\"\n _logger.debug('Reading content of file %s' % path)\n\n result = self.exists(path)\n if not result:\n raise FsOperationFailed('The %s file does not exist' % path)\n\n result = self.cmd('test', args=['-r', path])\n if result != 0:\n raise FsOperationFailed('The %s file is not readable' % path)\n\n result = self.cmd('cat', args=['%s', path])\n if result != 0:\n raise FsOperationFailed('Failed to execute \"cat \\'%s\\'\"' % path)\n\n return self.internal_cmd.out\n\n def exists(self, path, dereference=True):\n \"\"\"\n Check for the existence of a path.\n\n :param str path: full path to check\n :param bool dereference: whether dereference symlinks, defaults\n to True\n :return bool: if the file exists or not.\n \"\"\"\n _logger.debug('check for existence of: %s' % path)\n options = ['-e', path]\n if not dereference:\n options += ['-o', '-L', path]\n result = self.cmd('test', args=options)\n return result == 0\n\n def ping(self):\n\n \"\"\"\n 'Ping' the server executing the `true` command.\n\n :return int: the true cmd result\n \"\"\"\n _logger.debug('execute the true command')\n result = self.cmd(\"true\")\n return result\n\n def list_dir_content(self, dir_path, options=[]):\n \"\"\"\n List the contents of a given directory.\n\n :param str dir_path: the path where we want the ls to be executed\n :param list[str] options: a string containing the options for the ls\n command\n :return str: the ls cmd output\n \"\"\"\n _logger.debug('list the content of a directory')\n ls_options = []\n if options:\n ls_options += options\n ls_options.append(dir_path)\n self.cmd('ls', args=ls_options)\n return self.internal_cmd.out\n\n\nclass UnixRemoteCommand(UnixLocalCommand):\n \"\"\"\n This class is a wrapper for remote calls for file system operations\n \"\"\"\n\n # noinspection PyMissingConstructor\n def __init__(self, ssh_command, ssh_options=None, path=None):\n \"\"\"\n Uses the same commands as the UnixLocalCommand\n but the constructor is overridden and a remote shell is\n initialized using the ssh_command provided by the 
user\n\n :param str ssh_command: the ssh command provided by the user\n :param list[str] ssh_options: the options to be passed to SSH\n :param str path: the path to be used if provided, otherwise\n the PATH environment variable will be used\n \"\"\"\n # Ensure that ssh_option is iterable\n if ssh_options is None:\n ssh_options = []\n\n if ssh_command is None:\n raise FsOperationFailed('No ssh command provided')\n self.internal_cmd = Command(ssh_command,\n args=ssh_options,\n path=path,\n shell=True)\n try:\n ret = self.cmd(\"true\")\n except OSError:\n raise FsOperationFailed(\"Unable to execute %s\" % ssh_command)\n if ret != 0:\n raise FsOperationFailed(\n \"Connection failed using '%s %s' return code %s\" % (\n ssh_command,\n ' '.join(ssh_options),\n ret))\n","repo_name":"ADBSQL/adb-barman","sub_path":"barman/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":11499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"40657061406","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n\n\n\nif __name__ == \"__main__\":\n file_list = [\"two_cluster.txt\"]\n\n for file in file_list:\n train = pd.read_csv(file, sep=' ',header=None)\n x = train.iloc[:, 0]\n y = train.iloc[:, 1]\n z = train.iloc[:, 2]\n lens = len(x)\n div = len(set(x))+1\n train.iloc[200:, [1, 2]] -= 1\n train.to_csv(\"two_c.txt\",header=0,index=0,sep=' ')\n print(\"saved\")\n c = np.row_stack((x/div,[0.5]*lens,[0.5]*lens)).transpose()\n # plt.title(fig,fontsize='large',fontweight='bold')\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x,y,z)\n plt.savefig(f\"fig/3d_point_{file[:-4]}.jpeg\")\n # plt.cla()\n figs = plt.figure()\n plt.scatter(y,z,c=c)\n plt.savefig(f\"fig/2d_point_{file[:-4]}.jpeg\")\n # plt.cla()\n","repo_name":"Kinddle-tick/ML_clustering","sub_path":"show_point.py","file_name":"show_point.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29303862611","text":"import numpy as np\nimport random\nimport csv\n\nwith open('file.csv') as f:\n DataSet = [tuple(line) for line in csv.reader(f)]\n\nPOP_SIZE = 9\nREMAINING_HOURS = 6\nELITES_NUMBER = 3\nWANTED_MARK = 12\nMAX_GEN = 30\n\n# chromosome class:\n\n\nclass Chromosome:\n\n def __init__(self):\n self.genes = np.random.randint(2, size=len(DataSet))\n self.fitness = 0\n self.volume = 0\n # Check if the volume is greater than the remaining hours then flip a random bit that equal 1 to 0\n while (self.get_volume() > REMAINING_HOURS):\n self.genes[np.random.choice(np.where(self.genes == 1)[0])] = 0\n\n def get_genes(self):\n return self.genes\n\n def __str__(self):\n return self.genes.__str__()\n\n def get_fitness(self):\n self.fitness = 0\n for (selected, (_, _, mark)) in zip(self.genes, DataSet):\n if selected == 1:\n self.fitness += int(mark)\n return self.fitness\n\n def get_volume(self):\n self.volume = 0\n for (selected, (_, required_time, _)) in zip(self.genes, DataSet):\n if selected == 1:\n self.volume += int(required_time)\n return self.volume\n\n\ndef roulette_wheel_selection(pop):\n partialSum = 0\n sumFitness = 0\n for i in range(POP_SIZE):\n sumFitness += pop[i].get_fitness()\n randomShot = random.random() * sumFitness\n i = -1\n while partialSum < randomShot and i < POP_SIZE-1:\n i += 1\n partialSum += pop[i].get_fitness()\n return pop[i]\n\n\ndef tour_selection(pop):\n selected = np.empty(4, 
dtype=Chromosome)\n i = 0\n while i < 3:\n selected[i] = pop[random.randint(0, POP_SIZE-1)]\n i += 1\n i = 1\n highest_fitness = 0\n while i < 3:\n if selected[i].get_fitness() > selected[highest_fitness].get_fitness():\n highest_fitness = i\n i += 1\n return selected[highest_fitness]\n\n\ndef crossover(parent1, parent2):\n cp = random.randint(0, len(parent1.genes)-1)\n p1 = parent1.genes[:cp]\n p2 = parent2.genes[cp:]\n child = Chromosome()\n child.genes = np.concatenate((p1, p2), axis=None)\n return child\n\n\ndef mutation(child):\n index = child.genes[np.random.choice(child.genes)]\n if (child.genes[index] == 1):\n child.genes[index] = 0\n else:\n child.genes[index] = 1\n\n\ndef repair(child):\n while (child.get_volume() > REMAINING_HOURS):\n child.genes[np.random.choice(np.where(child.genes == 1)[0])] = 0\n\n\ndef sort_population(pop):\n pop_fitness = np.empty(POP_SIZE)\n for i in range(POP_SIZE):\n pop_fitness[i] = pop[i].get_fitness()\n inds = pop_fitness.argsort()[::-1][:POP_SIZE]\n sorted_pop = pop[inds]\n return sorted_pop\n\n\ndef evolve(pop):\n\n parent_1 = roulette_wheel_selection(pop)\n parent_2 = roulette_wheel_selection(pop)\n parent_3 = roulette_wheel_selection(pop)\n\n # parent_1 = tour_selection(pop)\n # parent_2 = tour_selection(pop)\n # parent_3 = tour_selection(pop)\n\n child_1 = crossover(parent_1, parent_2)\n child_2 = crossover(parent_2, parent_1)\n child_3 = crossover(parent_1, parent_3)\n child_4 = crossover(parent_3, parent_1)\n child_5 = crossover(parent_3, parent_2)\n child_6 = crossover(parent_2, parent_3)\n\n mutation(child_1)\n mutation(child_2)\n mutation(child_3)\n mutation(child_4)\n mutation(child_5)\n mutation(child_6)\n\n repair(child_1)\n repair(child_2)\n repair(child_3)\n repair(child_4)\n repair(child_5)\n repair(child_6)\n\n new_pop = np.array([child_1, child_2,\n child_3, child_4, child_5, child_6])\n new_pop = np.concatenate((pop[:ELITES_NUMBER], new_pop), axis=None)\n\n return sort_population(new_pop)\n\n\ndef print_pop(pop):\n for i in range(np.size(pop, 0)):\n print(pop[i].get_genes())\n\n\ndef final_result(chromosome):\n result = ''\n for (selected, (chapter, _, _)) in zip(chromosome, DataSet):\n if selected == 1:\n result = result + chapter + ','\n\n return result\n\n\n# main\ngeneration_number = 0\npopulation = np.array([Chromosome() for _ in range(POP_SIZE)])\npopulation = sort_population(population)\nprint_pop(population)\nprint(\"-------\")\nprint(\"generation number is:\", generation_number)\nwhile population[0].get_fitness() < WANTED_MARK and generation_number < MAX_GEN:\n population = evolve(population)\n generation_number += 1\n print(\"----------------------------------------------\")\n print_pop(population)\n print(\"-------\")\n print(\"generation number is:\", generation_number)\n print(\"-------\")\n print(\"best fitness:\", population[0].get_fitness())\n\nprint(\"********final result**********\")\nprint(\"chapters to study:\", final_result(population[0].genes))\n","repo_name":"youssefsoua/Exam-Preparation-Using-0-1-Knapsack-Problem-with-Genetic-Algorithms","sub_path":"Exam_Preparation.py","file_name":"Exam_Preparation.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24235891943","text":"from datasets import load_dataset\nfrom transformers import AutoTokenizer\nfrom transformers import AutoModelForSeq2SeqLM, pipeline\nimport random\nfrom transformers import pipeline\nimport pandas as pd\nimport torch\n\n# this file creates 
augmented data with back translation from the original training set and saves the resulting combined training set. \n# it also filters the augmented data with back translation with given text quality metric and saves the resulting combined training set.\n\nrouge = pipeline(\"text2text-generation\", \n task=\"text-generation\", \n metric=\"rouge\")\n \n \ndataset = load_dataset(\"tweet_eval\", \"abortion\")\n\"\"\"\nyou can use any of the following config names as a second argument:\n\"emoji\", \"emotion\", \"hate\", \"irony\", \n\"offensive\", \"sentiment\", \"stance_abortion\", \"stance_atheism\", \n\"stance_climate\", \"stance_feminist\", \"stance_hillary\"\n\"\"\"\ndata = {\n 'text': [],\n 'label': [],\n}\ndevice = torch.cuda.current_device()\n\ntranslator_en_to_x = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\", device=device, num_beams=2, length_penalty=0.5)\ntranslator_x_to_y = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-es-ru\", device=device, num_beams=2, length_penalty=0.5)\ntranslator_y_to_en = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-ru-en\", device=device, num_beams=2, length_penalty=0.5)\nfor sample in dataset[\"train\"]:\n text = sample[\"text\"]\n label = sample[\"label\"]\n #print(text)\n trans_text = translator_en_to_x(text)[0]['translation_text']\n inter_trans_text = translator_x_to_y(trans_text)[0]['translation_text']\n back_trans_text = translator_y_to_en(inter_trans_text)[0]['translation_text']\n #print(back_trans_text)\n data['text'].append(back_trans_text)\n data['label'].append(label)\n score = rouge(text, back_trans_text)[0]['rougeLsum']['f']\n scores.append(score)\nmean_score = sum(scores) / len(scores)\n\ndf = pd.DataFrame(data)\n\n# save the DataFrame to a CSV file\ndf.to_csv('./synthetic_data/tweet_eval_train_abortion.csv', index=False)\n\n\nfiltered_df = df.copy()\n\nfor sample in dataset[\"train\"]:\n text = sample[\"text\"]\n label = sample[\"label\"]\n for i, (text2, label2) in df[['text', 'label']].iterrows():\n score = rouge(text, text2)[0]['rougeLsum']['f']\n if score < mean_score:\n filtered_df = filtered_df.drop(i)\n break\n\nfiltered_df.to_csv('./synthetic_data/tweet_eval_train_abortion_filtered.csv', index=False)\n\n\n","repo_name":"kbulutozler/directed-research-3","sub_path":"augmentation and filtering/back translation/back_trans_rouge.py","file_name":"back_trans_rouge.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29991870640","text":"import os\nimport importlib\nimport pkgutil\nimport typing as t\nfrom contextlib import suppress\nimport logging\nimport traceback\n\nlogger = logging.getLogger('FLASK_ROUTER')\n\nQuart = t.Any\nFlask = t.Any\n\nwith suppress(ImportError):\n from quart import Quart\n from flask import Flask\n\n\nApp = t.Union[Flask, Quart]\nModule = t.Any\n\nclass FlaskRouter:\n\n \"\"\"\n A class to register routes for a Flask or Quart application.\n\n Attributes:\n app (App): The Flask or Quart application to register routes for.\n modules_visited (set): A set of modules that have been visited.\n\n \"\"\"\n\n def __init__(self, app: App):\n\n \"\"\"\n The constructor for FlaskRouter class.\n\n Parameters:\n app (App): The Flask or Quart application to register routes for.\n \"\"\"\n\n self.app = app\n self.modules_visited: t.Set = set()\n\n def register_routes(self, methods:list=['GET'], root_name:str='pages'):\n\n \"\"\"\n The function to register routes for a given root module.\n\n 
Parameters:\n methods (list): The list of HTTP methods to register. Defaults to ['GET'].\n root_name (str): The name of the root module. Defaults to 'pages'.\n \"\"\"\n\n try:\n pages = importlib.import_module(f'{root_name}')\n except ImportError as error:\n logger.error(f\"Error importing 'pages': {error}\")\n return\n\n self.check_and_register_package(\n root_name=root_name,\n current_package=pages,\n methods=methods,\n package_path=pages.__path__[0]\n )\n\n def check_and_register_package(\n self,\n root_name: str,\n current_package: Module,\n methods: list,\n package_path: str,\n ):\n\n \"\"\"\n Checks and registers a package if it hasn't been visited yet.\n\n Parameters:\n root_name (str): The name of the root package.\n current_package (Module): The current package being processed.\n methods (list): List of HTTP methods to register for the routes.\n package_path (str): The filesystem path of the package.\n \"\"\"\n\n prefix = package_path.split(f'/{root_name}')[1]\n name = current_package.__name__.split('.')[-1]\n\n try:\n\n if prefix == '':\n name = 'index'\n\n checked_prefix = '/' + self.check_path(path=prefix)\n name = self.check_package_name(prefix, methods)\n\n view_func = importlib.import_module(f'{current_package.__name__}').index\n \n self.app.add_url_rule(\n rule=checked_prefix,\n endpoint=name,\n view_func=view_func, # type: ignore\n methods=methods\n )\n logger.debug(f'2) Adding url \"{checked_prefix}\", {name}')\n\n except AttributeError as error:\n logger.error(f\"Error registering route for {name}: {error}\")\n \n self.modules_visited.add(current_package.__name__)\n for mod in pkgutil.iter_modules([package_path]):\n\n if mod.name not in self.modules_visited:\n self.process_module(\n mod, package_path, current_package, root_name, methods\n )\n\n def check_package_name(self, prefix: str, methods: list):\n\n \"\"\"\n Checks and formats the package name.\n\n Parameters:\n prefix (str): The prefix for the package name.\n methods (list): List of HTTP methods for the route.\n\n Returns:\n complete_name (str): The formatted name of the package.\n \"\"\"\n\n name = prefix.replace(\"/\", \".\")[1:]\n name = name.replace('_', ':')\n \n replacements = {\n '[': '',\n ']': '',\n ',': ':',\n \"'\": '',\n ' ': ''\n }\n \n methods_str = str(methods)\n for char, replacement in replacements.items():\n methods_str = methods_str.replace(char, replacement)\n methods_str = ':' + methods_str\n \n complete_name = name + methods_str\n complete_name = 'index' if complete_name.startswith(':') else complete_name\n return complete_name\n \n def check_module_name(self, prefix: str, methods: list):\n\n \"\"\"\n Checks and formats the module name.\n\n Parameters:\n prefix (str): The prefix for the module name.\n methods (list): List of HTTP methods for the route.\n\n Returns:\n complete_name (str): The formatted name of the module.\n \"\"\"\n\n name = prefix.replace(\"/\", \".\")\n name = name.replace('_', ':')\n \n replacements = {\n '[': '',\n ']': '',\n ',': ':',\n \"'\": '',\n ' ': ''\n }\n \n methods_str = str(methods)\n for char, replacement in replacements.items():\n methods_str = methods_str.replace(char, replacement)\n methods_str = ':' + methods_str\n \n complete_name = name + methods_str\n complete_name = 'index' if complete_name.startswith(':') else complete_name\n\n return complete_name\n\n def check_path(self, path: str):\n\n \"\"\"\n Checks and formats a given path.\n\n Parameters:\n path (str): The path to be checked and formatted.\n\n Returns:\n strip_name (str): The formatted path.\n 
\"\"\"\n\n replacements = {\n '(': '<',\n ')': '>',\n '[': '<',\n ']': '>',\n '_': ':',\n }\n strip_name = path.lstrip('/')\n\n for char, replacement in replacements.items():\n strip_name = strip_name.replace(char, replacement)\n\n return strip_name\n\n def process_module(self, mod, package_path, current_package, root_name, methods):\n \n \"\"\"\n Processes a module, registering its routes if it's a package or a module.\n\n Parameters:\n mod: The module to be processed.\n package_path (str): The filesystem path of the package.\n current_package (Module): The current package being processed.\n root_name (str): The name of the root package.\n methods (list): List of HTTP methods to register for the routes.\n \"\"\"\n \n try:\n if mod.ispkg:\n mod_path = os.path.join(package_path, mod.name)\n sub_package = importlib.import_module(\n f\"{current_package.__name__}.{mod.name}\"\n )\n self.check_and_register_package(\n root_name,\n sub_package,\n methods,\n mod_path\n )\n else:\n mod_path = os.path.join(package_path, mod.name)\n mod = importlib.import_module(f\"{current_package.__name__}.{mod.name}\") # type: ignore\n\n prefix = '/'.join(mod.__name__.split('.')[1:]) # type: ignore\n\n checked_prefix = '/' + self.check_path(path=prefix)\n name = self.check_module_name(prefix, methods)\n\n view_func = mod.index\n\n self.app.add_url_rule(\n rule=checked_prefix,\n endpoint=name,\n view_func=view_func,\n methods=methods\n )\n\n logger.debug(f'3) Adding url \"{prefix}\", {name}') # type: ignore\n\n except Exception as error:\n\n traceback.print_exc()\n\n logger.error(f\"Error while processing {mod.name}: {error}\")\n","repo_name":"antoniofernandodj/Flask-Router","sub_path":"flask_router/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23210805659","text":"#######################################################################################\n# MakingPlans Job Details Database.\n# ---------------------------------\n# Django LogEntry class model ovveridence.\n#\n# Checked: 2011/11/07\n# ichar@g2.ru\n#\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.models import User\nfrom django.contrib.admin.util import quote\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.safestring import mark_safe\n\nADDITION = 1\nCHANGE = 2\nDELETION = 3\n\nclass LogEntryManager( models.Manager ):\n def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):\n e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message)\n e.save()\n\nclass LogEntry( models.Model ):\n action_time = models.DateTimeField(_('action time'), auto_now=True)\n user = models.ForeignKey(User)\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.TextField(_('object id'), blank=True, null=True)\n object_repr = models.CharField(_('object repr'), max_length=200)\n action_flag = models.PositiveSmallIntegerField(_('action flag'))\n change_message = models.TextField(_('change message'), blank=True)\n objects = LogEntryManager()\n\n class Meta:\n verbose_name = _('log entry')\n verbose_name_plural = _('log entries')\n db_table = 'django_admin_log'\n ordering = ('-action_time',)\n\n def __repr__( self ):\n return 
smart_unicode(self.action_time)\n\n def caption( self ):\n return \"\"\"%s\"\"\" % \\\n ( self.get_admin_url(), self.object_id, self.object_repr )\n caption.allow_tags = True\n caption.short_description = _('Caption')\n caption.admin_order_field = 'object_repr'\n\n def object_short_repr( self ):\n sep = '...'\n l = 24\n s = ' '.join([len(x) > l and ('%s%s' % (x[:l], sep)) or x for x in self.object_repr.split(' ')])\n if sep in s:\n return s[:s.find(sep)+len(sep)]\n return s\n\n def is_addition( self ):\n return self.action_flag == ADDITION\n\n def is_change( self ):\n return self.action_flag == CHANGE\n\n def is_deletion( self ):\n return self.action_flag == DELETION\n\n def get_edited_object(self):\n \"Returns the edited object represented by this log entry\"\n return self.content_type.get_object_for_this_type(pk=self.object_id)\n\n def get_admin_url( self ):\n \"\"\"\n Returns the admin URL to edit the object represented by this log entry.\n This is relative to the Django admin index page.\n \"\"\"\n return mark_safe(u\"%s/%s/%s/\" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))\n\n def rendered_user( self ):\n html = \"\"\"%s\"\"\"\n if self.user is not None:\n return html % ( self.user, self.user.id, self.user )\n return html % ( self.user, '0', '---' )\n rendered_user.allow_tags = True\n rendered_user.short_description = _('User')\n rendered_user.admin_order_field = 'user'\n\n def rendered_action( self ):\n media_url = '/media/img'\n if self.is_addition():\n return '\"add\"' % media_url\n elif self.is_change():\n return '\"change\"' % media_url\n elif self.is_deletion():\n return '\"delete\"' % media_url\n rendered_action.allow_tags = True\n rendered_action.short_description = _('Action')\n rendered_action.admin_order_field = 'action_flag'\n\n def rendered_action_time( self ):\n html = \"\"\"%s ( %s )%s\"\"\"\n D = self.action_time.strftime('%d %b %Y')\n T = self.action_time.strftime('%H:%M:%S')\n w = self.action_time.strftime('%a')\n return html % (D, T, w)\n rendered_action_time.allow_tags = True\n rendered_action_time.short_description = _('Action time')\n rendered_action_time.admin_order_field = 'action_time'\n\n def rendered_content_type( self ):\n return '
%s
' % self.content_type\n rendered_content_type.allow_tags = True\n rendered_content_type.short_description = _('Content type')\n rendered_content_type.admin_order_field = 'content_type'\n\n def rendered_object_id( self ):\n if self.object_id is None or self.object_id == 'None':\n return '---'\n return self.object_id\n rendered_object_id.allow_tags = False\n rendered_object_id.short_description = _('OID')\n rendered_object_id.admin_order_field = 'object_id'\n\n def rendered_message( self ):\n s = self.change_message or ''\n if 'FlowLogic' in s or 'DataEntry' in s:\n i = s.find(':')\n caption = s[:i]\n s = '
\\n'.join([unichr(0x25a0)+' '+x for x in s[i+1:].split(',')])\n return '%s:
%s' % (caption, s)\n else:\n return s\n rendered_message.allow_tags = True\n rendered_message.short_description = _('Description')\n rendered_message.admin_order_field = 'change_message'\n","repo_name":"ichar/mp.python","sub_path":"manage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33964492043","text":"import sys, os\nfrom PyQt5 import QtGui,QtCore, QtWidgets, QtPrintSupport\nfrom PyQt5.QtGui import QAbstractTextDocumentLayout, \\\n QTextDocument, QPalette,QPageSize\nfrom PyQt5.QtCore import QSizeF\nclass Window(QtWidgets.QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.setWindowTitle('Document Printer')\n self.list = QtWidgets.QListWidget()\n self.editor = QtWidgets.QTextEdit()\n \n self.editor.textChanged.connect(self.handleTextChanged)\n## self.editor.setFixedSize(self.list.sizeHintForColumn(0) + 2 * self.list.frameWidth(), self.list.sizeHintForRow(0) * self.list.count() + 2 *self.list.frameWidth())\n self.buttonOpen = QtWidgets.QPushButton('Open', self)\n self.buttonOpen.clicked.connect(self.handleOpen)\n self.buttonPrint = QtWidgets.QPushButton('Print', self)\n self.buttonPrint.clicked.connect(self.handlePrint)\n self.buttonPreview = QtWidgets.QPushButton('Preview', self)\n self.buttonPreview.clicked.connect(self.handlePreview)\n layout = QtWidgets.QGridLayout(self)\n layout.addWidget(self.editor, 0, 0, 1, 3)\n layout.addWidget(self.buttonOpen, 1, 0)\n layout.addWidget(self.buttonPrint, 1, 1)\n layout.addWidget(self.buttonPreview, 1, 2)\n self.handleTextChanged()\n self.handlecontent()\n self.handlePrint()\n def handlecontent(self):\n space = \" \"\n header = '

'+ space * 10 + \"W.C wholesalers

\" + space * 15 + \"Shuja hse

\"+ space * 16 +\" Helena Road
\"\n \n content =\"\"\" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ItemdescriptionQuantitycost
January$100$100$100
February$80$80$80
\"\"\"\n math = \"

\"+space * 20 +\"Total \" +space * 15 + \"amount

\" + \"

\"+space * 20 +\"Subtotal \" +space * 8 + \"amount

\" \n \n footer = \"

\"+space * 10 +\"mobile phone

\"\n self.editor.append(header)\n self.editor.append(content)\n self.editor.append(math)\n self.editor.append(footer)\n\n def handleOpen(self):\n path = QtWidgets.QFileDialog.getOpenFileName(\n self, 'Open file', '',\n 'HTML files (*.html);;Text files (*.txt);;Pdf files (*.pdf)')[0]\n if path:\n file = QtCore.QFile(path)\n if file.open(QtCore.QIODevice.ReadOnly):\n stream = QtCore.QTextStream(file)\n text = stream.readAll()\n info = QtCore.QFileInfo(path)\n if info.completeSuffix() == 'html':\n self.editor.setHtml(text)\n else:\n self.editor.setPlainText(text)\n## file.close()\n\n def handlePrint(self):\n## width = 800\n## height = 263.520403\n self.printer = QtPrintSupport.QPrinter()\n## self.printer.setPageSize(QPageSize(QSizeF(width, height), QPageSize.Unit.Point, 'Cheque'))\n## self.printer.setFullPage(True)\n self.printer.NativeFormat\n dialog = QtPrintSupport.QPrintDialog()\n \n if dialog.exec_() == QtWidgets.QDialog.Accepted:\n print(self.editor)\n self.editor.print_(self.printer)\n\n def handlePreview(self):\n dialog = QtPrintSupport.QPrintPreviewDialog(self.printer)\n dialog.paintRequested.connect(self.editor.print_)\n dialog.exec_()\n\n def handleTextChanged(self):\n enable = not self.editor.document().isEmpty()\n self.buttonPrint.setEnabled(enable)\n self.buttonPreview.setEnabled(enable)\n def onSaveButtonClicked(self):\n reply = QtGui.QMessageBox.question(parent=self, title='Attention',\n text='File will be overwritten.\\nDo you still want to proceed?',\n buttons=QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n defaultButton=QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n filename = self.inputFileLineEdit.text()\n length = self.lengthSpinBox.value()\n width = self.widthSpinBox.value()\n line_width = self.lineWidthSpinBox.value()\n rounded = self.cornersCheckBox.isChecked()\n corners_radius = self.cornersSpinBox.value()\n x = self.xSpinBox.value()\n y = self.ySpinBox.value()\n\n print( \"Values are: \")\n print( \"Filename: %s\" % filename)\n print( \"Length: %.2f Width: %.2f\" % (length, width))\n print( \"Line width: %.2f\" % line_width)\n if corners_radius:\n print(\"Corner radius: %.2f\" % corners_radius)\n print(\"x: %.2f y: %.2f\" % (x, y))\n def onInputFileButtonClicked(self):\n filename, filter = QtGui.QFileDialog.getOpenFileName(parent=self, caption='Open file', dir='.', filter='Kicad PCB Files (*.txt)')\n\n if filename:\n self.inputFileLineEdit.setText(filename)\n\n\nif __name__ == '__main__':\n\n app = QtWidgets.QApplication(sys.argv)\n window = Window()\n window.resize(640, 480)\n window.show()\n sys.exit(app.exec_())\n","repo_name":"wincerd/poS","sub_path":"printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"41241542015","text":"import docker\nimport time\nimport json\nimport requests\n\nclient = docker.from_env()\n\ncontainerStatus = {}\n\nwhile True:\n containerList = client.containers.list()\n body = {}\n for container in containerList:\n if 'service' in container.labels.keys():\n containerStats = container.stats(stream=False)\n cpuUseage = containerStats['cpu_stats']['cpu_usage']['total_usage']\n memoryLimit = containerStats['memory_stats']['limit']\n memoryMaxUsed = containerStats['memory_stats']['max_usage']\n memoryUsed = containerStats['memory_stats']['usage']\n # port = container.labels['port']\n # Get ip from database\n # response = requests.get('http://127.0.0.1:8001/health')\n # if response.status_code == 
200:\n # containerStatus[container.id] = time.time()\n # else:\n # if containerStatus[container.id] == 0:\n # # Send request to CSS to restart service\n # body = {\n # 'action': 'restart',\n # 'id': container.id\n # }\n # response = requests.post('http://127.0.0.1:8001/action', json=body)\n # else:\n # containerStatus[container.id] = 0\n print('Inside Body')\n body[container.id] = {\n 'service': container.labels['service'],\n 'cpu_usage': cpuUseage,\n 'max_memory': memoryLimit,\n 'max_used': memoryMaxUsed,\n 'mem_usage': memoryUsed\n }\n else:\n containerStats = container.stats(stream=False)\n print(body)\n requests.post('http://127.0.0.1:8001/updateData', json=body)\n time.sleep(5)","repo_name":"cloudybytes/CloudAssignment2","sub_path":"monitoringService/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33247028880","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 18 13:21:58 2019\n\n@author: yangchg\n\"\"\"\n\n#coding=utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nimport time,datetime\nimport os\nfrom pyquery import PyQuery as pq\n#from config import settings as SET\nimport re\n\n#browser_for_login为正常浏览器,用于登录\n#browser_for_login = webdriver.Chrome()\n\nchrome_options = Options()\nchrome_options.add_argument('--headless')\n#chrome_options.add_argument('--disable-gpu')\n#无头模式\n#browser = webdriver.Chrome(chrome_options=chrome_options)\nbrowser2 = webdriver.Chrome(options=chrome_options)\n\nbrowser = webdriver.Chrome()\nwait = WebDriverWait(browser,10)\n\n\nchoice_list=[]\nban_list=[]\n\nbrowser.get('https://passport.jd.com/new/login.aspx')\nwhile browser.current_url!='https://www.jd.com/':\n print(browser.current_url)\n time.sleep(2)\n\ncookies = browser.get_cookies()\n#browser.close()\nbrowser2.get('https://www.jd.com/index-1000002668.html')\nfor cookie in cookies:\n browser2.add_cookie(cookie)\n\nbrowser.get('https://mall.jd.com/index-1000002668.html')\nclickarea = browser.find_element_by_xpath('//*[@class=\"J_LayoutWrap d-layout-wrap layout-auto d-enable \"]/div/div/div/div/div/div/div/map/area[4]')\n\n#方法1 直接点解区域\nclickarea.click() \n\n#方法2 发送链接地址\nbrowser.execute_script('window.open()')\nbrowser.switch_to.window(browser.window_handles[1])\nhref = clickarea.get_attribute('href') \nbrowser.get(href)\n\ndateLine = datetime.datetime.strptime('20190618 195959','%Y%m%d %H%M%S')\n\n\nwhile True :\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n \n if datetime.datetime.now() >= dateLine:\n clickarea.click()\n #browser.get(clickarea.get_attribute('href') )\n \n runtime = (datetime.datetime.now() - dateLine) \n if runtime.days>=0 and runtime.seconds >10 :\n break\n \n time.sleep(0.1)\n print(now)\n \n\n\n","repo_name":"ycg860102/JD_pachong","sub_path":"JD_atm_yhq.py","file_name":"JD_atm_yhq.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30470182048","text":"from enum import Enum\n\nclass Direccion(Enum):\n L = 0\n R = 1\n\nclass Transition():\n def __init__(self, nxt_st, nxt_alpha, nxt_dir):\n self.nxt_st = nxt_st\n self.nxt_alpha = nxt_alpha\n self.nxt_dir = 
nxt_dir\n\n def get_nxt_st(self):\n return self.nxt_st\n\n def get_nxt_alpha(self):\n return self.nxt_alpha\n\n def get_nxt_dir(self):\n return self.nxt_dir\n\nclass Turing():\n def __init__(self, tb, estados, alfabeto, report):\n self.tb = tb\n self.tape = []\n self.th = 0\n self.estados = estados\n self.alfabeto = alfabeto\n self.report = report\n\n def init_tape(self, string):\n self.tape = [self.alfabeto.get_alpha(c) for c in string] + [self.alfabeto.B_]\n\n def run(self):\n curr_st = self.estados.get_init_st()\n curr_alpha = self.tape[self.th]\n status = True\n\n while True:\n\n tr = self.tb[curr_st.value][curr_alpha.value]\n if not tr:\n status = False\n self.report.summit(curr_st.name, curr_alpha.name, \"/\")\n break\n\n nxt_st = tr.get_nxt_st()\n nxt_alpha = tr.get_nxt_alpha()\n nxt_dir = tr.get_nxt_dir()\n\n self.report.summit(curr_st.name, curr_alpha.name, nxt_st.name)\n\n curr_st = nxt_st\n self.tape[self.th] = nxt_alpha\n\n if nxt_dir == Direccion.L:\n self.th -= 1\n else:\n self.th += 1\n\n curr_alpha = self.tape[self.th]\n\n # Si fuera la condición de while loop la cadena vacía siempre se aceptaría\n # que solo es el caso para regex de la forma (x)*\n if self.estados.isfinal(curr_st) and curr_alpha == self.alfabeto.B_:\n break\n\n self.report.set_status(status)\n\n return status\n","repo_name":"juarez-gonza/automatas_tps","sub_path":"4/src/Turing.py","file_name":"Turing.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7772228450","text":"from abc import ABC\nimport unreal\nfrom typing import List, cast, Any\nimport os.path\n\n\nclass QueryBase(ABC):\n \"\"\"Base class for import queries.\"\"\"\n\n def test(self, factory: unreal.Factory, created_object: unreal.Object) -> bool:\n \"\"\"Test the created object and factory against this query.\"\"\"\n raise NotImplemented\n\n\nclass SourcePath(QueryBase):\n \"\"\"Query an imported factory and file path\"\"\"\n\n def __init__(\n self,\n file_name_starts_with: str = \"\",\n file_name_ends_with: str = \"\",\n file_name_contains: str = \"\",\n full_path_contains: str = \"\",\n extensions: List[str] = [],\n requires_all: bool = False,\n case_sensitive: bool = False,\n ) -> None:\n self.case_sensitive = case_sensitive\n\n # if case sensitive we use the substring as is, if not we lowercase it for future comparisons.\n self.file_name_starts_with = (\n file_name_starts_with\n if self.case_sensitive\n else file_name_starts_with.lower()\n )\n self.file_name_ends_with = (\n file_name_ends_with if self.case_sensitive else file_name_ends_with.lower()\n )\n self.file_name_contains = (\n file_name_contains if self.case_sensitive else file_name_contains.lower()\n )\n self.full_path_contains = (\n full_path_contains if self.case_sensitive else full_path_contains.lower()\n )\n self.extensions = [ext.lower() for ext in extensions]\n self.requires_all = requires_all\n\n def test(self, factory: unreal.Factory, created_object: unreal.Object) -> bool:\n if created_object is None:\n return False\n\n # if the created_object doesn't implement asset_import_data we early out.\n if not has_editor_property(created_object, \"asset_import_data\"):\n return False\n\n asset_import_data = cast(\n unreal.AssetImportData, created_object.get_editor_property(\"asset_import_data\")\n )\n file_path = asset_import_data.get_first_filename()\n\n # if case sensitive we use the filename raw, if not we lowercase for the future comparisons.\n file_path = file_path if 
self.case_sensitive else file_path.lower()\n file_name, extension = os.path.splitext(os.path.basename(file_path))\n\n results = []\n\n if self.file_name_starts_with:\n results.append(file_name.startswith(self.file_name_starts_with))\n\n if self.file_name_ends_with:\n results.append(file_name.endswith(self.file_name_ends_with))\n\n if self.file_name_contains:\n results.append(self.file_name_contains in file_name)\n\n if self.full_path_contains:\n results.append(self.full_path_contains in file_name)\n\n if self.extensions:\n results.append(extension in self.extensions)\n\n if self.requires_all:\n return all(results)\n\n return any(results)\n\n\nclass DestinationPath(QueryBase):\n\n \"\"\"Query based on the path the created object ends up in.\"\"\"\n\n def __init__(\n self,\n path_contains: str = \"\",\n case_sensitive: bool = False,\n ) -> None:\n self.case_sensitive = case_sensitive\n self.destination_path_contains = (\n path_contains if case_sensitive else path_contains.lower()\n )\n\n def test(self, factory: unreal.Factory, created_object: unreal.Object) -> bool:\n if created_object is None: # Early out, can't do destination path comparisons.\n return False\n\n destination_path = (\n created_object.get_path_name()\n if self.case_sensitive\n else created_object.get_path_name().lower()\n )\n\n if self.destination_path_contains:\n return self.destination_path_contains in destination_path\n\n return False\n\nclass CheckAssetTag(QueryBase):\n\n \"\"\"Query based on the asset tags of the created object. Optional asset_tag_value parameter will do a string equality compare.\n If left empty, then the test will only look to see if the tag exists.\"\"\"\n\n def __init__(\n self,\n asset_tag_key:str,\n asset_tag_value:Any = None,\n ) -> None:\n self.asset_tag_key = asset_tag_key\n self.asset_tag_value = asset_tag_value\n\n def test(self, factory: unreal.Factory, created_object: unreal.Object) -> bool:\n if created_object is None:\n return False\n \n eas = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)\n if eas:\n value = eas.get_metadata_tag(created_object, self.asset_tag_key)\n if value == \"\":\n return False\n return self.asset_tag_value is None or str(self.asset_tag_value) == value\n return False\n\n\n\ndef has_editor_property(created_object: unreal.Object, editor_property: str) -> bool:\n try:\n obj = created_object.get_editor_property(editor_property)\n return True\n except:\n return False\n","repo_name":"Ryan-DowlingSoka/UnrealImporterRules-Python","sub_path":"Content/Python/ImporterRules/Queries.py","file_name":"Queries.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"576155558","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('um_000073.png')\n\ngray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\nhsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\nnum_s = 0\nnum_l = 0\nmean_s = 0\nmean_l = 0\nvar_s = 0\nvar_l = 0\n\n#print(binary_img[20,56])\n\n#print(img.shape[0])\n#print(img.shape[1])\n\nfor i in range(0,img.shape[0]):\n\tfor j in range(0,img.shape[1]):\n\t\tif hsv_img[i,j,0] > 97 and hsv_img[i,j,0] < 120:\n\t\t#if hsv_img[i,j,1] <= 30 or hsv_img[i,j,2] <= 30:\n\t\t\t#if binary_img[i,j] == 0:\n\t\t\t#if hsv_img[i,j,0] > 97 and hsv_img[i,j,0] < 120:\n\t\t\t\tmean_s += gray_img[i,j]\n\t\t\t\tnum_s += 1\n\t\telse:\n\t\t\t\tmean_l += gray_img[i,j]\n\t\t\t\tnum_l += 1\n\nmean_s = mean_s / num_s\nmean_l = mean_l / num_l\n\n#print(mean_s)\n\nfor i in 
range(0,img.shape[0]):\n\tfor j in range(0,img.shape[1]):\n\t\tif hsv_img[i,j,0] > 97 and hsv_img[i,j,0] < 120:\n\t\t\t#if hsv_img[i,j,1] <= 30 or hsv_img[i,j,2] <= 30:\n\t\t\t\t#if binary_img[i,j] == 0:\n\t\t\t\t\tvar_s += (gray_img[i,j] - mean_s) ** 2\n\t\telse :\n\t\t\t\t\tvar_l += (gray_img[i,j] - mean_l) ** 2\n\nvar_s = var_s / num_s\nvar_l = var_l / num_l\n\nfor i in range(0,img.shape[0]):\n\tfor j in range(0,img.shape[1]):\n\t\tif hsv_img[i,j,0] > 97 and hsv_img[i,j,0] < 120:\n\t\t#if hsv_img[i,j,1] <= 30 and hsv_img[i,j,2] <=180:\n\t\t\t#print(hsv_img[i,j,2])\n\t\t\tif hsv_img[i,j,2] <= 200:\n\t\t\t\t#if binary_img[i,j] == 0:\n\t\t\t\t\tgray_img[i,j] -= (mean_s + ((gray_img[i,j] - mean_l) * var_l / var_s))/2.5 \n\nkernel = np.ones((2, 2), np.uint8)\n\ndilation2 = cv2.dilate(gray_img, kernel, iterations = 1)\n\nmedian3 = cv2.medianBlur(dilation2, 3)\n\nerosion3 = cv2.erode(median3, kernel, iterations = 1)\n\nfor i in range(0,img.shape[0]):\n\tfor j in range(0,img.shape[1]):\n\t\thsv_img[i,j,2] = erosion3[i,j]\n#median2 = cv2.medianBlur(erosion3, 3)\n\n#retval, threshold = cv2.threshold(erosion3,0,255,cv2.THRESH_OTSU)\n\ncv2.imshow('Final image', hsv_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"sinchana-hegde/shadow-removal","sub_path":"onlyremoval.py","file_name":"onlyremoval.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71899470171","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 24 13:25:32 2021\n\n@author: linjianing\n\"\"\"\n\n\nimport os\nimport pandas as pd\nfrom copy import deepcopy\nfrom itertools import product\nfrom autolrscorecard.entityset.entity import Entity\nfrom autolrscorecard.woebin.woebinning import ExploreWOEBinning, WOEBinning\nfrom autolrscorecard.collinear.varcluscust import VarClusCust\nfrom autolrscorecard.model.modelestimator import LassoLRCV, LRCust, Stepwise\nfrom autolrscorecard.plotfig.plotfig import (\n plot_bin, plot_repeat_split_performance)\nfrom autolrscorecard.performance.modelstability import (\n repeatkfold_performance, vars_bin_psi, score_psi)\n\n\nclass EntitySet:\n \"\"\"实体集合.\"\"\"\n\n def __init__(self, id, entities=None):\n \"\"\"创建实体集合.\n\n Example_:\n entities = {'train_sample': (train_df, {'target': 'flag, 'variable_options':{}})}.\n \"\"\"\n self.id = id\n self.entity_dict = {}\n entities = entities or {}\n for entity in entities:\n df = entities[entity][0]\n kw = {}\n if len(entities[entity]) == 2:\n kw = entities[entity][1]\n self.entity_from_dataframe(entity_id=entity,\n dataframe=df,\n **kw)\n self.pipe_result = {}\n self.steps = {}\n self.best_bins = {}\n self.in_model_vars = {}\n\n def entity_from_dataframe(self, entity_id, dataframe, target=None,\n variable_options=None):\n \"\"\"从dataframe生成实体.\"\"\"\n variable_options = variable_options or {}\n entity = Entity(entity_id,\n dataframe,\n target,\n variable_options)\n self.entity_dict[entity.id] = entity\n return self\n\n def drop_entities(self, drop_list):\n \"\"\"删除实体.\"\"\"\n self.entity_dict = {k: v for k, v in self.entity_dict.items()\n if k not in drop_list}\n return self\n\n @property\n def entities(self):\n \"\"\"获取实体集合.\"\"\"\n return list(self.entity_dict.values())\n\n def get_entity(self, entity_id):\n \"\"\"获取实体.\"\"\"\n return self.entity_dict[entity_id]\n\n def init_pipe_X(self, entity_id):\n \"\"\"初始化pipe_X.\"\"\"\n entity = self.get_entity(entity_id)\n entity.pipe_X = entity.df.loc[:, entity.variable_options.keys()]\\\n 
.copy(deep=True)\n return self\n\n def explore_binning(self, entity_id, verbose=True,\n save_file=None, **kwargs):\n \"\"\"探索性分箱.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n ewb = ExploreWOEBinning(variable_options=entity.variable_options,\n verbose=verbose, **kwargs)\n ewb.fit(entity.pipe_X, entity.pipe_y)\n entity_steps.update({'explore_binning': ewb})\n self.steps.update({entity_id: entity_steps})\n if save_file is not None:\n ewb.dump(save_file)\n return self\n\n def load_explore_bin(self, entity_id, file):\n \"\"\"载入探索结果.\"\"\"\n entity_steps = self.steps.get(entity_id, {})\n ewb = ExploreWOEBinning.load(file)\n entity_steps.update({'explore_binning': ewb})\n self.steps.update({entity_id: entity_steps})\n return self\n\n def explore_bin_search_best(self, entity_id, search_params={},\n verbose=True, search_vars=None, **kwargs):\n \"\"\"搜索最优分箱.\"\"\"\n entity_steps = self.steps.get(entity_id, {})\n ewb = entity_steps['explore_binning']\n ewb.grid_search_best(search_params=search_params, verbose=verbose,\n search_vars=search_vars, **kwargs)\n ewb.plot_best(plot_vars=search_vars)\n return self\n\n def WOE_binning(self, entity_id, variable_cuts, verbose=True,\n **kwargs):\n \"\"\"分箱.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n vop = {key: {\n 'subjective_cut': cut,\n **entity.variable_options[key]}\n for key, cut in variable_cuts.items()}\n wb = WOEBinning(variable_options=vop, verbose=verbose, **kwargs)\n wb.fit(entity.pipe_X.loc[:, variable_cuts.keys()], entity.pipe_y)\n entity_steps.update({'WOE_binning': wb})\n self.steps.update({entity_id: entity_steps})\n entity.pipe_X = wb.transform(entity.pipe_X)\n return self\n\n def var_cluster(self, entity_id, maxeigval2=1,\n maxclus=None, n_rs=0, speedup=True):\n \"\"\"特征分群.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n vc = VarClusCust(variable_options=entity.variable_options,\n maxeigval2=maxeigval2, maxclus=maxclus, n_rs=n_rs)\n vc.fit(entity.pipe_X, entity.pipe_y, speedup=speedup)\n entity_steps.update({'var_cluster': vc})\n self.steps.update({entity_id: entity_steps})\n entity.pipe_X = vc.transform(entity.pipe_X)\n return self\n\n def lasso_LRCV(self, entity_id):\n \"\"\"lasso交叉验证逻辑回归.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n lslrcv = LassoLRCV(variable_options=entity.variable_options)\n lslrcv.fit(entity.pipe_X, entity.pipe_y)\n entity_steps.update({'lasso_LRCV': lslrcv})\n self.steps.update({entity_id: entity_steps})\n entity.pipe_X = lslrcv.transform(entity.pipe_X)\n return self\n\n def stepwise_LR(self, entity_id, threshold_in=0.1, threshold_out=0.1):\n \"\"\"逐步回归.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n swlr = Stepwise(threshold_in=threshold_in, threshold_out=threshold_out)\n swlr.fit(entity.pipe_X, entity.pipe_y, verbose=False)\n entity_steps.update({'stepwise_LR': swlr})\n self.steps.update({entity_id: entity_steps})\n entity.pipe_X = swlr.transform(entity.pipe_X)\n return self\n\n def final_LR(self, entity_id):\n \"\"\"lasso交叉验证逻辑回归.\"\"\"\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n fnlr = LRCust(variable_options=entity.variable_options)\n fnlr.fit(entity.pipe_X, entity.pipe_y)\n entity_steps.update({'final_LR': fnlr})\n self.steps.update({entity_id: entity_steps})\n entity.pipe_X = fnlr.transform(entity.pipe_X)\n entity.pred_y = 
fnlr.predict(entity.pipe_X)\n rep = entity_steps['WOE_binning'].output()\n self.in_model_vars[entity_id] = rep.loc[\n rep.loc[:, 'var'].isin(list(entity.pipe_X.columns)), :]\n return self\n\n def batch_lr(self, entity_id):\n entity = self.get_entity(entity_id)\n entity_steps = self.steps.get(entity_id, {})\n if entity_steps == {}:\n return self\n entity_steps = entity_steps['explore_binning'].bin_dic\n tl = []\n kl = []\n for key, val in entity_steps.items():\n kl.append(key)\n ttl = []\n for vkey, vval in val.best_dic.items():\n ttl.append(vval['cut'])\n tl.append(ttl)\n tl = tuple(tl)\n bvl = product(*tl)\n n = 0\n for bv in bvl:\n pipe_X = entity.pipe_X\n pipe_y = entity.pipe_y\n bvd = dict(zip(kl, bv))\n vop = {key: {\n 'subjective_cut': cut,\n **entity.variable_options[key]}\n for key, cut in bvd.items()}\n wb = WOEBinning(variable_options=vop, verbose=False)\n wb.fit(pipe_X.loc[:, bvd.keys()], pipe_y)\n pipe_X = wb.transform(entity.pipe_X)\n vc = VarClusCust(variable_options=entity.variable_options,\n maxeigval2=0.5, maxclus=None, n_rs=0)\n vc.fit(pipe_X, pipe_y, speedup=True)\n pipe_X = vc.transform(pipe_X)\n lslrcv = LassoLRCV(variable_options=entity.variable_options)\n lslrcv.fit(pipe_X, pipe_y)\n pipe_X = lslrcv.transform(pipe_X)\n swlr = Stepwise(threshold_in=0.1, threshold_out=0.1)\n swlr.fit(pipe_X, pipe_y, verbose=False)\n pipe_X = swlr.transform(pipe_X)\n fnlr = LRCust(variable_options=entity.variable_options)\n fnlr.fit(pipe_X, pipe_y)\n pipe_X = fnlr.transform(pipe_X)\n pred_y = fnlr.predict(pipe_X)\n n += 1\n if n >= 10:\n break\n return bvl\n\n def transform(self, entity_id, X):\n \"\"\"流式应用.\"\"\"\n sX = X.copy(deep=True)\n entity_steps = self.steps[entity_id]\n for step, est in entity_steps.items():\n sX = est.transform(sX)\n return sX\n\n def predict(self, entity_id, X):\n \"\"\"流式预测.\"\"\"\n sX = self.transform(entity_id, X)\n entity_steps = self.steps[entity_id]\n est = entity_steps[list(entity_steps.keys())[-1]]\n px = est.predict(sX)\n return px\n\n# =============================================================================\n# def pipe_fit(self, entity_id, estimators):\n# \"\"\"流式训练.\"\"\"\n# entity = self.get_entity(entity_id)\n# for (est_name, estimator) in estimators.items():\n# est = deepcopy(estimator)\n# setattr(est, 'variable_options', entity.variable_options)\n# est.fit(entity.pipe_X, entity.pipe_y)\n# self.steps[est_name] = est\n# entity.pipe_X = est.transform(entity.pipe_X)\n# if hasattr(est, 'best_bins'):\n# self.best_bins = est.best_bins\n# rep = est.output()\n# entity.pred_y = est.predict(entity.pipe_X)\n# self.in_model_vars = rep.loc[rep.loc[:, 'var'].isin(list(entity.pipe_X.columns)), :]\n# return self\n#\n# def pipe_transform(self, X):\n# \"\"\"流式应用.\"\"\"\n# sX = X.copy(deep=True)\n# for step, est in self.steps.items():\n# sX = est.transform(sX)\n# return sX\n#\n# def pipe_predict(self, X):\n# \"\"\"流式预测.\"\"\"\n# sX = self.pipe_transform(X)\n# est = self.steps[list(self.steps.keys())[-1]]\n# px = est.predict(sX)\n# return px\n#\n# def performance(self, entity_id, n_r=10, n_s=5):\n# \"\"\"效果.\"\"\"\n# entity = self.get_entity(entity_id)\n# plot_bin(self.in_model_vars)\n# psi_df = repeatkfold_performance(entity.pipe_X, vars_bin_psi, n_r=n_r, n_s=n_s)\n# plot_repeat_split_performance(psi_df, 'VAR PSI', self.in_model_vars)\n# psi_df = repeatkfold_performance(pd.DataFrame(entity.pred_y), score_psi, n_r=n_r, n_s=n_s)\n# plot_repeat_split_performance(psi_df, 'SCORE PSI', pd.DataFrame({'describe': ['分数'], 'var': 'score'}))\n# entity.performance()\n# 
return self\n# =============================================================================\n\n def performance(self, entity_id, n_r=10, n_s=5):\n \"\"\"效果.\"\"\"\n entity = self.get_entity(entity_id)\n in_model_vars = self.in_model_vars[entity_id]\n plot_bin(in_model_vars)\n psi_df = repeatkfold_performance(\n entity.pipe_X, vars_bin_psi, n_r=n_r, n_s=n_s)\n plot_repeat_split_performance(psi_df, 'VAR PSI', in_model_vars)\n psi_df = repeatkfold_performance(\n pd.DataFrame(entity.pred_y), score_psi, n_r=n_r, n_s=n_s)\n plot_repeat_split_performance(\n psi_df, 'SCORE PSI',\n pd.DataFrame({'describe': ['分数'], 'var': 'score'}))\n entity.performance()\n return self\n\n def output(self, entity_id, save_path):\n \"\"\"输出结果.\"\"\"\n filename = '_'.join([\n entity_id, 'model_report',\n pd.Timestamp.now().date().strftime('%Y%m%d')]) + '.xlsx'\n writer = pd.ExcelWriter(os.path.join(save_path, filename))\n in_model_vars = self.in_model_vars[entity_id]\n entity = self.get_entity(entity_id)\n entity_steps = self.steps[entity_id]\n for step, est in entity_steps.items():\n est_repor = est.output()\n if est_repor is not None:\n est_repor.to_excel(writer, step)\n gain_table = entity.gain_table\n in_model_vars.to_excel(writer, 'inModelVars')\n gain_table.to_excel(writer, 'gain_table')\n writer.save()\n writer.close()\n return self\n\n def component(self, entity_id):\n \"\"\"组份.\"\"\"\n entity = self.get_entity(entity_id)\n pipe_X = entity.pipe_X.copy(deep=True)\n raw_df = entity.df.copy(deep=True)\n pipe_X_cols = [x.rsplit('_', 2)[0] for x in list(pipe_X.columns)]\n raw_df = raw_df.loc[:, pipe_X_cols]\n ret_df = pd.concat([raw_df, pipe_X, entity.df.loc[:, [entity.target]],\n pd.DataFrame(entity.pred_y)], axis=1)\n ret_df.loc[:, 'err_ratio'] = ret_df.loc[:, 'flag'] - ret_df.loc[:, 'proba']\n return ret_df\n","repo_name":"Lenny-cis/chiascal","sub_path":"chiascal/entityset/entityset.py","file_name":"entityset.py","file_ext":"py","file_size_in_byte":13210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74134384412","text":"\"\"\"Signal processing and filtering functionality\n\"\"\"\n\nimport ctypes\n\nimport numpy as np\nfrom numba import njit\n\n\ndef shift_frame(frame: np.ndarray, buffer: ctypes.Array, overlap: int, buffer_size: int):\n frame[:overlap] = frame[buffer_size:]\n frame[overlap:] = buffer\n\n\n# @njit\ndef calc_spectrum(fft_mags: np.ndarray, window: np.ndarray, frame: np.ndarray):\n fft_mags[:] = np.abs(np.fft.rfft(window * frame))\n # with numba.objmode():\n # fft_mags[:] = np.fft.rfft(window * frame)\n # fft_mags[:] = np.abs(fft_mags)\n\n\n@njit\ndef calc_freq_weights(frequencies: np.ndarray, weighting_type: str) -> np.ndarray:\n if weighting_type == 'A':\n a = np.power(12194.0, 2) * np.power(frequencies, 4)\n b = (np.power(frequencies, 2) + np.power(20.6, 2))\n c = (np.power(frequencies, 2) + np.power(107.7, 2))\n d = (np.power(frequencies, 2) + np.power(737.9, 2))\n e = (np.power(frequencies, 2) + np.power(12194.0, 2))\n r_a = a / (b * np.sqrt(c * d) * e)\n weight = 20 * np.log10(r_a) + 2.0\n return weight\n elif weighting_type == 'C':\n a = np.power(12194.0, 2) * np.power(frequencies, 2)\n b = np.power(frequencies, 2) + np.power(20.6, 2)\n c = np.power(frequencies, 2) + np.power(12194.0, 2)\n r_c = (a / (b * c))\n weight = 20 * np.log10(r_c) + 0.06\n return weight\n elif weighting_type == 'Z':\n return np.ones(frequencies.shape)\n\n\ndef calc_octave_freq_bounds(fraction=3, freq_lower_bound=12, freq_upper_bound=20000,\n g=2) -> 
tuple[np.ndarray, np.ndarray]:\n # https://law.resource.org/pub/us/cfr/ibr/002/ansi.s1.11.2004.pdf\n # https://apmr.matelys.com/Standards/OctaveBands.html\n\n # fraction: bandwidth for octave fraction in format 1/`fraction`-octave\n # e.g. 1/3-octave => `fraction` = 3, 2/3-octave => `fraction` = 3/2\n # g: Octave ratio\n\n # Reference frequency\n fr = 1000\n\n if fraction % 2:\n center_idx = np.round(\n (fraction * np.log(freq_lower_bound / fr) + 30 * np.log(g)) / np.log(g)\n )\n else:\n center_idx = np.round(\n (2 * fraction * np.log(freq_lower_bound / fr) + 59 * np.log(g)) / (2 * np.log(g))\n )\n\n def _band_edge(g, fraction):\n return g ** (1 / (2 * fraction))\n\n def _ratio(g, center_idx, fraction):\n if fraction % 2:\n return g ** ((center_idx - 30) / fraction)\n else:\n return g ** ((2 * center_idx - 59) / (2 * fraction))\n\n center_freq = _ratio(g, center_idx, fraction) * fr\n\n nth_freq = 0\n while nth_freq * _band_edge(g, fraction) < freq_upper_bound:\n center_idx += 1\n nth_freq = _ratio(g, center_idx, fraction) * fr\n center_freq = np.append(center_freq, nth_freq)\n\n lower_bounds = center_freq / _band_edge(g, fraction)\n upper_bounds = center_freq * _band_edge(g, fraction)\n\n return lower_bounds, upper_bounds\n\n\ndef map_to_fft_bounds(oct_freq_lower_bounds, oct_freq_upper_bounds,\n fft_size) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Maps calculated octave frequency bounds to fft bounds\n \"\"\"\n num_splits = oct_freq_lower_bounds.size\n lower_bounds = np.zeros(num_splits, np.int64)\n upper_bounds = np.zeros(num_splits, np.int64)\n max_upper_bound = fft_size - 1\n ratio = fft_size / max(oct_freq_upper_bounds)\n for i in range(num_splits):\n lower_bounds[i] = np.clip(np.round(oct_freq_lower_bounds[i] * ratio), 0, max_upper_bound)\n upper_bounds[i] = np.clip(np.round(oct_freq_upper_bounds[i] * ratio), 0, max_upper_bound)\n\n return lower_bounds, upper_bounds\n\n\ndef calc_logspace_fft_bounds(sample_frequency, bars, frame_size,\n freq_lower_bound=12,\n freq_upper_bound=20000) -> tuple[np.ndarray, np.ndarray]:\n fft_lower_bounds = np.zeros(bars + 1, dtype=np.int32)\n fft_upper_bounds = np.zeros(bars + 1, dtype=np.int32)\n\n freqconst = np.log10(freq_lower_bound / freq_upper_bound) / (1 / (bars + 1) - 1)\n fc = np.zeros(bars + 1, dtype=np.float32)\n for n in range(bars + 1):\n fc[n] = freq_upper_bound * np.power(\n 10, freqconst * (-1) + (((n + 1) / (bars + 1)) * freqconst)\n )\n fc[n] = fc[n] / (sample_frequency / 2)\n\n fft_lower_bounds[n] = fc[n] * (frame_size / 2)\n if (n != 0):\n fft_upper_bounds[n - 1] = fft_lower_bounds[n] - 1\n\n if (fft_lower_bounds[n] <= fft_lower_bounds[n - 1]):\n fft_lower_bounds[n] = fft_lower_bounds[n - 1] + 1\n fft_upper_bounds[n - 1] = fft_lower_bounds[n] - 1\n\n return fft_lower_bounds, fft_upper_bounds\n\n\ndef calc_freq_amplifier(bars, frame_size,\n freq_lower_bound=12, freq_upper_bound=20000) -> np.ndarray:\n amplifier = np.zeros(bars + 1, dtype=np.float64)\n\n bars_third = int(np.round(bars / 3))\n amplifier[:bars_third] = np.logspace(\n np.log2(2), np.log2(1), bars_third, base=2\n )\n amplifier[bars_third:2 * bars_third] = np.logspace(\n np.log2(1), np.log2(0.6), bars_third, base=2\n )\n amplifier[2 * bars_third:] = np.logspace(\n np.log2(0.6), np.log2(0.3), bars + 1 - 2 * bars_third, base=2\n )\n\n freqconst = np.log10(freq_lower_bound / freq_upper_bound) / (1 / (bars + 1) - 1)\n fc = np.zeros(bars + 1, dtype=np.float32)\n for n in range(bars + 1):\n fc[n] = freq_upper_bound * np.power(\n 10, freqconst * (-1) + (((n + 1) / (bars + 1)) * 
freqconst)\n )\n amplifier[n] = fc[n] * amplifier[n] / np.log2(frame_size)\n\n return amplifier\n\n\ndef filter_signal(fft_mags, frame, buffer, overlap, buffer_size, window,\n bars, fft_lower_bounds, fft_upper_bounds, adjustment, amplifier,\n band_mags, cava_mem, noise_reduction):\n shift_frame(frame, buffer, overlap, buffer_size)\n calc_spectrum(fft_mags, window, frame)\n gather_energy(fft_mags, bars, fft_lower_bounds, fft_upper_bounds, adjustment, amplifier,\n band_mags, cava_mem, noise_reduction)\n\n\n@njit\ndef gather_energy(fft_mags, bars, fft_lower_bounds, fft_upper_bounds, adjustment, amplifier,\n band_mags, prev_mags, noise_reduction):\n for n in range(bars):\n energy = 0\n for i in range(fft_lower_bounds[n], fft_upper_bounds[n] + 1):\n energy += fft_mags[i]\n\n energy /= fft_upper_bounds[n] - fft_lower_bounds[n] + 1\n energy *= amplifier[n]\n band_mags[n] = energy\n\n band_mags *= adjustment\n\n excess = 0\n for n in range(bars):\n band_mags[n] = prev_mags[n] * noise_reduction + band_mags[n]\n prev_mags[n] = band_mags[n]\n\n diff = 1200 - band_mags[n]\n if (diff < 0):\n diff = 0\n div = 1 / (diff + 1)\n prev_mags[n] = prev_mags[n] * (1 - div / 20)\n\n if (band_mags[n] > 1200):\n excess = 1\n band_mags[n] /= 1200\n\n if excess:\n adjustment *= 1 - 0.01\n else:\n adjustment *= 1 + 0.001\n\n\n# dead_code\n@njit\ndef calc_psd(fft_mags: np.ndarray, squared_window_sum: np.float64):\n # power spectral density\n fft_mags[:] = np.power(fft_mags * 2., 2) / squared_window_sum\n\n\n# dead_code\n@njit\ndef log_scale(fft_mags: np.ndarray, fft_weights: np.ndarray):\n fft_mags[:] = 10. * np.log10(fft_mags) + fft_weights\n","repo_name":"gephaistos/audioviz-desk","sub_path":"audioviz/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15030932427","text":"# 采集六合程序 run6h.py\r\nimport requests\r\nimport json\r\nimport datetime\r\nimport time\r\n#\r\nn_time = datetime.datetime.now().strftime('%H%M')#现在\r\nprint(n_time)\r\n\r\n # {\r\n # \"id\":\"122\",\r\n # \"time\":\"21:43:0\",\r\n # \"nextid\":\"123\",\r\n # \"s\":0,\r\n # \"c\":\"4001\",\r\n # \"ma\":\"45,虎,red,1,狗,red,15,猴,blue,5,马,green,39,猴,green,47,鼠,blue,43,龙,green\",\r\n # \"year\":\"2018\",\r\n # \"m\":\"\",\r\n # \"day\":\"10月27日\",\r\n # \"type\":4,\r\n # \"nextdate\":\"2018/10/30 21:30:00\",\r\n # \"info\":\"\"\r\n # }\r\n\r\ndef six2():\r\n _url = \"http://1680660.com/smallSix/findCurrentVideoInfo.do?\"\r\n _get_token = \"kdsjfhsh29*/djk.*3dsa.1x1as\"\r\n _get_url = \"http://127.0.0.9/opengotwo/\"\r\n headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9',\r\n 'Cache-Control': 'max-age=0',\r\n 'Connection': 'keep-alive',\r\n 'Cookie': '__cfduid=dbe18b18953b7f84f38dc8c4bf8687cfe1540888690',\r\n 'Host': '1680660.com',\r\n 'Pragma': 'no-cache',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\r\n }\r\n r = requests.get(_url, headers=headers)\r\n _json = r.json()\r\n print(_json)\r\n _id = _json['id']\r\n _time = _json['time']\r\n _nextid = _json['nextid']\r\n _ma = _json['ma']\r\n _type = _json['type']\r\n _nextdate = _json['nextdate']\r\n\r\n data = {\r\n \"token\": _get_token,\r\n \"id\": _id,\r\n \"time\": _time,\r\n \"nextid\": _nextid,\r\n 
\"ma\": _ma,\r\n \"type\": _type,\r\n \"nextdate\": _nextdate,\r\n }\r\n print(data)\r\n xx = requests.get(_get_url, data)\r\n print(xx) \r\n\r\ndef xunhuan():\r\n #six()\r\n #print(\"循环开始\")\r\n s_time = 2130 #开始时间\r\n e_time = 2140 #结束时间\r\n now_time = datetime.datetime.now().strftime('%H%M')#现在\r\n #print(now_time)\r\n now_time = int(now_time)\r\n #zhi = now_time - s_time\r\n if now_time > s_time:\r\n #print(\"start\")\r\n if now_time < e_time:\r\n print(\"开始执行\")\r\n try:\r\n six2()\r\n except:\r\n print(\"bug\")\r\ni = 1\r\nwhile True:\r\n a = str(i)\r\n print('运行第'+a+'次')\r\n xunhuan()\r\n time.sleep(6)\r\n i=i+1","repo_name":"xiaotang999/moge","sub_path":"runsix2.py","file_name":"runsix2.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9540965320","text":"# P1182 数列分段 Section II https://www.luogu.com.cn/problem/P1182\n\nN, M = map(int, input().split())\nA = list(map(int, input().split()))\n\n\ndef simulate(max_val: int) -> bool:\n seg_num = 1\n s = 0\n for a in A:\n if a > max_val:\n return False\n s += a\n if s > max_val:\n seg_num += 1\n s = a\n return seg_num <= M\n\n\nlower_bound = max(A)\nupper_bound = sum(A)\nwhile lower_bound <= upper_bound:\n m = (lower_bound + upper_bound) // 2\n sm = simulate(m)\n if sm:\n upper_bound = m - 1\n else:\n lower_bound = m + 1\nprint(lower_bound)\n","repo_name":"frederick-wang/algorithm-exercises","sub_path":"luogu/P1182 数列分段 Section II/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72252736091","text":"#!/usr/bin/env python2\n\"\"\"Plot effective area for a dataset from HDF5 files. e.g.\n\nTEXINPUTS=$(pwd): python plot_numisland_current_mean.py /home/jbuss/plots/numIslandsCureents.pdf /fhgfs/users/jbuss/20140615_27_cStd.hdf /fhgfs/users/jbuss/20140615_27_c6_4.hdf /fhgfs/users/jbuss/20140615_27_c7_5.hdf --password r3adfac! --pattern \"_c,.hdf\" --unit=\"p.e.\" --feature=\"Level:\"\n\nUsage:\n plot_ped_var_nsb_rate.py ... [options]\n\nOptions:\n --tablename= [default: table]\n --password= password for the factdb\n --cuts cuts for the pandas data frame as comma separated list\n --default_cuts choose predefined default cuts as comma separted list e.g. 
qualitycuts, precuts\n --feature= feature name of these comparisons [default: ped_std_mean]\n --unit= unit of feature these comparisons [default: $\\mathrm{p.e.}$]\n --pattern pattern of the feature value string e.g \"_xT,_c\" [default: \"_nsb,_c\"]\n\"\"\"\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom docopt import docopt\nimport numpy as np\nimport matplotlib\nimport datetime\nimport matplotlib.pyplot as plt\nimport logging\nfrom scipy.stats import moment\nfrom scipy.optimize import curve_fit\nfrom IPython import embed\nimport os\n\nlogger = logging.getLogger(__name__)\nargs = docopt(__doc__)\n\ndef gauss(x, mu, sigma):\n return 1/(np.sqrt(2*np.pi)*sigma) * np.exp(-0.5*(x-mu)**2/sigma**2)\n\ndef f_sqrt(x, a, b):\n return a * np.sqrt(x) + b\n\ndef f_lin(x, a, b):\n return a * x + b\n\ndef buildLabel(path, pattern):\n if not pattern:\n return pattern\n pattern = pattern.split(\",\")\n label_val = os.path.basename(datafile).split(pattern[0])[-1].split(pattern[1])[0]\n return label_val\n\nlogging.captureWarnings(True)\nlogging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'), level=logging.INFO)\n\n\ndatafiles = args[\"\"]\noutputfile = args[\"\"]\n\ntablename = args[\"--tablename\"]\npassword = args[\"--password\"]\n\ncuts = args[\"--cuts\"]\ndefault_cuts = args[\"--default_cuts\"]\nfeature_name = args[\"--feature\"]\nunit = args[\"--unit\"]\n\npattern = args[\"--pattern\"]\n\n\nlogger.info(\"loading Files\")\ndf_list = []\nlabels = []\n\nped_var_means = []\nped_vars_vars = []\nped_vars_stds = []\nped_vars_sizes = []\nped_vars_sems = []\n\nfor datafile in datafiles:\n logger.info(\"loading: {}\".format(datafile))\n df = pd.read_hdf(datafile, tablename)\n logger.debug(\"{} Events in file\".format(len(df)))\n # df = df.query(\"Size > 60\")\n df_list.append(df)\n ped_var_means.append(df[feature_name].mean())\n ped_vars_vars.append(df[feature_name].var())\n ped_vars_stds.append(df[feature_name].std())\n # ped_vars_sizes.append(df[feature_name].size())\n ped_vars_sems.append(df[feature_name].sem())\n label = buildLabel (datafile, pattern)\n nsb_rate = int(label)\n labels.append(nsb_rate)\n\nped_var_means = np.array(ped_var_means)\nped_vars_vars = np.array(ped_vars_vars)\nped_vars_stds = np.array(ped_vars_stds)\nped_vars_sizes = np.array(ped_vars_sizes)\nped_vars_sems = np.array(ped_vars_sems)\nnsb_rate = np.array(labels)\n\nfig = plt.figure()\nax = plt.subplot(1,1,1)\n\ngain = 257.\n\nparams, cov = curve_fit(f_lin, nsb_rate, ped_var_means/gain**2)\nx_plot = np.linspace(0, 300, 1000)\nprint(params)\n\nax.errorbar(nsb_rate,\n ped_var_means/gain**2,\n xerr=0,\n # yerr=binned[feature_name+\"_std\"].values/binned[feature_name+\"_size\"].values,\n yerr=ped_vars_stds/gain**2,\n fmt=\".\",\n capsize=1,\n label=\"simulated pedestal\"\n )\nax.plot(x_plot, f_lin(x_plot, *params), 'b-', linewidth=0.8, label=\"linear fit\")\nax.text(210, 4.1, \"$f(x)={:.2f} \\cdot x + {:.2f}\".format(*params),\n fontsize=12, color='b')\n\nax.set_xlabel(\"Simulated NSB rate / $\\si{\\mega \\hertz}$\")\nax.set_ylabel(\"Mean pedestal variance / $\\mathrm{p.e.^2}\")\nax.legend(loc=\"upper left\")\nlogger.info(\"saving image data: {}\".format(outputfile))\nfig.savefig(outputfile)\n\n\n\n#pdftoppm -png -r 600 test.pdf > 
test.png\n","repo_name":"fact-project/fact_plots","sub_path":"fact_plots/scripts/plot_ped_var_nsb_rate.py","file_name":"plot_ped_var_nsb_rate.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7851155745","text":"import numpy as np\nfrom . import mol_dens\nfrom .mol_dens import mol_dens\n\nimport logging\n\nlogit = logging.getLogger(__name__)\ndef set_logging_level(level=logging.WARNING):\n logit.setLevel(level=level)\n logit.info(\"Setting the logging level to %s\"%(level))\n \n \nclass wet_atmo(object):\n def __init__(self, config=None,\n temp=None, pres=None,\n rhum=None, co2=None, ph2o=None, eso=False,\n column=False, nws=None, name=\"True atmosphere\"):\n \"\"\"\n Creates a model for humid refractive index\n \n **Parameters:**\n \n * config : Give a config file that provides the parameters for\n the fields left at `None`.\n * temp : air temperature [K]\n * pres : air pressure [mbar]\n * rhum : relative humidity of the air [%]\n * co2 : CO2 content in the air [ppm]\n * ph2o : partial pressure of water vapour [in Pa = 0.01 mbar]\n \n \"\"\"\n self.name = name\n self.eso = eso\n \n if temp is None:\n self.temp = config.getfloat(\"vlti\", \"T_vlti\")\n if pres is None:\n self.pres = config.getfloat(\"atmo\", \"pres\")\n if co2 is None:\n self.co2 = config.getfloat(\"atmo\", \"co2\")\n else :\n self.co2 = co2\n if rhum is None:\n self.rhum = config.getfloat(\"atmo\", \"rhum\")\n else:\n self.rhum = rhum\n \n self.Nair = None\n \n \n def get_Nair(self, lambs, add=1):\n self.Nair = add + n_air(lambs, temp=self.temp,\n pres=self.pres,\n rhum=self.rhum,\n co2=self.co2,\n eso=self.eso)\n return self.Nair\n def report_setup(self):\n print(\"================================\")\n print(f\"{self.name}\")\n print(f\"Temperature : {self.temp:.3e} K\")\n print(f\"Pressure : {self.pres:.3e} mbar\")\n print(f\"CO2 content : {self.co2:.1f} ppm\")\n print(f\"Relative humidity : {self.rhum:.1f} %\")\n print(\"================================\")\n \n \n#class simulated_air(object):\n# def __init__(temp=273.15+15., pres=1000.,\n# rhum=0., co2=450., ph2o=None, eso=False,\n# column=False, nws=None, name=\"Simple model for atmosphere\"):\n# \"\"\"\n# An object to represent refractive and dispersive medium.\n# \"\"\"\n# self.temp = temp\n# self.pres = pres\n# self.rhum = rhum\n# self.co2 = co2\n# self.ph2o = ph2o\n# self.eso = eso\n# \n# self.__doc__ = name\n# \n# def get_n(lambda_):\n# \"\"\"\n# \n# \"\"\"\n# then = n_air(lambda_, temp=self.temp, pres=self.pres,\n# rhum=self.rhum, co2=self.co2, ph2o=self.ph2o,\n# eso=self.eso, column=self.column) \n \n \n \n\ndef n_air(lambda_ , temp=273.15+15., pres=1000.,\n rhum=0., co2=450., ph2o=None, eso=False,\n column=False, nws=None):\n \"\"\"\n This function is a translation of a function of GENIEsim.\n \n **DESCRIPTION**\n Returns the phase refractive index (n-1) of air a function of (IR) wavelength (in m),\n and optionally, temperature (in K), pressure (in bar), relative humidity (%) and CO2 content (in ppm).\n Note that the used approximation for air applies in the range from 300 to 1690 nm,\n hence their use at much longer wavelengths should be done with caution.\n For wavelengths longer than 1.7 micron, use the Hill & Lawrence approximation for water vapor,\n which has been verified with experimental data up to 15 micron.\n \n **Argument**\n \n * lambda_ : wavelength vector in meters\n \n **Keyword arguments**\n \n * temp : air temperature [K]\n * pres : air 
pressure [mbar]\n * rhum : relative humidity of the air [%]\n * co2 : CO2 content in the air [ppm]\n * ph2o : partial pressure of water vapour [in Pa = 0.01 mbar]\n if set, overrules RHUM -- if not defined, partial pressure will be given on output\n * eso : set this keyword to use E. Marchetti's moist air refractive index instead of Ciddor + Hill & Lawrance\n * column : set this keyword to convert to input into fs/(mopl/m²), instead of the standard unitless n-1 value\n * nws : on output, returns the refractive index of pure water vapour (unless ESO keyword is set)\n \n **CALLS**\n \n * MOL_DENS\n * N_H2O\n \n **REFERENCE**\n \n * J.E. Decker et al. \"Updates to the NRC gauge block interferometer\", NRC document 42753, 8 August 2000\n * P.E. Ciddor, \"The refractive index of air: new equations for the visible and near infrared\", Appl. Opt. 35 (9), 1566-1573\n * J. Meisner & R. Le Poole, \"Dispersion affecting the VLTI and 10 micron interferometry using MIDI\", Proc. SPIE 4838\n * ``_\n \n **MODIFICATION HISTORY**\n \n * Version 1.0, 17-SEP-2002, by Roland den Hartog, ESA / ESTEC / Genie team, rdhartog@rssd.esa.int\n * Version 1.1, 09-OCT-2002, RdH: conversion to column densities (Meisner's n^hat) implemented\n * Version 1.2, 01-NOV-2002, RdH: water vapor index based on approximation also valid in the IR\n * Version 1.3, 03-JUL-2003, OA: PostScript output of test harness modified\n * Version 1.4, 15-DEC-2009, OA: Removed discontinuity at 1.7µm by using tabulated water vapour refraction index instead of models + improved header\n * SCIFYsim , Oct. 2020, : Translated to python by R. Laugier for SCIFYsim\n\n \"\"\"\n \n cvac = 299792458.\n dax, dw = mol_dens(temp, pres, rhum, co2, ph2o=ph2o, wvdens=True)\n # Compute the refractive index of moist air...\n if eso:\n #\n PS = -10474.0 + 116.43*temp - 0.43284*temp**2 + 0.00053840*temp**3\n P2 = rhum/100.0 * PS\n P1 = pres - P2\n D1 = P1/temp * (1.0 + P1*(57.90*1.0e-8-(9.3250 * 1.0e-4/temp) \\\n + (0.25844/temp**2)))\n D2 = P2/temp * (1.0 + P2*(1.0 + 3.7e-4*P2) * (-2.37321e-3 + (2.23366/temp) - (710.792/temp**2) + (7.75141e4/temp**3)))\n S = 1e-6 / lambda_\n nair = 1.0e-8*((2371.34 + 683939.7/(130-S**2) + 4547.3/(38.9 - S**2)) * D1 \\\n + (6487.31 + 58.058*S**2 - 0.71150*S**4 + 0.08851*S**6)*D2)\n else:\n #... or Ciddor\n # Refractive index of pure air...\n k0, k1, k2, k3 = 238.0185, 5792105., 57.362 , 167917. # um^-2\n s2 = (1. /(lambda_ * 1e+6))**2 # um^-2\n naxs = 1e-8*(k1/(k0 - s2) + k3/(k2 - s2))*(1. + 0.534e-6*(co2 - 450.)) # in reality this is n-1\n\n # and pure water vapor, using the Hill & Lawrence approximation in IR, Ciddor in VIS\n #n=N_ELEMENTS(lambda) & nws=FLTARR(n)\n #w=WHERE(lambda GT 1.7D-6, c) & IF c GT 0 THEN nws[w]=N_H2O(lambda[w], APPROX=1, CO2=co2, PRES=pres, RHUM=rhum, TEMP=temp) # Hill & Lawrence\n #w=WHERE(lambda LE 1.7D-6, c) & IF c GT 0 THEN nws[w]=N_H2O(lambda[w], APPROX=2, CO2=co2, PRES=pres, RHUM=rhum, TEMP=temp) # Ciddor\n nws = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum, co2=co2) # returns the water vapour refractive index (n-1) as tabulated by Mathar \n #nws = N_H2O(lambda, APPROX=2, CO2=co2, PRES=pres, RHUM=rhum, TEMP=temp) # Ciddor\n\n # Compute the densities of air, standard dry air, water vapor and standard water vapor\n daxs = mol_dens(288.15, 1013.25, 0., 450.)# !RL Added the CO2 value. 
Bug?\n dummy, dws = mol_dens(293.15, 13.33, 100., 450., wvdens=True)\n\n # Compute the phase index\n nair = (dax/daxs)*naxs + (dw/dws)*nws\n \n # Convert units to fs/(mol/m2)\n if column:\n rhoc = cvac*dax*1e-15\n nair = nair/rhoc\n \n return nair\n\n# Test harness\ndef test_air():\n import matplotlib.pyplot as plt\n \n # Comparison with Meisner's figure\n freq = np.linspace(20., 166., 1460) # THz (very similar to \"np.arange(1460)/10.+20.\")\n c = 2.997925e+8\n lambda_ = c / freq / 1e+12\n\n temp = 273.15 + 15.\n co2 = 350.\n model = n_air(lambda_, temp=temp, co2=co2, column=True)\n \n nwv, freq = n_h2o(lambda_, freq=True, temp=temp, rhum=0., column=True)\n nwda, freq = n_h2o(lambda_, freq=True, temp=temp, rhum=0., column=True, wda=True)\n \n plt.figure()\n plt.plot(freq, model)\n plt.xlabel(\"Frequency [Thz]\")\n plt.ylabel(r\"$\\frac{n-1}{c.\\rho}$\")\n plt.title(\"Refractive index of dry air\")\n plt.show()\n plt.figure()\n plt.plot(lambda_*1e6, model, label=\"n_air\")\n plt.plot(lambda_*1e6, nwv, label=\"n_h2o\")\n plt.title(\"Refractive index of water vapor vs. dry air\")\n plt.ylabel(r\"$\\frac{n-1}{c.\\rho}$ [$fs/mol/m^2$]\")\n plt.xlabel(r\"wavelength [$\\mu m$]\")\n plt.show()\n \n nda = n_air(lambda_, temp=temp, co2=co2, rhum=0., column=True)\n print(\"Min and max: of nda\")\n print(np.min(nda), np.max(nda))\n \n plt.figure()\n plt.plot(freq, nwv, label=\"Humid air\")\n plt.plot(freq, nda, label=\"Dry air\")\n plt.title(\"Refractive index of water vapor vs. dry air\")\n plt.ylabel(r\"$\\frac{n-1}{c.\\rho}$\")\n plt.xlabel(\"Frequency [Thz]\")\n plt.legend()\n plt.show()\n \n \n plt.figure()\n plt.plot(freq, nwda)\n plt.title(\"Refractive index of water vapor vs. dry air\")\n plt.ylabel(r\"$\\frac{n-1}{c.\\rho} $\")\n plt.xlabel(\"Frequency [Thz]\")\n plt.show()\n \n # The following figure shows the wavelength-dependence of OPD\n opd_air = 1e-15 * 1e6 * c * nda * 1.5 # differential column density = 1.5 mol/m², opd_air in µm\n opd_wda = -1e-15 * 1e6 * c * nwda * 1.5 # differential column density = -1.5 mol/m², opd_wda in µm\n opd_tot = opd_air + opd_wda\n plt.figure()\n plt.plot(lambda_*1e6, opd_air + opd_wda, label=\"n_air\")\n plt.title(\"Differential OPD\")\n plt.ylabel(r\"OPD [$\\mu m$]\")\n plt.xlabel(r\"wavelength [$\\mu m$]\")\n plt.show()\n #return model, freq\n \n # Air: comparison with tables in Ciddor paper:\n lambda_ = 633e-9\n print('Table 1 for dry air from P. Ciddor, Appl. Opt. 35, 1566 (1996)')\n print(' Temperature [C] Pressure [Pa] (n-1)*1E-8')\n temp, pres = 273.15+20., 80e+3/100.\n print(temp-273.15, pres*100., 1e8*(n_air(lambda_, temp=temp, pres=pres)))\n temp, pres = 273.15+20., 100e+3/100.\n print(temp-273.15, pres*100., 1e8*(n_air(lambda_, temp=temp, pres=pres)))\n temp, pres = 273.15+20., 120e+3/100.\n print(temp-273.15, pres*100., 1e8*(n_air(lambda_, temp=temp, pres=pres)))\n temp, pres = 273.15+10., 100e+3/100.\n print(temp-273.15, pres*100., 1e8*(n_air(lambda_, temp=temp, pres=pres)))\n temp, pres = 273.15+30., 100e+3/100.\n print(temp-273.15, pres*100., 1e8*(n_air(lambda_, temp=temp, pres=pres)))\n print(\"\")\n\n print('Table 2 for moist air from P. Ciddor, Appl. Opt. 35, 1566 (1996)')\n print(' Temperature [C] Pressure [Pa] H2O pres. 
CO2 [ppm] (n-1)*1E-8')\n temp, pres, ph2o, co2 = 273.15+19.526, 102094.8/100., 1065., 510.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.517, 102096.8/100., 1065., 510.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.173, 102993.0/100., 641., 450.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.173, 103006.0/100., 642., 440.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.188, 102918.8/100., 706., 450.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.189, 102927.8/100., 708., 440.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.532, 103603.2/100., 986., 600.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.534, 103596.2/100., 962., 600.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n temp, pres, ph2o, co2 = 273.15+19.534, 103599.2/100., 951., 610.\n print(temp-273.15, pres*100., ph2o, co2, 1e8*(n_air(lambda_, temp=temp, pres=pres, ph2o=ph2o, co2=co2)))\n print(\"\")\n\n print('Table 3 for moist air from P. Ciddor, Appl. Opt. 35, 1566 (1996)')\n print(' Temperature [C] Pressure [Pa] Humidity [%] (n-1)*1E-8')\n temp, pres, rhum = 273.15+20., 80e+3/100., 75.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n temp, pres, rhum = 273.15+20., 120e+3/100., 75.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n temp, pres, rhum = 273.15+40., 80e+3/100., 75.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n temp, pres, rhum = 273.15+40., 120e+3/100., 75.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n temp, pres, rhum = 273.15+50., 80e+3/100., 100.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n temp, pres, rhum = 273.15+50., 120e+3/100., 100.\n print(temp-273.15, pres*100., rhum, 1e8*(n_air(lambda_, temp=temp, pres=pres, rhum=rhum)))\n print()\n\n\n####################################################################################\n\n \nimport pathlib\nimport scipy.interpolate as interp\n\n\ndef n_h2o(lambda_, approx=False, column=False,\n co2=450., pres=1013.25, rad=False, rhum=0.,\n table=False, temp=296.15, wda=False, freq=False):\n \"\"\"\n **PURPOSE:**\n \n Returns the refractive index (n-1) of water vapor a function of (IR) wavelength (in m),\n in the range from 3.3 to 10.6 micron,\n and optionally, temperature (in K), and vapor density in (kg/m3)\n \n **Argument**\n \n * lambda_ : wavelength vector in meters\n \n **Keyword arguments:**\n \n * temp: temperature in K\n * pres: pressure in mbar\n * rhum: relative humidity in %\n * co2: CO2 fraction in ppm\n * approx: \n \n - if not set, the table from Mathar will be interpolated at the input wavelengths\n - if set to 1, use approximate formula by Hill & Lawrence\n - if set to 2, use approximate formula by 
Ciddor\n \n * table: set this keyword to use the full Mathar table -- warning, this modifies the lambda array on output (ONLY FOR TEST PURPOSE)\n * column: convert units to fs / (mol/m^2), such that t_delay = n_H20 * column density\n * rad: convert units to radians\n * wda: the refraction index is given for water vapour displacing air instead of bare water vapour, in units of fs/(mol/m^2)\n \n **RESTRICTIONS:**\n Does require a file 'n_mathar.dat' to be present in same directory\n The Mathar data table does not include wavelengths smaller than 1.819 µm.\n At wavelengths smaller than 1.819 µm, the Hill & Lawrence approximation is used instead.\n This produces a discontinuity of N_H2O at 1.819 µm.\n \n **CALLS:**\n \n * n_air\n \n **REFERENCE:**\n \n * R.J. Hill, R.S. Lawrence, \"Refractive index of water vapor in infrared windows\", Infrared Phys. 26, 371 - 376 (1986)\n * P.E. Ciddor, \"The refractive index of air: new equations for the visible and near infrared\", Appl. Opt. 35 (9), 1566-1573\n * F. Hase, R.J. Mathar, \"Water vapor dispersion in the atmospheric window at 10 um\", Preprint, 06-FEB-2002\n \n **MODIFICATION HISTORY:**\n \n * Version 1.0, 13-SEP-2002, by Roland den Hartog, ESA / ESTEC / Genie team, rdhartog@rssd.esa.int\n * Version 2.0, 09-OCT-2002, RdH: included tabulated data by R.J. Mathar (obtained via J. Meisner)\n * Version 2.1, 29-OCT-2002, RdH: conversion to WDA implemented\n * Version 2.2, 01-NOV-2002, RdH: Ciddor's approximation implemented\n * Version 2.3, 15-DEC-2009, OA: Improved header\n \n **TESTED**\n \n * 13-SEP-2002, RdH: comparison with measurements by Hase and Mathar\n * 09-OCT-2002, RdH: direct comparison between Hill & Lawrence's approximation and Mathar's data\n * 15-NOV-2004, RdH: implemented option to convert output directly into radians\n \"\"\"\n cvac = 299792458.\n # !RL we need to do something for global variables\n global nh2o, freqM, nh2oM\n if rad :\n column = True\n \n airdens, wvdens = mol_dens(temp, pres, rhum, co2, wvdens=True) #mol/m3\n rhoc = wvdens * cvac * 1e-15 # converts n-1 into units of fs / mol / m2\n \n # The Mathar data table does not include wavelengths smaller than 1.819 µm.\n # At wavelengths smaller than 1.819 µm, use the Hill & Lawrence approximation instead\n separate = False\n if not approx:\n w1 = lambda_ < 1.819e-6 \n c1 = np.count_nonzero(w1)\n w2 = np.logical_not(w1)\n c2 = np.count_nonzero(w2)\n \n if (c1>0) and (c2>0):\n separate = True\n lam1 = lambda_[w1]\n # the Hill & Lawrence approximation will be used on this part of the spectrum\n lam2 = lambda_[w2]\n elif (c1>0) and (c2==0):\n approx = True\n if approx or separate:\n if not separate:\n lam1 = lambda_\n freq1 = m2thz(lam1) # Frequencies in THz\n if approx==2: # !RL need to check that call!\n s1 = 1/(lam1*1e6)\n s2 = s1**2#(lam1*1e6)**2 # um^-2\n cf, w0 = 1.022, 295.235\n w1 = 2.6422 # um^-2\n w2 = -0.032380 # um^-4\n w3 = 0.004028 # um^-6\n s4 = s1**4 # !RL How about that?\n s6 = s1**6\n nh2o1 = 1e-8 * cf * (w0 + w1*s2 + w2*s4 + w3*s6) # in reality this is n-1\n #print(\"nh2o1 (n-1)\", nh2o1)\n else :\n tt=temp/273.16\n x=1e-5/lam1\n Q=18.015*wvdens # go from mol / m3 to g / m3\n nh2o1 = 1e-6 * Q * \\\n ( (0.957 - 0.928*(tt**0.4)*(x - 1.)) \\\n /(1.03*(tt**0.17) - 19.8*(x**2) + 8.1*(x**4) - 1.7*(x**8)) \\\n + 3747./(12449. 
- (x**2)) )\n #HELP, tt, x, q, nh2o1\n #PRINT, MIN(tt), MAX(tt), MIN(x), MAX(x), MIN(q), MAX(q), MIN(nh2o1), MAX(nh2o1)\n if column or wda :\n nh2o1 = nh2o1/rhoc\n \n if not approx:\n if not separate:\n lam2 = lambda_\n #if nh2oM.shape[0] < 2: # !RL Should probably cleanup this condition\n r = pathlib.Path(__file__).parent.absolute()\n n_mathar = np.loadtxt(r/\"data/n_mathar.dat\")\n freqM = n_mathar[:, 0]\n nh2oM = n_mathar[:, 1]\n # Interpolate\n if lam2.shape[0]<=0 or table:\n #lam2 = cvac/freqM/1e+12 \n lam2 = thz2m(freqM)\n freq2 = freqM\n nh2o2 = nh2oM\n else:\n freq2 = m2thz(lam2) # THz\n spline = interp.splrep(freqM, nh2oM)\n nh2o2 = interp.splev(freq2, spline, ext=3)\n # Conversion \n nh2o2 = (9.05e-7 + nh2o2) * (6.022 * 3335000.) # fs /mol /m2\n if not (column or wda) :\n nh2o2 = nh2o2 * rhoc # convert to dimensionless n-1\n # Reform the arrays if they were separated\n if approx:\n freqs = freq1\n nh2o = nh2o1\n else :\n if separate:\n lambda_ = np.concatenate((lam1, lam2), axis=0)\n freqs = np.concatenate((freq1, freq2), axis=0)\n nh2o = np.concatenate((nh2o1, nh2o2), axis=0) \n else :\n lambda_ = lam2\n freqs = freq2\n nh2o = nh2o2\n \n if wda :\n nh2o = nh2o - n_air(lambda_, column=True, pres=pres,\n rhum=0., temp=temp)\n if rad :\n nh2o = nh2o * 1e15 * cvac * 2. * np.pi / lambda_\n if freq:\n return nh2o, freqs\n else:\n return nh2o\ndef m2thz(lambda_):\n return 299792458.* 1e-12/lambda_ \ndef thz2m(f):\n return 299792458./ (f * 1e12)\n\n# Test harness\ndef test_h2o():\n import matplotlib.pyplot as plt\n lambda_ = np.array([3.368, 3.392, 3.508, 10.246, 10.571,\n 10.591, 10.611, 10.632, 10.653])*1e-6\n print(\"Frequencies: \")\n print(299792458./lambda_ * 1e-12)\n temp = 273.15 + 20.\n pres = 743.\n rhum = 99.\n approx = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum, approx=True)\n mathar = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum)\n appcol = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum, approx=True, column=True)\n matcol = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum, column=True)\n print(' (n-1) * 1E+6 (n-1)/rho/c')\n print(' lambda Hill & Lawrence Mathar Hill & Lawrence Mathar')\n for i in range(8):\n print(lambda_[i]*1e6, approx[i]*1e6, mathar[i]*1e6, appcol[i], matcol[i])\n print(\"\")\n temp=273.15+23.\n rhum=42.3\n \n mathar, freq = n_h2o(lambda_, temp=temp, pres=pres, rhum=rhum,\n column=True, table=True, freq=True)\n # !RL Non-idempotent code ahead!\n #lambda_ = lambda_*1e6\n #n = mathar.shape[0]\n dr = 0.5*np.abs(mathar[0] - mathar[-1])\n ar = 0.5*(mathar[0] - mathar[-1])\n print( mathar[0], mathar[-1])\n plt.figure()\n plt.plot(freq, mathar, label=\"Mathar\")\n plt.title(\"Refractive index of H2O vapor\")\n plt.xlabel(\"Frequency [THz]\")\n plt.ylabel(r\"$n-1$\")\n plt.show()\n \n # Hill and Lawrence approximation\n lambda_ = np.linspace(1.e-6, 10.6e-6, 1000)\n approx_hl, freq2 = n_h2o(lambda_, freq=True, temp=temp, rhum=rhum,\n approx=True, column=True)\n # Ciddor approximation \n approx_c, freq2 = n_h2o(lambda_, freq=True, temp=temp, rhum=rhum,\n approx=2, column=True)\n \n plt.figure()\n plt.plot(freq, mathar, label=\"Mathar\")\n plt.plot(freq2, approx_hl, label=\"Hill and Lawrence approx.\")\n plt.plot(freq2, approx_c, label=\"Ciddor approx.\")\n plt.xlabel(\"Frequency [THz]\")\n plt.title(\"Refractive index of H2O vapor\")\n plt.legend()\n plt.show()\n plt.figure()\n plt.plot(thz2m(freq)*1e6, mathar, label=\"Mathar\")\n plt.plot(thz2m(freq2)*1e6, approx_hl, label=\"Hill and Lawrence approx.\")\n plt.plot(thz2m(freq2)*1e6, approx_c, label=\"Ciddor 
approx.\")\n plt.xlabel(r\"Wavelength [$\\mu m$]\")\n plt.title(\"Refractive index of H2O vapor\")\n plt.legend()\n plt.show()\n \n # WDA\n mathar, freq2 = n_h2o(lambda_*1e6, freq=True, temp=temp, rhum=rhum, wda=True)","repo_name":"rlaugier/SCIFYsim","sub_path":"scifysim/n_air.py","file_name":"n_air.py","file_ext":"py","file_size_in_byte":22798,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"4366863207","text":"from selenium import webdriver\nimport random\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom faker import Faker\n\nfake = Faker()\nwebsite_url = \"https://ecommerce-playground.lambdatest.io/index.php?route=account/register\"\ndriver = webdriver.Chrome()\ndriver.get(website_url)\ndriver.maximize_window()\n\n# generated brand-new Browser session\ndriver.delete_all_cookies()\n\n# Check \"Account registration\" page Title\nacct_reg_expected_title = \"Register Account\"\nacct_reg_actual_title = driver.title\nif acct_reg_expected_title == acct_reg_actual_title:\n print('\"Account registration\" page Title is correct:', driver.title)\nelse:\n print('\"Account registration\" page Title is wrong:', driver.title)\n\n\n# Check \"Account registration\" page URL\nacct_reg_expected_url = \"https://ecommerce-playground.lambdatest.io/index.php?route=account/register\"\nacct_reg_actual_url = driver.current_url\nif acct_reg_expected_url == acct_reg_actual_url:\n print('\"Account registration\" page URL is correct:', driver.current_url)\nelse:\n print('\"Account registration\" page URL is wrong:', driver.current_url)\n\n\nmainPageTitle = \"Register Account\"\nassert driver.title == mainPageTitle\n\nWebDriverWait(driver, 2).until(EC.visibility_of_element_located((By.XPATH, \"//h1[contains(text(),'Register Account')]\")))\n\n# filling in the form\n# first_name\ndriver.find_element(By.ID, \"input-firstname\").send_keys(fake.first_name())\n\n# last_name\ndriver.find_element(By.ID, \"input-lastname\").send_keys(fake.last_name())\n\n# random email with no Faker lib\n# random_email = str(random.randint(0, 99999)) + \"myemail\" + \"@example.com\"\n# driver.find_element(By.ID, \"input-email\").send_keys(random_email)\n\n# random email with Faker lib\ndriver.find_element(By.ID, \"input-email\").send_keys(fake.email())\n\n# telephone\ndriver.find_element(By.ID, \"input-telephone\").send_keys(fake.phone_number())\n\n# password\nfakePassword = fake.password()\ndriver.find_element(By.ID, \"input-password\").send_keys(fakePassword)\n\n# password_confirm\ndriver.find_element(By.ID, \"input-confirm\").send_keys(fakePassword)\n\n# newsletter\ndriver.find_element(By.XPATH, \"//label[@for='input-newsletter-yes']\").click()\n\n# terms\ndriver.find_element(By.XPATH, \"//label[@for='input-agree']\").click()\n\n# continue_button\ndriver.find_element(By.XPATH, \"//input[@value='Continue']\").click()\n\n# asserting that the browser title is correct with No Exception\n# assert driver.title == \"Your Account Has Been Created!\"\n\n# asserting that the browser title is correct with Exception\ntry:\n assert driver.title == \"Your Account Has Been Created!\"\n print(\"Title is Correct. Current Title is:\", driver.title)\nexcept AssertionError:\n print(\"Title is different. 
Current Title is:\", driver.title)\n\n# get Text from Paragraph and store it in variable\ntext_congrats_website = driver.find_element(By.XPATH, \"//p[contains(text(),'Congratulations! Your new')]\").text\ntext_congrats_expected = \"Congratulations! Your new account has been successfully created!\"\n\n# Compare expected and actual Paragraph text\ntry:\n assert text_congrats_website == text_congrats_expected\n print(\"Paragraph text is correct. Current text is:\", text_congrats_website)\nexcept AssertionError:\n print(\"Paragraph text is different. Current text is:\", text_congrats_website)\n\n# click button \"Continue\" with Exception\ndriver.find_element(By.XPATH, '//*[@class=\"btn btn-primary\"]').click()\ntry:\n assert driver.title == \"My Account\"\nexcept AssertionError:\n print(\"Title is different. Current Title is:\", driver.title)\n\n# click \"Edit Account\" button\ndriver.find_element(By.LINK_TEXT, \"Edit Account\").click()\ntime.sleep(0.5)\n\ntry:\n assert driver.title == \"My Account Information\"\nexcept AssertionError:\n print(\"Title is different. Current Title is:\", driver.title)\n\n# closing the browser\ndriver.quit()\n","repo_name":"illya-sky/my-Selenium-Python-automations","sub_path":"user-account-checks.py","file_name":"user-account-checks.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71805550170","text":"from nltk.tokenize import wordpunct_tokenize, sent_tokenize\r\nfrom nltk.lm import KneserNeyInterpolated\r\nfrom nltk import trigrams\r\nfrom nltk.lm.preprocessing import padded_everygram_pipeline\r\nimport nltk\r\nimport glob\r\nimport time\r\nfrom multiprocessing import Pool\r\nimport pickle\r\nfrom nltk.lm import Vocabulary\r\nimport string\r\n\r\n\r\nvocab_file = \"surprise_vocab.tsv\"\r\n\r\n\r\ndef load_vocab(vocab):\r\n \"\"\"\r\n loads the vocab (~50000 words) into a list\r\n \"\"\"\r\n with open(vocab, \"r\", encoding=\"utf-8\") as infile:\r\n vocab_list = [line.strip(\"\\n\") for line in infile]\r\n return vocab_list\r\n\r\n\r\ndef wordpunct_tokenize_no_nums_punct(sentence):\r\n \"\"\"\r\n Replace number strings with \"NUMBERITEM\" (in vocab) and punctuation with \"PUNCTITEM\" (also added to vocab)\r\n \"\"\"\r\n tokens = wordpunct_tokenize(sentence)\r\n tokens_fixed = []\r\n for token in tokens:\r\n if token.isdigit():\r\n tokens_fixed.append(\"NUMBERITEM\")\r\n elif token in string.punctuation:\r\n tokens_fixed.append(\"PUNCTITEM\")\r\n else:\r\n tokens_fixed.append(token)\r\n return tokens_fixed\r\n\r\n\r\ndef tokenized_docs(file_name):\r\n \"\"\"\r\n Gets docs into a format of a list of a list of tokens, where internal token lists represent sentences\r\n and the outer list represents the document. 
\r\n \"\"\"\r\n\r\n with open(file_name, \"r\", encoding=\"utf-8\") as in_file:\r\n text = in_file.read()\r\n\r\n sentences = sent_tokenize(text)\r\n tokens = [wordpunct_tokenize_no_nums_punct(sentence) for sentence in sentences]\r\n return tokens\r\n\r\n\r\ndef process_test(doc_name, test_sentence_in, test_sentence_some, test_sentence_not):\r\n \"\"\"\r\n Old small test function\r\n \"\"\"\r\n corpus_sentences = tokenized_docs(doc_name)\r\n lm = build_model(corpus_sentences)\r\n\r\n # Give test perplexities for a sentence in the corpus, with some parts from the corpus, and with no corpus relation\r\n print(lm.perplexity(trigrams(wordpunct_tokenize(test_sentence_in))))\r\n print(lm.perplexity(trigrams(wordpunct_tokenize(test_sentence_some))))\r\n print(lm.perplexity(trigrams(wordpunct_tokenize(test_sentence_not))))\r\n\r\n\r\ndef model_test():\r\n \"\"\"\r\n Old small test function\r\n \"\"\"\r\n start = time.perf_counter()\r\n test_in = \"Pursuant to clause 31 of the Listing\"\r\n test_some_in = \"Perquisites shall be evaluated as per the goat cheese exemption, cowboy\"\r\n test_not_in = \"Howdy, doodle! Yankee Poodle! Somebody devoured Einstein's pewl noodle??\"\r\n process_test(\"GGDANDE_2013.txt\", test_in, test_some_in, test_not_in)\r\n end = time.perf_counter()\r\n print(end - start)\r\n\r\n\r\ndef merge_doc_sentences(doc_sentences):\r\n # Joining the lists within the doc_sentences to make a single list of all sentences in the corpus\r\n corpus_sentences = []\r\n for i in range(0, len(doc_sentences)):\r\n corpus_sentences.extend(doc_sentences.pop(0))\r\n return corpus_sentences\r\n\r\n\r\ndef build_model(corpus_sentences):\r\n \"\"\"\r\n Builds the model\r\n :param corpus_sentences: a list of the tokenized sentences from the entire corpus\r\n :return:\r\n \"\"\"\r\n vocab_list = load_vocab(vocab_file)\r\n\r\n # Create and fit model to text\r\n lm = KneserNeyInterpolated(3, vocabulary=Vocabulary(vocab_list))\r\n text, vocab = padded_everygram_pipeline(3, corpus_sentences)\r\n lm.fit(text, vocab)\r\n\r\n # Check for appropriate model size\r\n print(\"lm vocab size: \", len(lm.vocab))\r\n\r\n return lm\r\n\r\n\r\ndef model_make_and_save(year_desired):\r\n start = time.perf_counter()\r\n\r\n files_paths = \"/data/annual_reports_tesseract/*\" +str(year_desired) + \"*.txt\"\r\n files = glob.glob(files_paths)\r\n pool = Pool(30)\r\n lst_of_doc_tokens = pool.map(tokenized_docs, files)\r\n pool.close()\r\n pool.join()\r\n\r\n corpus_sentences = merge_doc_sentences(lst_of_doc_tokens)\r\n model = build_model(corpus_sentences)\r\n outfile_name = \"surpriseModel\" + str(year_desired) + \".pkl\"\r\n with open(outfile_name, \"wb\") as model_file:\r\n pickle.dump(model, model_file, pickle.HIGHEST_PROTOCOL)\r\n end = time.perf_counter()\r\n elapsed = (end - start) / 60\r\n print(\"Time to run:\", elapsed)\r\n return model\r\n\r\n\r\nif __name__ == \"__main__\":\r\n nltk.download(\"punkt\")\r\n \r\n years = range(2016, 2017)\r\n\r\n for year in years:\r\n model_make_and_save(year)\r\n","repo_name":"mivler/DemonetizationProject","sub_path":"Get_Perplexity.py","file_name":"Get_Perplexity.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18483098378","text":"from typing import Optional\n\nfrom discord import (\n app_commands as ac,\n Embed,\n Member,\n User,\n Interaction,\n ButtonStyle,\n NotFound,\n)\nfrom discord.app_commands import locale_str as _T\nfrom discord.ui import View, Button\nfrom 
discord.ext.commands import Bot, Cog\nfrom utils import get_lumberjack, cd_but_soymilk\n\nlog = get_lumberjack(__name__)\n\n\nclass InspectCog(Cog):\n def __init__(self, bot: Bot):\n self.bot = bot\n self.ctx_menu = ac.ContextMenu(\n name='inspect_user',\n callback=self.inspect_ctx_menu\n )\n self.bot.tree.add_command(self.ctx_menu)\n\n async def _get_target_detail(self, target: Member | User) -> str:\n if target.global_name is None:\n name = f'{target.name}#{target.discriminator}'\n else:\n name = target.global_name\n if target.nick is not None:\n name += f' ({target.display_name})'\n\n color = target.color\n avatar_url = target.display_avatar.url\n\n fetched_target = await self.bot.fetch_user(target.id)\n banner_url = fetched_target.banner.url if fetched_target.banner else None\n\n return name, color, avatar_url, banner_url\n\n async def inspect_coro(\n self, intx: Interaction, user: Member, target: Member\n ):\n await intx.response.defer()\n\n target = await intx.guild.fetch_member(target.id)\n name, color, avatar_url, banner_url = await self._get_target_detail(target)\n\n if user == target:\n desc = (await intx.translate('self_inspection')).format(\n user.mention\n )\n else:\n desc = (await intx.translate('other_inspection')).format(\n user.mention, target.mention\n )\n\n view = View().add_item(Button(\n style=ButtonStyle.link,\n url=f'{target.avatar}',\n label=await intx.translate('avatar_src'),\n ))\n\n embed = Embed(\n description=desc,\n color=color,\n ).set_author(\n name=name,\n icon_url=target.avatar,\n ).set_footer(\n text=await intx.translate(_T('beta', shared=True))\n )\n\n if banner_url is not None:\n view.add_item(Button(\n style=ButtonStyle.link,\n url=banner_url,\n label=await intx.translate('banner_src'),\n ))\n embed.set_thumbnail(\n url=avatar_url\n ).set_image(\n url=banner_url\n )\n else:\n embed.set_image(url=avatar_url)\n\n await intx.followup.send(embed=embed, view=view)\n\n @ac.command(name='inspect')\n @ac.describe(target='target')\n @ac.checks.dynamic_cooldown(cd_but_soymilk)\n async def inspect_slash(self, intx: Interaction, target: Member | User):\n try:\n await self.inspect_coro(intx, intx.user, target)\n except NotFound as err:\n log.exception(err)\n\n @ac.checks.dynamic_cooldown(cd_but_soymilk)\n async def inspect_ctx_menu(self, intx: Interaction, target: Member):\n try:\n await self.inspect_coro(intx, intx.user, target)\n except NotFound as err:\n log.exception(err)\n\n\nasync def setup(bot: Bot):\n await bot.add_cog(InspectCog(bot))\n","repo_name":"eesoymilk/eeSoybot","sub_path":"extensions/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"18338015833","text":"import os\nimport numpy as np\nimport matplotlib\nimport torch\n# matplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch import nn\nimport torch\nimport copy\nfrom tqdm import tqdm, trange\nimport math\n\n# from torch.utils.tensorboard import SummaryWriter\nwork_dir = \"./\"\ndevice = torch.device(\"cuda:3\" if torch.cuda.is_available() else \"cpu\")\ndevice\n\n\nclass Curve_Model_All_In_One(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(7, 4096)\n self.linear2 = nn.Linear(4096, 4096)\n self.linear3 = nn.Linear(4096, 4096)\n self.linear4 = nn.Linear(4096, 3)\n\n\n def forward(self, x):\n x = self.linear1(x)\n x = 
torch.nn.functional.relu(x)\n x = self.linear2(x)\n x = torch.nn.functional.relu(x)\n x = self.linear3(x)\n x = torch.nn.functional.relu(x)\n x = self.linear4(x)\n\n return x\n \n \n \ndata_dir = f'{work_dir}data/data07081/'\n\n# 时间, 起点b、l、h, 终点b、l、h \n# 发射系坐标\ndata_files = os.listdir(data_dir)[:1]\nx_index=7\nbatch_size = 26240\nmodel = torch.load(\"model/Curve_Model_All_In_One-learn_rate_1e-06-mse_loss-adam-epoch_97-loss_346846928960.0-last_lr_ 0.0000010000-err_ 840443.7089612312.chpk\")\nmodel.to(device)\nmodel.eval()\nwith torch.no_grad():\n for i in trange(len(data_files)):\n file = data_files[i]\n single_trace = np.loadtxt(os.path.join(data_dir, file), dtype=np.float32)[:-2]\n start_point = single_trace[0, x_index: x_index + 3]\n end_point = single_trace[-1, x_index: x_index + 3]\n feature_list = []\n for j, one_time_data in enumerate(single_trace):\n time = one_time_data[:1]\n feature_list.append(np.concatenate((time, start_point, end_point)))\n pred_trace = []\n for feature in torch.utils.data.DataLoader(torch.utils.data.TensorDataset(torch.from_numpy(np.stack(feature_list))), batch_size=batch_size, sampler=None):\n pred_points = model(feature).to('cpu')\n pred_trace.append(pred_points)\n pred_trace = np.stack(pred_trace)\n \n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.plot(xs=single_trace[:, x_index], ys=single_trace[:, x_index + 1], zs=single_trace[:, x_index + 2])\n fig.show()\n\n # 轨迹\n ax.plot(xs=pred_trace[:, 0], ys=pred_trace[:, 1], zs=pred_trace[:, 2])\n # 起点\n ax.scatter(start_point[0], start_point[1], start_point[2], 'r')\n fig.show()","repo_name":"Aaron-LHR/mss-missile","sub_path":"vis_trace.py","file_name":"vis_trace.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20545185127","text":"# -*- coding: utf-8 -*-\r\nfrom scrapy.spider import BaseSpider\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom scrapy.http import Request\r\nfrom scrapy.utils.response import get_base_url\r\nfrom product_spiders.items import Product, ProductLoaderWithoutSpaces as ProductLoader\r\nfrom urlparse import urljoin\r\n\r\nfrom product_spiders.utils import extract_price\r\n\r\nclass EssexautogroupSpider(BaseSpider):\r\n name = u'trustford-essexautogroup.com'\r\n allowed_domains = ['www.essexautogroup.com']\r\n start_urls = ('http://www.essexautogroup.com/ford/new-offers/', )\r\n\r\n def _start_requests(self):\r\n yield Request('http://www.essexautogroup.com/ford/new-offers/ford-btourneob-connect/', callback=self.parse_product)\r\n\r\n\r\n def parse(self, response):\r\n base_url = get_base_url(response)\r\n hxs = HtmlXPathSelector(response)\r\n for url in hxs.select('//div[@class=\"list-item \"]//a[@title=\"View Offer\"]/@href').extract():\r\n yield Request(urljoin(base_url, url), callback=self.parse_product)\r\n\r\n @staticmethod\r\n def parse_product(response):\r\n hxs = HtmlXPathSelector(response)\r\n base_url = get_base_url(response)\r\n models = response.xpath('//div[contains(@class, \"row-fluid\") and .//table[@class=\"data\"] and div[contains(@class, \"media span\")]]')\r\n for model in models:\r\n loader = ProductLoader(item=Product(), selector=model)\r\n name = model.xpath('.//p/strong//text()').extract()[-1].strip()\r\n if not name:\r\n name = model.xpath('.//p/strong[contains(text(), \"Ford\")]//text()').extract()[-1].strip()\r\n loader.add_value('name', name)\r\n prices = model.xpath('.//tr[td[contains(text(), \"Cash\")]]/td[not(contains(text(), 
\"Cash\"))]/text()').re('\\d+,\\d+')\r\n prices = map(extract_price, prices)\r\n price = min(prices)\r\n loader.add_value('price', price)\r\n image_url = model.xpath('.//picture/source/@data-placeholder').extract()\r\n image_url = 'http:' + image_url[0] if image_url else ''\r\n loader.add_value('image_url', image_url)\r\n loader.add_value('identifier', '_'.join(name.split()))\r\n loader.add_value('url', response.url)\r\n yield loader.load_item()\r\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/trustford/essexautogroup.py","file_name":"essexautogroup.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8021610187","text":"from gnome.ops.density import init_density, recalc_density\nfrom gnome.ops.default_constants import default_water_density\nfrom gnome.environment.water import Water\nfrom gnome.spills.substance import NonWeatheringSubstance\nfrom gnome.spills.gnome_oil import GnomeOil\nfrom gnome.spill_container import SpillContainer\nimport numpy as np\nfrom gnome.ops import weathering_array_types\n\nfrom datetime import datetime, timedelta\nfrom gnome import scripting as gs\n\ndef test_init_density():\n sc = SpillContainer()\n sc.substance = NonWeatheringSubstance()\n sc.prepare_for_model_run(weathering_array_types)\n sc._append_data_arrays(100)\n sc['density'] = np.ones(100)\n assert np.all(sc['density'] == 1)\n sc.substance.standard_density = 900\n default_water = Water()\n\n #when using a nonweathering substance, init_density should smiply init to\n # the substance standard_density.\n init_density(sc, 100, water=None)\n assert np.all(sc['density'] == 900)\n sc.substance.standard_density = 800\n init_density(sc, 100, water=default_water)\n assert np.all(sc['density'] == 800)\n\n sc.substance = GnomeOil('oil_ans_mp')\n \n init_density(sc, 100, water=None)\n rho1 = sc['density'].copy()\n default_water.temperature = 300\n init_density(sc, 100, water=default_water)\n assert np.all(sc['density'] < rho1)\n\ndef test_recalc_density():\n# Setup with NonWeatheringSubstance, 100 LEs\n sc = SpillContainer()\n# nw_subs = NonWeatheringSubstance(standard_density=900)\n# sc.substance = nw_subs\n# fix it **********\n spill = gs.surface_point_line_spill(num_elements=100,\n start_position=(0.0, 0.0, 0.0),\n release_time=datetime(2014, 1, 1, 0, 0),\n amount=100,\n units='bbl',\n substance = NonWeatheringSubstance(standard_density=900)) \n sc.spills.add(spill)\n# fix it **********\n sc.prepare_for_model_run(weathering_array_types)\n sc._append_data_arrays(100)\n sc.mass_balance['avg_density'] = 0\n init_density(sc, 100, water=None, aggregate=False)\n sc['mass'][:] = 10 #necessary for avg_density\n\n assert np.all(sc['density'] == 900)\n default_water = Water()\n assert sc.mass_balance['avg_density'] == 0\n\n #Nonweathering density should not get recalculated.\n #Aggregation should still occur.\n recalc_density(sc, water=default_water, aggregate=True)\n assert np.all(sc['density'] == 900) \n assert sc.mass_balance['avg_density'] == 900\n\n \n# new_subs = GnomeOil('oil_crude')\n# sc.rewind()\n# sc.substance = new_subs\n# fix it **********\n sc = SpillContainer()\n spill = gs.surface_point_line_spill(num_elements=100,\n start_position=(0.0, 0.0, 0.0),\n release_time=datetime(2014, 1, 1, 0, 0),\n amount=100,\n units='bbl',\n substance=GnomeOil('oil_crude')) \n sc.spills.add(spill)\n# fix it **********\n sc.prepare_for_model_run(weathering_array_types)\n 
sc._append_data_arrays(100)\n sc.mass_balance['avg_density'] = 0\n sc['mass'][:] = 10 #necessary for avg_density and mass components\n# new_subs.initialize_LEs(100, sc, environment={'water':default_water})\n sc.substance.initialize_LEs(100, sc, environment={'water':default_water})\n\n# init_rho = new_subs.density_at_temp(default_water.get('temperature'))\n init_rho = sc.substance.density_at_temp(default_water.get('temperature'))\n assert np.all(sc['density'] == init_rho)\n new_water = Water(temperature=277)\n recalc_density(sc, water=new_water, aggregate=True)\n\n #temp went down so density goes up.\n assert np.all(sc['density'] > init_rho)\n assert sc.mass_balance['avg_density'] > init_rho\n\ndef test_sinker():\n sc = SpillContainer()\n new_subs = GnomeOil('oil_crude')\n new_subs.densities = [1004.0]\n new_subs.density_ref_temps = [288.15]\n sc.substance= new_subs\n sc.prepare_for_model_run(weathering_array_types)\n sc._append_data_arrays(100)\n\n w = Water()\n w.set('temperature', 288, 'K')\n w.set('salinity', 0, 'psu')\n init_density(sc, 100, water=w)\n assert np.all(sc['density'] == w.get('density'))\n\n\n\n \n \n","repo_name":"NOAA-ORR-ERD/PyGnome","sub_path":"py_gnome/tests/unit_tests/test_ops/test_density.py","file_name":"test_density.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"32"} +{"seq_id":"26826429093","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 09:33:00 2018\n\n@author: YudongCai\n\n@Email: yudongcai216@gmail.com\n\"\"\"\n\nimport os\nimport click\n\n\n\ndef loadblastout(blastout):\n with open(blastout) as f:\n records = []\n for line in f:\n if line[0] != '#':\n sline = line.strip().split()\n qid = sline[0]\n start = int(sline[6])\n end = int(sline[7])\n records.append(f'{qid}\\t{min(start, end)}\\t{max(start, end)}')\n else:\n if records:\n tmpfile = f'tmp_{qid}.bed'\n with open(tmpfile, 'w') as f_out:\n f_out.write('\\n'.join(records))\n records = []\n yield qid, calbedlen(tmpfile)\n os.remove(tmpfile)\n\ndef calquerycov(blastout, querylen):\n queryaln = {}\n with open(blastout) as f:\n records = []\n for line in f:\n if line[0] != '#':\n sline = line.strip().split()\n qid = sline[0]\n start = int(sline[6])\n end = int(sline[7])\n records.append(f'{qid}\\t{min(start, end)}\\t{max(start, end)}')\n else:\n if records:\n tmpfile = f'tmp_{qid}.bed'\n with open(tmpfile, 'w') as f_out:\n f_out.write('\\n'.join(records))\n records = []\n queryaln[qid] = calbedlen(tmpfile) / querylen[qid]\n os.remove(tmpfile)\n return queryaln\n\n\ndef calbedlen(bedfile):\n cmd = \"sort -k2,2n %s | bedtools merge -d 1 | awk '{a+=($3-$2+1)};END{print a}'\" % bedfile\n try:\n return int(os.popen(cmd).readline().strip())\n except ValueError:\n return 0\n\n\n@click.command()\n@click.option('--blastout', help='blast result')\n@click.option('--genomefile', help='contigID\\tcontiglen')\n@click.option('--outfile', help='outfile')\ndef main(blastout, genomefile, outfile):\n \"\"\"\n qid col 0\n qstart col 6\n qend col 7\n query id 中不能有括号\n \"\"\"\n querylen = {x.split()[0]: int(x.split()[1].strip()) for x in open(genomefile).readlines()}\n result = calquerycov(blastout, querylen)\n with open(outfile, 'w') as f:\n for k,v in result.items():\n f.write(f'{k}\\t{v}\\n')\n\n\nif __name__ == '__main__':\n 
main()\n\n\n","repo_name":"silvewheat/bioNotes","sub_path":"XX_unClasified/mergeblastout.py","file_name":"mergeblastout.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"43208082559","text":"# Given an array arr[] of integers. Find a peak element i.e. an element that is not smaller than its neighbors and return indices\n\n# Note: For corner elements, we need to consider only one neighbor. \n# Input: array[]= {5, 10, 20, 15}\n# Output: 20\n# Explanation: The element 20 has neighbors 10 and 15, both of them are less than 20. return indices\n\n# Input: array[] = {10, 20, 15, 2, 23, 90, 67}\n# Output: 20 or 90\n# Explanation: The element 20 has neighbors 10 and 15, both of them are less than 20, similarly 90 has neighbors 23 and 67. return indices\n\ndef peakElement(arr:list, n:int) -> list:\n peak = []\n if(n == 1):\n return [0]\n if(arr[0] >= arr[1]):\n peak.append(0)\n for i in range(len(arr)):\n if(i != len(arr)-1 and (arr[i] >= (arr[i-1])) and (arr[i] >= arr[i+1])):\n # peak.append(arr[i])\n peak.append(i)\n if (arr[n - 1] >= arr[n - 2]):\n peak.append(n-1)\n return peak\n\nprint(peakElement([5, 10, 20, 15],4))\nprint(peakElement([10, 20, 15, 2, 23, 90, 67, 100],8))\nprint(peakElement([1,2,3],3))\n ","repo_name":"dstrivedi/Python-Problems","sub_path":"GeeksForGeeks/findPeakElement.py","file_name":"findPeakElement.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11146744592","text":"import requests\nimport sling\n\nclass WikidataService:\n def __init__(self):\n self.commons = sling.Store()\n self.n_id = self.commons[\"id\"]\n self.n_is = self.commons[\"is\"]\n self.wikiconv = sling.WikiConverter(self.commons)\n self.commons.freeze()\n self.session = requests.Session()\n\n def handle(self, request):\n # Get QID for item.\n params = request.params()\n qid = params[\"qid\"][0]\n print(\"fetch wikidata item for\", qid)\n\n # Fetch item from wikidata site.\n url = \"https://www.wikidata.org/wiki/Special:EntityData/\" + qid + \".json\"\n r = self.session.get(url)\n\n # Convert item to frame.\n store = sling.Store(self.commons)\n item, revision = self.wikiconv.convert_wikidata(store, r.content)\n\n # Create frame where id: is changed to is:.\n slots = []\n for name, value in item:\n if name == self.n_id:\n slots.append((self.n_is, item))\n else:\n slots.append((name, value))\n frame = store.frame(slots)\n\n return frame\n\n","repo_name":"ringgaard/sling","sub_path":"case/service/wikidata.py","file_name":"wikidata.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"32"} +{"seq_id":"72292444570","text":"import wx\r\n\r\nfrom user_list import UserListDialog\r\n\r\nclass UnfollowDialog (UserListDialog):\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(UnfollowDialog, self).__init__(title=_(\"Unfollow someone\"), style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, *args, **kwargs)\r\n #First Row\r\n wx.StaticText(parent=self.pane, label=_(\"Unfollow who:\"))\r\n self.setup_users()\r\n #Radio buttons\r\n self.action = wx.RadioBox(parent=self.pane, label=_(\"Action:\"), choices=[_('Unfollow'), _('Block'), _('Report as spam')])\r\n self.action.SetSizerProps(expand=True)\r\n 
self.finish_setup()\r\n","repo_name":"mitsugusakamoto/TheQube","sub_path":"src/session/twitter/gui/unfollow.py","file_name":"unfollow.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5328958027","text":"from know.boxes import *\nfrom functools import partial\nfrom typing import Callable, Iterable\nfrom front import APP_KEY, RENDERING_KEY, ELEMENT_KEY, NAME_KEY\nfrom i2 import Pipe, Sig\nfrom front.crude import Crudifier\n\nfrom streamlitfront import mk_app, binder as b\nfrom streamlitfront.elements import (\n SelectBox,\n SuccessNotification,\n KwargsInput,\n PipelineMaker,\n)\n\nif not b.mall():\n b.mall = dict(\n step_factories=dict(\n # Source Readers\n files_of_folder=FuncFactory(Files),\n files_of_zip=FuncFactory(FilesOfZip),\n # Store Transformers\n key_transformer=key_transformer,\n val_transformer=val_transformer,\n key_filter=filter_keys,\n extract_extension=FuncFactory(extract_extension),\n # Object Transformation\n make_codec=make_decoder,\n # Boolean Functions\n regular_expression_filter=regular_expression_filter,\n make_function_conjunction=make_function_conjunction,\n ),\n steps=dict(),\n pipelines=dict(),\n # exec_outputs=dict(),\n )\nmall = b.mall()\nif not b.selected_step_factory():\n b.selected_step_factory = 'files_of_folder'\n\ncrudifier = partial(Crudifier, mall=mall)\n\n\n@crudifier(param_to_mall_map=dict(step_factory='step_factories'), output_store='steps')\ndef mk_step(step_factory: Callable, kwargs: dict):\n return partial(step_factory, **kwargs)\n\n\n@crudifier(output_store='pipelines')\ndef mk_pipeline(steps: Iterable[Callable]):\n return Pipe(*steps)\n\n\n@crudifier(\n param_to_mall_map=dict(pipeline='pipelines'),\n # output_store='exec_outputs'\n)\ndef exec_pipeline(pipeline: Callable, kwargs):\n return pipeline(**kwargs)\n\n\ndef get_step_name(step):\n return [k for k, v in mall['steps'].items() if v == step][0]\n\n\ndef get_selected_pipeline_sig():\n if not b.selected_pipeline():\n return Sig()\n return Sig(mall['pipelines'][b.selected_pipeline()])\n\n\nconfig = {\n APP_KEY: {'title': 'Data Preparation'},\n RENDERING_KEY: {\n 'mk_step': {\n NAME_KEY: 'Pipeline Step Maker',\n 'execution': {\n 'inputs': {\n 'step_factory': {\n ELEMENT_KEY: SelectBox,\n 'options': mall['step_factories'],\n 'value': b.selected_step_factory,\n },\n 'kwargs': {\n ELEMENT_KEY: KwargsInput,\n 'func_sig': Sig(\n mall['step_factories'][b.selected_step_factory()]\n ),\n },\n },\n 'output': {\n ELEMENT_KEY: SuccessNotification,\n 'message': 'The step has been created successfully.',\n },\n },\n },\n 'mk_pipeline': {\n NAME_KEY: 'Pipeline Maker',\n 'execution': {\n 'inputs': {\n 'steps': {\n ELEMENT_KEY: PipelineMaker,\n 'items': list(mall['steps'].values()),\n 'serializer': get_step_name,\n },\n },\n 'output': {\n ELEMENT_KEY: SuccessNotification,\n 'message': 'The pipeline has been created successfully.',\n },\n },\n },\n 'exec_pipeline': {\n NAME_KEY: 'Pipeline Executor',\n 'execution': {\n 'inputs': {\n 'pipeline': {\n ELEMENT_KEY: SelectBox,\n 'options': mall['pipelines'],\n 'value': b.selected_pipeline,\n },\n 'kwargs': {\n ELEMENT_KEY: KwargsInput,\n 'func_sig': get_selected_pipeline_sig(),\n },\n }\n },\n },\n },\n}\n\nif __name__ == '__main__':\n funcs = [mk_step, mk_pipeline, exec_pipeline]\n app = mk_app(funcs, config=config)\n 
app()\n","repo_name":"i2mint/streamlitfront","sub_path":"streamlitfront/examples/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"71707352411","text":"from __future__ import print_function\n\nfrom rx import Observable, Observer\nfrom rx.concurrency import ThreadPoolScheduler\nfrom threading import current_thread\nimport multiprocessing, time, random\nimport subprocess\nimport os\nimport signal\nimport time\nimport json\n\n# calculate number of CPU's and add 1, then create a ThreadPoolScheduler with that number of threads\noptimal_thread_count = multiprocessing.cpu_count() + 1\npool_scheduler = ThreadPoolScheduler(optimal_thread_count)\n\ndef singleton(cls, *args, **kw):\n instances = {}\n def _singleton():\n if cls not in instances:\n instances[cls] = cls(*args, **kw)\n return instances[cls]\n return _singleton\n\n# class create_task_Observer(Observer):\n# def on_next(self, value):\n# ready_tasks.append(\"task_{0}\".format(value))\n# print(\"From server task_{0} is added to ready-task queue\".format(value))\n# def on_completed(self):\n# pass\n# def on_error(self, error):\n# pass\n\n\n@singleton\nclass ObservableWrapper():\n def __init__(self):\n self.task_producer = None\n self.running_tasks = []\n # mock the pub\n def publish_tasks(self, time_arr, task_arr):\n if self.task_producer is None:\n self.task_producer = Observable.from_(time_arr).flat_map(lambda i: Observable.timer(i * 1000).switch_map(lambda i: Observable.just(i).subscribe_on(pool_scheduler))).zip(Observable.from_(task_arr), lambda x, y: y).publish().ref_count()\n return self.task_producer\n\ndef create_tasks_ob(script_name):\n with open(script_name) as data_file:\n script = json.load(data_file)\n def get_timer(script):\n time_arr = []\n Observable.from_(script).map(lambda e: e['sequence']).subscribe(lambda s: time_arr.append(s))\n return time_arr\n return ObservableWrapper().publish_tasks(get_timer(script), script)\n\n\n\ndef add_task(task):\n p = subprocess.Popen('exec /Users/yqfang/.pyenv/versions/2.6.9/bin/python 10s_process.py > {0}_log.txt'.format(task['name']), shell=True)\n ObservableWrapper().running_tasks.append((task, p))\n print(\"add {0}, pid: {1}, current thread{2}\".format(task['name'], p.pid, current_thread().name))\n return p.pid\n\ndef get_task_info(task):\n for t in ObservableWrapper().running_tasks:\n if task['name'] == t[0]['name']:\n return t\ndef remove_task(task):\n for t in ObservableWrapper().running_tasks:\n if task['name'] == t[0]['name']:\n ObservableWrapper().running_tasks.remove(t)\ndef kill_task(task):\n pid = get_task_info(task)[1].pid\n get_task_info(task)[1].kill()\n print(\"kill {0}, sts: {1}, cur thread, {2}\".format(task['name'], get_task_info(task)[1].pid, current_thread().name))\n # remove_task(task)\ndef stat_task(task):\n st = get_task_info(task)[1].poll()\n print(\"the {0}, stat is : {1}, current thread {2}\".format(task['name'], st, current_thread().name))\n if str(st) == 'None':\n return \"ok\"\n else:\n return st\n\ndef do_task_by_type(task):\n if \"add\" == task['type']:\n return add_task(task)\n elif \"kill\" == task['type']:\n return kill_task(task)\n elif \"stat\" == task['type']:\n return stat_task(task)\n else:\n pass\n\nclass TaskObserver(Observer):\n def on_next(self, task):\n do_task_by_type(task)\n def on_completed(self):\n pass\n def on_error(self, error):\n pass\n\ntask_ob = 
create_tasks_ob(\"script.json\").subscribe(TaskObserver())\n\n\n\n\n\ninput(\"\\n\")\n\n\n# ob = ObservableWrapper()\n#\n# a.publish_tasks(15)\n#\n# input(\"\")\n#\n#\n# def do_task(task_info):\n# p = subprocess.Popen(\"/Users/yqfang/.pyenv/versions/2.6.9/bin/python 10s_process.py > output_{0}.txt\".format(task_info), shell= True)\n# running_tasks.append((task_info, p))\n# ready_tasks.remove(task_info)\n# return (task_info, p)\n#\n\n\n\n\n# def dispach_task():\n# Observable.interval(6000) \\\n# .switch_map(lambda i: Observable.from_(ready_tasks).subscribe_on(pool_scheduler).map(lambda r: do_task(r))) \\\n# .subscribe(on_next=lambda i: print(\"start task: {0} with thread {1}\".format(i, current_thread().name)), on_error=lambda e: print(e))\n#\n# def print_task_st(t):\n# return \"task_name: {0} is running and the task_st: {1}\".format(t[0], \"ok\" if t[1].poll() is None else t[1].poll())\n#\n# def watch_running_task():\n# Observable.interval(7000) \\\n# .switch_map(lambda i: Observable.from_(running_tasks).subscribe_on(pool_scheduler).map(lambda s: print_task_st(s))) \\\n# .subscribe(on_next=lambda s: print(\"Received {0} on {1}, running-size: {2}, ready_running-size: {3}\".format(s, current_thread().name, len(running_tasks), len(ready_tasks))))\n#\n#\n# create_task(5)\n# dispach_task()\n# watch_running_task()\n#\n# while True:\n# time.sleep(5)\n\n# Observable.interval(2000) \\\n# .map(lambda i: Observable.from_(sp).map(lambda p: (i, p.poll()))) \\\n# .merge_all() \\\n# .subscribe(lambda s: print(s))","repo_name":"yqfang/rx-libs","sub_path":"rxpy-example/4.3_subprocess_observe_on.py","file_name":"4.3_subprocess_observe_on.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30778160329","text":"import os\nimport datetime as dt\nimport time\n\ndef erase_site():\n os.system('rm -rf site/*.html')\n\ndef pandoc(x):\n pref = x.count('/') * '../'\n s = 'pandoc -B includes/head -A includes/foot --css=' + pref + 'theme/print.css --css=' + pref + 'theme/mobile.css --css=' + pref + 'theme/fugitive.css --css=' + pref + 'theme/pygments.css -o site/' + x[:-3] + '.html src/' + x\n os.system(s)\n\ndef routine():\n erase_site()\n for rt, dirs,files in os.walk('src/'):\n root = rt[4:]\n if root != '':\n root += '/'\n os.system('mkdir site/' + root)\n for filename in files:\n if filename.endswith('.md'):\n pandoc(root + filename)\n #for filename in os.listdir('src/'):\n # if filename.endswith('.md'):\n # pandoc(filename)\n\nwhile True:\n now = dt.datetime.now()\n time.sleep(1)\n changed = False\n\n for root, dirs,files in os.walk('.'): \n for fname in files:\n path = os.path.join(root, fname)\n st = os.stat(path) \n mtime = dt.datetime.fromtimestamp(st.st_mtime)\n if mtime > now:\n changed = True\n\n if changed:\n routine()\n","repo_name":"gaubian/PersonalWebsite","sub_path":"bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74918195291","text":"#Álvaro Crego Deán\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 04 20:00:41 2018\n\n@author: Usuario\n\"\"\"\n\n'''\nHe empleado el ejericio 3 como ejemplo, por su sencillez.\n\nProbando con esta configuración, de tal manera que la función\nse va a una especie de asíntota horizontal, vemos que el deltat\ncrece de manera abrupta (ya que no hay cambios fuertes en la función\nsino que se estabiliza, para lo cual podemos 
trabajar con deltat muy grandes).\nAsí es que, si pusiésemos N=110, el programa tardaría mucho tiempo en ejecutar ¿por qué?\nporque la función se haría tan plana que el deltat crecería de manera infinita.\nPor ello, he obligado a cesar un bucle while (con la función break)\ncuando el deltat sea superior a un cierto límite. De esta manera salvamos\nel problema de que el deltat sea infinito y el programa no se ejecute.\n\nPor otro lado vemos que para tiempos bajos el deltat comienza a crecer (para\nesta configuración de k=4) ya que la cantidad (p1-p) es cada vez menor.\n\nEste programa sería especialmente útil para funciones que cambiasen\nde manera muy abrupta en el tiempo \n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN=115\ndeltat=10e-2\nt=0\np=7.6\nr=9\nk=8\n\nP=[]\nT=[]\nDeltat=[]\n\n#definimos dos límites, uno superior y otro inferior.\nMaxi=0.05\nmini=0.001\nu=0.9 #u es el valor máximo que aceptamos de deltat para que no se me vaya a infinito\n\nfor i in range(N): \n deltat=10e-2 #cada vez que comience una iteracción, el deltat vuelve\n #a ser el original \n p1=p+deltat*(r*p*(1-p/k))\n \n if (p1-p)>Maxi: #si la diferencia en el eje x es grande: \n while (p1-p)>Maxi: #mientras sea más grande que nuestro límite maxi:\n deltat=deltat/2\n p1=p+deltat*(r*p*(1-p/k))\n else:\n while (p1-p)= 50:\n return await ctx.send(\"Max väli 50\")\n\n res = get_command_from_db(ctx.guild.id, cmd_name)\n\n if res is None:\n return await ctx.send(\"Tämän nimistä komentoa ei ole olemassa.\")\n\n cmd_type = res[0]['command']['type']\n\n if cmd_type != 'audio':\n return await ctx.send(\"Tämä komento ei ole ääni-komento.\")\n\n audio_fn = f\"cogs/audio/{cmd_name}.mp3\"\n\n audio_f = AudioSegment.from_mp3(audio_fn)\n\n new_audio = audio_f + db\n\n new_audio.export(audio_fn)\n\n msg = f\"{cmd_name} {str(db)} dB\"\n\n await ctx.send(msg, delete_after=5)\n\n @commands.command()\n async def add_entry(self, ctx, command_name):\n r = upsert_to_entry(ctx.author, command_name)\n\n if r is False:\n await ctx.send(\"Tämän nimistä komentoa ei ole olemassa.\")\n else:\n await ctx.send(\"Entry lisätty!\")\n\n @commands.command(aliases=['rm_entry'])\n async def remove_entry(self, ctx):\n remove_entry_from_db(ctx.author)\n\n await ctx.send(\"Poistettiin entry!\")\n\n\ndef setup(bot):\n bot.add_cog(Misc(bot))","repo_name":"SaneSL/hyymori-bot","sub_path":"cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1894099715","text":"import math\r\n\r\npeople = int(input())\r\nentrance_fee = float(input())\r\ndeck_chair_price = float(input())\r\numbrella_price = float(input())\r\n\r\nentrance_total = entrance_fee * people\r\ndeck_chair = math.ceil(people * 0.75)\r\numbrella = math.ceil(people / 2)\r\ntotal_deck_chair = deck_chair * deck_chair_price\r\ntotal_umbrella = umbrella * umbrella_price\r\n\r\ntotal_amount = entrance_total + total_umbrella + total_deck_chair\r\n\r\nprint(f\"{total_amount:.2f} lv.\")","repo_name":"LazChu/SoftUni-projects","sub_path":"Programming Basics with Python/exams/exam_6_7_july/pool_day.py","file_name":"pool_day.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23042817666","text":"import logging\r\nimport os\r\nimport azure.functions as func\r\nimport smtplib, ssl\r\nfrom twilio.rest import Client\r\n\r\ntemperature_threshold = 
int(os.environ.get('TEMPERATURE_THRESHOLD', 30))\r\n\r\n# Email configs\r\nport = 587\r\nsmtp_server = \"smtp.gmail.com\"\r\nsender_email = os.environ.get('EMAIL_FROM')\r\nreceiver_email = os.environ.get('EMAIL_TO')\r\npassword = os.environ.get('EMAIL_APP_PASSWORD')\r\ncontext = ssl.create_default_context()\r\n\r\n\r\ndef main(documents: func.DocumentList) -> str:\r\n if not documents:\r\n return ''\r\n\r\n item = documents[0]\r\n temperature = int(item.get('temperature', 0))\r\n if temperature <= temperature_threshold:\r\n return ''\r\n\r\n send_email(temperature)\r\n send_sms(temperature)\r\n\r\n logging.info('Document id: %s', documents[0])\r\n\r\n\r\ndef send_email(temp):\r\n message = f\"\"\"\\\r\nSubject: High Temperature Alarm: {temp} \r\n\r\nSensor has detected that the temperature is as high as: {temp}.\"\"\"\r\n\r\n with smtplib.SMTP(smtp_server, port) as server:\r\n server.ehlo()\r\n server.starttls(context=context)\r\n server.ehlo()\r\n server.login(sender_email, password)\r\n server.sendmail(sender_email, receiver_email, message)\r\n\r\n\r\ndef send_sms(temp):\r\n account_sid = os.environ['TWILIO_ACCOUNT_SID']\r\n auth_token = os.environ['TWILIO_AUTH_TOKEN']\r\n client = Client(account_sid, auth_token)\r\n\r\n message = client.messages.create(\r\n body=f\"Sensor has detected that the temperature is as high as: {temp}\",\r\n from_=os.environ['TWILIO_FROM'],\r\n to=os.environ['TWILIO_TO'])\r\n\r\n print(f'Twilio message sent with id {message.sid}')\r\n","repo_name":"redhat-raptor/raspberry-pi-cloud-function","sub_path":"alarm-functions/AlarmTrigger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73416826332","text":"#! 
/usr/bin/env python3\nimport os, sys, requests\n\nEXTERNAL_IP = '34.72.38.165'\n\ndef extract_data_from_txt(files, path):\n data = []\n for text in files:\n with open(os.path.join(path, text)) as f:\n line = f.read().split('\\n')\n line_data = {}\n line_data['title'] = line[0]\n line_data['name'] = line[1]\n line_data['date'] = line[2]\n line_data['feedback'] = ' '.join(line[3:])\n data.append(line_data)\n f.close()\n return data\n\ndef post(external_ip, content):\n url = 'http://{}/feedback/'.format(external_ip)\n send = requests.post(url, data = content)\n return send\n\nif __name__ == \"__main__\":\n path = sys.argv[1]\n text_files = [x for x in os.listdir(path) if '.txt' in x]\n\n data = extract_data_from_txt(text_files, path)\n for line in data:\n post_data = post(EXTERNAL_IP, line)\n print('{} : {}'.format(post_data.status_code, post_data.reason))\n print(\"Done !\")","repo_name":"Hyuto/bangkit-ml-2021","sub_path":"IT Automation/Capstone/Capstone_week-2.py","file_name":"Capstone_week-2.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44426545139","text":"import yaml\nfrom selenium import webdriver\n\ndef test_get_cookie():\n \"\"\"\n 步骤:\n 1.下载并安装版本合适的Chromedriver\n 2.关闭所有Chrome进程\n 3.执行chrome --remote-debugging-port=9222开启复用浏览器\n 4.打开测试页面并手动登陆一次\n 5.执行下面的代码获取cookie并保存\n :return:\n \"\"\"\n option = webdriver.ChromeOptions()\n # 设置debug地址\n option.debugger_address = '127.0.0.1:9222'\n driver = webdriver.Chrome(options=option)\n driver.get('http://10.22.181.60/#/Home')\n driver.implicitly_wait(5)\n cookies = driver.get_cookies()\n print(cookies)\n yaml.dump(cookies, open('../file/cookie.yaml', 'w', encoding='UTF-8'))","repo_name":"hsinway/uiautomation_CDP","sub_path":"test_CDP_Web/function/get_cookie.py","file_name":"get_cookie.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6297427966","text":"#coding=utf-8\nimport threading\nimport time\n\n# create a mutex\nmutex = threading.Lock()\n# locking\n# mutex.acquire()\n\n# unlocking\n# mutex.release()\n\ndef saySorryMeal():\n\t# mutex.acquire()\n\tfor i in range(5):\n\t\tprint(\"Honey, I was wrong, can I have my meal?\")\n\t\ttime.sleep(1)\n\t# mutex.release()\n\ndef saySorryMoney():\n\t# mutex.acquire()\n\tfor i in range(5):\n\t\tprint(\"Hsoney, I was wrong, can I have my money?\")\n\t\ttime.sleep(1)\n\t# mutex.release()\t\n\ndef main():\n\tt1 = threading.Thread(target = saySorryMeal)\n\tt2 = threading.Thread(target = saySorryMoney)\n\tt1.start()\n\tt2.start()\n\n\ttime.sleep(1)\n\n\tprint(threading.enumerate())\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"lamkiboo/PythonTest","sub_path":"PYTHON/thread/thread_lock.py","file_name":"thread_lock.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25549558910","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nimport fogros2\n\ndef generate_launch_description():\n ld = LaunchDescription()\n intermediate_transport = 'raw'\n topic_name = '/camera/image_raw'\n new_cloud_topic_name = topic_name + \"/cloud\"\n img_publisher_node = Node(\n package=\"image_transport_benchmarker\", executable=\"image_pub\", output=\"screen\")\n image_listener_node = Node(\n package=\"image_transport_benchmarker\", executable=\"raw_test_cloud\", output=\"screen\")\n # img_encoder_node = Node(\n # package=\"image_transport\", executable=\"republish\", output=\"screen\",\n # arguments=[\n # 'raw', # Input\n # 'raw', # Output\n # ],\n # remappings=[\n # (\"in\", \"/camera/image_raw\"),\n # (\"out\", \"/camera/image_raw/raw\")\n # ]\n # )\n # img_decoder_node = Node(\n # package=\"image_transport\", executable=\"republish\", output=\"screen\",\n # arguments=[\n # 'raw', # Input\n # 'raw', # Output\n # ],\n # remappings=[\n # (\"in\", \"/camera/image_raw/raw\"),\n # (\"out\", \"/camera/image_raw/cloud\")\n # ]\n # )\n\n image_listener_node_robot = Node(\n package=\"image_transport_benchmarker\", executable=\"raw_test\", output=\"screen\")\n ld.add_action(img_publisher_node)\n ld.add_action(image_listener_node)\n ld.add_action(image_listener_node_robot) \n # ld.add_action(img_encoder_node)\n # ld.add_action(img_decoder_node)\n return ld\n","repo_name":"KDharmarajanDev/image-transport-benchmarking","sub_path":"launch/local_raw_test.launch.py","file_name":"local_raw_test.launch.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29487568090","text":"from typing import Tuple\nfrom functools import lru_cache\n\nfrom ..core.base import FeatureExtractor\nfrom astropy.coordinates import SkyCoord\nimport pandas as pd\nimport numpy as np\n\n\nclass GalacticCoordinatesExtractor(FeatureExtractor):\n def __init__(self, from_metadata=False):\n super(GalacticCoordinatesExtractor, self).__init__()\n self.from_metadata = from_metadata\n\n @lru_cache(1)\n def get_features_keys(self) -> Tuple[str, ...]:\n return 'gal_b', 'gal_l'\n\n @lru_cache(1)\n def get_required_keys(self) -> Tuple[str, ...]:\n if self.from_metadata:\n return ()\n else:\n return 'ra', 'dec'\n\n def compute_from_metadata(self, detections, metadata):\n coordinates = SkyCoord(\n ra=metadata['ra'],\n dec=metadata['dec'],\n frame='icrs',\n unit='deg')\n galactic = coordinates.galactic\n np_galactic = np.stack((galactic.b.degree, galactic.l.degree), axis=-1)\n galactic_coordinates_df = pd.DataFrame(\n np_galactic,\n index=metadata.index,\n columns=['gal_b', 'gal_l'])\n galactic_coordinates_df.index.name = 'oid'\n det_oids = detections.index.unique()\n galactic_coordinates_df = galactic_coordinates_df.loc[det_oids]\n return galactic_coordinates_df\n\n def compute_from_detections(self, detections):\n radec_df = detections[['ra', 'dec']].groupby(level=0).mean()\n coordinates = SkyCoord(\n ra=radec_df.values[:, 0],\n dec=radec_df.values[:, 1],\n frame='icrs',\n unit='deg')\n galactic = coordinates.galactic\n np_galactic = np.stack((galactic.b.degree, galactic.l.degree), axis=-1)\n galactic_coordinates_df = pd.DataFrame(\n np_galactic,\n index=radec_df.index,\n columns=['gal_b', 'gal_l'])\n galactic_coordinates_df.index.name = 'oid'\n return galactic_coordinates_df\n\n def _compute_features(self, detections, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n 
detections :class:pandas.`DataFrame`\n DataFrame with detections of an object.\n kwargs Not required.\n\n Returns :class:pandas.`DataFrame`\n -------\n\n \"\"\"\n if self.from_metadata:\n return self.compute_from_metadata(detections, kwargs['metadata'])\n else:\n return self.compute_from_detections(detections)\n","repo_name":"alercebroker/lc_classifier","sub_path":"lc_classifier/features/extractors/galactic_coordinates_extractor.py","file_name":"galactic_coordinates_extractor.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"3613085034","text":"# Standard library imports\nimport json\nimport os\n\n# Third party imports\nimport flask\nimport flask_cors\nfrom opengeodeweb_back import geode_functions, geode_objects\nimport werkzeug\n\nimport blueprints.tools.blueprint_file_converter as bp_file_converter\nimport blueprints.tools.blueprint_validity_checker as bp_validity_checker\nimport blueprints.tools.blueprint_crs_converter as bp_crs_converter\nfrom opengeodeweb_back.routes import blueprint_routes\n\n\ntools_routes = flask.Blueprint(\"tools_routes\", __name__)\nflask_cors.CORS(tools_routes)\n\n\n@tools_routes.before_request\ndef before_request():\n geode_functions.create_lock_file(\n os.path.abspath(flask.current_app.config[\"LOCK_FOLDER\"])\n )\n\n\n@tools_routes.teardown_request\ndef teardown_request(exception):\n geode_functions.remove_lock_file(\n os.path.abspath(flask.current_app.config[\"LOCK_FOLDER\"])\n )\n geode_functions.create_time_file(\n os.path.abspath(flask.current_app.config[\"TIME_FOLDER\"])\n )\n\n\ntools_routes.register_blueprint(\n bp_file_converter.file_converter_routes,\n url_prefix=\"/file_converter\",\n name=\"file_converter_blueprint\",\n)\ntools_routes.register_blueprint(\n bp_validity_checker.validity_checker_routes,\n url_prefix=\"/validity_checker\",\n name=\"validity_checker_blueprint\",\n)\ntools_routes.register_blueprint(\n bp_crs_converter.crs_converter_routes,\n url_prefix=\"/crs_converter\",\n name=\"crs_converter_blueprint\",\n)\ntools_routes.register_blueprint(\n blueprint_routes.routes,\n url_prefix=\"/\",\n name=\"blueprint_routes\",\n)\n","repo_name":"Geode-solutions/Website-Back","sub_path":"blueprints/blueprint_tools.py","file_name":"blueprint_tools.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16796772262","text":"#! /usr/bin/env python3\nimport sys\nsys.path.append(\"..\")\n\nimport numpy as np\nfrom numpy.linalg import inv\nimport math\nfrom front_end.graph import Graph, Vertex\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n'''\nreference:\nKolkir, Oct 14, 2021\nslam-playground\ngithub\nvisited: July 31, 2022\nhttps://github.com/Kolkir/slam-playground/blob/main/doc/slam_se2_derivation.ipynb\n\nThe implementation structure is from Kolkir's code.\nThe code is studied and modified to suit the project.\n\n'''\n\n\n'''\nA pose can be represented as a homogeneous transformation matrix\nX = |R t| = |cos -sin x| \n |0 1| |sin cos y|\n |0 0 1|\nor as a 3 component vector\nx = [x, y, theta]\nFor the graph:\nX = [(x1, y1, theta1), (x2, y2, theta2), ... 
,(xn, yn, thetan)] in vector.\n'''\n\n\n'''\n odom_data and trans_data are x_y_data.\n e(xi, xj, zij) is a function that computes a difference between\n the exepected observation z'ij (transformed observation) and the real observation zij.\n The edge represents the odom informataion between two nodes.\n eij(xi, xj) = zij - z'ij(xi, xj)\n'''\n\n# --util----------------------------------------------------------------------------------------------------------------------------\ndef t2v(trans):\n # transformation matrix to vector\n v = np.zeros((3,1))\n v[:2,0] = trans[:2,2]\n v[2] = np.arctan2(trans[1,0], trans[0,0])\n return v\n\ndef v2t(v):\n # vector to transformation matrix\n cos = math.cos(v[2]) # v[2] = theta\n sin = math.sin(v[2])\n trans = np.array([[cos, -sin, v[0]],\n [sin, cos, v[1]],\n [0, 0, 1]]) \n return trans\n# -----------------------------------------------------------------------------------------------------------------------------------\n\n\ndef calculate_err(xi, xj, uij):\n '''\n for 2D SLAM, the error function is\n eij(xi, xj) = t2v(inv_Uij(inv_Xi * Xj))\n where Uij is the odometry measurement and\n Uij, Xi and Xj are represented as homogeneous transformation matrices\n \n The position and orientation difference between the pose xi and the pose xj\n is written in a form of the transformation matrix.\n '''\n\n # convert a vector form (xi, xj and uji) into \n # a homogeneous transformation matrix form\n trans_i = v2t(xi)\n trans_j = v2t(xj)\n trans_uij = v2t(uij)\n\n # calculate error matrix\n #err_trans = np.dot(inv(trans_uij), np.dot(inv(trans_i), trans_j))\n err_trans = inv(trans_uij) @ (inv(trans_i) @ trans_j)\n\n # convert error matrix to a vector\n err = t2v(err_trans) # 3 * 1\n return err\n\n\ndef calculate_jacobian(vi, vj, uij):\n '''\n inv(Xi)Xj = |R t|\n |0 1|\n represents the conversion matrix from the coordinate system j to\n the coordinate system i.\n => the pose of j from i\n Zij is the measurement of the pose of j from i.\n params:\n vi = xi (pose at i in a vector form (xi, yi, thetai))\n vj = xj (pose at j in a vector form (xj, yj, thetaj))\n uij = odom info\n '''\n\n \n\n si = np.sin(vi[2])\n ci = np.cos(vi[2])\n dr_i = np.array([[-si, ci], [-ci, -si]]).T\n dt_ij = np.array([vj[:2] - vi[:2]]).T\n\n t_i = v2t(vi)\n t_j = v2t(vj)\n t_u = v2t(uij)\n R_i = t_i[:2,:2]\n R_z = t_u[:2,:2]\n\n A_i = np.vstack((np.hstack((-R_z.T @ R_i.T, R_z.T @ (dr_i.T @ dt_ij))), \n [0, 0, -1]))\n B_j = np.vstack((np.hstack((R_z.T @ R_i.T, np.zeros((2,1)))),\n [0, 0, 1]))\n\n\n assert A_i.shape == B_j.shape\n \n print(\"A_i\", A_i)\n\n return A_i, B_j\n\n\n\ndef optimize_graph(graph:Graph, tolerance=1e-5, iterations=2):\n #cov = 0.01\n\n sigma_x = 0.01\n sigma_y = 0.01\n sigma_theta = 0.01\n\n omega = np.zeros((3, 3))\n omega[0,0] = sigma_x\n omega[1,1] = sigma_y\n omega[2,2] = sigma_theta\n\n mean_errors = []\n\n # degree of freedom for 2D (x, y, theta)\n n = 3\n \n for _ in range(iterations):\n\n edges = graph.edges\n vertices = graph.verticies\n m = len(vertices)\n poses = []\n for vertex in vertices:\n poses.append(vertex.pose)\n \n poses = np.array(poses)\n \n X = np.array(poses).T\n print(\"X shape\", X.shape)\n\n # define \n H = np.zeros((m * n, n * m)).astype(np.float)\n\n # define a coefficient vector\n b = np.zeros((m * n, 1)).astype(np.float)\n\n for count, edge in enumerate(edges):\n vi = edge.vi\n vj = edge.vj\n uij = edge.uij\n\n xi = vi.pose\n xj = vj.pose\n\n if count == len(edges) -1:\n e_ij = calculate_err(xi, xj, uij) \n # e_ij = calculate_err(xi, xj, uij) \n 
else:\n e_ij = np.zeros((3, 1))\n #e_ij = calculate_err(xi, xj, uij) \n print(\"err\", e_ij)\n\n A_ij, B_ij = calculate_jacobian(xi, xj, uij)\n\n # compute the contribution of this constraint to the linear system\n H_ii = A_ij.T @ omega @ A_ij\n H_ij = A_ij.T @ omega @ B_ij\n H_ji = B_ij.T @ omega @ A_ij\n H_jj = B_ij.T @ omega @ B_ij\n\n # compute the coefficient vector\n b_i = A_ij.T @ omega @ e_ij\n b_j = B_ij.T @ omega @ e_ij\n\n # get the index of the vertex\n i = graph.get_index_vertex(vi)\n j = graph.get_index_vertex(vj)\n\n # print(\"index i\", i)\n # print(\"index j\", j)\n\n\n index_i = i * n\n index_j = j * n\n\n # update the linear system\n H[index_i:index_i+n, index_i:index_i+n] += H_ii\n H[index_i:index_i+n, index_j:index_j+n] += H_ij\n H[index_j:index_j+n, index_i:index_i+n] += H_ji\n H[index_j:index_j+n, index_j:index_j+n] += H_jj\n\n # update the coefficient vector\n b[index_i:index_i+n] += b_i\n b[index_j:index_j+n] += b_j\n\n\n \n # fix the position of the first vertex (init pose)\n H[:n,:n] += np.eye(n)\n \n L = np.linalg.cholesky(H)\n\n X_update = -(inv(L.T) @ inv(L)) @ b\n X_update = np.reshape(X_update, (m, n)).astype(np.float)\n\n print(\"dx shape\", np.shape(X_update))\n # print(\"dx\", dx)\n \n\n for count, value in enumerate(X_update):\n #print(\"update value\", value)\n print(\"value shape\", value.shape)\n poses[count] += value\n\n graph.update_vertex_pose(vertices[count], poses[count])\n\n \n\n converged, mean_err = is_converged(edges, tolerance)\n mean_errors.append(mean_err)\n\n if converged:\n break\n \n return np.array(poses).astype(np.float), np.array(mean_errors).astype(np.float)\n\n\n\ndef is_converged(edges, tolerance=1e-5):\n mean_err = 0\n for edge in edges:\n xi = edge.vi.pose\n xj = edge.vj.pose\n uij = edge.uij\n err = calculate_err(xi, xj, uij)\n mean_err += err\n \n mean_err /= len(edges)\n\n if np.all(mean_err <= tolerance):\n return True, mean_err\n \n return False, mean_err\n\n\n\n\n\ndef plot_path(ground_pose, raw_pose, X):\n print(\"ground pose\", ground_pose)\n print(\"transformed\", X)\n print(\"raw pose\", raw_pose)\n \n # assert np.shape(ground_pose) == np.shape(raw_pose) == np.shape(X)\n\n ground_pose_T = ground_pose.T\n raw_pose_T = raw_pose.T\n X_T = X.T\n\n plt.scatter(ground_pose_T[0], ground_pose_T[1], color='g', alpha=0.3, label=\"ground truth path\")\n plt.scatter(raw_pose_T[0], raw_pose_T[1], color='r', alpha=0.3, label=\"path with odometry error\")\n plt.scatter(X_T[0], X_T[1], color='k', alpha=0.3, label=\"optimized path\")\n\n plt.plot(ground_pose_T[0], ground_pose_T[1], color='g')\n plt.plot(raw_pose_T[0], raw_pose_T[1], color='r')\n plt.plot(X_T[0], X_T[1], color='k')\n\n\n plt.legend()\n plt.show()\n\n\n\ndef plot_mean_err(mean_err):\n print(\"lenth\", len(mean_err))\n for i in range(len(mean_err)):\n plt.scatter(i, mean_err[i][0], color='pink')\n #plt.scatter(i, mean_err[i][1], color='blue')\n #plt.scatter(i, mean_err[i][2], color='g')\n plt.show()","repo_name":"hyeny99/Graph-SLAM","sub_path":"src/scripts/back_end/graph_optimization.py","file_name":"graph_optimization.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16690315416","text":"menu = {\n \"Margherita\": \"18.20\",\n \"Tradycyjna\": \"22.90\",\n \"Farmera\": \"27.20\",\n \"Bolognese\": \"22.90\",\n \"Tonno\": \"27.20\",\n \"Romana\": \"27.20\",\n \"Bruno\": \"22.90\",\n \"Vesuvio\": \"28.00\",\n \"Gyros\": \"32.00\",\n \"Oregano\": \"27.20\",\n}\n\nmaxk = 
max(menu, key=menu.get)\nmink = min(menu, key=menu.get)\n\nfor k, v in menu.items():\n print(k)\n print(v)\n print(k, v)\n\nprint(maxk)\nprint(mink)\n\ndel menu[maxk]\ndel menu[mink]\n\nprint(menu)\n\n\nnazwa = str(input(\"Podaj nazwę pizzy: \"))\ncena = str(input(\"Podaj jej cenę: \"))\nmenu[nazwa] = cena\n\nprint(menu)\n","repo_name":"BVGdragon1025/-wiczenia","sub_path":"Semestr 1/cwiczenia9/cwiczenie1.py","file_name":"cwiczenie1.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32511225104","text":"import numpy as np\r\nimport random\r\nfrom scipy.special import comb\r\n\r\nclass LocationScaleAugmentation(object):\r\n def __init__(self, vrange=(0.,1.), background_threshold=0.01, nPoints=4, nTimes=100000):\r\n self.nPoints=nPoints\r\n self.nTimes=nTimes\r\n self.vrange=vrange\r\n self.background_threshold=background_threshold\r\n self._get_polynomial_array()\r\n\r\n def _get_polynomial_array(self):\r\n def bernstein_poly(i, n, t):\r\n return comb(n, i) * (t ** (n - i)) * (1 - t) ** i\r\n t = np.linspace(0.0, 1.0, self.nTimes)\r\n self.polynomial_array = np.array([bernstein_poly(i, self.nPoints - 1, t) for i in range(0, self.nPoints)]).astype(np.float32)\r\n\r\n def get_bezier_curve(self,points):\r\n xPoints = np.array([p[0] for p in points])\r\n yPoints = np.array([p[1] for p in points])\r\n xvals = np.dot(xPoints, self.polynomial_array)\r\n yvals = np.dot(yPoints, self.polynomial_array)\r\n return xvals, yvals\r\n\r\n def non_linear_transformation(self, inputs, inverse=False, inverse_prop=0.5):\r\n start_point,end_point=inputs.min(),inputs.max()\r\n xPoints = [start_point, end_point]\r\n yPoints = [start_point, end_point]\r\n for _ in range(self.nPoints-2):\r\n xPoints.insert(1, random.uniform(xPoints[0], xPoints[-1]))\r\n yPoints.insert(1, random.uniform(yPoints[0], yPoints[-1]))\r\n xvals, yvals = self.get_bezier_curve([[x, y] for x, y in zip(xPoints, yPoints)])\r\n if inverse and random.random()<=inverse_prop:\r\n xvals = np.sort(xvals)\r\n else:\r\n xvals, yvals = np.sort(xvals), np.sort(yvals)\r\n return np.interp(inputs, xvals, yvals)\r\n\r\n def location_scale_transformation(self, inputs, slide_limit=20):\r\n scale = np.array(max(min(random.gauss(1, 0.1), 1.1), 0.9), dtype=np.float32)\r\n location = np.array(random.gauss(0, 0.5), dtype=np.float32)\r\n location = np.clip(location, self.vrange[0] - np.percentile(inputs, slide_limit), self.vrange[1] - np.percentile(inputs, 100 - slide_limit))\r\n return np.clip(inputs*scale + location, self.vrange[0], self.vrange[1])\r\n\r\n def Global_Location_Scale_Augmentation(self, image):\r\n image=self.non_linear_transformation(image, inverse=False)\r\n image=self.location_scale_transformation(image).astype(np.float32)\r\n return image\r\n\r\n def Local_Location_Scale_Augmentation(self,image, mask):\r\n output_image = np.zeros_like(image)\r\n\r\n mask = mask.astype(np.int32)\r\n\r\n output_image[mask == 0] = self.location_scale_transformation(self.non_linear_transformation(image[mask==0], inverse=True, inverse_prop=1))\r\n\r\n for c in range(1,np.max(mask)+1):\r\n if (mask==c).sum()==0:continue\r\n output_image[mask == c] = self.location_scale_transformation(self.non_linear_transformation(image[mask == c], inverse=True, inverse_prop=0.5))\r\n\r\n if self.background_threshold>=self.vrange[0]:\r\n output_image[image <= self.background_threshold] = image[image <= self.background_threshold]\r\n\r\n return 
output_image\r\n","repo_name":"Kaiseem/SLAug","sub_path":"dataloaders/location_scale_augmentation.py","file_name":"location_scale_augmentation.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"32"} +{"seq_id":"28866113708","text":"from django.shortcuts import render\nfrom .forms import *\nimport CIS.models as models\nfrom django.contrib import messages\nfrom .services import Customer, Order, ProductsInOrder\nimport zeep\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ncustomer = None\norder = None\norderId = None\nuser = None\n\ndef home(request):\n global order\n global customer\n return render(request, 'IS/home.html')\n\ndef login(request):\n return render(request, 'IS/login.html')\n\ndef order_detail(request, order_id):\n global orderId\n global user\n\n orderId = order_id\n \n order = models.Order.objects.get(id=order_id)\n store = models.Store.objects.get(id=order.store_id)\n customer = models.Customer.objects.get(id=order.customer_id)\n products = []\n productsFromOrder = models.ProductsInOrder.objects.filter(order=order_id)\n \n for product in productsFromOrder:\n productTemp = models.Product.objects.get(id=product.product_id)\n if product.alternative_for is not None:\n productAlternative = models.Product.objects.get(id=product.alternative_for)\n productListTemp = ProductsInOrder(productTemp.id, productTemp.name, productTemp.price, productTemp.weight, \n productTemp.breakable, product.amount, product.available, product.status, productTemp.image,\n product.alternative_for, productAlternative.name, productAlternative.image, productAlternative.price)\n else:\n productListTemp = ProductsInOrder(productTemp.id, productTemp.name, productTemp.price, productTemp.weight, \n productTemp.breakable, product.amount, product.available, product.status, productTemp.image, None, None, None, None)\n products.append(productListTemp)\n \n context = {\n 'products': products,\n 'order': order,\n 'store' : store,\n 'customer': customer,\n 'user' : user,\n }\n\n if order.courier_id is not None:\n curier = models.Courier.objects.get(id=order.courier_id)\n context = {\n 'products': products,\n 'order': order,\n 'store' : store,\n 'customer': customer,\n 'curier' : curier,\n 'user' : user,\n }\n\n return render(request, 'IS/order_detail.html', context)\n\ndef orders(request):\n global customer\n global order\n global orderId\n global user\n\n if request.method == 'POST':\n for key, value in request.POST.items():\n if key == 'save order':\n if 'status' in request.POST:\n status = request.POST['status']\n models.ProductsInOrder.objects.filter(order = orderId).update(status = status)\n if user.email == 'pokladnik@gmail.com':\n models.Order.objects.filter(id=orderId).update(prepared=True) \n \n orders = []\n # load products to be shown in catalogue\n if user.email == 'pokladnik@gmail.com':\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='pripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n if orderTemp.prepared == False:\n orders.append(orderTemp)\n elif user.email == 'skladnik@gmail.com':\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='nepripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = 
models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n orders.append(orderTemp)\n \n paginator = Paginator(orders, 5)\n page = request.GET.get('page')\n orders = paginator.get_page(page)\n\n context = {\n 'orders' : orders,\n }\n\n return render(request, 'IS/orders.html', context)\n\n # handle potential login from previous site\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n\n # validate email with WSDL\n validator_wsdl = 'http://pis.predmety.fiit.stuba.sk/pis/ws/Validator?WSDL'\n validator_client = zeep.Client(wsdl=validator_wsdl)\n success = validator_client.service.validateEmail(\n request.POST['email'])\n\n if login_form.is_valid() and success:\n\n db_customer = models.Customer.objects.get(email=request.POST['email'])\n print(db_customer.email)\n user = db_customer\n\n # if we got here, we have read our customer details from db\n # and we'll send message about successful login to the catalogue\n if db_customer.email == 'skladnik@gmail.com':\n messages.add_message(request, messages.SUCCESS,\n 'Boli ste úspešne prihlásený ako skladník.')\n # load products to be shown in catalogue\n orders = []\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='nepripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n orders.append(orderTemp) \n paginator = Paginator(orders, 5)\n page = request.GET.get('page')\n orders = paginator.get_page(page)\n\n context = {\n 'orders' : orders,\n }\n\n return render(request, 'IS/orders.html', context)\n\n\n elif db_customer.email == 'pokladnik@gmail.com':\n messages.add_message(request, messages.SUCCESS,\n 'Boli ste úspešne prihlásený ako pokladník.')\n # load products to be shown in catalogue\n orders = []\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='pripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n if orderTemp.prepared == False:\n orders.append(orderTemp) \n paginator = Paginator(orders, 5)\n page = request.GET.get('page')\n orders = paginator.get_page(page)\n\n context = {\n 'orders' : orders,\n }\n\n return render(request, 'IS/orders.html', context)\n else:\n messages.add_message(request, messages.ERROR,\n 'Nesprávny e-mail alebo heslo.')\n context = {\n 'email': request.POST['email'],\n 'password': request.POST['password']\n }\n return render(request, 'IS/login.html', context=context)\n\n orders = []\n # load products to be shown in catalogue\n if user.email == 'pokladnik@gmail.com':\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='pripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n if orderTemp.prepared == False:\n orders.append(orderTemp)\n elif user.email == 'skladnik@gmail.com':\n productsInOrderWhichAreReady = models.ProductsInOrder.objects.filter(status='nepripravený').values('order').distinct()\n for productInOrderWhichIsReady in productsInOrderWhichAreReady:\n orderTemp = models.Order.objects.get(id=productInOrderWhichIsReady['order'])\n orders.append(orderTemp)\n\n paginator = Paginator(orders, 5)\n page = request.GET.get('page')\n orders = paginator.get_page(page)\n\n context = {\n 'orders' : orders,\n }\n\n return render(request, 
'IS/orders.html', context)\n","repo_name":"katka-juhasova/PIS-project","sub_path":"IS/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70371295450","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef calculator():\n return render_template('calculator.html')\n\n@app.route('/calculate', methods=['POST'])\ndef calculate():\n if request.method == 'POST':\n num1 = float(request.form['num1'])\n num2 = float(request.form['num2'])\n operation = request.form['operation']\n\n if operation == 'add':\n result = num1 + num2\n operation_symbol = '+'\n elif operation == 'subtract':\n result = num1 - num2\n operation_symbol = '-'\n elif operation == 'multiply':\n result = num1 * num2\n operation_symbol = 'x'\n elif operation == 'divide':\n if num2 == 0:\n return \"Cannot divide by zero!\"\n result = num1 / num2\n operation_symbol = '/'\n\n return render_template('result.html', num1=num1, num2=num2, operation_symbol=operation_symbol, result=result)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"sayali-29/simple-calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35480925926","text":"import discord\nimport asyncio\nfrom discord.ext import commands, tasks\nfrom datetime import datetime\nimport os\n\n\n# client bot thing\nbot = commands.Bot(command_prefix = \"!\",intents = discord.Intents.all())\nbot.remove_command(\"help\")\n\n# print(datetime.datetime.today().day)\n\n@bot.event\nasync def on_ready():\n print(\"We have logged in as {0.user}\".format(bot))\n await bot.get_channel(818021991247511574).send('**~ Look bby, i🐛 did a thing 💞 ~**\\n type **!help** for commands!')\n\n# Start command: displays all commands avaliable\n# each needs to begin with !*blank*\n@bot.command()\nasync def help(ctx):\n await ctx.send('***here lies the anniversary bot commands!***📜\\n\\n'\n '**!help** -- prints bot commands 🤖\\n'\n '**!date** -- tells what date our anniversary is 📅\\n'\n '**!today** -- tells whether today is our anniversary or not ❓\\n'\n '**!together** -- tells how long we\\'ve been together ♾️\\n'\n '**!next** -- tells how long till next anniversary ⌛\\n'\n '**!kill** -- kills da bot 🤖🔫\\n')\n\n# Start command: displays all commands avaliable\n# each needs to begin with !*blank*\n@bot.command()\nasync def date(ctx):\n await ctx.send('date of anniversary: **November 20th, 2019**💐')\n\n# Start command: displays all commands avaliable\n# each needs to begin with !*blank*\n@bot.command()\nasync def together(ctx):\n x = datetime.today() - datetime(2019, 11, 20)\n days = x.days\n seconds = x.seconds\n\n year = days // 365\n days = days - (365* year)\n month = days // 30\n days = days - (30 * month)\n hours = seconds // 3600\n seconds = seconds - (3600 * hours)\n minutes = seconds // 60\n seconds = seconds - (60 * minutes)\n await ctx.send('*~you two 🥧 **cutie pies** 🥧 have been together for...~*')\n await ctx.send('***' + str(year) + '*** years, ***' + str(month) + '*** months, ***' + str(days) + '*** days, ***' + str(hours) + '*** hours, ***' + str(minutes) + '*** minutes, ***' + str(seconds) + '*** seconds '+'😱')\n\n\n@bot.command()\nasync def next(ctx):\n\n x = datetime.today()\n if x.month == 12:\n if x.day >= 20:\n y = 
datetime(year=x.year+1,month=1,day=20)\n else:\n y = datetime(year=x.year,month=x.month,day=20)\n else:\n if x.day >= 20:\n y = datetime(year=x.year,month=x.month+1,day=20)\n else:\n y = datetime(year=x.year,month=x.month,day=20)\n z = y-x\n days = z.days\n seconds = z.seconds\n hours = seconds // 3600\n seconds = seconds - (3600 * hours)\n minutes = seconds // 60\n seconds = seconds - (60 * minutes)\n await ctx.send('*~get hyyppeedd💯, your next anniversary is in...~*')\n await ctx.send('⏱️ ***'+ str(days) + '*** days, ***' + str(hours) + '*** hours, ***' + str(minutes) + '*** minutes, ***' + str(seconds) + '*** seconds ')\n\n@bot.command()\nasync def today(ctx):\n if datetime.today().day != 20:\n await ctx.send('**no, its not today** 😢')\n else:\n await ctx.send('**yea it is! YAYAY! don\\'t forget to give kithes** 💋')\n\n# Kill_Bot command: ends the game that is currently gonig onskips the timer for lobby creation\n@bot.command()\nasync def kill(ctx):\n await ctx.send('bye love birds 🐥💖🐥')\n await ctx.send('i die now ⚰️')\n await bot.logout()\n\n#bot token\nbot.run(os.environ['token'])\n\n\n\n","repo_name":"orlandoMiguel/anniversaryBOT","sub_path":"anniv.py","file_name":"anniv.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4225735051","text":"# https://leetcode.com/problems/find-nearest-point-that-has-the-same-x-or-y-coordinate/submissions/913903133/\n# Date of Submission: 2023-03-12\n\n# Runtime: 717 ms, faster than 75.33% of Python3 online submissions for Find Nearest Point That Has the Same X or Y Coordinate.\n# Memory Usage: 19.3 MB, less than 30.68% of Python3 online submissions for Find Nearest Point That Has the Same X or Y Coordinate.\n#\n\n# Problem:\n# You are given two integers, x and y, which represent your current location on a \n# Cartesian grid: (x, y). You are also given an array points where each \n# points[i] = [ai, bi] represents that a point exists at (ai, bi). \n# \n# A point is valid if it shares the same x-coordinate or the same y-coordinate as your location.\n\n# Return the index(0-indexed) of the valid point with the smallest Manhattan distance \n# from your current location. If there are multiple, return the valid point with the \n# smallest index. 
If there are no valid points, return -1.\n\nclass Solution:\n def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:\n\n closestDistance = 99999\n index = -1\n\n for i in range(0, len(points)):\n if points[i][0] == x or points[i][1] == y:\n manhattanDistance = calculateManhattanDistance(\n x, points[i][0],\n y, points[i][1])\n\n if manhattanDistance < closestDistance:\n index = i\n closestDistance = manhattanDistance\n return index\n\n\ndef calculateManhattanDistance(x1, x2, y1, y2):\n return abs(x1-x2) + abs(y1-y2)\n","repo_name":"Retroflux/playground","sub_path":"LeetCodeSolutions/Python/1779-Find_Nearest_Point_That_Has_the_Same_X_or_Y_Coordinate/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5813644918","text":"import re\n\nmobdef_pattern = r'^\\s*mob\\s+(\\S+)\\s+(\".+\")\\s+\\S+'\n\nwith open('maps.txt', 'r') as file:\n locations = file.read().splitlines()\n\nwith open('D:/RO/roteu/zone/scriptdata/mobdef.sc', 'r') as file:\n mobdefs = file.read().splitlines()\n\ntreasure_pattern = \\\n\"\"\"// ================[{map}]======================\nnpc \"{map}\" \"#{map}_control\" HIDDEN_NPC 1 1 5 0 0\nOnInit:\n\tdisablenpc \"Mark#{map}_1\"\n\tdisablenpc \"Chest#{map}_1\"\n\tdisablenpc \"#Aura_{map}_1\"\n\tdisablenpc \"Mark#{map}_2\"\n\tdisablenpc \"Chest#{map}_2\"\n\tdisablenpc \"#Aura_{map}_2\"\n\tdisablenpc \"Mark#{map}_3\"\n\tdisablenpc \"Chest#{map}_3\"\n\tdisablenpc \"#Aura_{map}_3\"\n\tcmdothernpc \"#{map}_control\" \"spawn\"\nreturn\nOnCommand: \"spawn\"\n\tvar place = rand 1 3\n\tif (place == 1)\n\t\tenablenpc \"Mark#{map}_1\"\n\telseif (place == 2)\n\t\tenablenpc \"Mark#{map}_2\"\n\telse\n\t\tenablenpc \"Mark#{map}_3\"\n\tendif\nreturn\nOnCommand: \"found\"\n\tdisablenpc \"Mark#{map}_1\"\n\tdisablenpc \"Mark#{map}_2\"\n\tdisablenpc \"Mark#{map}_3\"\n\tInitTimer\nreturn\nOnTimer: 10000\n\tdisablenpc \"Chest#{map}_1\"\n\tdisablenpc \"#Aura_{map}_1\"\n\tdisablenpc \"Chest#{map}_2\"\n\tdisablenpc \"#Aura_{map}_2\"\n\tdisablenpc \"Chest#{map}_3\"\n\tdisablenpc \"#Aura_{map}_3\"\nreturn\nOnTimer: 600000\n\tcmdothernpc \"#{map}_control\" \"spawn\"\n\tstoptimer\nreturn\nnpc \"{map}\" \"Chest#{map}_1\" 4_TREASURE_BOX {mark_1_x} {mark_1_y} 5 0 0\nnpc \"{map}\" \"#Aura_{map}_1\" 4_ENERGY_BLACK {mark_1_x} {mark_1_y} 5 0 0\nnpc \"{map}\" \"Mark#{map}_1\" 4_CRACK {mark_1_x} {mark_1_y} 5 0 0\nOnClick:\n\tvar pirate_17410 = isbegin_quest 17410\n\tif (pirate_17410 == 1)\n\t setquest 17411\n\t\tcompletequest 17410\n\tendif\n\tvar found = rand 1 100\n\tif (found <= 30)\n\t\tenablenpc \"#Aura_{map}_1\"\n\t\tConsumeSpecialItem Anodyne_B\n\telseif (found <= 60)\n\t\tcmdothernpc \"Mark#{map}_1\" \"zombie\"\n\telse\n\t\tcmdothernpc \"Mark#{map}_1\" \"treasure\"\n\tendif\n\tcmdothernpc \"#{map}_control\" \"found\"\nreturn\t\nOnCommand: \"zombie\"\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mob_1_x_minus} {mark_1_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mob_1_x_plus} {mark_1_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mark_1_x} {mob_1_y_plus}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mark_1_x} {mob_1_y_minus}\n\tInitTimer\nreturn\nOnTimer: 10000\n\tresetmymob\n\tstoptimer\nreturn\nOnCommand: \"treasure\"\n\tenablenpc \"Chest#{map}_1\"\n\tvar item_count = 0\n\tvar item1 = C_Blue_Rose_Eyepatch\n\tvar item2 = C_Choco_Minihat\n\tvar item3 = C_Clover_Silkhat\n\tvar 
item4 = C_Harvest_Festa_Hat\n\tvar item5 = C_Gryphon_Hairband\n\tvar item6 = Azure_Jewel\n\tvar item7 = Cardinal_Jewel\n\tvar item8 = Blue_Jewel\n\tvar item9 = Golden_Jewel\n\tvar item10 = Bluish_Green_Jewel\n\tvar item11 = Crystal_Jewel\n\tvar item12 = Zargon\n\tvar item13 = Skyblue_Jewel\n\tvar item14 = Scarlet_Jewel\n\tvar item15 = Crystal_Jewel_\n\tvar item16 = Acti_Potion\n\tvar item17 = E_Small_Life_Potion\n\tvar item18 = Giant_Fly_Wing\n\tvar item19 = Guyak_Candy\n\tvar item20 = E_Mysterious_Water\n\tvar item21 = Warp_Free_Ticket\n\tvar item22 = Gold\n\tvar item23 = E_Med_Life_Potion\n\tvar item24 = Comp_Tyr's_Blessing\n\tvar item25 = Comp_Kafra_Card\n\twhile(1)\n\t\tif (item_count < 13)\n\t\t\tvar x = rand {treasure_1_x_minus} {treasure_1_x_plus}\n\t\t\tvar y = rand {treasure_1_y_minus} {treasure_1_y_plus}\n\t\t\tvar item_number = rand 1 25\n\t\t\tif (item_number == 1)\n\t\t\t\tItemDown item1 1 x y\n\t\t\telseif (item_number == 2)\n\t\t\t\tItemDown item2 1 x y\n\t\t\telseif (item_number == 3)\n\t\t\t\tItemDown item3 1 x y\n\t\t\telseif (item_number == 4)\n\t\t\t\tItemDown item4 1 x y\n\t\t\telseif (item_number == 5)\n\t\t\t\tItemDown item5 1 x y\n\t\t\telseif (item_number == 6)\n\t\t\t\tItemDown item6 1 x y\n\t\t\telseif (item_number == 7)\n\t\t\t\tItemDown item7 1 x y\n\t\t\telseif (item_number == 8)\n\t\t\t\tItemDown item8 1 x y\n\t\t\telseif (item_number == 9)\n\t\t\t\tItemDown item9 1 x y\n\t\t\telseif (item_number == 10)\n\t\t\t\tItemDown item10 1 x y\n\t\t\telseif (item_number == 11)\n\t\t\t\tItemDown item11 1 x y\n\t\t\telseif (item_number == 12)\n\t\t\t\tItemDown item12 1 x y\n\t\t\telseif (item_number == 13)\n\t\t\t\tItemDown item13 1 x y\n\t\t\telseif (item_number == 14)\n\t\t\t\tItemDown item14 1 x y\n\t\t\telseif (item_number == 15)\n\t\t\t\tItemDown item15 1 x y\n\t\t\telseif (item_number == 16)\n\t\t\t\tItemDown item16 1 x y\n\t\t\telseif (item_number == 17)\n\t\t\t\tItemDown item17 1 x y\n\t\t\telseif (item_number == 18)\n\t\t\t\tItemDown item18 1 x y\n\t\t\telseif (item_number == 19)\n\t\t\t\tItemDown item19 1 x y\n\t\t\telseif (item_number == 20)\n\t\t\t\tItemDown item20 1 x y\n\t\t\telseif (item_number == 21)\n\t\t\t\tItemDown item21 1 x y\n\t\t\telseif (item_number == 22)\n\t\t\t\tItemDown item22 1 x y\n\t\t\telseif (item_number == 23)\n\t\t\t\tItemDown item23 1 x y\n\t\t\telseif (item_number == 24)\n\t\t\t\tItemDown item24 1 x y\n\t\t\telse\n\t\t\t\tItemDown item25 1 x y\n\t\t\tendif\n\t\t\tvar item_count = item_count + 1\n\t\telse\n\t\t\texitwhile\n\t\tendif\n\tendwhile\nreturn\nnpc \"{map}\" \"Chest#{map}_2\" 4_TREASURE_BOX {mark_2_x} {mark_2_y} 5 0 0\nnpc \"{map}\" \"#Aura_{map}_2\" 4_ENERGY_BLACK {mark_2_x} {mark_2_y} 5 0 0\nnpc \"{map}\" \"Mark#{map}_2\" 4_CRACK {mark_2_x} {mark_2_y} 5 0 0\nOnClick:\n\tvar pirate_17410 = isbegin_quest 17410\n\tif (pirate_17410 == 1)\n\t setquest 17411\n\t\tcompletequest 17410\n\tendif\n\tvar found = rand 1 100\n\tif (found <= 30)\n\t\tenablenpc \"#Aura_{map}_2\"\n\t\tConsumeSpecialItem Anodyne_B\n\telseif (found <= 60)\n\t\tcmdothernpc \"Mark#{map}_2\" \"zombie\"\n\telse\n\t\tcmdothernpc \"Mark#{map}_2\" \"treasure\"\n\tendif\n\tcmdothernpc \"#{map}_control\" \"found\"\nreturn\t\nOnCommand: \"zombie\"\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mob_2_x_minus} {mark_2_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mob_2_x_plus} {mark_2_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mark_2_x} {mob_2_y_plus}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" 
{mark_2_x} {mob_2_y_minus}\n\tInitTimer\nreturn\nOnTimer: 10000\n\tresetmymob\n\tstoptimer\nreturn\nOnCommand: \"treasure\"\n\tenablenpc \"Chest#{map}_2\"\n\tvar item_count = 0\n\tvar item1 = C_Blue_Rose_Eyepatch\n\tvar item2 = C_Choco_Minihat\n\tvar item3 = C_Clover_Silkhat\n\tvar item4 = C_Harvest_Festa_Hat\n\tvar item5 = C_Gryphon_Hairband\n\tvar item6 = Azure_Jewel\n\tvar item7 = Cardinal_Jewel\n\tvar item8 = Blue_Jewel\n\tvar item9 = Golden_Jewel\n\tvar item10 = Bluish_Green_Jewel\n\tvar item11 = Crystal_Jewel\n\tvar item12 = Zargon\n\tvar item13 = Skyblue_Jewel\n\tvar item14 = Scarlet_Jewel\n\tvar item15 = Crystal_Jewel_\n\tvar item16 = Acti_Potion\n\tvar item17 = E_Small_Life_Potion\n\tvar item18 = Giant_Fly_Wing\n\tvar item19 = Guyak_Candy\n\tvar item20 = E_Mysterious_Water\n\tvar item21 = Warp_Free_Ticket\n\tvar item22 = Gold\n\tvar item23 = E_Med_Life_Potion\n\tvar item24 = Comp_Tyr's_Blessing\n\tvar item25 = Comp_Kafra_Card\n\twhile(1)\n\t\tif (item_count < 13)\n\t\t\tvar x = rand {treasure_2_x_minus} {treasure_2_x_plus}\n\t\t\tvar y = rand {treasure_2_y_minus} {treasure_2_y_plus}\n\t\t\tvar item_number = rand 1 25\n\t\t\tif (item_number == 1)\n\t\t\t\tItemDown item1 1 x y\n\t\t\telseif (item_number == 2)\n\t\t\t\tItemDown item2 1 x y\n\t\t\telseif (item_number == 3)\n\t\t\t\tItemDown item3 1 x y\n\t\t\telseif (item_number == 4)\n\t\t\t\tItemDown item4 1 x y\n\t\t\telseif (item_number == 5)\n\t\t\t\tItemDown item5 1 x y\n\t\t\telseif (item_number == 6)\n\t\t\t\tItemDown item6 1 x y\n\t\t\telseif (item_number == 7)\n\t\t\t\tItemDown item7 1 x y\n\t\t\telseif (item_number == 8)\n\t\t\t\tItemDown item8 1 x y\n\t\t\telseif (item_number == 9)\n\t\t\t\tItemDown item9 1 x y\n\t\t\telseif (item_number == 10)\n\t\t\t\tItemDown item10 1 x y\n\t\t\telseif (item_number == 11)\n\t\t\t\tItemDown item11 1 x y\n\t\t\telseif (item_number == 12)\n\t\t\t\tItemDown item12 1 x y\n\t\t\telseif (item_number == 13)\n\t\t\t\tItemDown item13 1 x y\n\t\t\telseif (item_number == 14)\n\t\t\t\tItemDown item14 1 x y\n\t\t\telseif (item_number == 15)\n\t\t\t\tItemDown item15 1 x y\n\t\t\telseif (item_number == 16)\n\t\t\t\tItemDown item16 1 x y\n\t\t\telseif (item_number == 17)\n\t\t\t\tItemDown item17 1 x y\n\t\t\telseif (item_number == 18)\n\t\t\t\tItemDown item18 1 x y\n\t\t\telseif (item_number == 19)\n\t\t\t\tItemDown item19 1 x y\n\t\t\telseif (item_number == 20)\n\t\t\t\tItemDown item20 1 x y\n\t\t\telseif (item_number == 21)\n\t\t\t\tItemDown item21 1 x y\n\t\t\telseif (item_number == 22)\n\t\t\t\tItemDown item22 1 x y\n\t\t\telseif (item_number == 23)\n\t\t\t\tItemDown item23 1 x y\n\t\t\telseif (item_number == 24)\n\t\t\t\tItemDown item24 1 x y\n\t\t\telse\n\t\t\t\tItemDown item25 1 x y\n\t\t\tendif\n\t\t\tvar item_count = item_count + 1\n\t\telse\n\t\t\texitwhile\n\t\tendif\n\tendwhile\nreturn\nnpc \"{map}\" \"Chest#{map}_3\" 4_TREASURE_BOX {mark_3_x} {mark_3_y} 5 0 0\nnpc \"{map}\" \"#Aura_{map}_3\" 4_ENERGY_BLACK {mark_3_x} {mark_3_y} 5 0 0\nnpc \"{map}\" \"Mark#{map}_3\" 4_CRACK {mark_3_x} {mark_3_y} 5 0 0\nOnClick:\n\tvar pirate_17410 = isbegin_quest 17410\n\tif (pirate_17410 == 1)\n\t setquest 17411\n\t\tcompletequest 17410\n\tendif\n\tvar found = rand 1 100\n\tif (found <= 30)\n\t\tenablenpc \"#Aura_{map}_3\"\n\t\tConsumeSpecialItem Anodyne_B\n\telseif (found <= 60)\n\t\tcmdothernpc \"Mark#{map}_3\" \"zombie\"\n\telse\n\t\tcmdothernpc \"Mark#{map}_3\" \"treasure\"\n\tendif\n\tcmdothernpc \"#{map}_control\" \"found\"\nreturn\t\nOnCommand: \"zombie\"\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE 
\"Pirate's Soul\" {mob_3_x_minus} {mark_3_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mob_3_x_plus} {mark_3_y}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mark_3_x} {mob_3_y_plus}\n\tcallmonster \"{map}\" E_ZOMBIE_TREASURE \"Pirate's Soul\" {mark_3_x} {mob_3_y_minus}\n\tInitTimer\nreturn\nOnTimer: 10000\n\tresetmymob\n\tstoptimer\nreturn\nOnCommand: \"treasure\"\n\tenablenpc \"Chest#{map}_3\"\n\tvar item_count = 0\n\tvar item1 = C_Blue_Rose_Eyepatch\n\tvar item2 = C_Choco_Minihat\n\tvar item3 = C_Clover_Silkhat\n\tvar item4 = C_Harvest_Festa_Hat\n\tvar item5 = C_Gryphon_Hairband\n\tvar item6 = Azure_Jewel\n\tvar item7 = Cardinal_Jewel\n\tvar item8 = Blue_Jewel\n\tvar item9 = Golden_Jewel\n\tvar item10 = Bluish_Green_Jewel\n\tvar item11 = Crystal_Jewel\n\tvar item12 = Zargon\n\tvar item13 = Skyblue_Jewel\n\tvar item14 = Scarlet_Jewel\n\tvar item15 = Crystal_Jewel_\n\tvar item16 = Acti_Potion\n\tvar item17 = E_Small_Life_Potion\n\tvar item18 = Giant_Fly_Wing\n\tvar item19 = Guyak_Candy\n\tvar item20 = E_Mysterious_Water\n\tvar item21 = Warp_Free_Ticket\n\tvar item22 = Gold\n\tvar item23 = E_Med_Life_Potion\n\tvar item24 = Comp_Tyr's_Blessing\n\tvar item25 = Comp_Kafra_Card\n\twhile(1)\n\t\tif (item_count < 13)\n\t\t\tvar x = rand {treasure_3_x_minus} {treasure_3_x_plus}\n\t\t\tvar y = rand {treasure_3_y_minus} {treasure_3_y_plus}\n\t\t\tvar item_number = rand 1 25\n\t\t\tif (item_number == 1)\n\t\t\t\tItemDown item1 1 x y\n\t\t\telseif (item_number == 2)\n\t\t\t\tItemDown item2 1 x y\n\t\t\telseif (item_number == 3)\n\t\t\t\tItemDown item3 1 x y\n\t\t\telseif (item_number == 4)\n\t\t\t\tItemDown item4 1 x y\n\t\t\telseif (item_number == 5)\n\t\t\t\tItemDown item5 1 x y\n\t\t\telseif (item_number == 6)\n\t\t\t\tItemDown item6 1 x y\n\t\t\telseif (item_number == 7)\n\t\t\t\tItemDown item7 1 x y\n\t\t\telseif (item_number == 8)\n\t\t\t\tItemDown item8 1 x y\n\t\t\telseif (item_number == 9)\n\t\t\t\tItemDown item9 1 x y\n\t\t\telseif (item_number == 10)\n\t\t\t\tItemDown item10 1 x y\n\t\t\telseif (item_number == 11)\n\t\t\t\tItemDown item11 1 x y\n\t\t\telseif (item_number == 12)\n\t\t\t\tItemDown item12 1 x y\n\t\t\telseif (item_number == 13)\n\t\t\t\tItemDown item13 1 x y\n\t\t\telseif (item_number == 14)\n\t\t\t\tItemDown item14 1 x y\n\t\t\telseif (item_number == 15)\n\t\t\t\tItemDown item15 1 x y\n\t\t\telseif (item_number == 16)\n\t\t\t\tItemDown item16 1 x y\n\t\t\telseif (item_number == 17)\n\t\t\t\tItemDown item17 1 x y\n\t\t\telseif (item_number == 18)\n\t\t\t\tItemDown item18 1 x y\n\t\t\telseif (item_number == 19)\n\t\t\t\tItemDown item19 1 x y\n\t\t\telseif (item_number == 20)\n\t\t\t\tItemDown item20 1 x y\n\t\t\telseif (item_number == 21)\n\t\t\t\tItemDown item21 1 x y\n\t\t\telseif (item_number == 22)\n\t\t\t\tItemDown item22 1 x y\n\t\t\telseif (item_number == 23)\n\t\t\t\tItemDown item23 1 x y\n\t\t\telseif (item_number == 24)\n\t\t\t\tItemDown item24 1 x y\n\t\t\telse\n\t\t\t\tItemDown item25 1 x y\n\t\t\tendif\n\t\t\tvar item_count = item_count + 1\n\t\telse\n\t\t\texitwhile\n\t\tendif\n\tendwhile\nreturn\n\n\"\"\"\ntrader_pattern = \\\n \"\"\"// ================[{map}]======================\nnpc \"{map}\" \"Pirate#{map}\" 4_CAT_SAILOR2 {trader_x} {trader_y} {trader_ang} 0 0\nOnInit:\n\tNpcOverBMP \"group_3\"\n\t\n\tAddQuestIDCondition {quest}\n\tSetQuestConditionBegin {quest} 1 4\n\tSetQuestConditionQuest {quest} 0\n\tSetQuestConditionEnd\n\tSetQuestConditionBegin {quest} 1 4\n\tSetQuestConditionQuest {quest} 
2\n\tSetQuestConditionEnd\n\tSetQuestConditionBegin {quest} 1 4\n\tSetQuestConditionItem 6839 499 \"<\"\n\tSetQuestConditionEnd \nreturn\nOnTouch2:\n\tTalk2me \"Pirate#{map}\" \"Everything fits together on the maps... So where is this chest?\"\nreturn\nOnClick:\n\tdialog \"[Pirate]\"\n\tdialog \"If I could read a map, \"\n\tdialog \"I would have become a captain...\"\n\tdialog \"Oh! Hi. Looking for someone?\"\n\tdialog \"Or maybe something...\"\n\twait\n\tchoose menu \"Tell me about treasures\" \"Cursed coins\" \"Help is needed?\"\n\tcase 1\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"What treasures? Ah, okay...\"\n\t\tdialog \"I haven't been able to find \"\n\t\tdialog \"anything for several days.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"You may have noticed that \"\n\t\tdialog \"the local pirates have begun \"\n\t\tdialog \"internecine showdowns.\"\n\t\tdialog \"But they often come across both \"\n\t\tdialog \"merchants and ordinary wanderers.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"Therefore, recently, many\"\n\t\tdialog \"sailors and merchants hide\"\n\t\tdialog \"their wealth in chests... \"\n\t\tdialog \"Until better times.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"And... I'm trying to find them,\"\n\t\tdialog \"he-he. If you are lucky enough,\"\n\t\tdialog \"you can find something too.\"\n\t\tclose\n\t\treturn\n\tbreak\n\tcase 2\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"Ah... There is such a legend...\"\n\t\tdialog \"Some time ago,\"\n\t\tdialog \"when there were more pirates,\"\n\t\tdialog \"than ordinary sailors,\"\n\t\tdialog \"a ship full of gold\"\n\t\tdialog \"was sailing across the ocean...\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"It was gold taken away from\"\n\t\tdialog \"the poor and hard workers\"\n\t\tdialog \"from the land. The villainous\"\n\t\tdialog \"sailors who took it away were \"\n\t\tdialog \"pleased at first. They were \"\n\t\tdialog \"wasting it on a dissolute life.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"But soon ... The sea ceased\"\n\t\tdialog \"to delight them ... Rum and food\"\n\t\tdialog \"no longer brought pleasure,\"\n\t\tdialog \"and they truly became\"\n\t\tdialog \"insensitive creatures.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"The coins were cursed by\"\n\t\tdialog \"people from whom it was stolen,\"\n\t\tdialog \"and only a person who collects\"\n\t\tdialog \"five hundred of these gold\"\n\t\tdialog \"coins and donates them to the poor\"\n\t\tdialog \"will remove the curse from this gold.\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"...I would like to become the hero\"\n\t\tdialog \"of this legend. But for this \"\n\t\tdialog \"I need to collect these 500 cursed coins...\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"If you bring coins, I'll give you\"\n\t\tdialog \"a real pirate cocked hat!\"\n\t\tdialog \"This is, of course, a strange offer\"\n\t\tdialog \"for 500 gold coins...\"\n\t\twait\n\t\tdialog \"[Pirate]\"\n\t\tdialog \"But then you can contribute to \"\n\t\tdialog \"a good cause! 
And what can \"\n\t\tdialog \"you do with these cursed coins?...\"\n\t\twait\n\t\tchoose menu \"Can I try on a hat?\" \"Can I buy a pirate hat?\" \"I don't want a pirate hat...\"\n case 1\n if (v[jp_event63] > 4)\n dialog \"[Pirate]\"\n dialog \"Wait a minute...\"\n dialog \"You have already rented\"\n dialog \"this hat 5 times!\"\n dialog \"I think you don't need\"\n dialog \"to bring Cursed Coins anymore.\"\n wait\n dialog \"[Pirate]\"\n dialog \"I can't believe...\"\n dialog \"You and I will finally remove\"\n dialog \"the curse from this gold...\"\n dialog \"I will take it upon myself\"\n dialog \"to distribute them to the poor.\"\n wait\n dialog \"[Pirate]\"\n dialog \"And here is my pirate cocked hat.\"\n dialog \"You deserve it.\"\n setitem jp_event63 0\n getitem C_Pirate_Bandana 1 \n close\n return\n else\n dialog \"[Pirate]\"\n dialog \"Well, I can give it for a while...\"\n dialog \"But I can't trust \"\n dialog \"it to just anyone...\"\n wait\n dialog \"[Pirate]\"\n dialog \"Rent will cost 100 Cursed Coins.\"\n dialog \"But if you are going to buy \" \n dialog \"this hat in the future,\"\n dialog \"it will cost 100 coins less.\"\n dialog \"Do you want to rent?\"\n wait\n choose menu \"Rent a hat (100 Cursed Coins)\" \"Cancel \"\n case 1\n if (v[Gold_Chocoin] > 99)\n dropitem Gold_Chocoin 100\n RentItem C_Pirate_Bandana 86400 0 0 0 0 0 \n getitem jp_event63 1\n dialog \"[Pirate]\"\n dialog \"Here you go. And remember\"\n dialog \"that it's only for one day.\"\n close\n return\n else\n dialog \"[Pirate]\"\n dialog \"You don't have enough coins.\"\n dialog \"Although I'm not good at \"\n dialog \"arithmetic, I count \"\n dialog \"the coins as they should be. \"\n close\n return\n endif\n break\n case 2\n close\n return\n break\n endchoose\n\t\t\t\tendif\n\t\t\tbreak\n case 2\n if (v[jp_event63] > 4)\n dialog \"[Pirate]\"\n dialog \"Wait a minute...\"\n dialog \"You have already rented\"\n dialog \"this hat 5 times!\"\n dialog \"I think you don't need\"\n dialog \"to buy this hat.\"\n wait\n dialog \"[Pirate]\"\n dialog \"I can't believe...\"\n dialog \"You and I will finally remove\"\n dialog \"the curse from this gold...\"\n dialog \"I will take it upon myself\"\n dialog \"to distribute them to the poor.\"\n wait\n dialog \"[Pirate]\"\n dialog \"And here is my pirate cocked hat.\"\n dialog \"You deserve it.\"\n setitem jp_event63 0\n getitem C_Pirate_Bandana 1 \n close\n return\n else\n var rent_count = v[jp_event63]\n var rent_discount = rent_count * 100\n var hat_cost = 500 - rent_discount\n dialog \"[Pirate]\"\n dialog \"Do you remember that you \"\n dialog \"have to bring at least 500\"\n dialog \"Cursed Coins?\"\n if (hat_cost < 500)\n dialog \"But you rented this hat\"\n dialog \"\"+rent_count+\" times, so I can give it\"\n dialog \"to you for \"+hat_cost+\" coins.\"\n endif\n choose menu \"Buy a hat (\"+hat_cost+\" coins)\" \"Cancel\"\n case 1\n if (v[Gold_Chocoin] >= hat_cost)\n dialog \"[Pirate]\"\n dialog \"I can't believe...\"\n dialog \"You and I will finally remove\"\n dialog \"the curse from this gold...\"\n dialog \"I will take it upon myself\"\n dialog \"to distribute them to the poor.\"\n wait\n dialog \"[Pirate]\"\n dialog \"And here is my pirate cocked hat.\"\n dialog \"You deserve it.\"\n dropitem Gold_Chocoin hat_cost\n setitem jp_event63 0\n getitem C_Pirate_Bandana 1 \n close\n return\n else\n dialog \"[Pirate]\"\n dialog \"You don't have enough coins.\"\n dialog \"Although I'm not good at \"\n dialog \"arithmetic, I count \"\n dialog \"the coins as they 
should be. \"\n close\n return\n endif\n break\n case 2\n close\n return\n break\n endchoose\n endif\n break\n case 3\n\t\t\t dialog \"[Pirate]\"\n\t\t\t\tdialog \"If you don't need it, and neither\"\n\t\t\t\tdialog \"do the pirate hat,\"\n\t\t\t\tdialog \"then I can exchange them \"\n\t\t\t\tdialog \"for several bottles of rum,\"\n\t\t\t\tdialog \"one per 5 Cursed coins.\"\n\t\t\t\twait\n\t\t\t\tchoose menu \"I want to exchange \" \"I will keep \"\n\t\t\t\tcase 1\n\t\t\t\t dialog \"[Pirate]\"\n dialog \"How many Bottles of Rum do you want to get?\"\n dlgwrite 1 500\n if input == 0\n dialog \"[Pirate]\"\n dialog \"Well, right.\"\n close\n return\n elseif error\n dialog \"[Pirate]\"\n dialog \"So do you want to exchange or not?\"\n close\n return\n else\n var rum_cost = input * 5\n if (v[Gold_Chocoin] >= rum_cost)\n var weight_check = GetInventoryRemainCount 23660 input\n if ((weight_check == 2)|(weight_check == 3))\n dialog \"[Pirate]\"\n dialog \"Your inventory is full.\"\n close\n return\n else\n dialog \"[Pirate]\"\n dialog \"Here you go. Your choice, of course... \"\n dropitem Gold_Chocoin rum_cost\n getitem Dogly_Bottle_Z input\n close\n return\n endif\n else\n dialog \"[Pirate]\"\n dialog \"You don't have enough coins.\"\n close\n return\n endif\n endif\n\t\t\t\tbreak\n\t\t\t\tcase 2\n\t\t\t\t dialog \"[Pirate]\"\n dialog \"Well, right.\"\n close\n return\n\t\t\t\tbreak\n\t\t\t\tendchoose\n\t\t\tbreak\n\t\tendchoose\n\tbreak\n\tcase 3\n\t\tvar pirate_{quest} = isbegin_quest {quest}\n\t\tvar hunt_check = checkquest_hunting {quest}\n\t\tif (pirate_{quest} == 0)\n\t\t\tdialog \"[Pirate]\"\n\t\t\tdialog \"Actually, yes. It would be nice\"\n\t\t\tdialog \"if you liberated this area \"\n\t\t\tdialog \"from the creatures.\"\n\t\t\tdialog \"My team and I just found here...\"\n\t\t\tdialog \"It doesn't matter what.\"\n\t\t\twait\n\t\t\tchoose menu \"I will take it\"\n\t\t\t\tcase 1\n\t\t\t\t\tdialog \"[Pirate]\"\n\t\t\t\t\tdialog \"Nice.\"\n\t\t\t\t\tdialog \"500 creatures will be enough.\"\n\t\t\t\t\tdialog \"Of course, I will not leave you without a reward.\"\n\t\t\t\t\tsetquest {quest}\n\t\t\t\t\tclose\n\t\t\t\t\treturn\n\t\t\t\tbreak\n\t\t\tendchoose\n\t\telseif (pirate_{quest} == 1)\n\t\t\tif (hunt_check == 2)\n\t\t\t var weight_check = GetInventoryRemainCount 1301 2\n if ((weight_check == 2)|(weight_check == 3))\n dialog \"[Pirate]\"\n dialog \"Free up some space in \"\n dialog \"your inventory and return. \"\n close\n return\n endif\n\t\t\t\tdialog \"[Pirate]\"\n\t\t\t\tdialog \"Great job! A couple of bottles\"\n\t\t\t\tdialog \"of our rum will brighten up your evening.\"\n\t\t\t\tcompletequest {quest}\n\t\t\t\tgetitem Comp_Glass_Of_Illusion 10\n\t\t\t\tgetitem Dogly_Bottle_Z 10\n\t\t\t\tgetitem Gold_Chocoin 20\n\t\t\t\twait\n\t\t\t\tdialog \"[Pirate]\"\n\t\t\t\tdialog \"Also, I found some coins \"\n\t\t\t\tdialog \"that you will need if you want \"\n\t\t\t\tdialog \"the pirate cocked hat. \"\n\t\t\t\tclose\n\t\t\t\treturn\n\t\t\telse\n\t\t\t\tdialog \"[Pirate]\"\n\t\t\t\tdialog \"Need help only in liberating \"\n\t\t\t\tdialog \"the territory. 
Come back \"\n\t\t\t\tdialog \"when you finish the task.\"\n\t\t\t\tclose\n\t\t\t\treturn\n\t\t\tendif\n\t\telse\n\t\t\tdialog \"[Pirate]\"\n\t\t\tdialog \"Thanks, but no more help is needed here.\"\n\t\t\tclose\n\t\t\treturn\n\t\tendif\n\tbreak\n\tendchoose\nreturn\n\n\"\"\"\nquestmission_pattern = \\\n \"\"\"quest [\n\tquest_info ({quest}, \"Pirate's trader quest\")\n\thunt (\"{quest_mob}\", 500)\n]\n\"\"\"\nquest_pattern = \\\n \"\"\"{quest}#Pirate's Pact#SG_FEEL#QUE_NOIMAGE#\nYou and Pirate have come to an agreement. Kill 500 ^000077{mob_name}^000000 to get a reward.#\nDefeat monsters#\n\"\"\"\ntreasure_text = ''\ntrader_text = ''\nquests_text = ''\nquests_mission = ''\n\nfor location in locations:\n one = re.split(r'\\s+', location)\n map = one[0]\n mark_1_x = one[1]\n mark_1_y = one[2]\n mark_2_x = one[3]\n mark_2_y = one[4]\n mark_3_x = one[5]\n mark_3_y = one[6]\n trader_x = one[7]\n trader_y = one[8]\n mob_1_x_plus = str(int(mark_1_x) + 2)\n mob_1_x_minus = str(int(mark_1_x) - 2)\n mob_1_y_plus = str(int(mark_1_y) + 2)\n mob_1_y_minus = str(int(mark_1_y) - 2)\n mob_2_x_plus = str(int(mark_2_x) + 2)\n mob_2_x_minus = str(int(mark_2_x) - 2)\n mob_2_y_plus = str(int(mark_2_y) + 2)\n mob_2_y_minus = str(int(mark_2_y) - 2)\n mob_3_x_plus = str(int(mark_3_x) + 2)\n mob_3_x_minus = str(int(mark_3_x) - 2)\n mob_3_y_plus = str(int(mark_3_y) + 2)\n mob_3_y_minus = str(int(mark_3_y) - 2)\n treasure_1_x_plus = str(int(mark_1_x) + 7)\n treasure_1_x_minus = str(int(mark_1_x) - 7)\n treasure_1_y_plus = str(int(mark_1_y) + 7)\n treasure_1_y_minus = str(int(mark_1_y) - 7)\n treasure_2_x_plus = str(int(mark_2_x) + 7)\n treasure_2_x_minus = str(int(mark_2_x) - 7)\n treasure_2_y_plus = str(int(mark_2_y) + 7)\n treasure_2_y_minus = str(int(mark_2_y) - 7)\n treasure_3_x_plus = str(int(mark_3_x) + 7)\n treasure_3_x_minus = str(int(mark_3_x) - 7)\n treasure_3_y_plus = str(int(mark_3_y) + 7)\n treasure_3_y_minus = str(int(mark_3_y) - 7)\n trader_ang = one[9]\n quest = one[10]\n quest_mob = one[11]\n print(quest_mob)\n for mobdef in mobdefs:\n mobdef = mobdef.strip()\n if re.search(mobdef_pattern, mobdef):\n mob_dbname = re.search(mobdef_pattern, mobdef).group(1)\n if mob_dbname == quest_mob: mob_name = re.search(mobdef_pattern, mobdef).group(2).replace('\\\"', '')\n else:\n pass\n new_treasure = treasure_pattern.format(map=map,\n mark_1_x=mark_1_x,\n mark_1_y=mark_1_y,\n mark_2_x=mark_2_x,\n mark_2_y=mark_2_y,\n mark_3_x=mark_3_x,\n mark_3_y=mark_3_y,\n mob_1_x_plus=mob_1_x_plus,\n mob_1_x_minus=mob_1_x_minus,\n mob_1_y_plus=mob_1_y_plus,\n mob_1_y_minus=mob_1_y_minus,\n mob_2_x_plus=mob_2_x_plus,\n mob_2_x_minus=mob_2_x_minus,\n mob_2_y_plus=mob_2_y_plus,\n mob_2_y_minus=mob_2_y_minus,\n mob_3_x_plus=mob_3_x_plus,\n mob_3_x_minus=mob_3_x_minus,\n mob_3_y_plus=mob_3_y_plus,\n mob_3_y_minus=mob_3_y_minus,\n treasure_1_x_plus=treasure_1_x_plus,\n treasure_1_x_minus=treasure_1_x_minus,\n treasure_1_y_plus=treasure_1_y_plus,\n treasure_1_y_minus=treasure_1_y_minus,\n treasure_2_x_plus=treasure_2_x_plus,\n treasure_2_x_minus=treasure_2_x_minus,\n treasure_2_y_plus=treasure_2_y_plus,\n treasure_2_y_minus=treasure_2_y_minus,\n treasure_3_x_plus=treasure_3_x_plus,\n treasure_3_x_minus=treasure_3_x_minus,\n treasure_3_y_plus=treasure_3_y_plus,\n treasure_3_y_minus=treasure_3_y_minus)\n treasure_text += new_treasure\n new_trader = trader_pattern.format(map=map,\n trader_x=trader_x,\n trader_y=trader_y,\n trader_ang=trader_ang,\n quest=quest,\n quest_mob=quest_mob)\n trader_text += new_trader\n new_quest = 
quest_pattern.format(quest=quest,\n mob_name=mob_name)\n quests_text += new_quest\n new_questmission = questmission_pattern.format(quest=quest,\n quest_mob=quest_mob)\n quests_mission += new_questmission\n\nwith open('questmission_eu.txt', 'w', encoding='utf-8') as file:\n file.write(quests_mission)\n\nwith open('questid2display_eu.txt', 'w', encoding='utf-8') as file:\n file.write(quests_text)\n\nwith open('treasures_eu.sc', 'w', encoding='utf-8') as file:\n file.write(treasure_text)\n\nwith open('traders_eu.sc', 'w', encoding='utf-8') as file:\n file.write(trader_text)\nprint(\"Готово!\")\n","repo_name":"roroman0303/Projects","sub_path":"Old/Ragnarok/One-time scripts/Генератор сокровищ/generate_eu.py","file_name":"generate_eu.py","file_ext":"py","file_size_in_byte":30017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28884779527","text":"# WAP accept 15 words in a list display only palindrome word and special word.\nUserList = eval(input(\"Enter 15 words in a list: \"))\npalindrome = []\nspecial = []\nfor i in UserList:\n if i == i[::-1]:\n palindrome.append(i)\n elif i[0] == i[-1]:\n special.append(i)\nprint(i for i in palindrome)\nprint(i for i in special)\n\n# WAP accept a string in uppercase.Convert all letters of the word other than the first letter to lowercase. Display the new string...\nuserInp = input(\"Enter a String in uppercase: \").upper().split(\" \")\nfor i in userInp:\n print(i[0]+i[1:].lower(), end=\" \")\n\n\n# WAP input a string. Print the new string after converting every alternate letter to uppercase and next immidiate letter in lower case. Special character remains same\nUserInp = input(\"Enter string: \").strip()\nft = UserInp[0].isupper()\nprint(UserInp[0], end=\"\")\nif ft:\n for i in UserInp[1:]:\n if i == \" \":\n print(i, end=\" \")\n if UserInp.index(i) % 2 == 0:\n print(i.upper(), end=\"\")\n if UserInp.index(i) % 2 != 0:\n print(i.lower(), end=\"\")\nelse:\n for i in UserInp[1:]:\n if i == \" \":\n print(i, end=\"\")\n if UserInp.index(i) % 2 == 0:\n print(i.lower(), end=\"\")\n if UserInp.index(i) % 2 != 0:\n print(i.upper(), end=\"\")\n\n# WAP to count and print the word that has at least one consecutive pair of alphabets. 
Eg: MODEM IS AN ELECTRONIC DEVICE should output as MODEM and DEVICE and print 2\nuserInp = input(\"Enter sentence: \").strip().upper().split(\" \")\ncount = 0\nfor i in range(len(userInp)):\n for j in range(len(userInp[i])-1):\n if chr(ord(userInp[i][j])+1) == userInp[i][j+1]:\n count += 1\n print(userInp[i], end=\" \")\n break\nprint(f\"\\nNo of words are {count}\")\n","repo_name":"SoumadeepChoudhury/Java_Python_Projects","sub_path":"Python Project/src/soumadeepPython/ClassWork14.03.2022.py","file_name":"ClassWork14.03.2022.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"36531499965","text":"import json\n\nfrom my_token import TOKEN_VK\nfrom my_token import TOKEN_YA\nimport requests\nfrom pprint import pprint\nimport json as js\nimport urllib\n\nclass VK:\n\n def __init__(self, access_token, user_id, version='5.131'):\n self.token = access_token\n self.id = user_id\n self.version = version\n self.params = {'access_token': self.token, 'v': self.version}\n\n def users_info(self):\n url = 'https://api.vk.com/method/users.get'\n params = {'user_ids': self.id}\n response = requests.get(url, params={**self.params, **params})\n return response.json()\n\n def get_photo(self):\n url = 'https://api.vk.com/method/photos.get'\n params = {**self.params, 'owner_id': self.id, 'album_id': 'profile', 'extended': 1, 'photo_sizes': 1}\n response = requests.get(url, params=params)\n return response.json()\n\n\nclass YandexDisk:\n\n def __init__(self, token):\n self.token = token\n\n def make_upload_link(self, name_your_path):\n upload_url = \"https://cloud-api.yandex.net/v1/disk/resources/upload\"\n headers = self.get_headers()\n params = {\"path\": name_your_path, \"overwrite\": \"true\"}\n response = requests.get(upload_url, headers=headers, params=params)\n return response.json()['href']\n\n def upload(self, name_your_path, url_photo_vk):\n href = self.make_upload_link(name_your_path=name_your_path)\n params = {'path': name_your_path, 'url': url_photo_vk}\n response = requests.post(url=href, params=params)\n response.raise_for_status()\n if response.status_code == 201:\n print('Success')\n\n def get_headers(self):\n return {\n 'Content-Type': 'application/json',\n 'Authorization': f'OAuth {self.token}'\n }\n\n def get_files_list(self):\n files_url = 'https://cloud-api.yandex.net/v1/disk/resources/files'\n headers = self.get_headers()\n response = requests.get(files_url, headers=headers)\n return response.json()\n\n def create_folder(self, name_your_path):\n files_url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = self.get_headers()\n params = {'path': name_your_path}\n response = requests.put(files_url, headers=headers, params=params)\n return response.json()\n\n\n\naccess_token = TOKEN_VK\ntoken = TOKEN_YA\nuser_id = 264521131\nvk = VK(access_token, user_id)\nya = YandexDisk(token)\nmax_photo_count = vk.get_photo()['response']['count']\nmax_photo = vk.get_photo()['response']['items']\nnew = []\nURL = []\n\n# my_dic = {new[i]: new[i+1] for i in range(0, len(new), 2)}\n# json_ob = json.dumps(my_dic, indent=4)\n# print(type(vk.get_photo())\n\nfor i in max_photo:\n for j in i['sizes']:\n if j['type'] == 'z':\n new.append({i['likes']['count']: j['url']})\n URL.append(j['url'])\na = URL[0]\nfolder_for_photos = ya.create_folder('downloaded_photos')\nya.upload(folder_for_photos, 
a)","repo_name":"smnfive/kurs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32472956989","text":"import numpy as np\n# from permutation import permutation\nfrom functions import gain\nfrom estimators import naive_estimate\nfrom sklearn.utils import check_X_y\nfrom copy import deepcopy\nfrom early_stopping import chi_square\nfrom multiprocess import Pool\n\n# This algorithm runs very slow\n\ndef split(node, dim, value):\n \"\"\"For a single node split\n \"\"\"\n left, right = list(), list()\n for each in node:\n if each[dim] <= value:\n left.append(each)\n else:\n right.append(each)\n return left, right\n\ndef new_nodes(node_list, dim, value):\n \"\"\"Create all node list\n \"\"\"\n new_node_list = []\n for node in node_list:\n left, right = split(node, dim, value)\n if left:\n new_node_list.append(left)\n if right:\n new_node_list.append(right)\n return new_node_list\n\ndef info_gain(node_list, y_dict):\n \"\"\"y_dict = {points, value}\n Information gain\n \"\"\"\n X, y = list(), list()\n for i in range(len(node_list)):\n for each in node_list[i]:\n X.append(i)\n y.append(y_dict[tuple(each)])\n X, y = np.array(X), np.array(y)\n return gain(X, y)# - permutation(X, y).summary()/entropy(y)\n\ndef single_loop(node_list, dim, value, y_dict):\n \"\"\"For multi-processing only\n \"\"\"\n new_node_list = new_nodes(node_list, dim, value)\n gains = info_gain(node_list, y_dict)\n return gains, new_node_list, dim, value\n\ndef partial_comparison(data, node_list, y_dict, early_stopping, num_pre_bins, pre_gain, pool):\n node_list_list, dim_list, value_list = list(), list(), list()\n final_gain, final_nodes, final_value = None, None, None\n n, p = data.shape\n for dim in range(p):\n uniq_values = np.unique(data[:, dim])\n for value in uniq_values:\n dim_list.append(dim)\n value_list.append(value)\n y_dic_list = [y_dict for _ in range(len(value_list))]\n node_list_list = [node_list for _ in range(len(value_list))]\n new_node_list_list = pool.starmap(new_nodes, zip(node_list_list, dim_list, value_list))\n gains_list = pool.starmap(info_gain, zip(new_node_list_list, y_dic_list))\n final_dim, final_p_value = -1, np.infty\n for i in range(len(gains_list)):\n degree_of_freedom = len(new_node_list_list[i]) - num_pre_bins\n p_current = chi_square(current_value=gains_list[i], previous_value=pre_gain, size=n, degree_of_freedom=degree_of_freedom)\n if p_current < final_p_value:\n final_p_value = p_current\n # if final_dim !=-1 and final_dim < dim_list[i]:\n # continue\n final_gain = gains_list[i]\n final_nodes = new_node_list_list[i]\n final_dim = dim_list[i]\n final_value = value_list[i]\n return final_gain, final_nodes, final_dim, final_value, final_p_value\n\nclass simpleJointDiscretizationMI:\n\n def __init__(self, early_stopping, delta=0.05) -> None:\n self.early_stopping = early_stopping\n self.delta = delta\n pass\n\n def fit(self, data, target):\n # main function\n data, target = check_X_y(data, target)\n n, p = data.shape\n pool = Pool()\n dim_list, value_list, final_gain, previous_gain, node_list = [], [], 0, 0, [deepcopy(data)]\n y_dict = dict(zip([tuple(each) for each in data], target))\n data, target = check_X_y(data, target)\n h_y = naive_estimate(target)\n stop=False\n step_mi_list= []\n num_pre_bins = 1\n while not stop:\n final_gain, final_nodes, final_dim, final_value, final_p_value = partial_comparison(data, node_list, y_dict, self.early_stopping, 
num_pre_bins, previous_gain, pool)\n if self.early_stopping == 'chi_square_adjust':\n new_delta = self.delta/(p*(n-1)-len(value_list)-1)\n elif self.early_stopping == 'chi_square':\n new_delta = self.delta/((n-1)*(p-len(value_list)-1))\n\n if final_p_value <= new_delta and final_gain!=None:\n previous_gain = final_gain\n num_pre_bins = len(final_nodes)\n node_list = final_nodes\n dim_list.append(final_dim)\n value_list.append(final_value)\n step_mi_list.append(final_gain)\n else:\n stop=True\n break\n pool.close()\n return dim_list, step_mi_list/h_y, num_pre_bins, value_list, num_pre_bins\n","repo_name":"YiwenLu-yiwen/PhD","sub_path":"Discretization/joint_n2algorithmMI.py","file_name":"joint_n2algorithmMI.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10856055207","text":"# The module has below functions\n# Mainfunc2() : This is the main function which calls other function based on user input\n# devicelogin() : This function is called inside Mainfunc2_1 and used to create threads\n# Mainfunc2_1() : This function initiates the threads\n# deviceloginCustom() : this would be used in customcmds function and used to create threads\n# customcmds() : Function to run custom commands to mentioned list of devices\n\n##Difference between this module and func2 is that in this module we have placed restriction that it can login to more than 5 devices at a time.\n\nimport netmiko\nimport sys\nimport os\nimport threading\nimport time\nimport pandas\nimport subprocess\n\nsys.path.append(os.path.dirname(os.getcwd())) ## Modifying sys.path in order to use CommonFunc package\nfrom CommonFunc import CommonFunc # from CommonFunc package importing CommonFunc module\nUsername = 'test'\nPwd = 'test'\n\n#cmds = CommonFunc.commandlist(os.path.dirname(os.getcwd()) + '\\\\Jsonfiles' + '\\\\func2commands.json', 'Juniper') ## commandlist function excepts the path of the file\n\nCiscodevicelist, juniperdevicelist = CommonFunc.devicelist(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\Jsonfiles' + '\\\\DeviceList.json')\n## Ciscodevicelist and juniperdevice list gives both name and ip of the device\nCiscocommandlist = CommonFunc.commandlist(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\Jsonfiles' + '\\\\func2commands.json', 'Cisco')\nJunipercommandlist = CommonFunc.commandlist(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\Jsonfiles' + '\\\\func2commands.json', 'Juniper')\ndef Mainfunc2():\n print(\"Below are commands I am going to run\")\n for cmds in Ciscocommandlist:\n print('-> ' + cmds)\n print(\"\\n\")\n print(\"Below is the list of device I have ----------------------\")\n for device in Ciscodevicelist:\n print(device)\n input2 = input(\"Enter 1 if you want to run the above commands to the saved devices or press 2 if you want edit the commands or device list \\n\")\n if input2 == str(1):\n Mainfunc2_1()\n print(\"Operation Completed\")\n elif input2 == str(2):\n customcmds()\n else:\n print(\"You didnt enter 1 and neither 2\")\n\ndef devicelogin(list, cmdlist):\n try:\n netconnect = netmiko.ConnectHandler(**list)\n except netmiko.ssh_exception.NetmikoTimeoutException:\n print(\"Cant Login to Device {}\".format(list['host']))\n except netmiko.ssh_exception.NetMikoAuthenticationException:\n print(\"Credentials not working for {}\".format(list['host']))\n else:\n filepath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\' + 
\"ScriptOutput\" + '\\\\' + list['host'] + '.txt'\n file = open(filepath, 'a+')\n for cmds in cmdlist:\n output = netconnect.send_command(cmds)\n print('************************', file = file)\n file.write(cmds)\n print(' ', file= file)\n file.write(output)\n print(' ', file= file)\n # print('************************', file = file)\n\n print(\"Script Ended at {}\".format(time.asctime()), file = file)\n print('************************', file = file)\n print('************************', file = file)\n print('\\n'*3, file = file)\n file.close()\n\ndef Mainfunc2_1():\n cisconetmikoobj = [] ## This list will have the parameters in the format we pass to netmiko ConnectHandler\n if len(Ciscodevicelist) > 0:\n for items in Ciscodevicelist:\n cisconetmikoobj.append({'device_type':'cisco_ios' , 'host':items['IP'], 'username':Username, 'password':Pwd})\n ThreadlistCisco = []\n n = 1\n for items in cisconetmikoobj:\n ThreadlistCisco.append(threading.Thread(target=devicelogin, name= 'Thread' + str(n), args = [items, Ciscocommandlist]))\n n = n+ 1\n n = 0\n for elem in ThreadlistCisco:\n if (n+1)%5 == 0:\n elem.start() ## This will make sure if more than 5 devices are there in the device list\n elem.join() ## then the script waits for the 5th device to complete\n ## And will also check if commands have exexuted on the last 5 devices\n no = 1 ## as per below while loop\n while True:\n if ThreadlistCisco[n].is_alive():\n print(\"Still working on {}\".format(Ciscodevicelist[n]['Name']))\n time.sleep(2)\n if no == 15: ## So if we have waited more than 30 sec then exit the function\n sys.exit(\"Exiting the function\")\n no = no + 1\n continue\n if ThreadlistCisco[n-1].is_alive():\n print(\"Still working on {}\".format(Ciscodevicelist[n-1]['Name']))\n time.sleep(2)\n if no == 15: ## So if we have waited more than 30 sec then exit the function\n sys.exit(\"Exiting the function\")\n no = no + 1\n continue\n if ThreadlistCisco[n-2].is_alive():\n print(\"Still working on {}\".format(Ciscodevicelist[n-2]['Name']))\n time.sleep(2)\n if no == 15: ## So if we have waited more than 30 sec then exit the function\n sys.exit(\"Exiting the function\")\n no = no + 1\n continue\n if ThreadlistCisco[n-3].is_alive():\n print(\"Still working on {}\".format(Ciscodevicelist[n-3]['Name']))\n time.sleep(2)\n if no == 15: ## So if we have waited more than 30 sec then exit the function\n sys.exit(\"Exiting the function\")\n no = no + 1\n continue\n if ThreadlistCisco[n-4].is_alive():\n print(\"Still working on {}\".format(Ciscodevicelist[n-4]['Name']))\n time.sleep(2)\n if no == 15: ## So if we have waited more than 30 sec then exit the function\n sys.exit(\"Exiting the function\")\n no = no + 1\n continue\n else:\n break\n continue ### This continue is so that below threads.start() wont come in script for the 5th item\n elem.start() ## as it is already started\n n = n + 1\n\n\n if len(Ciscodevicelist) == 0:\n print(\"There are no Cisco device mentioned in the file to run commands on\")\n\ndef deviceloginCustom(list, cmdlist):\n try:\n netconnect = netmiko.ConnectHandler(**list)\n except netmiko.ssh_exception.NetmikoTimeoutException:\n print(\"Cant Login to Device {}\".format(list['host']))\n except netmiko.ssh_exception.NetMikoAuthenticationException:\n print(\"Credentials not working for {}\".format(list['host']))\n else:\n filepath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\' + \"ScriptOutput\" + '\\\\' + \"Function2\" + '\\\\' + list['host'] + '.txt'\n file = open(filepath, 'w+')\n for cmds in 
cmdlist:\n output = netconnect.send_command(cmds)\n print('************************', file = file)\n file.write(cmds)\n print(' ', file= file)\n file.write(output)\n print(' ', file= file)\n # print('************************', file = file)\n\n print(\"Script Ended at {}\".format(time.asctime()), file = file)\n print('************************', file = file)\n print('************************', file = file)\n print('\\n'*3, file = file)\n file.close()\n\ndef customcmds():\n print(\"Edit the Excel files just opened on your Screen and save it\")\n print(\"\"\"Add the Device Name and Commands in that Excel which you want to run and after that type 'Done' or else type 'Exit'\n \"\"\")\n filepath = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\\\Jsonfiles' + '\\\\' + 'CustomDevice.xlsx'\n os.startfile(filepath)\n time.sleep(2)\n n = 1\n while True:\n input1 = input()\n if input1 == 'Done' or input1 == 'done':\n print(\"Great Job\")\n break\n elif input1 == 'Exit' or input1 == 'exit':\n sys.exit()\n else:\n print(\"You didnt enter Done or exit. 1 more try. Type exit or Done\")\n if n == 2:\n sys.exit()\n else:\n n = n + 1\n subprocess.run('cls', shell = True)\n excel_devicename = pandas.read_excel(filepath, sheet_name='DeviceList', skiprows = 1)\n excel_cmd = pandas.read_excel(filepath, sheet_name='Commands', skiprows = 1)\n devicedict = dict(zip(excel_devicename.Name, excel_devicename.IP))\n cmdlist = ['terminal length 0']\n cmdlist= cmdlist + excel_cmd.Command.to_list()\n\n print(\"So below are the list of commands you want to run\")\n for cmd in cmdlist:\n print(cmd)\n print(\"And below are the list of Devices on which you want to run above command\")\n print(devicedict)\n input1 = input(\"To continue type 'Go' and press enter: \\n\")\n if input1 != 'Go' and input1 != 'go':\n sys.exit(\"You type {} and not 'Go' so quiting the program\".format(input1))\n\n threadlist = []\n n = 1\n for deviceip in devicedict.values():\n netmikoobj = {'device_type':'cisco_ios' , 'host':deviceip, 'username':Username, 'password':Pwd}\n threadlist.append(threading.Thread(target=deviceloginCustom, name= 'Thread' + str(n), args = [netmikoobj, cmdlist]))\n n = n + 1\n for threads in threadlist:\n threads.start()\n","repo_name":"pokhriyalsid/BotX","sub_path":"Function2/func2_1.py","file_name":"func2_1.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20330827008","text":"\"\"\"\nModule comprising python utilities.\n\n@author: Dr. 
Paul Iacomi\n@date: Jan 2021\n\"\"\"\n\n__all__ = [\n \"pairwise\",\n]\n\nfrom itertools import tee\n\n\ndef pairwise(iterable):\n \"\"\"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\"\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n","repo_name":"pauliacomi/homeproc","sub_path":"homeproc/common/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30005836937","text":"\ndef all_about_strings(txt):\n L, FC,LC = len(txt), txt[0],txt[-1] #len, first, second\n #Midle\n if len(txt)%2==1:\n M = txt[len(txt)//2]\n else:\n M= txt[(len(txt)//2)-1:(len(txt)//2)+1] \n return [L,FC,LC,M,second(txt)]\n \ndef second(X):\n N = list(enumerate(X))\n L =[]\n for inx,s in N:\n if s==X[1]:\n L.append(inx)\n if len(L)>=2:\n return '@ index {}'.format(L[-1])\n else:\n return \"not found\"\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"pEozhEet5c8aFJdso_7.py","file_name":"pEozhEet5c8aFJdso_7.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37620008457","text":"\"\"\"Possion Solver in 1D.\"\"\"\n\n\nimport numpy as np\nfrom scipy.signal import savgol_filter\nfrom scipy.constants import elementary_charge, epsilon_0\n\n\nclass POISSON1D(object):\n \"\"\"Create 1D mesh objet.\"\"\"\n\n def __init__(self, width, nx):\n self.width = width # domain width in x direction\n # number of nodes within the boundary/main body\n # total nodes will be (nx+2)\n self.nx = nx\n self.delx = width/(nx+1) # delta_x\n# <------------------------- width -------------------------->\n# x_0=0.0 ---- x_1 ---- x_2 ---- ... ---- x_n ---- x_n+1=width\n# Metal ---------------- Vacuum ------------------------ Metal\n # init x coordinates\n self.x = np.linspace(0.0, self.width, self.nx+2)\n\n def calc_invA(self, idiag=0):\n \"\"\"\n Construct 1d Possion matrix.\n\n 2, -1,\n -1, 2, -1,\n A = [ -1, 2, ... ]\n ...\n -1\n -1, 2\n compute invert A.\n \"\"\"\n self.A = np.zeros((self.nx, self.nx), dtype=np.float)\n self.A[0, 0] = 2.0\n self.A[1, 0] = -1.0\n self.A[-2, -1] = -1.0\n self.A[-1, -1] = 2.0\n for i in range(1, self.nx-1):\n self.A[i-1, i] = -1.0\n self.A[i, i] = 2.0\n self.A[i+1, i] = -1.0\n if idiag == 1:\n print(self.A)\n self.invA = np.linself.Alg.inv(self.A)\n\n def calc_pot(self, charge, bc, ismooth=0):\n \"\"\"\n Solve 1d Poisson's equation, return potential.\n\n Using finite diferences, inverse the matrix\n Poisson's equation in vacuum\n d2/dx2*phi(x) = -rho(x)/eps0\n discretize it with finite differences\n A*P(x)/dx2 = -rho(x)/eps0\n\n 2, -1,\n -1, 2, -1,\n A = [ -1, 2, ... 
]\n ...\n -1\n -1, 2\n\n phi(x) = -invA*rho(x)/eps0*dx2 + ax + b\n a = (phi[-1] - phi[0])/width\n b = phi[0] + (invA*rho)[0]*/eps0*dx2\n $param charge: an array of total net charge density, size = nx\n $param bc=(bc_l,bc_r): a 2-tuple, boundary conditions\n \"\"\"\n self.pot = np.zeros(self.nx+2)\n charge = charge*elementary_charge/epsilon_0\n self.pot[1:-1] = np.matmul(self.invA, charge)*self.delx*self.delx\n self.pot[0], self.pot[-1] = bc[0], bc[-1]\n if ismooth == 1:\n self.pot = savgol_filter(self.pot, 11, 3)\n\n\nif __name__ == '__main__':\n width, nx = 100.0, 99\n","repo_name":"buckees/Poisson-Solver-1D","sub_path":"poisson_solver_1d.py","file_name":"poisson_solver_1d.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19255633920","text":"# 第6章-6 求指定层的元素个数 (40分)\r\n# 输入一个嵌套列表,再输入层数,求该层的数字元素个数。\r\n\r\n# 输入格式:\r\n# 第一行输入列表 第二行输入层数\r\n\r\n# 输出格式:\r\n# 在一行中输出元素个数\r\n\r\n# 输入样例:\r\n# 在这里给出一组输入。例如:\r\n\r\n# [1,2,[3,4,[5,6],7],8]\r\n# 3\r\n# 输出样例:\r\n# 在这里给出相应的输出。例如:\r\n\r\n# 2\r\n\r\ns = input()\r\nn = int(input())\r\n\r\nlevel = 0\r\ncount = 0\r\nfor i in range(len(s)):\r\n # print(s[i])\r\n if s[i] == '[':\r\n level += 1\r\n elif s[i] == ']':\r\n level -= 1\r\n\r\n if level == n and s[i].isdigit():\r\n count += 1\r\nprint(count)\r\n\r\n\r\n\r\n","repo_name":"hengyi111/PTA-Python","sub_path":"6_6.py","file_name":"6_6.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22137893774","text":"import numpy as np\nimport time\nimport math\nv = np.random.rand(100)\n\ntic = time.time()\nu = np.zeros((100,1))\nfor i in range(100):\n for j in range(100):\n u[i] = math.exp(v[i])\ntoc = time.time()\n\nprint(\"Not vectorized:\" + str(1000*(toc-tic)) + \"ms\")\n\n\ntic = time.time()\nu = np.exp(v)\ntoc = time.time()\n\nprint(\"Vectorized:\" + str(1000*(toc-tic)) + \"ms\")\n\n","repo_name":"Qamra/python","sub_path":"Machine-Learning/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20744492522","text":"\nimport numpy as np\nimport sys\nimport _convam_grad\nimport tensorflow as tf\n# from tensorflow.contrib.layers import flatten\n\nnp.set_printoptions(threshold=sys.maxsize)\nconvam_module = tf.load_op_library('/home/jing/AMDNN/convam_gpu.so')\n\nFLOAT = False\n\ndef float_comparison(x, y):\n return np.abs(x.numpy()-y.numpy())/y.numpy()\n\ndef int_comparison(x, y):\n return x.numpy().astype(int)-y.numpy().astype(int)\n\ndef test_convam(_x,_w,_strides=[1, 1, 1, 1]):\n with tf.device('/gpu:0'):\n x = _x\n W = _w\n with tf.GradientTape(persistent=True) as g:\n g.watch(x)\n g.watch(W)\n wx_convam = convam_module.convam(x, W, strides=_strides, padding='VALID')\n wx_conv = tf.nn.conv2d(x, W, strides=_strides, padding='VALID')\n wx_convam_same = convam_module.convam(x, W, strides=_strides, padding='SAME')\n wx_conv_same = tf.nn.conv2d(x, W, strides=_strides, padding='SAME')\n \n grad_filter_am = g.gradient(wx_convam, W)\n grad_input_am = g.gradient(wx_convam,x)\n grad_filter = g.gradient(wx_conv, W)\n grad_input = g.gradient(wx_conv,x)\n grad_filter_am_same = g.gradient(wx_convam_same, W)\n grad_input_am_same = g.gradient(wx_convam_same,x)\n grad_filter_same = g.gradient(wx_conv_same, W)\n grad_input_same = g.gradient(wx_conv_same,x)\n\n #evaluate\n forward_am = 
wx_convam\n forward_conv = wx_conv\n\n backward_filter_am = grad_filter_am\n backward_filter_conv = grad_filter\n\n backward_input_am = grad_input_am\n backward_input_conv = grad_input\n\n forward_am_same = wx_convam_same\n forward_conv_same = wx_convam_same\n\n backward_filter_am_same = grad_filter_am_same\n backward_filter_conv_same = grad_filter_same\n\n backward_input_am_same = grad_input_am_same\n backward_input_conv_same = grad_input_same\n success = True\n comparator = float_comparison if FLOAT else int_comparison\n abs_err_forward_valid = comparator(forward_am,forward_conv)\n abs_err_forward_valid = np.asarray(abs_err_forward_valid)\n max_abs_err_forward_valid = np.max(abs_err_forward_valid)\n err_forward_valid = np.mean(comparator(forward_am,forward_conv)) \n if err_forward_valid > 1e-7:\n print(\"case start err_forward_valid\")\n\n # print(forward_am)\n # print(forward_conv)\n print(err_forward_valid)\n print(max_abs_err_forward_valid)\n print( _x.shape)\n print( _w.shape)\n print(\"VALID\")\n print(\"case end\")\n print()\n success = False\n\n abs_err_forward_same = comparator(forward_am_same,forward_conv_same)\n abs_err_forward_same = np.asarray(abs_err_forward_same)\n max_abs_err_forward_same = np.max(abs_err_forward_same)\n err_forward_same = np.mean(comparator(forward_am_same,forward_conv_same))\n if err_forward_same > 1e-7:\n print(\"case start err_forward_same\")\n # print(forward_am_same)\n # print(forward_conv_same)\n print(err_forward_same)\n print(max_abs_err_forward_same)\n print(_x.shape)\n print(_w.shape)\n print(\"SAME\")\n print(\"case end\")\n print()\n success = False\n\n abs_err_backward_filter_valid = comparator(backward_filter_am, backward_filter_conv)\n abs_err_backward_filter_valid = np.asarray(abs_err_backward_filter_valid)\n max_abs_err_backward_filter_valid = np.max(abs_err_backward_filter_valid)\n err_backward_filter_valid = np.mean(comparator(backward_filter_am, backward_filter_conv))\n if err_backward_filter_valid > 1e-7:\n print(\"case start err_backward_filter_valid\")\n # print(backward_filter_am)\n # print(backward_filter_conv)\n print(err_backward_filter_valid)\n print(max_abs_err_backward_filter_valid)\n print( _x.shape)\n print( _w.shape)\n print(\"VALID FILTER\")\n print(\"case end\")\n print()\n success = False\n abs_err_backward_input_valid = comparator(backward_input_am, backward_input_conv)\n abs_err_backward_input_valid = np.asarray(abs_err_backward_input_valid)\n max_abs_err_backward_input_valid = np.max( abs_err_backward_input_valid)\n err_backward_input_valid = np.mean(comparator(backward_input_am, backward_input_conv))\n if err_backward_input_valid > 1e-7:\n print(\"case start err_backward_input_valid\")\n # print(backward_input_am)\n # print(backward_input_conv)\n print(err_backward_input_valid)\n print(max_abs_err_backward_input_valid)\n print( _x.shape)\n print( _w.shape)\n print(\"VALID INPUT\")\n print(\"case end\")\n print()\n success = False\n abs_err_backward_filter_same = comparator(backward_filter_am_same,backward_filter_conv_same)\n abs_err_backward_filter_same = np.asarray( abs_err_backward_filter_same)\n max_abs_err_backward_filter_same = np.max( abs_err_backward_filter_same)\n err_backward_filter_same = np.mean(comparator(backward_filter_am_same,backward_filter_conv_same))\n if err_backward_filter_same > 1e-7:\n print(\"case start err_backward_filter_same\")\n # print(backward_filter_am_same)\n # print(backward_filter_conv_same)\n print(err_backward_filter_same)\n print(max_abs_err_backward_filter_same)\n print( 
_x.shape)\n print( _w.shape)\n print(\"SAME FILTER\")\n print(\"case end\")\n print()\n success = False\n abs_err_backward_input_same =comparator(backward_input_am_same, backward_input_conv_same)\n abs_err_backward_input_same = np.asarray(abs_err_backward_input_same)\n max_abs_err_backward_input_same = np.max(abs_err_backward_input_same)\n err_backward_input_same = np.mean(comparator(backward_input_am_same, backward_input_conv_same))\n if err_backward_input_same > 1e-7:\n print(\"case start err_backward_input_same\")\n # print(backward_input_am_same)\n # print(backward_input_conv_same)\n print(err_backward_input_same)\n print(max_abs_err_backward_input_same)\n print( _x.shape)\n print( _w.shape)\n print(\"SAME INPUT\")\n print(\"case end\")\n print()\n success = False\n\n\n return success\n\ndef get_random_np(x):\n return tf.convert_to_tensor(np.random.rand(x[0],x[1],x[2],x[3]),dtype=float),tf.convert_to_tensor(np.random.rand(x[4],x[5],x[6],x[7]),dtype=float)\ndef get_random_int_np(x):\n return tf.convert_to_tensor(np.random.randint(-2,2,size=x[0:4]),dtype=float),tf.convert_to_tensor(np.random.randint(-2,2,size=x[4:8]),dtype=float)\nshape_dict = {\n 0:(1,4,4,1,4,4,1,1),\n 1:(1,4,4,1,3,3,1,1),\n 2:(1,4,4,1,2,2,1,1),\n 3:(1,4,4,1,1,1,1,1),\n 4:(1,4,8,1,4,4,1,1),\n 5:(1,4,8,1,3,3,1,1),\n 6:(1,4,8,1,2,2,1,1),\n 7:(1,4,8,1,1,1,1,1),\n 8:(1,4,8,1,4,5,1,1),\n 9:(1,4,8,1,3,4,1,1),\n 10:(1,4,8,1,2,1,1,1),\n 11:(1,4,8,1,1,1,1,1),\n 12:(1,4,8,2,4,5,2,3),\n 13:(1,4,8,2,3,4,2,3),\n 14:(1,4,8,2,2,1,2,3),\n 15:(1,4,8,2,1,1,2,3),\n 12:(1,8,4,3,5,4,3,4),\n 13:(1,8,4,3,3,4,3,4),\n 14:(1,8,4,3,2,1,3,4),\n 15:(1,8,4,3,1,1,3,4),\n 16:(1,20,20,3,5,5,3,4),\n 17:(1,20,20,3,5,4,3,4),\n 18:(1,20,20,3,2,1,3,4),\n 19:(1,20,20,3,1,1,3,4),\n 20:(128,32,32,1,5,5,1,6),\n 21:(128,14,14,6,5,5,6,16),\n 22:(4,4,4,1,2,2,1,2),\n 23:(4,14,14,1,5,5,1,16),\n}\n\n\ntest_passed = True\nfor i in range(0,2):\n for shape in shape_dict.values():\n x,w = get_random_np(shape) if FLOAT else get_random_int_np(shape) \n stride = [1,i+1,i+1,1]\n result = test_convam(_x=x,_w=w,_strides=stride)\n if result == False:\n test_passed = False\n print(\"test with shape x: \"+str(shape[0:4])+\" w: \"+str(shape[4:8]) + \"Stride: \" + str(stride) + \"Passed: \"+ str(result))\n\nexit(0) if test_passed else exit(1)\n","repo_name":"AaronJing/ApproxTrain","sub_path":"test/convam_final_test.py","file_name":"convam_final_test.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"32481309619","text":"import sys\nimport time\nimport os\nimport runpy\nimport os.path\nimport argparse\nimport psspecial\nfrom pyproc import *\nfrom string import Template\n\nfrom org.nmrfx.processor.datasets.vendor import RefInfo\nfrom org.nmrfx.processor.math import Vec\nfrom java.lang import Runtime\nfrom org.python.util import PythonInterpreter;\n\ndef autoPhaseFirstRow(vec, doFirst = False):\n regions = [0.025,0.45]\n KAISER(vector=vec)\n ZF(vector=vec)\n FT(vector=vec)\n REGIONS(regions,vector=vec)\n phases = vec.autoPhase(doFirst, 0, 0, 0, 45.0, 1.0)\n return phases\n\ndef makeScript(args):\n nProc = Runtime.getRuntime().availableProcessors()\n if nProc > 4:\n nProc -= 2\n fileName = args.fidfile\n if not os.path.exists(fileName):\n raise Exception('FID file \"' + fileName + '\" doesn\\'t exist')\n\n datasetName = args.dataset\n\n refInfo = RefInfo()\n\n if os.path.isabs(fileName):\n filePath = fileName\n else:\n filePath = os.path.join(os.getcwd(),fileName)\n\n fidInfo = 
FID(filePath)\n nmrData = fidInfo.fidObj\n np = nmrData.getNPoints()\n vec = Vec(np,True)\n nmrData.readVector(0,vec)\n phases = []\n if args.autoPhase:\n phases.append(autoPhaseFirstRow(vec))\n elif args.phaseArgs1 != \"\":\n ph = args.phaseArgs1\n phases.append([float(ph[0]), float(ph[1])])\n else:\n phases.append([0.0, 0.0])\n if args.phaseArgs2 != \"\":\n ph = args.phaseArgs2\n phases.append([float(ph[0]), float(ph[1])])\n else:\n phases.append([0.0, 0.0])\n if args.phaseArgs3 != \"\":\n ph = args.phaseArgs3\n phases.append([float(ph[0]), float(ph[1])])\n else:\n phases.append([0.0, 0.0])\n if args:\n refInfo.setDirectRef(args.refArg)\n parString = refInfo.getParString(nmrData, nmrData.getNDim(), \"\")\n scriptOps = autoGenScript(fidInfo, args, phases)\n #scriptOps = Template(scriptOps).substitute()\n script = '''\nimport os\nfrom pyproc import *\nprocOpts(nprocess=$nProc)\nFID('$filePath')\nCREATE('$dataset')\n$parString\n$scriptOps\n'''\n\n if args.autoPhase:\n script +='DIM()\\nDPHASE(dim=0)\\n'\n\n script = Template(script).substitute(nProc=nProc, filePath=filePath, dataset=datasetName,parString=parString,scriptOps=scriptOps)\n script += '\\nrun()\\n'\n\n nmrData.close()\n # removes nmrData object from processor so we don't have two when processing is done\n useProcessor()\n return script\n\ndef saveScript(script):\n scriptName = 'process_auto.py'\n scriptFile = os.path.join(os.getcwd(),scriptName)\n\n fOut = open(scriptFile,'w')\n fOut.write(script)\n fOut.close\n\ndef execScript(script):\n interpreter = PythonInterpreter()\n interpreter.exec(script)\n\n\ndef autoGenScript(fidInfo, args=None, phases=None):\n coefDicts = {'hy':'hyper','hy-r':'hyper-r','ea':'echo-antiecho','ea-r':'echo-antiecho-r','ge':'ge','sep':'sep','re':'real'}\n script = ''\n if fidInfo.nd < 2:\n script += 'DIM(1)\\n'\n script += 'EXPD(lb=0.5)\\n'\n script += 'ZF()\\n'\n script += 'FT()\\n'\n script += 'AUTOPHASE(firstOrder=True)\\n'\n else:\n script += psspecial.scriptMods(fidInfo, 0)\n script += 'DIM(1)\\n'\n script += 'TDSS()\\n'\n gotTDComb = False\n if args and args.tdcombArgs2 != \"\":\n tdComb = args.tdcombArgs2\n tdComb = coefDicts[tdComb]\n gotTDComb = True\n script += \"TDCOMB(dim=2 ,coef='\" + tdComb + \"')\\n\"\n if args and args.tdcombArgs3 != \"\":\n tdComb = args.tdcombArgs3\n tdComb = coefDicts[tdComb]\n gotTDComb = True\n script += \"TDCOMB(dim=3 ,coef='\" + tdComb + \"')\\n\"\n if not gotTDComb:\n for iDim in range(2,fidInfo.nd+1):\n if not fidInfo.fidObj.isFrequencyDim(iDim-1):\n continue\n if not fidInfo.isComplex(iDim-1):\n continue\n if fidInfo.mapToDatasetList[iDim-1] == -1:\n continue\n fCoef = fidInfo.getSymbolicCoefs(iDim-1)\n if fCoef != None and fCoef != 'hyper' and fCoef != 'sep':\n script += 'TDCOMB('\n script += \"dim=\"+str(iDim)\n script += \",coef='\"\n script += fCoef\n script += \"')\\n\"\n script += 'SB()\\n'\n script += 'ZF()\\n'\n script += 'FT()\\n'\n if phases and len(phases[0]) == 2:\n ph10 = phases[0][0]\n ph11 = phases[0][1]\n script += 'PHASE(ph0='+str(ph10)+',ph1='+str(ph11)+')\\n'\n else:\n script += 'PHASE(ph0=0.0,ph1=0.0)\\n'\n\n if args and args.extractArgs != \"\":\n extract = args.extractArgs\n script += \"EXTRACT(\" + extract[0] + \",\" + extract[1] + \",mode='region')\\n\"\n\n fCoef = fidInfo.getSymbolicCoefs(1)\n\n if fCoef != None and fCoef == 'sep':\n script += \"COMB(coef='sep')\\n\"\n if fidInfo.nd > 2 and fidInfo.fidObj.getSampleSchedule() != None:\n multiDim = 'DIM(2'\n for mDim in range(2,fidInfo.nd):\n multiDim += ',' + str(mDim+1)\n multiDim += 
')'\n script += multiDim + '\\n'\n script += 'NESTA()\\n'\n for iDim in range(2,fidInfo.nd+1):\n if fidInfo.size[iDim-1] < 2:\n continue\n if fidInfo.mapToDatasetList[iDim-1] == -1:\n continue\n if not fidInfo.fidObj.isFrequencyDim(iDim-1):\n continue\n script += 'DIM('+str(iDim)+')\\n'\n if iDim == 2 and fidInfo.nd == 2 and fidInfo.fidObj.getSampleSchedule() != None:\n script += 'NESTA()\\n'\n script += 'SB(c=0.5)\\n'\n script += 'ZF()\\n'\n script += 'FT('\n negatePairs = fidInfo.negatePairsFT(iDim-1)\n negateImag = fidInfo.negateImagFT(iDim-1)\n if negatePairs:\n script += 'negatePairs=True'\n if negateImag:\n if negatePairs:\n script += ','\n script += 'negateImag=True'\n script += ')\\n'\n fCoef = fidInfo.getSymbolicCoefs(iDim-1)\n if fCoef != None and fCoef == 'sep':\n script += \"MAG()\\n\"\n else:\n if phases and len(phases[iDim-1]) == 2:\n ph10 = phases[iDim-1][0]\n ph11 = phases[iDim-1][1]\n script += 'PHASE(ph0='+str(ph10)+',ph1='+str(ph11)+')\\n'\n if not args:\n script += 'run()'\n return script\n\n","repo_name":"onemoonsci/nmrfxprocessor","sub_path":"src/main/resources/autoscript.py","file_name":"autoscript.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"24071021837","text":"import datetime\nfrom tkinter import *\nimport random\nfrom tkinter import filedialog,messagebox\n\n#creamos esta variable para definir los botones\noperador = ''\n\n#vamos a crear 3 variables con los precios\nprecios_comida = [1.32, 1.65, 2.31, 3.22, 1.22, 1.99, 2.05]\nprecios_bebida = [0.25, 0.99, 1.21, 1.54, 1.08, 1.10, 2.00]\nprecios_postres = [1.54, 1.68, 1.32, 1.97, 2.55, 2.14, 1.94]\n\n\n\n\n\n\ndef click_boton(numero):\n global operador\n operador = operador + numero\n visor_calculadora.delete(0,END)\n visor_calculadora.insert(END, operador)\n\ndef borrar():\n global operador\n operador = ''\n visor_calculadora.delete(0,END)\n\ndef obtener_resultado():\n global operador\n resultado = str(eval(operador))\n visor_calculadora.delete(0, END)\n visor_calculadora.insert(0,resultado)\n operador = ''\n\ndef revisar_check():\n # va a llamar a esta función cada vez que se activa un checkbutton\n x= 0\n for c in cuadros_comida:\n if variable_comida[x].get() == 1:\n cuadros_comida[x].config(state=NORMAL)\n if cuadros_comida[x].get() == '0':\n cuadros_comida[x].delete(0,END)\n cuadros_comida[x].focus()\n else:\n cuadros_comida[x].config(state=DISABLED)\n texto_comida[x].set('0')\n x +=1\n\n x = 0\n for c in cuadros_bebida:\n if variable_bebida[x].get() == 1:\n cuadros_bebida[x].config(state=NORMAL)\n if cuadros_bebida[x].get() == '0':\n cuadros_bebida[x].delete(0,END)\n cuadros_bebida[x].focus()\n else:\n cuadros_bebida[x].config(state=DISABLED)\n texto_bebida[x].set('0')\n x +=1\n\n x = 0\n for c in cuadros_postre:\n if variable_postre[x].get() == 1:\n cuadros_postre[x].config(state=NORMAL)\n if cuadros_postre[x].get() == '0':\n cuadros_postre[x].delete(0,END)\n cuadros_postre[x].focus()\n else:\n cuadros_postre[x].config(state=DISABLED)\n texto_postre[x].set('0')\n x +=1\n\ndef total():\n #cuando pulse tendrá que sumar todo lo que haya\n subtotal_comida =0\n p =0\n for cantidad in texto_comida:\n subtotal_comida= subtotal_comida+(float(cantidad.get())*precios_comida[p])\n p+=1\n\n\n subtotal_bebida = 0\n p = 0\n for cantidad in texto_bebida:\n subtotal_bebida= subtotal_bebida+(float(cantidad.get())*precios_bebida[p])\n p+=1\n\n\n subtotal_postre = 0\n p = 0\n for cantidad in texto_postre:\n 
subtotal_postre= subtotal_postre+(float(cantidad.get())*precios_postres[p])\n p+=1\n\n subtotal = subtotal_comida+subtotal_bebida+subtotal_postre\n impuestos = subtotal*0.07\n total = subtotal+impuestos\n\n var_coste_comida.set(f'{round(subtotal_comida,2)} €')\n var_coste_bebida.set(f'{round(subtotal_bebida, 2)} €')\n var_coste_postre.set(f'{round(subtotal_postre, 2)} €')\n var_Subtotal.set(f'{round(subtotal, 2)} €')\n var_Impuesto.set(f'{round(impuestos, 2)} €')\n var_Total.set(f'{round(total, 2)} €')\n\ndef recibo():\n texto_recibo.delete(1.0, END)\n num_recibo = f'N# - {random.randint(1000,9999)}'\n fecha = datetime.datetime.now()\n fecha_recibo = f'{fecha.day}/{fecha.month}/{fecha.year}--{fecha.hour}:{fecha.minute}'\n texto_recibo.insert(END, f'Datos:\\t{num_recibo} \\t{fecha_recibo}\\n')\n texto_recibo.insert(END,f'*'*49+'\\n')\n texto_recibo.insert(END, 'Items\\t\\tCant.\\tCoste Items \\n')\n texto_recibo.insert(END,f'-'*63+'\\n')\n\n #ahora tenemos que ver que ha consumido, cantidad y precio de cada plato\n #para comidas\n x =0\n for comida in texto_comida:\n if comida.get() !='0':\n texto_recibo.insert(END,f'{lista_comida[x]}\\t\\t{comida.get()}\\t{int(comida.get())*precios_comida[x]} €\\n')\n x +=1\n #para bebidas\n x = 0\n for bebida in texto_bebida:\n if bebida.get() != '0':\n texto_recibo.insert(END,f'{lista_bebida[x]}\\t\\t{bebida.get()}\\t{int(bebida.get()) * precios_bebida[x]} €\\n')\n x += 1\n # para postres\n x = 0\n for postre in texto_postre:\n if postre.get() != '0':\n texto_recibo.insert(END,f'{lista_postres[x]}\\t\\t{postre.get()}'\n f'\\t{round(int(postre.get()) * precios_postres[x],2)} €\\n')\n x += 1\n\n texto_recibo.insert(END, f'-' * 63 + '\\n')\n texto_recibo.insert(END, f'Precio de la Comida:\\t\\t {var_coste_comida.get()}\\n')\n texto_recibo.insert(END, f'Precio de la Bebida:\\t\\t {var_coste_bebida.get()}\\n')\n texto_recibo.insert(END, f'Precio de los Postres:\\t\\t{var_coste_postre.get()}\\n')\n texto_recibo.insert(END, f'-' * 63 + '\\n')\n texto_recibo.insert(END, f'Subtotal:\\t\\t{var_Subtotal.get()}\\n')\n texto_recibo.insert(END, f'Impuestos:\\t\\t {var_Impuesto.get()}\\n')\n texto_recibo.insert(END, f'Total:\\t\\t{var_Total.get()}\\n')\n texto_recibo.insert(END, f'-' * 63 + '\\n')\n texto_recibo.insert(END,'Gracias por su visita\\n')\n texto_recibo.insert(END, '\\n')\n texto_recibo.insert(END, f'*' * 53 + '\\n')\n\n\ndef guardar():\n\n # toma la informacion del recibo y lo mete en una variable\n info_recibo = texto_recibo.get(1.0,END)\n archivo = filedialog.asksaveasfile(mode='w',defaultextension='.txt')\n archivo.write(info_recibo)\n archivo.close()\n messagebox.showinfo('Información','Su recibo ha sido guardado')\n\ndef resetear():\n # texto recibo\n texto_recibo.delete(0.1,END)\n # cantidades de platos\n for texto in texto_comida:\n texto.set('0')\n for texto in texto_bebida:\n texto.set('0')\n for texto in texto_postre:\n texto.set('0')\n # desabilitar checkbox\n for cuadro in cuadros_comida:\n cuadro.config(state=DISABLED)\n for cuadro in cuadros_bebida:\n cuadro.config(state=DISABLED)\n for cuadro in cuadros_postre:\n cuadro.config(state=DISABLED)\n # desabilitar variables( guarda el valor 0 0 1 del checkbuton\n\n for v in variable_comida:\n v.set(0)\n for v in variable_bebida:\n v.set(0)\n for v in variable_postre:\n v.set(0)\n\n # limpiar el cuadro de los subtotales\n\n var_coste_comida.set('')\n var_coste_bebida.set('')\n var_coste_postre.set('')\n var_Subtotal.set('')\n var_Impuesto.set('')\n var_Total.set('')\n\n# vamos a iniciar a 
tkinter\naplicacion = Tk()\n\n# tamaño de la ventana\naplicacion.geometry('1120x540+0+0')\n\n#evitar maximizar\naplicacion.resizable(0,0)\n\n#Titulo de la ventana\naplicacion.title('Restaurante la Maruja - SISTEMA DE FACTURACION')\n\n#COLOR DE FONDO\naplicacion.config(bg='burlywood1')\n\n# Panel Superior\npanel_superior= Frame(aplicacion,bd=1, relief = RIDGE)\npanel_superior.pack(side=TOP)\n\n#Titulo del panel superior\n\netiqueta_titulo = Label(panel_superior, text='Sistema de facturación', fg = 'azure', font=('Dosis', 54),\n bg ='burlywood3', width = 27)\netiqueta_titulo.grid(row=0, column=0)\n\n\n# panel Izquierdo\n\npanel_izquierdo = Frame(aplicacion,bd=1, relief= RIDGE)\npanel_izquierdo.pack(side=LEFT)\n\n#Panel subtotal\npanel_costos = Frame(panel_izquierdo, bd=1, relief=RIDGE, bg='azure4', padx=120)\npanel_costos.pack(side=BOTTOM)\n\n# panel comidas\npanel_comidas = LabelFrame(panel_izquierdo,text=\"Comida\", font=('Dosis',12,'bold'), bd=1,relief=RIDGE, fg='azure4')\npanel_comidas.pack(side=LEFT)\n\n#panel bebidas\n\npanel_bebidas = LabelFrame(panel_izquierdo,text=\"Bebidas\", font=('Dosis',12,'bold'), bd=1,relief=RIDGE, fg='azure4')\npanel_bebidas.pack(side=LEFT)\n\n#panel postres\n\npanel_postres = LabelFrame(panel_izquierdo,text=\"Postres\", font=('Dosis',12,'bold'), bd=1,relief=RIDGE, fg='azure4')\npanel_postres.pack(side=LEFT)\n\n\n# PANEL DERECHA\npanel_derecha = Frame(aplicacion, bd=1, relief= RIDGE)\npanel_derecha.pack(side=RIGHT)\n\n#panel calculadora\n\npanel_calculadora = Frame(panel_derecha,bd=1, relief=RIDGE, bg='burlywood')\npanel_calculadora.pack()\n\n#panel Recibo\n\npanel_recibo = Frame(panel_derecha,bd=1, relief=RIDGE, bg='burlywood')\npanel_recibo.pack()\n\n#panel Botones\n\npanel_botones = Frame(panel_derecha,bd=1, relief=RIDGE, bg='burlywood')\npanel_botones.pack()\n\n# Lista de productos\nlista_comida=['Pollo','Cordero','Salmón','Merluza','Kebap','Durum','Pizza']\nlista_bebida=['Agua Mineral','Cerveza','Refrescos','Zumos','Vinos Blancos','Vinos Tintos','Vinos Rosados']\nlista_postres=['Tartas','Bollerias','Helados','Frutas','Turrones','Mazapanes','Roscón']\n\n# ahora tendremos que cargar todo ello mediante un loop\n# para ello utilizaremos un checkButton\n#Generar variable items comida\nvariable_comida = []\ncuadros_comida = []\ntexto_comida = []\ncontador = 0\nfor comida in lista_comida:\n\n #crear checkbutton\n variable_comida.append('')\n variable_comida[contador]= IntVar()\n comida= Checkbutton(panel_comidas,text=comida.title(), font=('Dosis',16,'bold'), onvalue=1, offvalue= 0,\n variable= variable_comida[contador], command=revisar_check)\n\n comida.grid(row = contador, column = 0, sticky=W)\n\n\n # visualizar los cuadros de entrada\n cuadros_comida.append('')\n texto_comida.append('')\n texto_comida[contador] = StringVar()\n texto_comida[contador].set('0')\n cuadros_comida[contador] = Entry(panel_comidas, font=('Dosis', 18,'bold'), bd=1,width=6, state=DISABLED,\n textvariable = texto_comida[contador])\n cuadros_comida[contador].grid(row= contador, column=1)\n contador += 1\n\n#Generar variable items bebida\nvariable_bebida=[]\ncuadros_bebida = []\ntexto_bebida = []\ncontador = 0\nfor bebida in lista_bebida:\n # crear checkbutton\n variable_bebida.append('')\n variable_bebida[contador]= IntVar()\n bebida= Checkbutton(panel_bebidas,text=bebida.title(), font=('Dosis',16,'bold'), onvalue=1, offvalue= 0,\n variable= variable_bebida[contador], command=revisar_check)\n\n bebida.grid(row = contador, column = 0, sticky=W)\n\n # visualizar los cuadros de entrada\n 
cuadros_bebida.append('')\n texto_bebida.append('')\n texto_bebida[contador] = StringVar()\n texto_bebida[contador].set('0')\n cuadros_bebida[contador] = Entry(panel_bebidas, font=('Dosis', 18, 'bold'), bd=1, width=6, state=DISABLED,\n textvariable=texto_bebida[contador])\n cuadros_bebida[contador].grid(row=contador, column=1)\n contador += 1\n\n\n\n#Generar variable items postres\nvariable_postre=[]\ncuadros_postre = []\ntexto_postre = []\ncontador= 0\nfor postre in lista_postres:\n # crear checkbutton\n variable_postre.append('')\n variable_postre[contador]= IntVar()\n postre= Checkbutton(panel_postres,text=postre.title(), font=('Dosis',16,'bold'), onvalue=1, offvalue= 0,\n variable = variable_postre[contador], command=revisar_check)\n\n postre.grid(row = contador, column = 0, sticky=W)\n\n # visualizar los cuadros de entrada\n cuadros_postre.append('')\n texto_postre.append('')\n texto_postre[contador] = StringVar()\n texto_postre[contador].set('0')\n cuadros_postre[contador] = Entry(panel_postres, font=('Dosis', 18, 'bold'), bd=1, width=6, state=DISABLED,\n textvariable=texto_postre[contador])\n cuadros_postre[contador].grid(row=contador, column=1)\n contador += 1\n\n\n# PANEL DE SUBTOTALES\n#creamos la variables que necesitamos comida\nvar_coste_comida = StringVar()\nvar_coste_bebida = StringVar()\nvar_coste_postre = StringVar()\nvar_Subtotal = StringVar()\nvar_Impuesto = StringVar()\nvar_Total = StringVar()\n\n#etiqueta de Comida\netiqueta_coste_comida = Label(panel_costos, text='Coste Comida', font=('Dosis', 12, 'bold'),\n bg='azure4', fg='white')\netiqueta_coste_comida.grid(row=0, column=0)\n\n#Cuadro de entrada\ntexto_coste_comida = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_coste_comida)\ntexto_coste_comida.grid(row=0, column=1, padx=41)\n\n\n#etiqueta de Bebida\netiqueta_coste_bebida = Label(panel_costos, text='Coste Bebida', font=('Dosis', 12, 'bold'),\n bg='azure4', fg='white')\netiqueta_coste_bebida.grid(row=1, column=0)\n\n#Cuadro de entrada\ntexto_coste_bebida = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_coste_bebida)\ntexto_coste_bebida.grid(row=1, column=1, padx=41)\n\n\n#etiqueta de Postre\netiqueta_coste_postre = Label(panel_costos, text='Coste Postre', font=('Dosis', 12, 'bold'),\n bg='azure4', fg='white')\netiqueta_coste_postre.grid(row=2, column=0)\n\n#Cuadro de entrada\ntexto_coste_postre = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_coste_postre)\ntexto_coste_postre.grid(row=2, column=1, padx=41)\n\n\n\n#etiqueta de Subtotal\netiqueta_Subtotal = Label(panel_costos, text='Subtotal', font=('Dosis', 12, 'bold'),\n bg='azure4', fg='white')\netiqueta_Subtotal.grid(row=0, column=2)\n\n#Cuadro de entrada\ntexto_Subtotal = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_Subtotal)\ntexto_Subtotal.grid(row=0, column=3, padx=41)\n\n\n#etiqueta de Impuestos\netiqueta_Impuesto = Label(panel_costos, text='Impuestos', font=('Dosis', 12, 'bold'),\n bg='azure4', fg='white')\netiqueta_Impuesto.grid(row=1, column=2)\n\n#Cuadro de entrada\ntexto_Impuesto = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_Impuesto)\ntexto_Impuesto.grid(row=1, column=3, padx=41)\n\n\n#etiqueta de Total\netiqueta_Total = Label(panel_costos, text='Total', font=('Dosis', 12, 'bold'),\n bg='azure4', 
fg='white')\netiqueta_Total.grid(row=2, column=2)\n\n#Cuadro de entrada\ntexto_Total = Entry(panel_costos, font=('Dosis',12,'bold'), bd= 1, width= 10, state='readonly',\n textvariable=var_Total)\ntexto_Total.grid(row=2, column=3, padx=41)\n\n\n# botones\nbotones =['total','recibo','guardar','resetear']\nbotones_creado=[]\ncolumnas =0\nfor boton in botones:\n boton = Button(panel_botones, text= boton.title(), font=('Dosis',10,'bold'), fg= 'white', bg='azure4',\n bd=1, width= 9)\n\n #añadimos a la lista creada ahora\n botones_creado.append(boton)\n boton.grid(row=0, column=columnas)\n columnas +=1\n\n#creamos el acceso a la funciones de los botones\nbotones_creado[0].config(command=total)\nbotones_creado[1].config(command=recibo)\nbotones_creado[2].config(command=guardar)\nbotones_creado[3].config(command=resetear)\n\n# recibo\ntexto_recibo = Text(panel_recibo, font=('Dosis',12,'bold'), bd=1,width=40,height=14)\ntexto_recibo.grid(row=0,column=0)\n\n\n# calculadora\n\nvisor_calculadora = Entry(panel_calculadora, font=('Dosis',14,'bold'), width= 32, bd =1)\nvisor_calculadora.grid(row=0, column=0, columnspan=4)\n\n# vamos a hacer la calculadora\n\nbotones_calculadora= ['7','8','9','+','4','5','6','-',\n '1','2','3','x','R','Borrar','0','/']\nbotones_guardados =[]\n\n#creamos una variable\nfila = 1\ncolumna = 0\n\nfor boton in botones_calculadora:\n boton = Button(panel_calculadora, text=boton.title(), font=('Dosis',10,'bold'),\n fg='white',bg='azure4',bd=1,width=10)\n\n botones_guardados.append(boton)\n\n boton.grid(row=fila, column=columna)\n if columna == 3:\n fila += 1\n columna += 1\n if columna ==4:\n columna=0\n\n#FUNCIONALIDAD DE LA CALCULADORA\n\n#empezamos con los botones que se va mostrar en pantalla\nbotones_guardados[0].config(command=lambda: click_boton('7'))\nbotones_guardados[1].config(command=lambda: click_boton('8'))\nbotones_guardados[2].config(command=lambda: click_boton('9'))\nbotones_guardados[3].config(command=lambda: click_boton('+'))\nbotones_guardados[4].config(command=lambda: click_boton('4'))\nbotones_guardados[5].config(command=lambda: click_boton('5'))\nbotones_guardados[6].config(command=lambda: click_boton('6'))\nbotones_guardados[7].config(command=lambda: click_boton('-'))\nbotones_guardados[8].config(command=lambda: click_boton('1'))\nbotones_guardados[9].config(command=lambda: click_boton('2'))\nbotones_guardados[10].config(command=lambda: click_boton('3'))\nbotones_guardados[11].config(command=lambda: click_boton('*'))\nbotones_guardados[12].config(command=obtener_resultado)\nbotones_guardados[13].config(command=borrar)\nbotones_guardados[14].config(command=lambda: click_boton('0'))\nbotones_guardados[15].config(command=lambda: click_boton('/'))\n\n# evitar que la ventana se cierre\naplicacion.mainloop()","repo_name":"Peypa0512/python2","sub_path":"Practico/dia12- Maquina Registradora/Modulo_TKinter.py","file_name":"Modulo_TKinter.py","file_ext":"py","file_size_in_byte":16609,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72460316250","text":"import time\nimport SX1509\nimport IO_Types\n\n\nPIN_VUSB = 1\nPIN_LBO = 2\nPIN_AMP_SD = 3\n\nPIN_RESET_BUTTON = 10\n\nDEVICE_ADDRESS = 0x3E # device address of SX1509\n\n#Set up I2C\n\n#Initialize the expander\nIOExpander = SX1509.SX1509()\nIOExpander.clock(oscDivider = 4)\nIOExpander.debounceTime(32)\n\n#Set up pins\nIOExpander.keypad(rows=3,columns=8)\n#IOExpander.enableInterrupt(PIN_RESET_BUTTON, 
IO_Types.INTERRUPT_STATE_RISING)\n#IOExpander.enableInterrupt(PIN_VUSB, IO_Types.INTERRUPT_STATE_FALLING)\n#IOExpander.enableInterrupt(PIN_LBO, IO_Types.INTERRUPT_STATE_FALLING)\n\nprint('IO Expander Initialized')\n\nwhile 1:\n# InterruptVals = IOExpander.interruptSource()\n# if(InterruptVals & (1< int:\n ans = 0\n while n:\n n &= n-1\n ans += 1\n return ans\n","repo_name":"NearTheSeas/algorithm","sub_path":"python/Offer_15.py","file_name":"Offer_15.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37333432082","text":"from django.shortcuts import render\n\nfrom django.shortcuts import render, redirect\n\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\n\nfrom .models import RepoTable\nfrom django.contrib import admin\nimport requests\nfrom django.http import HttpResponse\nimport pygal\n\ndef homepage(request): \n \n\t \n response = requests.get(\"https://api.github.com/users/Lucky-Yandy/repos\")\n repos = response.json()\n\n for repo in repos:\n repo_name = repo['name']\n repo_star = repo['stargazers_count']\n repo_size = repo['size']\n repo_language = repo['language']\n context = {\n 'repos': repos,\n }\n \n return render(request, 'allrepos.html', context)\n\n \n\n \n \n ############################# \n \n \ndef all_repos(request):\n response = requests.get(\"https://api.github.com/users/Lucky-Yandy/repos\")\n repos = response.json()\n\n for repo in repos:\n repo_name = repo['name']\n repo_star = repo['stargazers_count']\n repo_size = repo['size']\n repo_language = repo['language']\n context = {\n 'repos': repos,\n }\n \n return render(request, 'allrepos.html', context)\n \n \n \n \n \n\ndef repo_size(request):\n response = requests.get(\"https://api.github.com/users/Lucky-Yandy/repos\")\n repos = response.json() \n \n repo_names = []\n repo_sizes = []\n for repo in repos:\n repo_names.append(repo['name'])\n repo_sizes.append(repo['size'])\n \n bar_chart = pygal.Bar(margin=50)\n bar_chart.title = 'Repo Size'\n \n bar_chart.x_labels = repo_names\n bar_chart.add('Size (bytes)', repo_sizes)\n \n chart_svg_as_datauri = bar_chart.render_data_uri()\n \n context = {\n 'chart_svg_as_datauri': chart_svg_as_datauri\n }\n return render(request, 'reposize.html', context)\n \n \n \ndef repo_language(request): \n response = requests.get(\"https://api.github.com/users/Lucky-Yandy/repos\")\n repos = response.json() \n \n repo_languages = []\n \n for repo in repos:\n language = repo['language']\n if language is not None:\n repo_languages.append(language)\n \n \n \n language_counts = {}\n for language in repo_languages:\n if language in language_counts:\n language_counts[language] += 1\n else:\n language_counts[language] = 1\n \n pie_chart = pygal.Pie()\n pie_chart.title = 'Repo Language'\n pie_chart.config.margin = 50\n \n for language, count in language_counts.items():\n pie_chart.add(language, count)\n \n chart_svg_as_datauri = pie_chart.render_data_uri()\n \n context = {\n 'chart_svg_as_datauri': chart_svg_as_datauri\n }\n return render(request, 'repolanguage.html', context)\n\n\n\n\n\n","repo_name":"Lucky-Yandy/django-dashboard","sub_path":"githubstatitics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5959028567","text":"import numpy as np\nimport timeit\n\n\n# C array of cash inputs C[0] being C1\n# r 
fractional interest rate, ex: 2% becomes 0.02\n\nC = 120.0 * np.arange(500,1200) # use 120.0 for massive speed difference\nr = 0.01\n# HW1:P1 Abdullah Irfan Basheer\n#Part 1 explicit loop\ndef explicitLoop( C, r ):\n S = 0\n for i in range(len(C)):\n S += C[i]/((1+r)**(i+1))\n return S\n\n#Part 2 horners scheme\ndef hornersLoop( C, r ):\n n = len(C) #number of payments in cashflow\n S = C[n-1]\n for i in range(n-1):\n S = S/(1+r) + C[(n-1) - (i+1)]\n return S/(1+r) #one more factor since last term is 0 w.r.t Horners\n\n#Part 3 polyval function\ndef evalPolyval( C, r ):\n Cflip = np.append( np.flip(C), [0.0]) # Re-order & append 0 for scheme.\n return np.polyval(Cflip, 1/(1+r))\n\n#Part 4 dot product of vectors\ndef dotProduct( C, r ):\n n = len(C)\n facs = (1/(1+r)) ** ((np.array(range(n)) + 1)) #factors; r**[1, 2, ..., 10]\n return np.dot(C, facs)\n\n#Outputs\n\nOexp = timeit.Timer('explicitLoop( C , r )', 'from __main__ import explicitLoop, C, r')\nOhorner = timeit.Timer('hornersLoop( C , r )', 'from __main__ import hornersLoop, C, r')\nOpoly = timeit.Timer('evalPolyval( C , r )', 'from __main__ import evalPolyval, C, r')\nOdot = timeit.Timer('dotProduct( C , r )', 'from __main__ import dotProduct, C, r')\n\n# printing all results \nprint(\"Explicit Loop Eval: \", explicitLoop( C , r ), \"time = \", Oexp.timeit(1000))\nprint(\"Horner's Loop Eval: \", hornersLoop( C , r ), \"time = \", Ohorner.timeit(1000))\nprint(\"Polyval Eval: \", evalPolyval( C , r ), \"time = \", Opoly.timeit(1000))\nprint(\"Dot Product Eval: \", dotProduct( C , r ), \"time = \", Odot.timeit(1000))","repo_name":"JWizards/stochastic-methods-lab","sub_path":"HW01_2022_09_13/P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18728794726","text":"from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer\nfrom keras.models import Sequential\nfrom tensorflow.keras import layers,applications\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport keras\nimport numpy as np\nimport sklearn\n\nimg_height = 244\nimg_width = 244\n\nbatch_size = 16\n\ndef plot_it(acc,val_acc,loss,val_loss):\n\n plt.figure(figsize=(16, 8))\n plt.subplot(1, 2, 1)\n plt.plot(range(10), len(range(10))*[acc], label='acurácia treino')\n plt.plot(range(10), len(range(10))*[val_acc], label='acurácia validação')\n plt.legend()\n plt.title('Acurácias')\n\n plt.subplot(1, 2, 2)\n plt.plot(range(10), len(range(10))*[loss], label='loss treino')\n plt.plot(range(10), len(range(10))*[val_loss], label='loss validação')\n plt.legend()\n plt.title('Loss')\n\n plt.show()\n pass\n\n\ndef augmentation():\n IMG_SIZE = 255\n batch_size = 20\n\n\ndef augment():\n data_augmentation = Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal_and_vertical\", input_shape=(img_height, img_width, 3)),\n layers.experimental.preprocessing.RandomRotation(0.05)\n ]\n )\n return data_augmentation\n\n\ndef kerasTry():\n data_augmentation = tf.keras.Sequential([\n layers.experimental.preprocessing.RandomFlip(\"horizontal_and_vertical\"),\n layers.experimental.preprocessing.RandomRotation(0.2)])\n\n image = tf.expand_dims(image, 0)\n plt.figure(figsize=(10, 10))\n\n for i in range(9):\n augmented_image = data_augmentation(image)\n ax = plt.subplot(3, 3, i + 1)\n 
plt.imshow(augmented_image[0])\n plt.axis(\"off\")\n pass\n\ndef model_normie():\n augmentation=augment()\n\n model = Sequential ([augmentation,\n layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\n Conv2D(16,3, padding='same',activation='relu'),\n MaxPooling2D(),\n Conv2D(32, 3, padding='same',activation='relu'),\n MaxPooling2D(),\n Conv2D(64, 3, padding='same',activation='relu'),\n #MaxPooling2D(),\n Flatten(),\n Dense(128, activation='relu',input_dim=5,kernel_regularizer='l2',bias_regularizer='l2'),\n Dense(5, activation='softmax')\n ]\n )\n\n model.compile(optimizer='adam', loss=keras.losses.CategoricalCrossentropy(), metrics=['accuracy'])\n\n return model\n\ndef nn_no_transfer(data, labels, nome ):\n #x_train, x_test, y_train, y_test = train_test_split(data, labels, random_state = 42, test_size = 0.20)\n AUTOTUNE = tf.data.AUTOTUNE\n data = data.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n labels = labels.cache().prefetch(buffer_size=AUTOTUNE)\n #y_train = to_categorical(y_train)\n #y_test = to_categorical(y_test)\n\n model = model_normie()\n\n model.summary()\n H = model.fit(data,batch_size=batch_size, epochs=10, validation_data=labels)\n\n acc = H.history['accuracy']\n val_acc = H.history['val_accuracy']\n loss = H.history['loss']\n val_loss = H.history['val_loss']\n\n plot_it(acc,val_acc,loss,val_loss)\n\n #x_test_n = model.predict(x_test)\n y_pred = model.predict(labels)\n y_pred = np.argmax(y_pred, axis=1)\n\n y_true = np.concatenate([y for x, y in labels], axis=0)\n y_true = np.argmax(y_true, axis=1)\n\n print(\"Coletando resultados para \",nome)\n print(sklearn.metrics.classification_report(y_true, y_pred,zero_division=1))\n\n pass\n\ndef model_tl():\n data_augmentation=augment()\n \n preprocess_input = keras.applications.densenet\n\n\n baseModel = MobileNetV2(include_top=False, weights='imagenet')\n baseModel.trainable = False\n\n\n inputs=keras.Input(shape=(img_height, img_width, 3))\n x = data_augmentation(inputs)\n #x = preprocess_input(x)\n x = baseModel(x)\n x = keras.layers.GlobalAveragePooling2D()(x)\n x = keras.layers.Dense(128, activation='relu',kernel_regularizer='l2',bias_regularizer='l2')(x)\n x = keras.layers.Dropout(0.2)(x)\n outputs = keras.layers.Dense(5, activation='softmax',kernel_regularizer='l2')(x)\n\n model = keras.Model(inputs, outputs)\n\n model.compile(optimizer='adam', loss=keras.losses.CategoricalCrossentropy(), metrics=['accuracy'])\n\n return model\n\ndef nn_transfer_learn(train_dataset,validation_dataset,nome):\n AUTOTUNE = tf.data.AUTOTUNE\n train_dataset = train_dataset.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n validation_dataset = validation_dataset.cache().prefetch(buffer_size=AUTOTUNE)\n\n model = model_tl()\n model.summary()\n\n H = model.fit(train_dataset,batch_size=batch_size, epochs=10, validation_data=validation_dataset)\n '''\n acc = H.history['accuracy']\n val_acc = H.history['val_accuracy']\n loss = H.history['loss']\n val_loss = H.history['val_loss']\n\n plot_it(acc,val_acc,loss,val_loss)\n '''\n for layer in model.layers[177:]:\n layer.trainable = True\n\n H = model.fit(train_dataset,batch_size=batch_size, epochs=20, validation_data=validation_dataset)\n '''\n acc += H.history['accuracy']\n val_acc += H.history['val_accuracy']\n loss += H.history['loss']\n val_loss += H.history['val_loss']\n\n plot_it(acc,val_acc,loss,val_loss)\n '''\n y_pred = model.predict(validation_dataset)\n y_pred = np.argmax(y_pred, axis=1)\n\n y_true = np.concatenate([y for x, y in 
validation_dataset], axis=0)\n y_true = np.argmax(y_true, axis=1)\n\n print(\"Coletando resultados para \",nome)\n print(sklearn.metrics.classification_report(y_true, y_pred,zero_division=1))\n\n pass\n\ndef check_gpu():\n print(device_lib.list_local_devices())\n pass","repo_name":"rafaelcicerojoe/Art-Recongtion","sub_path":"neural_net.py","file_name":"neural_net.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7817113699","text":"import datetime\nfrom django.db import models\nfrom django.contrib import admin\nfrom django.forms import CheckboxSelectMultiple\nfrom collation.models import Project\nfrom collation.forms import CollationProjectForm\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n\n form = CollationProjectForm\n formfield_overrides = {\n models.ManyToManyField: {'widget': CheckboxSelectMultiple, 'help_text': 'Select all users who need access.'}\n }\n\n def save_model(self, request, obj, form, change):\n if obj.version_number is None:\n obj.version_number = 1\n else:\n obj.version_number += 1\n if obj.id is not None:\n # then we are editing\n if hasattr(request.user, 'full_name') and request.user.full_name != '':\n obj.last_modified_by = request.user.full_name\n else:\n obj.last_modified_by = request.user.username\n obj.last_modified_time = datetime.datetime.now()\n else:\n # this is being created\n if hasattr(request.user, 'full_name') and request.user.full_name != '':\n obj.created_by = request.user.full_name\n else:\n obj.created_by = request.user.username\n obj.created_time = datetime.datetime.now()\n\n super().save_model(request, obj, form, change)\n\n\nadmin.site.register(Project, ProjectAdmin)\n","repo_name":"Multimedia-Avesta/django_wce_collation","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23214084587","text":"from datetime import datetime\nimport os\nfrom tkinter import MULTIPLE\n\n\nDATA_URL = 'https://dchart-api.vndirect.com.vn/dchart'\n\nALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',\n 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n# ALPHABET = ['A']\nINTERVAL = \"D\"\n\nMULTIPLE_THREAD_CONNECTION = 15\nSLEEP_CRAWL_TIME = 1.5\n\nCSV_PATH = f'{os.getcwd()}/out/csv/'\nJSON_PATH = f'{os.getcwd()}/out/json/'\nSQL_PATH = f'{os.getcwd()}/out/sql/'\nPOSTGRESQl_PATH = f'{os.getcwd()}/out/postgresql/'\n\n\nPOSTGRESQL_INFO = {\n \"host\": \"10.78.28.51\",\n \"database\": \"dbpugna\",\n \"user\": \"gbsofts\",\n \"password\": \"gbsoftsdev@000\"\n}\n\nPOSTGRESQl_TABLE = 'tbstockchart'\n\n\ndef POSTGRESQL_INSERT_QUERY_BUILDER(stockCode, stockType, tradingDate, open, close, high, low, volume):\n user = 'system'\n date = datetime.now()\n return f'''INSERT INTO tbstockchart(symbolname, symboltype, tradingdate, \"open\", \"close\", high, low, volume, createddate, createduser, lastmodifieddate, lastmodifieduser) VALUES('{stockCode}', '{stockType}', '{tradingDate}', {open}, {close}, {high}, {low}, {volume}, '{date}','{user}', '{date}','{user}')'''\n\n\ndef POSTGRESQL_DELETE_YEAR_DATA_QUERY_BUILDER(stockCode, year):\n return f\"delete from tbstockchart where symbolname = '{stockCode}' and tradingdate > '{year-1}-01-01' and tradingdate < 
'{year+1}-01-01'\"\n","repo_name":"quangkhoi1228/tradingview_crawler","sub_path":"configs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12940426008","text":"from setuptools import setup\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nwith open('README.md', encoding='utf-8') as f:\n readme = f.read()\n\nextras_require = {\n 'docs': [\n 'sphinx==4.0.2',\n 'sphinxcontrib_trio==1.1.2',\n 'sphinxcontrib-websupport',\n ],\n}\n\nsetup(\n name='qq.py',\n version='0.2.2',\n description='QQ 频道 API 的 Python Wrapper',\n py_modules=[\"qq\"],\n packages=['qq', \"qq.types\"],\n license='MIT',\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n install_requires=requirements,\n extras_require=extras_require,\n python_requires='>=3.8.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Natural Language :: Chinese (Simplified)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities',\n 'Typing :: Typed',\n ],\n url=\"https://github.com/foxwhite25/qq.py\",\n author='foxwhite25',\n author_email='vct.xie@gmail.com'\n)\n","repo_name":"Shawn102938/qq.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"2472242410","text":"class Solution:\n def longestCommonPrefix(self, strs: list[str]) -> str:\n str1, str2 = min(strs), max(strs)\n i=0\n while i < len(str1):\n if str1[i] != str2[i]:\n str1 = str1[:i]\n i=i+1\n return str1\n \nc=Solution()\nstrs = list(map(str,input().split()))\nprint(c.longestCommonPrefix(strs))\n\n'''\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\n \n\nExample 1:\n\nInput: strs = [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: strs = [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n \n\nConstraints:\n\n1 <= strs.length <= 200\n0 <= strs[i].length <= 200\nstrs[i] consists of only lower-case English letters\n'''","repo_name":"rajeswari98/Python-Codes","sub_path":"LeetCode Problems/Longest Common Prefix.py","file_name":"Longest Common Prefix.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22020631296","text":"\"\"\"Script to preprocess KB into a set of entities and adjacency matrices.\n\nOutput is a text file mapping entity names to their ids (vocab.txt) a relation\nfile mapping relation names to their ids and a\nsparse numpy matrix of shape V x V x R of directed relations.\n\"\"\"\n\nimport sys\nimport io\nimport operator\nimport numpy as np\nimport cPickle as pkl\nfrom scipy.sparse import csr_matrix\n\ndata_path = sys.argv[1]\nout_path = sys.argv[2]\n\ninput_file = data_path + \"/knowledge_source/wiki_entities/wiki_entities_kb.txt\"\noutput_file = out_path + \"/processed_kb.pkl\"\nentity_vocab = out_path + 
\"/entity_vocab.txt\"\nrelation_vocab = out_path + \"/relation_vocab.txt\"\n\nRELATIONS = {\n \"directed_by\": 0,\n \"written_by\": 1,\n \"starred_actors\": 2,\n \"release_year\": 3,\n \"in_language\": 4,\n \"has_genre\": 5,\n \"has_imdb_rating\": 6,\n \"has_imdb_votes\": 7,\n \"has_tags\": 8,\n}\n\ndef read_line(line):\n if line == u\"\\n\":\n return None\n tokens = line.strip().split()\n head = []\n found = False\n for ii, tt in enumerate(tokens[1:]): # ignore leading number\n if tt in RELATIONS:\n relation = tt\n found = True\n break\n head.append(tt)\n if not found: return None\n head = u\" \".join(head)\n tails = []\n ctail = []\n for tt in tokens[ii+2:]:\n if tt.endswith(u\",\"):\n ctail.append(tt[:-1])\n tails.append(u\" \".join(ctail))\n ctail = []\n else:\n ctail.append(tt)\n tails.append(u\" \".join(ctail))\n return [(head, relation, tail) for tail in tails]\n\nif __name__ == \"__main__\":\n entity_map = {}\n edges = {r: [] for r in RELATIONS}\n with io.open(input_file) as f:\n for line in f:\n rels = read_line(line)\n if rels is None: continue\n for rel in rels:\n if rel[0] not in entity_map:\n entity_map[rel[0]] = len(entity_map)\n if rel[2] not in entity_map:\n entity_map[rel[2]] = len(entity_map)\n edges[rel[1]].append((entity_map[rel[0]],\n entity_map[rel[2]]))\n adjacency = {}\n for r in RELATIONS:\n if not edges[r]: continue\n adjacency[r] = csr_matrix((np.ones((len(edges[r]),)), zip(*edges[r])),\n shape=[len(entity_map), len(entity_map)])\n\n pkl.dump([entity_map, adjacency], open(output_file, \"wb\"))\n\n # save entity vocab\n sorted_entities = sorted(entity_map.items(), key=operator.itemgetter(1))\n f = io.open(entity_vocab, \"w\", encoding=\"utf-8\")\n f.write(u\"\\n\".join([item[0] for item in sorted_entities]))\n f.close()\n # save relation vocab\n relations = RELATIONS.keys()\n f = open(relation_vocab, \"w\")\n f.write(\"\\n\".join(relations))\n f.close()\n","repo_name":"haitian-sun/GraftNet","sub_path":"wikimovie_preprocessing/process_kb.py","file_name":"process_kb.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"32"} +{"seq_id":"42993834907","text":"\"\"\" This handles uploaded OS-script files and\n translated JS files \"\"\"\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom os_online.translator.translator import main as translate\nimport os, tarfile, shutil, glob\n\ndef make_random_path():\n \"\"\" this functions creates a random path for storing\n JS file \"\"\"\n return \"%s\" % (User.objects.make_random_password(20))\n\ndef try_translation(infile, resultname):\n \"\"\" this function tries to translate an OS-script \"\"\"\n translate(infile, resultname)\n os.remove(infile)\n\ndef handle_uploaded_file(u_file, dest):\n \"\"\" this function handles uploaded OS-script file \"\"\"\n destfolder = _get_path([dest])\n destfile = _get_path([dest,dest])\n pool = _get_path([dest,'pool'])\n if not os.path.isdir(destfolder):\n os.mkdir(destfolder)\n with open(destfile, 'w+') as destination:\n for chunk in u_file.chunks():\n destination.write(chunk)\n if tarfile.is_tarfile(destfile):\n _check_tar(destfolder, destfile)\n tar = tarfile.open(destfile)\n tar.extractall(destfolder)\n tar.close()\n allfiles = os.listdir(destfolder)\n for filename in allfiles:\n if filename.endswith('.opensesame'):\n old_name = _get_path([dest,filename])\n new_name = destfile + '.opensesame'\n os.rename(old_name, new_name)\n else:\n os.rename(destfile, destfile + 
'.opensesame')\n\ndef _check_tar(target_dir, tar):\n tar = tarfile.open(tar)\n for t_file in tar.getmembers():\n name = t_file.name\n if not os.path.abspath(os.path.join(target_dir, name)).startswith(\\\n target_dir):\n raise UnsafeTarFile\n \ndef _get_path(path_list):\n path = os.path.join(settings.MEDIA_ROOT, reduce(os.path.join,path_list))\n return path\n","repo_name":"sakisbl/OpenSesameOnline","sub_path":"webapp/os_online/experimenter/file_handler.py","file_name":"file_handler.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35803408425","text":"import pytest\nfrom aiconfig.Config import AIConfigRuntime\n\nfrom aiconfig.schema import ConfigMetadata, ModelMetadata, Prompt, PromptMetadata\n\nfrom ..util.mock_parser import MockModelParser\n\n\n@pytest.fixture\ndef ai_config_runtime():\n runtime = AIConfigRuntime.create(\"Untitled AIConfig\")\n return runtime\n\n\ndef test_get_model_settings(ai_config_runtime: AIConfigRuntime):\n \"\"\"\n Test the get_model_settings_for_prompt method of the AIConfig class.\n 3 cases:\n 1. settings is defined as an empty dictionary\n 2. settings is defined under prompt metadata\n 3. settings is defined under config metadata. This is essentially the final default\n 4. settings is defined under config metadata, but model and not settings is defined under prompt metadata. This should default to the config metadata\n \"\"\"\n mock_model_parser = MockModelParser()\n\n prompt = Prompt(\n name=\"Prompt1\",\n input=\"input doesn't matter here\",\n metadata=PromptMetadata(\n model=\"fake model\",\n ),\n )\n ai_config_runtime.add_prompt(prompt.name, prompt)\n\n prompt = ai_config_runtime.prompts[0]\n\n assert mock_model_parser.get_model_settings(prompt, ai_config_runtime) == {}\n\n # settings is defined as {}. Should be returned as {}\n aiconfig = AIConfigRuntime(\n name=\"test\",\n metadata=ConfigMetadata(**{\"models\": {\"fakemodel\": {\"fake_setting\": \"True\"}}}),\n # here is settings = None. This implies that settings were not passed in. Should default to global params\n prompts=[\n Prompt(\n name=\"test\",\n input=\"test\",\n metadata=PromptMetadata(model=ModelMetadata(name=\"test\", settings={})),\n )\n ],\n )\n\n prompt = aiconfig.prompts[0]\n\n assert mock_model_parser.get_model_settings(prompt, aiconfig) == {}\n # settings is defined as None. Should be returned as config level, ie {\"fake_setting\": \"True\"}\n aiconfig = AIConfigRuntime(\n name=\"test\",\n metadata=ConfigMetadata(**{\"models\": {\"fakemodel\": {\"fake_setting\": \"True\"}}}),\n # here is settings = None. This implies that settings were not passed in. 
Should default to global params\n prompts=[\n Prompt(\n name=\"test\",\n input=\"test\",\n metadata=PromptMetadata(\n model=ModelMetadata(name=\"fakemodel\", settings=None)\n ),\n )\n ],\n )\n\n prompt = aiconfig.prompts[0]\n\n assert mock_model_parser.get_model_settings(prompt, aiconfig) == {\n \"fake_setting\": \"True\"\n }\n\n with pytest.raises(IndexError, match=r\"Prompt '.*' not in config\"):\n prompt = Prompt(\n name=\"doesn't exist\",\n input=\"doesn't exist\",\n metadata=PromptMetadata(model=\"doesn't exist\"),\n )\n mock_model_parser.get_model_settings(prompt, aiconfig)\n","repo_name":"lastmile-ai/aiconfig","sub_path":"python/tests/parsers/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"32"} +{"seq_id":"30940095634","text":"from __future__ import division, print_function\n\n# Import Python modules\nimport sys\nimport random\n\n# Import Broadband modules\nimport bband_utils\n\ndef calculate_rvfac(mean_rvfac, range_rvfac, seed):\n \"\"\"\n This function calculates a random rvfac value based on the mean\n and range values, plus a seed to generate a random number\n \"\"\"\n random.seed(seed)\n rvfac = mean_rvfac + range_rvfac * ((random.random() * 2) - 1)\n return rvfac\n\nclass GenslipCfg(object):\n \"\"\"\n Define the configuration parameters for the GP rupture generator\n \"\"\"\n\n def __init__(self, a_srcname=None):\n \"\"\"\n Sets basic class parameters, then parses a_srcname for more information\n \"\"\"\n\n # User defined parms\n self.SLIP_SIGMA = 0.85\n # This is now the default inside genslip-3.3, so don't need to use it\n # self.RAND_RAKE_RANGE = 60\n\n self.RTDEP = 6.5\n self.RTDEP_RANGE = 1.5\n self.MEAN_RVFAC = 0.8\n self.RANGE_RVFAC = 0.05\n self.SHAL_VRUP = 0.6\n\n # Default RISETIME_COEF set for western US simulations,\n # override in velocity model config file. This parameter used\n # to be set to 1.6, but was modified by RWG in November 2013\n # when the Rupture Generator was updated to version 3.3. 
The\n # value was reset to 1.6 for Genslip 5.0.1\n self.RISETIME_COEF = 1.6\n\n # self.EXTRA_RTFAC = 0.0\n self.RISETIME_FAC = 2\n self.RT_SCALEFAC = 1\n self.RT_RAND = 0\n\n # As in genslip-3.3, we are using 'Mliu' stype, which is the default\n # self.STYPE = \"ucsb\"\n\n # Extra parameters in genslip-3.3, updated for genslip-5.0.1\n self.SLIP_WATER_LEVEL = -1\n self.DEEP_RISETIMEDEP = 17.5\n self.DEEP_RISETIMEDEP_RANGE = 2.5\n self.DEEP_RISETIME_FAC = 2.0\n\n # Read SRC FILE\n if a_srcname:\n self.CFGDICT = bband_utils.parse_src_file(a_srcname)\n\nif __name__ == \"__main__\":\n ME = GenslipCfg()\n print(\"Created Test Config Class: %s\" % (sys.argv[0]))\n","repo_name":"UWGeotech/bbpUW","sub_path":"bbp/comps/genslip_cfg.py","file_name":"genslip_cfg.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9097349347","text":"from Classes.playercharacter import *\nwarlock_dict = {}\n\n\nclass Warlock(PlayerCharacter):\n\tdef __init__(self):\n\t\tself.max_slots = [0] * 6\n\t\tself.current_slots = [0] * 6\n\t\tself.mystic_arcanum = False\n\t\tself.eldritch_master = False\n\t\tself.warlock_options = {}\n\t\tsuper().__init__()\n\n\tdef set_max_spell_slots(self, level):\n\t\tif level >= 1:\n\t\t\tself.max_slots[1] = 1\n\t\tif level >= 2:\n\t\t\tself.max_slots[1] = 2\n\t\tif level >= 3:\n\t\t\tself.max_slots[1] = 0\n\t\t\tself.max_slots[2] = 2\n\t\tif level >= 5:\n\t\t\tself.max_slots[2] = 0\n\t\t\tself.max_slots[3] = 2\n\t\tif level >= 7:\n\t\t\tself.max_slots[3] = 0\n\t\t\tself.max_slots[4] = 2\n\t\tif level >= 9:\n\t\t\tself.max_slots[4] = 0\n\t\t\tself.max_slots[5] = 2\n\t\tif level >= 11:\n\t\t\tself.max_slots[5] = 3\n\t\tif level >= 17:\n\t\t\tself.max_slots[5] = 4\n\n\tdef set_current_spell_slots(self):\n\t\tself.current_slots[1] = self.max_slots[1]\n\t\tself.current_slots[2] = self.max_slots[2]\n\t\tself.current_slots[3] = self.max_slots[3]\n\t\tself.current_slots[4] = self.max_slots[4]\n\t\tself.current_slots[5] = self.max_slots[5]\n\n\tdef use_spell(self):\n\t\tspell_level = 0\n\t\tif 1 <= self.get_level() == 2:\n\t\t\tspell_level = 1\n\t\tif 3 <= self.get_level() <= 4:\n\t\t\tspell_level = 2\n\t\tif 5 <= self.get_level() <= 6:\n\t\t\tspell_level = 3\n\t\tif 7 <= self.get_level() <= 8:\n\t\t\tspell_level = 4\n\t\tif 9 <= self.get_level() <= 20:\n\t\t\tspell_level = 5\n\t\tif self.check_spell(spell_level):\n\t\t\tself.current_slots[spell_level] -= 1\n\t\t\tprint(\"Used spell slot\")\n\t\tif not self.check_spell(spell_level):\n\t\t\tprint(\"Not enough slots left\")\n\n\tdef check_spell(self, level):\n\t\tif self.current_slots[level] > 0:\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Warlock out of spell slots until next short/long rest.\")\n\t\t\treturn False\n\n\tdef get_mystic_arcanum(self):\n\t\treturn self.mystic_arcanum\n\n\tdef use_mystic_arcanum(self):\n\t\tif not self.get_mystic_arcanum():\n\t\t\tself.mystic_arcanum = True\n\t\t\tprint(\"Used Mystic Arcanum\")\n\t\telse:\n\t\t\tprint(\"Need to long rest first\")\n\n\tdef reset_mystic_arcanum(self):\n\t\tif self.get_mystic_arcanum():\n\t\t\tprint(\"Reset Mystic Arcanum\")\n\t\t\tself.mystic_arcanum = False\n\t\telse:\n\t\t\tprint(\"Can still use Mystic Arcanum\")\n\n\tdef get_eldritch_master(self):\n\t\treturn self.eldritch_master\n\n\tdef use_eldritch_master(self):\n\t\tif not self.get_eldritch_master():\n\t\t\tself.set_current_spell_slots()\n\t\telse:\n\t\t\tprint(\"Need to long rest before using Eldritch Master again\")\n\n\tdef 
reset_eldritch_master(self):\n\t\tif self.get_eldritch_master():\n\t\t\tprint(\"Reset Eldritch Master\")\n\t\t\tself.eldritch_master = False\n\t\telse:\n\t\t\tprint(\"Can still use Eldritch Master this long rest\")\n\n\tdef create_warlock_options(self):\n\t\tself.warlock_options['0'] = \"[3]: Use Spell Slot\\n\" + \"[4]: Reset Spell Slots\\n\" + \\\n\t\t\t\t\t\"[5]: Use Mystic Arcanum\\n\" + \"[6]: Reset Mystic Arcanum\\n\" + \\\n\t\t\t\t\t\"[7]: Use Eldritch Master\" + \"[8]: Reset Eldritch Master\"\n\n\t\tself.warlock_options['3'] = self.use_spell\n\t\tself.warlock_options['4'] = self.set_current_spell_slots\n\t\tself.warlock_options['5'] = self.use_mystic_arcanum\n\t\tself.warlock_options['6'] = self.reset_mystic_arcanum\n\t\tself.warlock_options['7'] = self.use_eldritch_master\n\t\tself.warlock_options['8'] = self.reset_eldritch_master\n\t\treturn self.warlock_options\n\n\tdef list_options(self):\n\t\tselection = int_checker(self.warlock_options.get(\"0\"))\n\t\tprint(selection)\n\t\tself.warlock_options[\"{}\".format(selection)]()\n\n\tdef set_base_warlock_level(self):\n\t\tself.base_change_level()\n\t\tself.set_max_spell_slots(self.get_level())\n\t\tself.set_current_spell_slots()\n\n\nclass Archfey(Warlock):\n\tdef __init__(self):\n\t\tself.fey_presence = False\n\t\tself.misty_escape = False\n\t\tself.dark_delirium = False\n\t\tself.archfey_options = {}\n\t\tsuper().__init__()\n\n\tdef get_fey_presence(self):\n\t\treturn self.fey_presence\n\n\tdef get_misty_escape(self):\n\t\treturn self.misty_escape\n\n\tdef get_dark_delirium(self):\n\t\treturn self.dark_delirium\n\n\tdef use_fey_presence(self):\n\t\tif not self.get_fey_presence():\n\t\t\tprint(\"Used Fey Presence\")\n\t\t\tself.fey_presence = True\n\t\telse:\n\t\t\tprint(\"Already used Fey Presence this short/long rest\")\n\t\t\treturn\n\n\tdef use_misty_escape(self):\n\t\tif not self.get_misty_escape():\n\t\t\tprint(\"Used Misty Escape\")\n\t\t\tself.misty_escape = True\n\t\telse:\n\t\t\tprint(\"Already used Misty Escape this short/long rest\")\n\t\t\treturn\n\n\tdef use_dark_delirium(self):\n\t\tif not self.get_dark_delirium():\n\t\t\tprint(\"Used Dark Delirium\")\n\t\t\tself.dark_delirium = True\n\t\telse:\n\t\t\tprint(\"Already used Dark Delirium this short/long rest\")\n\n\tdef reset_fey_presence(self):\n\t\tif self.get_fey_presence():\n\t\t\tprint(\"Reset Fey Presence\")\n\t\t\tself.fey_presence = False\n\t\telse:\n\t\t\tprint(\"Can still use Fey Presence\")\n\n\tdef reset_misty_escape(self):\n\t\tif self.get_misty_escape():\n\t\t\tprint(\"Reset Misty Escape\")\n\t\t\tself.misty_escape = False\n\t\telse:\n\t\t\tprint(\"Can still use Misty Escape\")\n\n\tdef reset_dark_delirium(self):\n\t\tif self.get_dark_delirium():\n\t\t\tprint(\"Reset Dark Delirium\")\n\t\t\tself.dark_delirium = False\n\t\telse:\n\t\t\tprint(\"Can still use Dark Delirium\")\n\n\tdef create_archfey_options(self):\n\t\tself.archfey_options['0'] = \"[9]: Use Fey Presence\\n\" + \"[10]: Reset Fey Presence\\n\" + \"[11]: Use Misty Escape\\n\" + \\\n\t\t\t\t\t\"[12]: Reset Misty Escape\\n\" + \"[13]: Use Dark Delirium\\n\" + \"[14]: Reset Dark Delirium\\n\" + \\\n\t\t\t\t\t\"[15]: Change Level\\n\" + \"[16]: Exit\\n\"\n\n\t\tself.archfey_options['9'] = self.use_fey_presence\n\t\tself.archfey_options['10'] = self.reset_fey_presence\n\t\tself.archfey_options['11'] = self.use_misty_escape\n\t\tself.archfey_options['12'] = self.reset_misty_escape\n\t\tself.archfey_options['13'] = self.use_dark_delirium\n\t\tself.archfey_options['14'] = 
self.reset_dark_delirium\n\t\tself.archfey_options['15'] = self.set_base_warlock_level\n\t\tself.archfey_options['16'] = leave\n\t\treturn self.archfey_options\n\n\tdef list_options(self):\n\t\tselection = int_checker(self.archfey_options.get(\"0\"))\n\t\tprint(selection)\n\t\tself.archfey_options[\"{}\".format(selection)]()\n\n\nclass Fiend(Warlock):\n\tdef __init__(self):\n\t\tself.dark_ones_own_luck = False\n\t\tself.hurl_through_hell = False\n\t\tself.fiend_options = {}\n\t\tsuper().__init__()\n\n\tdef get_dark_ones_own_luck(self):\n\t\treturn self.get_dark_ones_own_luck()\n\n\tdef get_hurl_through_hell(self):\n\t\treturn self.hurl_through_hell\n\n\tdef use_dark_ones_own_luck(self):\n\t\tif not self.get_dark_ones_own_luck():\n\t\t\tprint(\"Warlock used Dark One's Own Luck\")\n\t\t\tself.dark_ones_own_luck = True\n\t\telse:\n\t\t\tprint(\"Already used Dark One's Own Luck this rest\")\n\t\t\treturn\n\n\tdef use_hurl_through_hell(self):\n\t\tif not self.get_hurl_through_hell():\n\t\t\tprint(\"Warlock used Hurl Through Hell\")\n\t\t\tself.hurl_through_hell = True\n\t\telse:\n\t\t\tprint(\"Warlock already used Hurl Through Hell this rest\")\n\t\t\treturn\n\n\tdef reset_dark_ones_own_luck(self):\n\t\tif self.get_dark_ones_own_luck():\n\t\t\tprint(\"Reset Dark One's Own Luck\")\n\t\t\tself.dark_ones_own_luck = False\n\t\telse:\n\t\t\tprint(\"Can still use Dark One's Own Luck this rest period\")\n\n\tdef reset_hurl_through_hell(self):\n\t\tif self.get_hurl_through_hell():\n\t\t\tprint(\"Reset Hurl Through Hell\")\n\t\t\tself.hurl_through_hell = False\n\t\telse:\n\t\t\tprint(\"Hurl Through Hell Still Usable this rest period\")\n\n\tdef create_fiend_options(self):\n\t\tself.fiend_options['0'] = \"[9]: Use Dark One's Own Luck\\n\" + \"[10]: Reset Dark One's Own Luck\\n\" + \\\n\t\t\t\t\t\"[11]: Use Hurl Through Hell\\n\" + \"[12]: Reset Hurl Through Hell\\n\" + \\\n\t\t\t\t\t\"[13]: Change Level\\n\" + \"[14]: Exit\\n\"\n\n\t\tself.fiend_options['9'] = self.use_dark_ones_own_luck\n\t\tself.fiend_options['10'] = self.reset_dark_ones_own_luck\n\t\tself.fiend_options['11'] = self.use_hurl_through_hell\n\t\tself.fiend_options['12'] = self.reset_hurl_through_hell\n\t\tself.fiend_options['13'] = self.set_base_warlock_level\n\t\tself.fiend_options['14'] = leave\n\n\tdef list_options(self):\n\t\tselection = int_checker(self.fiend_options.get(\"0\"))\n\t\tprint(selection)\n\t\tself.fiend_options[\"{}\".format(selection)]()\n\n\nclass Old(Warlock):\n\tdef __init__(self):\n\t\tself.entropic_ward = False\n\t\tself.old_options = {}\n\t\tsuper().__init__()\n\n\tdef get_entropic_ward(self):\n\t\treturn self.entropic_ward\n\n\tdef use_entropic_ward(self):\n\t\tif not self.get_entropic_ward():\n\t\t\tprint(\"Warlock used Entropic Ward\")\n\t\t\tself.entropic_ward = True\n\t\telse:\n\t\t\tprint(\"Already used Entropic Ward this rest\")\n\n\tdef reset_entropic_ward(self):\n\t\tif self.get_entropic_ward():\n\t\t\tprint(\"Reset Entropic Ward\")\n\t\t\tself.entropic_ward = False\n\t\telse:\n\t\t\tprint(\"Can still use Entropic Ward this rest period\")\n\n\tdef create_old_options(self):\n\t\tself.old_options['0'] = \"[9]: Use Entropic Ward\\n\" + \"[10]: Reset Entropic Ward\\n\" + \\\n\t\t\t\t\t\"[11]: Change Level\\n\" + \"[12]: Exit\\n\"\n\t\tself.old_options['9'] = self.use_entropic_ward\n\t\tself.old_options['10'] = self.reset_entropic_ward\n\t\tself.old_options['11'] = self.set_base_warlock_level\n\t\tself.old_options['12'] = leave\n\t\treturn self.old_options\n\n\tdef list_options(self):\n\t\tselection = 
int_checker(self.old_options.get(\"0\"))\n\t\tprint(selection)\n\t\tself.old_options[\"{}\".format(selection)]()\n\n\ndef merge_base_warlock_dicts(player):\n\twarlock_opts = player.create_warlock_options()\n\tmerge_dicts(player.create_player_character_options(), warlock_opts)\n\treturn warlock_opts\n\n\ndef create(name, subclass):\n\tplayer = subclass()\n\tplayer.set_name(name)\n\treturn player\n\n\ndef create_warlock(name):\n\tplayer = create(name, Warlock)\n\tplayer.set_base_warlock_level()\n\tmerge_base_warlock_dicts(player)\n\treturn player\n\n\ndef create_archfey_warlock(name):\n\tplayer = create(name, Archfey)\n\tplayer.set_base_warlock_level()\n\tmerge_dicts(merge_base_warlock_dicts(player), player.create_archfey_options())\n\treturn player\n\n\ndef create_fiend_warlock(name):\n\tplayer = create(name, Fiend)\n\tplayer.set_base_warlock_level()\n\tmerge_dicts(merge_base_warlock_dicts(player), player.create_fiend_options())\n\treturn player\n\n\ndef create_old_warlock(name):\n\tplayer = create(name, Old)\n\tplayer.set_base_warlock_level()\n\tmerge_dicts(merge_base_warlock_dicts(player), player.create_old_options())\n\treturn player\n\n\ndef main_warlock_making(name, dictionary):\n\tplayer_subclass = int_checker(\"What is their subclass?\\n[1]: Archfey\\n[2]: Fiend\\n[3]: Old\\n[4]: Other\\n\")\n\tif player_subclass == \"1\":\n\t\tp1 = create_archfey_warlock(name)\n\t\tclass_options = Archfey.list_options\n\telif player_subclass == \"2\":\n\t\tp1 = create_fiend_warlock(name)\n\t\tclass_options = Fiend.list_options\n\telif player_subclass == \"3\":\n\t\tp1 = create_old_warlock(name)\n\t\tclass_options = Old.list_options\n\telse:\n\t\tp1 = create_warlock(name)\n\t\tclass_options = Warlock.list_options\n\n\tdictionary[f'{name}'] = {\"character\": p1, \"subclass\": player_subclass, \"options\": class_options}\n","repo_name":"tbholstein95/dnddmtracker","sub_path":"Classes/subclasses/playerclasses/warlock.py","file_name":"warlock.py","file_ext":"py","file_size_in_byte":10389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4884050680","text":"# flask is a python web framework. it allows us to send and receive user requests\n# with a minimal number of lines of non-web3py code. flask is beyond the scope of\n# this tutorial so the flask code won't be commented. that way we can focus on\n# how we're working with our smart contract\nfrom flask import Flask, request, render_template\n\n# solc is needed to compile our Solidity code\nfrom solc import compile_source\nimport vyper\nfrom vyper import compiler\nfrom web3.auto import w3\n\n# web3 is needed to interact with eth contracts\nfrom web3 import Web3, HTTPProvider\n\n# we'll use ConciseContract to interact with our specific instance of the contract\nfrom web3.contract import ConciseContract\n\n# initialize our flask app\napp = Flask(__name__)\n\n# declare the candidates we're allowing people to vote for.\n# note that each name is in bytes because our contract variable\n# candidateList is type bytes32[]\n\n# open a connection to the local ethereum node\nhttp_provider = HTTPProvider('http://localhost:7545')\nweb3 = Web3(http_provider)\neth_provider = Web3(http_provider).eth\n\n# we'll use one of our default accounts to deploy from. every write to the chain requires a\n# payment of ethereum called \"gas\". if we were running an actual test ethereum node locally,\n# then we'd have to go on the test net and get some free ethereum to play with. 
that is beyond\n# the scope of this tutorial so we're using a mini local node that has unlimited ethereum and\n# the only chain we're using is our own local one\ndefault_account = eth_provider.accounts[0]\nprint('Address of owner: ',default_account)\n# every time we write to the chain it's considered a \"transaction\". every time a transaction\n# is made we need to send with it at a minimum the info of the account that is paying for the gas\ntransaction_details = {\n 'from': default_account,\n}\n\n#Deploy tokens\n\ntoken_to_deploy = open('./vypercoin.vy','r')\ntoken_read = token_to_deploy.read()\ntoken_d = {'token_name':'DARF',\n 'token_symbol':'DRF',\n 'token_decimal':18,\n 'token_initialSupply':1000000}\ntoken_abi = compiler.mk_full_signature(token_read)\ntoken_bytecode = '0x' + compiler.compile(token_read).hex()\ntoken_factory = eth_provider.contract(\n abi=token_abi,\n bytecode=token_bytecode,\n )\ntoken_constructor = token_factory.constructor(token_d['token_name'].encode('utf-8'),token_d['token_symbol'].encode('utf-8'),\\\n token_d['token_decimal'],token_d['token_initialSupply'])\ntoken_transaction_hash = token_constructor.transact(transaction_details)\ntransaction_receipt_token = eth_provider.getTransactionReceipt(token_transaction_hash)\ntoken_address = transaction_receipt_token['contractAddress']\ntoken_instance = eth_provider.contract(\n abi=token_abi,\n address=token_address,\n )\nprint(token_instance)\nprint('Token address:',token_address)\n\nvyper_file = open('./postinvest.v.py','r')\nvyper_text = vyper_file.read()\ncontract_abi = compiler.mk_full_signature(vyper_text)\ncontract_bytecode = '0x' + compiler.compile(vyper_text).hex()\n# create a contract factory. the contract factory contains the information about the\n# contract that we probably will not change later in the deployment script.\ncontract_factory = eth_provider.contract(\n abi=contract_abi,\n bytecode=contract_bytecode,\n)\n# here we pass in a list of smart contract constructor arguments. our contract constructor\n# takes only one argument, a list of candidate names. the contract constructor contains\n# information that we might want to change. below we pass in our list of voting candidates.\n# the factory -> constructor design pattern gives us some flexibility when deploying contracts.\n# if we wanted to deploy two contracts, each with different candidates, we could call the\n# constructor() function twice, each time with different candidates.\n\ncontract_constructor = contract_factory.constructor(500,10,10,10,token_address)\nprint('Contract ABI')\nprint(contract_abi)\nprint('Contract Bytecode')\nprint(contract_bytecode)\n\n# here we deploy the smart contract. the bare minimum info we give about the deployment is which\n# ethereum account is paying the gas to put the contract on the chain. the transact() function\n# returns a transaction hash. this is like the id of the transaction on the chain\n\ntransaction_hash = contract_constructor.transact(transaction_details)\nprint(transaction_hash)\n\n# if we want our frontend to use our deployed contract as it's backend, the frontend\n# needs to know the address where the contract is located. 
we use the id of the transaction\n# to get the full transaction details, then we get the contract address from there\ntransaction_receipt = eth_provider.getTransactionReceipt(transaction_hash)\ncontract_address = transaction_receipt['contractAddress']\nprint('contract address:',contract_address)\ncontract_instance = eth_provider.contract(\n abi=contract_abi,\n address=contract_address,\n # when a contract instance is converted to python, we call the native solidity\n # functions like: contract_instance.call().someFunctionHere()\n # the .call() notation becomes repetitive so we can pass in ConciseContract as our\n # parent class, allowing us to make calls like: contract_instance.someFunctionHere()\n ContractFactoryClass=ConciseContract,\n)\nglobal contract_address_set\ncontract_address_set = contract_address\ndict_values = {}\nprivate_key = ''\n\n@app.route('/setting', methods=['GET','POST'])\ndef setting():\n private_key_get = request.form.get('private_key')\n print(request.form.get('private_key'))\n if private_key_get is not None:\n private_key = private_key_get\n dict_values.update({'private_key':private_key})\n print(private_key)\n else:\n private_key = ''\n\n return render_template('settings.html', private_key=private_key)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n get_info = contract_instance.get_ballance_of_depo()\n print(get_info)\n\n balance = token_instance.functions.balanceOf(default_account).call()\n symbol = token_instance.functions.symbol().call()\n print(balance)\n print(web3.toWei('10000','ether'))\n print(symbol)\n nonce = eth_provider.getTransactionCount(default_account)\n print(nonce)\n print('Ether to wei:',w3.toWei('1','ether'))\n print('Balance of contract:',token_instance.functions.balanceOf(contract_address).call())\n contract_address_from = web3.toChecksumAddress(contract_address)\n own_address = web3.toChecksumAddress(default_account)\n # address to '0x4776e07A2A155410F601e3e0bfBbA7242a35493a' 0x3143ae291f6f04d22affc9f66578eff22f47aef3\n\n txn = token_instance.functions.transfer(contract_address_from,w3.toWei('598.999','ether')).buildTransaction(\n {\n 'chainId': 5777,\n 'gas': 1000000,\n 'gasPrice': w3.toWei('43', 'wei'),\n 'nonce': nonce, })\n print(txn)\n\n if 'private_key' in dict_values.keys():\n privat_key = dict_values['private_key']\n else:\n try:\n private_key_file = open('./private_key','r')\n privat_key = private_key.read()\n except:\n pass\n\n signed_txn = w3.eth.account.signTransaction(txn, private_key=privat_key)\n print(signed_txn)\n\n result = eth_provider.sendRawTransaction(signed_txn.rawTransaction)\n print(result)\n result_txn = w3.toHex(w3.sha3(signed_txn.rawTransaction))\n print(result_txn)\n\n\n return render_template('index.html', owner=default_account,token = token_address,smartAddress = contract_address)\n\n\nif __name__ == '__main__':\n # set debug=True for easy development and experimentation\n # set use_reloader=False. when this is set to True it initializes the flask app twice. usually\n # this isn't a problem, but since we deploy our contract during initialization it ends up getting\n # deployed twice. 
when use_reloader is set to False it deploys only once but reloading is disabled\n app.run(debug=True, use_reloader=False)\n","repo_name":"stepanetssergey/flask_tester","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30891761538","text":"import typing\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom paasng.platform.modules.helpers import SlugbuilderBinder\nfrom paasng.platform.modules.models import AppBuildPack, AppSlugBuilder\n\n\nclass Command(BaseCommand):\n help = \"绑定 slugbuilder 和 buildpack\"\n\n def add_arguments(self, parser):\n parser.add_argument('--image', dest=\"image\", help=\"slugbuilder name\")\n parser.add_argument('--buildpack', dest=\"buildpack_ids\", type=int, help=\"buildpack id\", nargs=\"*\")\n parser.add_argument('--buildpack-name', dest=\"buildpack_names\", help=\"buildpack name\", nargs=\"*\")\n parser.add_argument('--dry-run', dest=\"dry_run\", help=\"dry run\", action=\"store_true\")\n\n def get_slugbuilder(self, image: str) -> AppSlugBuilder:\n \"\"\"根据条件获取一个 slugbuilder 对象\"\"\"\n return AppSlugBuilder.objects.get(name=image)\n\n def get_buildpacks(\n self, buildpack_ids: typing.List[int], buildpack_names: typing.List[str]\n ) -> typing.Iterable[AppBuildPack]:\n \"\"\"根据条件获取 buildpack queryset\"\"\"\n qs = AppBuildPack.objects.all()\n if buildpack_ids:\n qs = qs.filter(pk__in=buildpack_ids)\n if buildpack_names:\n qs = qs.filter(name__in=buildpack_names)\n return qs\n\n @transaction.atomic\n def handle(self, image, buildpack_ids, buildpack_names, dry_run, **kwargs):\n slugbuilder = self.get_slugbuilder(image)\n buildpacks = self.get_buildpacks(buildpack_ids, buildpack_names)\n binder = SlugbuilderBinder(slugbuilder)\n\n for buildpack in [bp for bp in buildpacks if bp.region == slugbuilder.region]:\n print(\n f\"binding buildpack {buildpack.name}[{buildpack.pk}] to slugbuilder \"\n f\"{slugbuilder.name}[{slugbuilder.pk}]\"\n )\n if not dry_run:\n binder.bind_buildpack(buildpack)\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/platform/modules/management/commands/bind_buildpacks.py","file_name":"bind_buildpacks.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"21108487263","text":"from django.core.management.base import BaseCommand\nfrom django_seed import Seed\nimport random\nfrom question.models import Category, Question\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n seeder = Seed.seeder()\n\n seeder.add_entity(Category, 50, {'category_name': lambda x: f'Category {random.randint(0,1000)}'})\n\n seeder.add_entity(\n Question, \n 50, \n {\n 'category': lambda x: Category.objects.order_by('?').first(),\n 'question_text': lambda x: f'Question {random.randint(1000,2000)}',\n }\n )\n seeder.execute()\n","repo_name":"abhishekkushwaha5005/QuestionManagementSystem","sub_path":"question/management/commands/seed_data.py","file_name":"seed_data.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8802347814","text":"# 2019-01-14 pass\n# set a balance, if positive this could be a start station, we try to walk through and see if the sum always >= 0\n\ndef canCompleteCircuit(self, gas, cost):\n \"\"\"\n :type gas: 
List[int]\n :type cost: List[int]\n :rtype: int\n \"\"\"\n if not gas:\n return 0\n balance = list(map(lambda x,y:x-y,gas,cost))\n for i in range(len(gas)):\n init = i\n sum = gas[i]-cost[i]\n while sum >= 0:\n i += 1\n \n if i > len(gas)-1:\n i = 0\n # this must be after i > len(gas)-1 condition to deal with the case list length =1 \n if i == init:\n return init \n sum += gas[i]-cost[i]\n return -1\n\n\n# key optimization: if start from station A cannot reach station B, any station from A and B cannot reach station B as well (suppose station A is valid or positive)\n\ndef canCompleteCircuit(self, gas, cost):\n \"\"\"\n :type gas: List[int]\n :type cost: List[int]\n :rtype: int\n \"\"\"\n start = 0\n total = 0\n tank = 0\n \n for i in range(len(gas)):\n tank = tank + gas[i] - cost[i] \n if tank < 0:\n start = i+1\n total += tank\n tank = 0\n return -1 if total + tank < 0 else start","repo_name":"RioAraki/leetcode2020","sub_path":"leetcode_python/134_GasStation[Done].py","file_name":"134_GasStation[Done].py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15240868099","text":"\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport preprocessing\nimport textvectorizer\nfrom scipy import sparse\n\n\"\"\"\nWeigthed tag vectorizer\n=======================\n\nCreate a TF-IDF vectorizer of each document. Also for each document run the\nweighted tag vectorizer with depth - 1. This way a descriptor for a\npart of the network is created assuming that documents in the neighborghood\nsay something about the document in question.\n\"\"\"\n\ndef vectorize(data, new_doc, local = False):\n data_bows, new_doc_bow, vectorizer = textvectorizer.vectorize(data, new_doc, True)\n descriptors = dict(data_bows)\n\n # create a zero vector\n zero_vector = sparse.csc_matrix((1, len(vectorizer.get_feature_names())))\n depth = 2\n\n # Create descriptors for documents in network\n for i in range(0, depth):\n tmp_descriptors = {}\n\n for key in data.items():\n # Take vectors of all links and add with averaged 0.5 weight\n try:\n links = map(lambda x: descriptors[x], data.data['items'][key]['links'])\n except KeyError as e:\n print('Unexpected error')\n continue\n descriptor = descriptors[key] + zero_vector + 0.5*sum(links)/(len(links) + 1) \n tmp_descriptors[key] = descriptor\n descriptors = tmp_descriptors\n\n if (local):\n return zip(descriptors.keys(), descriptors.values()), new_doc_bow, vectorizer\n \n return(zip(descriptors.keys(), descriptors.values()), new_doc_bow)\n","repo_name":"PerceptumNL/TweedejaarsProject","sub_path":"src/vectorizers/weighted_text_vectorizer.py","file_name":"weighted_text_vectorizer.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12490555704","text":"a=int(input(\"Enter\"))\r\ndef fib(a):\r\n if (a<0):\r\n print(\"not correct\")\r\n elif(a==0):\r\n return 0\r\n elif (a==1):\r\n return 1\r\n else:\r\n return fib(a-1)+fib(a-2)\r\nprint (fib(a))\r\n","repo_name":"Aravindan007/Python","sub_path":"fibnosis.py","file_name":"fibnosis.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21021317640","text":"#!/usr/bin/python3\nFIZZ = \"Fizz\"\nBUZZ = \"Buzz\"\n\n\ndef fizzbuzz():\n for num in range(1, 101):\n if (num % 3 and num % 5):\n print(\"%s%s\" % (FIZZ, BUZZ), end=' ')\n 
elif (num % 3):\n print(\"%s\" % (FIZZ), end=' ')\n elif (num % 5):\n print(\"%s\" % (BUZZ), end=' ')\n else:\n print(\"%d\" % (num), end=' ')\n","repo_name":"isahjohna/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/12-fizzbuzz.py","file_name":"12-fizzbuzz.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28897075610","text":"# # Math Module Here # #\n\n# how well do you know your operators?\n# you can add each variable into the print function below\n# to check you work, like so:\n# print(first_number)\n\n# multiply two factors of 64\nfirst_number = None\n\n# divide first_number by 10\nsecond_number = None\n\n# add second_number to first_number, then subtract 2\nthird_number = None\n\n# now, using parentheses, divide second_number by 2, then multiply it by 20, and finally add 2.4\nfourth_number = None\n\n# think of two different ways to raise 8 to the 2nd power\n# hint, for one of them you have to import a module discussed in class\nsquared_number_one = None\nsquared_number_two = None\n","repo_name":"jforsyth/ENGR265-spring2023","sub_path":"Module 1/3 - Practice Problems/1.3.0 - Operators/Operators.py","file_name":"Operators.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28801002170","text":"from runner import Runner\nfrom common.arguments import get_common_args, get_mixer_args\nfrom dmfb import*\n\nif __name__ == '__main__':\n Psuccess=[]\n Tavg=[]\n args = get_common_args()\n args = get_mixer_args(args)\n # ----一次运行FF\n env = DMFBenv(args.chip_size, args.chip_size, args.drop_num,\n args.block_num, fov=args.fov, stall=args.stall)\n env_info = env.get_env_info()\n args.n_actions = env_info[\"n_actions\"]\n args.n_agents = env_info[\"n_agents\"]\n args.obs_shape = env_info[\"obs_shape\"]\n args.episode_limit = env_info[\"episode_limit\"] # 就是max_step 最大步长约束\n runner = Runner(env, args)\n if not args.evaluate:\n runner.run(args.ith_run)\n else:\n average_episode_rewards, average_episode_steps, average_episode_constraints, success_rate = runner.evaluate()\n Psuccess.append(success_rate)\n Tavg.append(average_episode_steps)\n print('The averege total_rewards of {} is {}'.format(\n args.alg, average_episode_rewards))\n print('The each epoch total_steps is: {}'.format(\n average_episode_steps))\n print('The successful rate is: {}'.format(success_rate))\n env.close()\n # ----\n if args.evaluate:\n np.save('Psuccess_{}_{}'.format(args.chip_size,args.drop_num),Psuccess)\n np.save('Tavg_{}_{}'.format(args.chip_size,args.drop_num),Tavg)\n","repo_name":"lhkwok9/MARL-DMFB-main","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36711719205","text":"import re\n\ndef semantic_analyzer(code):\n variables = {}\n functions = []\n count_lines = 0\n verify_integrity = 0\n count_keys = 0\n\n # Regex\n declarationPattern = re.compile(r\"(int|char)\\s* \\*?\\s*(\\w+)(\\[[0-9]*\\])?;\")\n findVarPattern = r\"\\b([a-zA-Z_][a-zA-Z0-9_]*(?:\\[[0-9]+\\])?)\\b\"\n complexDeclarationPattern = re.compile(r\"(int|char) ((\\w+)(\\[\\w+])?, )+((\\w+)(\\[\\w+])?;)\")\n mathPattern = r\"([a-zA-Z]+\\d*)\\s*=\\s*([a-zA-Z]+\\d*)\\s*([-+*/])\\s*([a-zA-Z]+\\d*)\\s*([-+*/])\\s*([a-zA-Z]+\\d*);\"\n assignmentMathPattern = re.compile(mathPattern)\n 
functionCallPattern = re.compile(r\"(\\w+)\\s*\\((.*?)\\);\")\n functionCreatePattern = re.compile(r\"(\\w+)\\s*\\((.*?)\\)\\s*{\")\n printPattern = re.compile(r'print\\(\"([^\"]+)\"\\s*,\\s*(.*)\\);')\n simplePrintPattern = re.compile(r'print\\(\\\"\\w*\\\"\\);')\n simpleAtribPattern = re.compile(r\"(\\w+)\\s*=\\s*(\\w+);\")\n varToVarPattern = re.compile(r\"([a-zA-Z]+) = ([a-zA-Z]+);\")\n varToNumber = re.compile(r\"([a-zA-Z]+) = ([0-9]+);\")\n varToString = re.compile(r\"([a-zA-Z]+)\\s*=\\s*\\\"\\w*\\\";\")\n keyPatterns = re.compile(r\"}\")\n\n lines = code.splitlines()\n\n for line in lines:\n verify_integrity = -1\n count_lines += 1\n\n if not line:\n continue\n\n declarationMatcher = declarationPattern.search(line)\n if declarationMatcher:\n verify_integrity = 0\n varType = declarationMatcher.group(1)\n varName = declarationMatcher.group(2)\n variables[varName] = varType\n \n complexDeclarationMatcher = complexDeclarationPattern.search(line)\n if complexDeclarationMatcher:\n verify_integrity = 0\n getVariables = re.findall(findVarPattern, complexDeclarationMatcher.group())\n varType = getVariables[0]\n for index, var in enumerate(getVariables):\n if index != 0:\n variables[var] = varType\n \n assignmentMathMatcher = assignmentMathPattern.search(line)\n if assignmentMathMatcher:\n verify_integrity = 0\n varType1 = variables.get(assignmentMathMatcher.group(1))\n varType2 = variables.get(assignmentMathMatcher.group(2))\n varType3 = variables.get(assignmentMathMatcher.group(4))\n varType4 = variables.get(assignmentMathMatcher.group(6))\n if varType1 != \"int\" or varType2 != \"int\":\n print(f\"Erro semântico: variável não declarada. Linha {count_lines}\")\n return -1, count_lines \n if varType3 != \"int\" or varType4 != \"int\":\n print(f\"Erro semântico: variável não declarada. Linha {count_lines}\")\n return -1, count_lines \n \n functionCreateMatcher = functionCreatePattern.search(line)\n if functionCreateMatcher:\n verify_integrity = 0\n count_keys += 1\n try:\n functions.append(functionCreateMatcher.group(1))\n if len(functionCreateMatcher.group()) > 1:\n continue\n params = functionCreateMatcher.group(2).split(\",\")\n for param in params:\n var = param.strip()\n varType = variables.get(var)\n if var[0] == '\"' and var[len(var) - 1] == '\"':\n continue \n if not varType:\n print(f\"Erro semântico: chamada inválida. Linha {count_lines}\")\n return -1, count_lines \n except Exception as e:\n print(f\"Erro semântico: chamada inválida. Linha {count_lines}\")\n return -1, count_lines \n \n verifyKey = keyPatterns.search(line)\n if verifyKey:\n verify_integrity = 0\n count_keys = count_keys - 1\n\n functionCallMatcher = functionCallPattern.search(line)\n if functionCallMatcher:\n verify_integrity = 0\n functionName = functionCallMatcher.group(1)\n \n if functionName == \"print\":\n printMatcher = printPattern.search(line)\n simplePrintMatcher = simplePrintPattern.search(line)\n if not printMatcher and not simplePrintMatcher:\n print(\"entrei aq\")\n print(f\"Erro semântico: chamada inválida da função print. Linha {count_lines}\")\n return -1, count_lines\n \n if printMatcher:\n string = printMatcher.group(1)\n if string.find(\"%\"):\n vars = printMatcher.group(2).split(\",\")\n for var in vars:\n param = var.strip()\n varType = variables.get(param)\n if not varType:\n print(f\"Erro semântico: chamada inválida da função print. 
Linha {count_lines}\")\n print(\"Variável {} não declarada\".format(var))\n return -1, count_lines\n else:\n try:\n functions.index(functionName)\n except Exception as e:\n print(f\"Função inexistente. Linha {count_lines}\")\n return -1, count_lines\n try:\n params = functionCallMatcher.group(2).split(\",\")\n for param in params:\n var = param.strip()\n varType = variables.get(var)\n if var[0] == '\"' and var[len(var) - 1] == '\"':\n continue \n if not varType:\n print(f\"Erro semântico: chamada inválida da função fie. Linha {count_lines}\")\n return -1, count_lines \n except Exception as e:\n print(f\"Erro semântico: chamada inválida da função fie. Linha {count_lines}\")\n return -1, count_lines \n \n simpleAtribMatcher = simpleAtribPattern.search(line)\n if simpleAtribMatcher:\n verify_integrity = 0\n isVarToVar = varToVarPattern.search(line)\n if isVarToVar:\n var1 = isVarToVar.group(1)\n var2 = isVarToVar.group(2)\n varType1 = variables.get(var1)\n varType2 = variables.get(var2)\n if not varType1:\n print(f\"Variável {var1} não declarada. Linha {count_lines}\")\n return -1, count_lines \n if not varType2:\n print(f\"Variável {var2} não declarada. Linha {count_lines}\")\n return -1, count_lines \n continue\n isVarToNumber = varToNumber.search(line)\n if isVarToNumber:\n var = isVarToNumber.group(1)\n varType = variables.get(var)\n if varType != \"int\":\n print(f\"Variável {var} não declarada ou com tipo incorreto. Linha {count_lines}\")\n return -1, count_lines\n continue\n \n isVarToString = varToString.search(line)\n if isVarToString:\n verify_integrity = 0\n var = isVarToString.group(1)\n varType = variables.get(var)\n if varType != \"char\":\n print(f\"Variável {var} não declarada ou com tipo incorreto. Linha {count_lines}\")\n return -1, count_lines \n \n if verify_integrity == -1:\n return verify_integrity, count_lines\n \n if count_keys != 0:\n verify_integrity = -1\n\n return verify_integrity, count_lines\n \ndef readCodeFile():\n file = open('teste.txt', 'r')\n code = file.read()\n file.close()\n return code\n\ncode_test = readCodeFile()\n\nverify, count_lines = semantic_analyzer(code_test)\n\nif verify != -1:\n print(\"Compilado com sucesso!\")\nelse:\n print(f\"Erro, não foi possível compilar. 
Linha {count_lines}\")","repo_name":"lubpolita/SemanticAnalyzer","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39941828242","text":"from pyspark.sql import SparkSession\nimport pyspark.sql.functions as f\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef main():\n spark= SparkSession.builder.appName(\"Titanic\").getOrCreate()\n df= spark.read.format(\"csv\").option(\"inferschema\",\"true\").option(\"header\",\"true\")\\\n .load(\"/Users/teetu/Documents/Master_in_Web_science/Summer_2019/Big_Data/Assignments/TutorialsM/Titanic/titanic.csv\")\n df.printSchema()\n #How many male and female survived\n df_Survived=df.select(\"sex\",\"survived\").groupBy(\"sex\").agg(f.expr(\"count(survived) as survivedNum\"))\\\n .orderBy(\"survivedNum\", ascending=False)\n df_Survived.show()\n # df_Survived.printSchema()\n # df_Survived = df_Survived.toPandas()\n # plt.scatter(df_Survived['sex'], df_Survived['survivedNum'])\n # plt.show()\n # df_Survived.plot(x=\"sex\", y=\"survivedNum\", kind=\"bar\")\n # +------+-----------+\n # | sex | survivedNum |\n # +------+-----------+\n # | female | 233 |\n # | male | 109 |\n # +------+-----------+\n #dead as per sex\n df_dead = df.select(\"sex\", \"survived\").filter(f.expr(\"survived==0\")).groupBy(\"sex\")\\\n .agg(f.expr(\"count(survived) as deadNum\")) \\\n .orderBy(\"deadNum\", ascending=False)\n df_dead.show()\n #How many survived as per pclass'\n df_Survived_Pclass= df.select(\"Pclass\",\"survived\").groupBy(\"Pclass\").agg(f.count(\"survived\").alias(\"survived\"))\\\n .orderBy(\"survived\", ascending=False)\n df_Survived_Pclass.show()\n # +------+--------+\n # | Pclass | survived |\n # +------+--------+\n # | 1 | 136 |\n # | 3 | 119 |\n # | 2 | 87 |\n # +------+--------+\n #age group as per survived\n df_survived_age= df.select(\"Age\",\"survived\").groupBy(\"Age\").agg(f.count(\"survived\").alias(\"survivedNum\"))\\\n .orderBy(\"survivedNum\", ascending= False).na.drop()\n df_survived_age.show()\n # df_survived_age = df_survived_age.toPandas()\n # df_survived_age.plot(x=\"Age\", y=\"survivedNum\", kind=\"bar\")\n\n spark.stop()\nif __name__ ==\"__main__\":\n main()\n","repo_name":"bksaini078/PYSPARK-Machine-learning-on-Titanic-data","sub_path":"TitanicDataframe.py","file_name":"TitanicDataframe.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74895962650","text":"import heapq\nfrom collections import defaultdict\n\n\ndef solution(operations):\n mapping = defaultdict(int)\n min_heap = []\n max_heap = []\n for op in operations:\n if op.startswith(\"I\"):\n num = int(op[2:])\n mapping[num] += 1\n heapq.heappush(min_heap, num)\n heapq.heappush(max_heap, -num)\n elif op.startswith(\"D -1\"):\n while min_heap:\n num = heapq.heappop(min_heap)\n if mapping[num] > 0:\n mapping[num] -= 1\n break\n else:\n while max_heap:\n num = -heapq.heappop(max_heap)\n if mapping[num] > 0:\n mapping[num] -= 1\n break\n\n if not min_heap or not max_heap:\n return [0, 0]\n\n # synchronize the min_heap and max_heap\n while mapping[-max_heap[0]] == 0:\n heapq.heappop(max_heap)\n while mapping[min_heap[0]] == 0:\n heapq.heappop(min_heap)\n\n return [-max_heap[0], 
min_heap[0]]\n","repo_name":"debbs061/algorithm","sub_path":"src/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8643888067","text":"import pandas as pd\nimport numpy as np\nimport ast\n\n# results of parameter validation and then test set of best C parameters with non overlapping data\ndf_param_validation_nonoverlap = pd.read_csv(\"results/non_overlap_param_val.csv\")\ndf_nonoverlap_test_set = pd.read_csv(\"results/experiment_nonoverlap_results.csv\").drop('Unnamed: 0',axis=1)\n\n\n# results of parameter validation and then test set of best C parameters with overlapping data\ndf_param_validation_overlap = pd.read_csv(\"results/overlap_params_results.csv\")\ndf_overlap_test_set = pd.read_csv(\"results/experiment_overlap_results.csv\").drop('Unnamed: 0',axis=1)\n\n\n#results of using only one svm\ndf_single_svm = pd.read_csv(\"results/results_single_svm.csv\").drop('Unnamed: 0',axis=1)\n\n\n\ndef group_by_machines_and_return_max_c(df):\n    means = df.drop('Unnamed: 0',axis=1).groupby('num_svms', as_index=False).mean()\n    max = means['f1'].max()\n    best_results = means.loc[means['f1'] == max]\n\n    print(\"\\n----- Averaged scores grouped by number of machines ------\\n\")\n    print(means)\n    #best num machines\n    num_machines = best_results['num_svms']\n\n    # print(int(num_machines))\n\n    print(\"\\n----- Scores for number of machines with highest average F1 score ------\\n\")\n    # best C param\n    result_best_svms = df.loc[df['num_svms'] == int(num_machines)].drop('Unnamed: 0',axis=1).sort_values('f1', ascending=False)\n\n    print(result_best_svms)\n    best_C_param = result_best_svms['c'][:3]\n    print(\"\\n----- 3 best F1 scores ------\\n\")\n    print(best_C_param)\n    print(\"\\n---------------------------------------------------------------------------\\n\")\n    # returns the best C\n    return result_best_svms['c'][:1].values[0]\n\n\n\ndef eval_test_for_best_c(df, c):\n    ### you can evaluate any cs you want by just setting c to a fixed value\n\n    # c = 1000 #for example\n\n\n    #creates list of best c: 100 -> [100,100,100,100,100] depends on num svms\n    cs = [int(c) for i in range(df.num_svms.min())]\n\n    filter_best_c = []\n    for index, row in df.iterrows():\n        #convert string in df to list of ints\n        r = list(map(int,ast.literal_eval(row.c)))\n        if r == (cs):\n            filter_best_c = row\n    print(filter_best_c)\n\n\ndef combination_evaluation(df):\n    print(df.sort_values('f1', ascending=False))\n\ndef eval_single_svm(df):\n    print(df)\n\n\n\n####### ALL results using the NOT overlapping data buckets ############\n\nprint(\"\\n####### Evaluation of the best parameters using validation set using non overlapping data ############\\n\")\nbest_c_nonoverlap = group_by_machines_and_return_max_c(df_param_validation_nonoverlap)\n\nprint(\"\\n------ get test run results of best validated c of nonoverlapping data ------\\n\")\neval_test_for_best_c(df_nonoverlap_test_set, best_c_nonoverlap)\n\nprint(\"\\n------ get best results of all cs, including the combination of cs of non overlapping data ------\\n\")\ncombination_evaluation(df_nonoverlap_test_set)\n\n\n\n\n####### ALL results using the OVERLAPPING data buckets ############\n\nprint(\"\\n####### Evaluation of the best parameters using validation set using overlapping data ############\\n\")\nbest_c_overlap = group_by_machines_and_return_max_c(df_param_validation_overlap)\n\nprint(\"\\n####### Evaluation of the best parameters using validation set 
using overlapping data ############\\n\")\neval_test_for_best_c(df_overlap_test_set, best_c_overlap)\n\nprint(\"\\n------ get best results of all cs, including the combination of cs of non overlapping data ------\\n\")\ncombination_evaluation(df_overlap_test_set)\n\n########### Results single SVM ###############\n### Attention time in seconds !!! ###\n\nprint(\"\\n####### Evaluation of the single SVM for comparison ############\\n\")\n\neval_single_svm(df_single_svm)","repo_name":"Maddi97/kmlmm_kdd","sub_path":"exp_and_eval.py","file_name":"exp_and_eval.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15018294900","text":"#Maior, Menor, Igual\n\nnum_1 = int(input('Informe um número: '))#informe o primeiro número\nnum_2 = int(input('Informe outro número: '))#informe o segundo número\n\nif num_1 > num_2: #Se o primeiro número for maior que o segundo\n print('O primeiro valor é maior.')\nelif num_2 > num_1: #Se o segundo número for maior que o primeiro\n print('O segundo valor é maior.')\nelse: #Se um não é maior ou menor que o outro então são valores iguais.\n print('Ambos os valores são iguais')","repo_name":"GabrielSilva2y3d/Curso-em-video-python-exercicios","sub_path":"Ex038.py","file_name":"Ex038.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2857558189","text":"\nprint(\"Author : Ali Hasan\")\nfirst_day=int(input(\"\\nEnter First Day : \"))\ntotal_day=int(input(\"\\nEnter Total Day (MAX 31) : \"))\ncount=1\nprint(\"\\nSUN \\tMON \\tTUE \\tWED \\tTHU \\tFRI \\tSAT\")\nfor i in range(0,6):\n for j in range(0,7):\n if count>total_day:\n break\n if i==0:\n if j0:\n if j<7:\n print('%2d\\t'%count,end=\"\")\n count=count+1\n else:\n print(\" \")\n print()","repo_name":"starkhasan/Foundation","sub_path":"Python/Python program/Calendar_Program.py","file_name":"Calendar_Program.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"213960599","text":"\n\"\"\"\nBenchmark all apps (overnight run).\n\"\"\"\n\nimport os\nimport shutil\nimport argparse\nimport time\n\ndef configure_app(appdir, appname, do_rgb, do_gray):\n filename = '../apps/{appdir}/{appname}_config.py'.format(**locals())\n\n s = \"\"\"\ndo_rgb = {do_rgb} # Whether to run RGB test\ndo_gray = {do_gray} # Whether to run gray test\n\"\"\".format(**locals())\n\n with open(filename, 'wt') as f:\n f.write(s)\n\ndef main():\n parser = argparse.ArgumentParser(description='Run benchmarks of example programs.')\n parser.add_argument('--no-tune', dest='tune', action='store_false', help='do not run tuner. 
only find speedups vs comparison baselines')\n parser.add_argument('--print', dest='do_print', action='store_true', help='do not run tuner or find speedups: only print compiler commands')\n parser.add_argument('--safe', dest='safe', action='store_true', help='supply --safe option to compiler')\n parser.add_argument('--check', dest='check', action='store_true', help='only check existence of input files; do not run compiler')\n parser.add_argument('--float32', dest='float32', action='store_true', help='use applications variants that use 32-bit floats')\n parser.add_argument('--args', dest='args', help='extra args to supply to compiler: use --args \"\\-\\-blah\"; escape dashes using \\-')\n parser.add_argument('--no-validate', dest='validate', action='store_false', help='disable profiling and validation of speedups')\n parser.set_defaults(tune=True)\n parser.set_defaults(do_print=False)\n parser.set_defaults(safe=False)\n parser.set_defaults(check=False)\n parser.set_defaults(float32=False)\n parser.set_defaults(validate=True)\n parser.set_defaults(args='')\n args = parser.parse_args()\n args.args = args.args.replace('\\-', '-')\n cmd_options = ''\n if not args.tune:\n cmd_options = ' --no-tune'\n if args.safe:\n cmd_options += ' --safe'\n else:\n if args.validate:\n cmd_options += ' --profile --validate-speedups-after'\n\n if len(args.args):\n cmd_options += ' ' + args.args\n\n def system(s, add_options=True, check=True):\n if args.float32 and check:\n L = s.split()\n filename = L[2]\n filename = filename.replace('.py', '_float32.py')\n L[2] = filename\n s = ' '.join(L)\n if args.check and check:\n filename = s.split()[2]\n if not os.path.exists(filename):\n print('*** File missing: {}'.format(filename))\n else:\n print(' File found: {}'.format(filename))\n return\n if add_options:\n s += cmd_options\n print(s)\n if not args.do_print:\n os.system(s)\n\n outdir = 'out_' + time.strftime('%Y_%m_%d')\n if os.path.exists(outdir):\n outdir0 = outdir\n counter = 2\n while True:\n outdir = outdir0 + '_' + str(counter)\n if not os.path.exists(outdir):\n break\n counter += 1\n\n# if os.path.exists('out'):\n# shutil.rmtree('out')\n\n# system('python compiler.py ../apps/composite/composite_4channel.py --out-dir {}/composite_4channel'.format(outdir))\n\n# system('python compiler.py ../apps/bilateral_grid/bilateral_grid_clean_small.py --out-dir {}/bilateral_grid_clean_small'.format(outdir))\n\n# system('python compiler.py ../apps/interpolate/interpolate_float.py --out-dir {}/interpolate_float'.format(outdir))\n\n system('python compiler.py ../apps/mandelbrot/mandelbrot.py --out-dir {}/mandelbrot'.format(outdir))\n\n system('python compiler.py ../apps/composite/composite_rgb.py --out-dir {}/composite_rgb'.format(outdir))\n\n system('python compiler.py ../apps/composite_gray/composite.py --out-dir {}/composite_gray'.format(outdir))\n\n system('python compiler.py ../apps/blur_one_stage/blur_one_stage_rgb.py --out-dir {}/blur_one_stage_rgb'.format(outdir))\n\n system('python compiler.py ../apps/blur_one_stage_gray/blur_one_stage.py --out-dir {}/blur_one_stage_gray'.format(outdir))\n\n system('python compiler.py ../apps/blur_two_stage/blur_two_stage_rgb.py --out-dir {}/blur_two_stage_rgb'.format(outdir))\n\n system('python compiler.py ../apps/blur_two_stage_gray/blur_two_stage.py --out-dir {}/blur_two_stage_gray'.format(outdir))\n\n system('python compiler.py ../apps/interpolate/interpolate.py --out-dir {}/interpolate'.format(outdir))\n\n system('python compiler.py 
../apps/optical_flow_patchmatch/optical_flow_patchmatch.py --out-dir {}/optical_flow_patchmatch'.format(outdir))\n\n system('python compiler.py ../apps/pacman/pacman.py --out-dir {}/pacman'.format(outdir))\n\n# system('python compiler.py ../apps/raytracer/raytracer.py --out-dir out/raytracer')\n\n# system('python compiler.py ../apps/blur_two_stage/blur_two_stage_4channel.py --out-dir {}/blur_two_stage_4channel'.format(outdir))\n# system('python compiler.py ../apps/blur_one_stage/blur_one_stage_4channel.py --out-dir {}/blur_one_stage_4channel'.format(outdir))\n\n# system('python compiler.py ../apps/harris_corner/harris_corner.py --out-dir {}/harris_corner'.format(outdir))\n system('python compiler.py ../apps/harris_corner_circle/harris_corner_circle.py --out-dir {}/harris_corner_circle'.format(outdir))\n\n system('python compiler.py ../apps/raytracer/raytracer.py --out-dir {}/raytracer'.format(outdir))\n\n system('python compiler.py ../apps/bilateral_grid/bilateral_grid.py --out-dir {}/bilateral_grid'.format(outdir))\n\n system('python compiler.py ../apps/local_laplacian/local_laplacian.py --out-dir {}/local_laplacian'.format(outdir))\n\n system('python compiler.py ../apps/camera_pipe/camera_pipe.py --out-dir {}/camera_pipe'.format(outdir))\n\n# system('python compiler.py --validate {} --out-dir validate_{} --validate-speedups > {}/validate.csv'.format(outdir, outdir, outdir))\n\n if not args.do_print and not args.check:\n \n system('python bench_stats.py {}'.format(outdir), add_options=False, check=False)\n\nif __name__ == '__main__':\n main()\n","repo_name":"uva-graphics/vizgen","sub_path":"proj/compiler/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"38053851660","text":"from influxdb import DataFrameClient\nfrom sqlalchemy import create_engine\n\ndef save_on_off_to_db(influxdb, postgresdb, df_for_export, columns_for_tag):\n\n # for influxdb\n client_test = DataFrameClient(host=influxdb.host, port=influxdb.port, database=influxdb.database)\n client_test.write_points(df_for_export, influxdb.sink_table, \n tag_columns=columns_for_tag, \n batch_size=10000,\n time_precision='ms')\n # for postgresdb\n engine = create_engine(postgresdb.ENGINE)\n df_for_export.to_sql(postgresdb.table_name_sink, con=engine, if_exists='append', index=True)\n","repo_name":"XiaozhanYang/on_off_detection_sprint","sub_path":"functions/batch_processing/save_on_off_to_db.py","file_name":"save_on_off_to_db.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72525118491","text":"import datetime\nimport typing\n\nfrom apistar import http, Settings\nfrom apistar.authentication import Authenticated\n\nfrom .compat import SqlalchemySession, DjangoSession\nfrom .settings import get_settings\n\n\nclass BaseAuthentication():\n\n def get_credentials(self, authorization):\n if authorization is None:\n return None\n\n scheme, token = authorization.split()\n if scheme.title() != 'Token' or not token:\n return None\n\n return token\n\n\nclass SQLAlchemyTokenAuthentication(BaseAuthentication):\n\n def authenticate(self, authorization: http.Header,\n session: SqlalchemySession,\n settings: Settings) -> typing.Union[None, Authenticated]:\n user_settings = get_settings(settings)\n token = self.get_credentials(authorization)\n\n if not token:\n return\n\n TokenModel = user_settings['TOKEN_MODEL']\n 
UserModel = user_settings['USER_MODEL']\n instance = session.query(TokenModel).filter(TokenModel.token == token).first()\n if not instance:\n return\n \n now = datetime.datetime.now()\n difference = datetime.timedelta(days=user_settings['EXPIRY_TIME'])\n if user_settings['IS_EXPIRY_TOKEN'] and instance.created_at < (now - difference):\n return\n\n user = session.query(UserModel).filter(UserModel.id == instance.user_id).first()\n return Authenticated(\n username=getattr(user, user_settings['USERNAME_FIELD']),\n user=user\n )\n\n\nclass DjangoTokenAuthentication(BaseAuthentication):\n\n def authenticate(self, authorization: http.Header,\n session: DjangoSession,\n settings: Settings) -> typing.Union[None, Authenticated]:\n user_settings = get_settings(settings)\n token = self.get_credentials(authorization)\n\n if not token:\n return\n\n TokenModel = getattr(session, user_settings['TOKEN_MODEL'])\n UserModel = getattr(session, user_settings['USER_MODEL'])\n instance = TokenModel.objects.filter(token=token).first()\n if not instance:\n return\n\n now = datetime.datetime.now()\n difference = datetime.timedelta(days=user_settings['EXPIRY_TIME'])\n if user_settings['IS_EXPIRY_TOKEN'] and instance.created_at < (now - difference):\n return\n\n user = UserModel.objects.filter(id=instance.user_id).first()\n return Authenticated(\n username=getattr(user, user_settings['USERNAME_FIELD']),\n user=user\n )\n","repo_name":"bahattincinic/apistar_token_authentication","sub_path":"apistar_token_auth/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26911411754","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import path\nfrom flask_login import LoginManager\n\n# define new db. This is a database object\ndb = SQLAlchemy()\nDB_NAME= \"database.db\"\n\n# create basic app\ndef create_app():\n app = Flask(__name__)\n #encrypt session data\n app.config['SECRET_KEY'] = 'kefnkwelk ewklfnefk'\n # sql alchemy db is located below in db_name. Store db is flask folder\n app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'\n \n #initialize db\n db.init_app(app)\n\n\n\n # import blueprints\n from .views import views\n from. auth import auth\n\n # doing this to access routes\n app.register_blueprint(views, url_prefix='/')\n app.register_blueprint(auth, url_prefix='/')\n\n # defines class User and Notes to create db. 
Mind the relative import strategy\n from .models import User, Note\n\n # initialize function\n create_database(app)\n\n login_manager = LoginManager()\n login_manager.login_view = 'auth.login'\n login_manager.init_app(app)\n\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id))\n \n return app\n\ndef create_database(app):\n if not path.exists('flask/' + DB_NAME):\n db.create_all(app=app)\n print('Database created!')\n\n\n\n","repo_name":"waqar-habib/flask","sub_path":"auth_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74397306012","text":"from django.shortcuts import render\nfrom generator.models import Invoice\nfrom api.serializers import InvoiceSerializer\nfrom django.http import HttpResponseForbidden, Http404\nfrom django.db.models import Max\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import APIException, ParseError\nfrom rest_framework import status, permissions, serializers\n\nfrom pprint import pprint\n\nclass Invoice_generator(APIView):\n\t\"\"\" Create a new invoice for the invoice generator \"\"\"\n\n\tdef get(self, request, format=None):\n\n\t\t#\n\t\t# Make sure ?id=n is the only valid query\n\t\t#\n\n\t\tfor key in request.GET.keys():\n\t\t\tif key != \"id\":\n\t\t\t\treturn ParseError(\"Error: I only accept 'id' as keyword\")\n\n\t\t#\n\t\t# parse the query\n\t\t#\n\n\t\ttry:\n\t\t\tids_str = request.GET.get('id')\n\t\t\tids = ids_str.split(',')\n\t\texcept:\n\t\t\traise ParseError('Could not parse id')\n\n\t\ttry:\n\t\t\tfor i in range(0, len(ids)):\n\t\t\t\tids[i] = int(ids[i])\n\t\texcept:\n\t\t\traise ParseError('Could not convert id numbers to integers.')\n\n\t\t#\n\t\t# Serialise\n\t\t# \n\n\t\tinvoice_objs = Invoice.objects.filter(pk__in=ids)\n\t\tserializer = InvoiceSerializer(invoice_objs, many=True)\n\n\t\treturn Response(serializer.data)\n\n\tdef post(self, request, format=None):\n\n\t\tserializer = InvoiceSerializer(data=request.data)\n\n\t\t#\n\t\t# Make sure the POST doesn't do any naughty stuff. 
Having more than five items and messing with 'closed' and 'success' is not allowed\n\t\t#\n\t\t\n\t\tif (len(serializer.initial_data['items']) > 5):\n\t\t\traise ParseError(\"Error: 5 items is the maximum\")\n\n\t\tif 'closed' in serializer.initial_data or 'success' in serializer.initial_data:\n\t\t\traise ParseError(\"Error: JSON object contains an invalid field\")\n\n\t\t#\n\t\t# Check for validity and save to the database\n\t\t#\n\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\t\n\t\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED)\n \t\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n","repo_name":"fizk/invoice_generator","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44175223819","text":"from itertools import islice\n\n__author__ = 'Tiger'\n\n\ndef gen_primes():\n sieve = {}\n p = 2\n\n while p < 2000000:\n if p not in sieve:\n yield p\n sieve[p * p] = [p]\n else:\n for composite in sieve[p]:\n sieve.setdefault(composite + p, []).append(composite)\n del sieve[p]\n p += 1\n\nresult = sum(gen_primes())\nprint(result)","repo_name":"chesswiz16/ProjectEuler","sub_path":"problem_10.py","file_name":"problem_10.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9979491897","text":"import aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\nimport sqlite3\nimport re\nimport json\n# Function to sanitize column names\ndef sanitize_column_name(name):\n # Remove any non-alphanumeric characters\n sanitized_name = re.sub(r'\\W+', '', name)\n # Add a prefix if the name starts with a number\n if sanitized_name[0].isdigit():\n sanitized_name = 'col_' + sanitized_name\n return sanitized_name\n\nasync def scrape_course_codes():\n url = 'https://catalogue.uottawa.ca/en/courses/'\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.text()\n\n course_codes = []\n soup = BeautifulSoup(data, 'html.parser')\n li_elements = soup.select('.az_sitemap li:not(.azMenu li)')\n\n for element in li_elements:\n code = element.get_text().strip()\n course_codes.append(code)\n\n return course_codes\n\nasync def scrape_course_names(courseCode):\n url = 'https://catalogue.uottawa.ca/en/courses/' + courseCode + \"/\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.text()\n\n course_names = []\n soup = BeautifulSoup(data, 'html.parser')\n li_elements = soup.select('.courseblock')\n # print(len(li_elements))\n sc_sccoursedescs = soup.find(class_='page_content')\n faculty = \"\"\n if(sc_sccoursedescs ):\n previous_sibling = sc_sccoursedescs.find('p')\n # print( previous_sibling.get_text() )\n faculty = previous_sibling.get_text().strip()\n\n for element in li_elements:\n course = ['coursename', 'coursedesc','faculty','coursedetails','prerequisites'] # Create a new list for each course\n title_element = element.select_one('.courseblocktitle')\n course_desc = element.select_one('.courseblockdesc')\n course_details = element.select_one('.courseblockextra')\n course_prerequisites = element.find(class_='courseblockextra highlight noindent')\n name = title_element.get_text().strip().replace(\"\\xa0\", \" \")\n\n if course_desc is None:\n course[1] = \"There's no description for this 
course\"\n else:\n description_text = course_desc.get_text().strip()\n course[1] = description_text\n if(course_details is None):\n course[3] = \"\"\n else:\n course_details_text = course_details.get_text().strip()\n course[3] = course_details_text\n if(course_prerequisites is None):\n course[4] = \"This course has no prerequisites\"\n else:\n prerequisites_text = course_prerequisites.get_text().strip()\n course[4] = prerequisites_text\n # print(prerequisites_text)\n\n course[0] = name\n course[2] = faculty\n course_names.append(course)\n\n return course_names\n\n\n\n\nasync def main():\n try:\n connection = sqlite3.connect('courses_database.db')\n cursor = connection.cursor()\n\n course_codes = await scrape_course_codes()\n cursor.execute('''DROP TABLE courseInformation''')\n cursor.execute('''CREATE TABLE courseInformation(faculty, course_code, course_desc, course_details, prerequisites)''')\n #course_name course_code\n\n\n\n for course in course_codes:\n # print(course)\n courseSliced = course[-4:-1].lower()\n course_names = await scrape_course_names(courseSliced)\n # print(course_names[0])\n for courseName in course_names:\n i = 0\n # print(courseName)\n cursor.execute(''' INSERT INTO courseInformation VALUES (?, ?, ?, ?, ?)''', (courseName[2], courseName[0], courseName[1], courseName[3], courseName[4]))\n # Insert course names into the corresponding columns\n \n data = cursor.execute(''' SELECT * FROM courseInformation''').fetchall()\n print(json.dumps(data))\n # print('he')\n connection.commit()\n connection.close()\n except Exception as error:\n print('An error occurred during web scraping:', error)\n\n\n# Run the event loop\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"null-machine/CSI3140-P1","sub_path":"uO-ClassHub-ui/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37512873832","text":"from environment import Maze, State\n\nfrom random import choice, random\n\n\nclass Policy:\n \"\"\"\n Defaults to choosing a random action\n \"\"\"\n def __init__(self):\n self.action_table = {} # rename to Q-function?\n\n def select_action(self, state: State) -> Maze.Action:\n return choice(list(Maze.Action))\n\n\nclass OptimalPolicy(Policy):\n def __init__(self):\n super().__init__()\n\n L, R, U, D = list(Maze.Action)\n self.action_table = {\n (0, 0): R, (1, 0): R, (2, 0): R, (3, 0): R,\n (0, 1): R, (1, 1): U, (2, 1): U, (3, 1): U,\n (0, 2): R, (1, 2): U, (2, 2): L, (3, 2): L,\n (0, 3): R, (1, 3): U, (2, 3): U, (3, 3): L\n }\n\n def select_action(self, state: State) -> Maze.Action:\n return self.action_table[state.pos]\n\n\nclass QPolicy(Policy):\n def __init__(self, Q):\n super().__init__()\n\n self.Q = Q\n\n def select_action(self, state: State, epsilon: float = None) -> Maze.Action:\n \"\"\"\n Return the action with the maximum value given a state.\n If an epsilon is given, then there's a random chance to choose another action (the chance being the epsilon)\n \"\"\"\n max_a = max(self.Q[state].values())\n max_action = choice([a for a in list(Maze.Action) if self.Q[state][a] == max_a])\n\n if epsilon is not None:\n r = random()\n\n if r < epsilon:\n return choice([action for action in list(Maze.Action) if action != max_action])\n\n return max_action\n\n\nclass DoubleQPolicy(Policy):\n def __init__(self, Q1, Q2):\n super().__init__()\n\n self.Q1 = Q1\n self.Q2 = Q2\n\n @property\n def 
Q(self):\n return {s: {a: self.Q1[s][a] + self.Q2[s][a] for a in Maze.Action} for s in self.Q1}\n\n def select_action(self, state: State, epsilon: float = None, q=None) -> Maze.Action:\n \"\"\"\n Return the action with the maximum value given a state.\n If an epsilon is given, then there's a random chance to choose another action (the chance being the epsilon)\n \"\"\"\n if q == 1:\n Q = self.Q1\n elif q == 2:\n Q = self.Q2\n else:\n Q = self.Q\n\n max_a = max(Q[state].values())\n max_action = choice([a for a in list(Maze.Action) if Q[state][a] == max_a])\n\n if epsilon is not None:\n r = random()\n\n if r < epsilon:\n return choice([action for action in list(Maze.Action) if action != max_action])\n\n return max_action\n","repo_name":"JReesW/Autonomous-Systems","sub_path":"AS-2.2/environment/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1500934516","text":"# pybids has to be greater than 0.5\nfrom .interfaces import BIDSDataGrabberPatch\nfrom nipype.pipeline import engine as pe\nfrom argparse import ArgumentParser\nfrom nipype.interfaces import utility as niu\nimport os\n\n\ndef main():\n opts = get_parser().parse_args()\n\n # define and create the output directory\n outdir = os.path.join(\n os.path.dirname(\n os.path.abspath(opts.deriv_pipeline)), 'atlasCorrelations')\n os.makedirs(outdir, exist_ok=True)\n\n # define and create the work directory\n workdir = os.path.join(\n os.path.dirname(\n os.path.abspath(opts.deriv_pipeline)), 'work')\n os.makedirs(workdir, exist_ok=True)\n\n if opts.analysis_level == 'participant':\n # initialize participant workflow\n participant_wf = pe.Workflow(name='participant_wf', base_dir=workdir)\n # initialize connectivity workflow\n connectivity_wf = init_connectivity_wf(workdir, outdir, opts.hp, opts.lp, os.path.abspath(opts.atlas_img),\n os.path.abspath(opts.atlas_lut), opts.confounds)\n\n imgs_criteria = {\n 'imgs':\n {\n 'space': 'MNI152NLin2009cAsym',\n 'modality': 'func',\n 'type': 'preproc'\n }\n }\n\n # add in optional search criteria\n if opts.session:\n imgs_criteria['matrices']['session'] = opts.session\n if opts.task:\n imgs_criteria['matrices']['task'] = opts.task\n if opts.run:\n imgs_criteria['matrices']['run'] = opts.run\n if opts.variant:\n imgs_criteria['matrices']['variant'] = opts.variant\n\n input_node = pe.Node(\n BIDSDataGrabberPatch(\n domains=['bids', 'derivatives'],\n output_query=imgs_criteria,\n base_dir=os.path.abspath(opts.deriv_pipeline)),\n name='input_node')\n\n participant_wf.connect([\n (input_node, connectivity_wf,\n [('imgs', 'input_node.img')]),\n ])\n\n # run the participant workflow\n participant_wf.run()\n\n elif opts.analysis_level == 'group':\n\n # set the input dir (assumed participant level already run).\n input_dir = os.path.join(\n os.path.dirname(\n os.path.abspath(opts.deriv_pipeline)), 'atlasCorrelations')\n # catch if directory doesn't exist\n if not os.path.isdir(input_dir):\n raise OSError('DOES NOT EXIST: {input_dir}'.format(input_dir=input_dir))\n\n # add in optional search criteria\n matrices_criteria = {\n 'matrices':\n {\n 'space': 'MNI152NLin2009cAsym',\n 'modality': 'func',\n 'type': 'corrMatrix',\n }\n }\n if opts.session:\n matrices_criteria['matrices']['session'] = opts.session\n if opts.task:\n matrices_criteria['matrices']['task'] = opts.task\n if opts.run:\n matrices_criteria['matrices']['run'] = opts.run\n if opts.variant:\n 
matrices_criteria['matrices']['variant'] = opts.variant\n\n group_wf = pe.Workflow(name='group_wf', base_dir=workdir)\n\n group_collection_wf = init_group_collection_wf(work_dir=workdir,\n outdir=input_dir)\n input_node = pe.Node(\n BIDSDataGrabberPatch(\n domains=['bids', 'derivatives'],\n output_query=matrices_criteria,\n base_dir=input_dir),\n name='input_node')\n\n group_wf.connect([\n (input_node, group_collection_wf,\n [('matrices', 'input_node.matrix_tsv')]),\n ])\n\n group_wf.run()\n\n else:\n raise NameError('specify either participant or group for analysis level')\n\n\ndef get_parser():\n \"\"\"Build parser object\"\"\"\n parser = ArgumentParser(description='atlas_correlations')\n parser.add_argument('--deriv-pipeline', '-d', action='store', required=True,\n help='input derivative directory (e.g. fmriprep). '\n 'I assume the inputs are in MNI space.')\n parser.add_argument('--atlas-img', '-a', action='store',\n help='input atlas nifti')\n parser.add_argument('--atlas-lut', '-l', action='store', required=True,\n help='atlas look up table formatted with the columns: '\n 'index, regions')\n parser.add_argument('--confounds', '-c', action='store', nargs='+',\n help='names of confounds to be included in analysis')\n parser.add_argument('analysis_level', choices=['participant', 'group'],\n help='run participant level analysis, or aggregate '\n 'group level results')\n parser.add_argument('--participant_label', '--participant-label',\n action='store', nargs='+',\n help='one or more participant identifiers with the '\n 'sub- prefix removed')\n parser.add_argument('--hp', action='store', default=None,\n help='highpass filter to apply to the data')\n parser.add_argument('--lp', action='store', default=None,\n help='lowpass filter to apply to the data')\n parser.add_argument('--variant', action='store',\n help='only analyze files with a specific variant label')\n parser.add_argument('--run', action='store',\n help='only analyze files with a specific run label')\n parser.add_argument('--session', action='store',\n help='only analyze files with a specific session label')\n parser.add_argument('--task', action='store',\n help='only analyze files with a specific task label')\n return parser\n\n\ndef init_connectivity_wf(work_dir, output_dir, hp, lp,\n atlas_img, atlas_lut, confounds):\n \"\"\"\n Generates a connectivity matrix for a bold file\n\n .. 
workflow::\n :graph2use: orig\n :simple_form: yes\n\n from atlascorr.atlas_correlations import init_connectivity_wf\n wf = init_connectivity_wf(\n work_dir='.',\n output_dir='.',\n hp=None,\n lp=None,\n atlas_img='',\n atlas_lut='',\n confounds=[''],\n )\n\n Parameters\n ----------\n work_dir : str\n full path to directory where intermediate files will be written\n output_dir : str\n full path to directory where output files will be written\n hp : float or None\n high pass filter (frequencies higher than this pass)\n lp : float or None\n low pass filter (frequencies lower than this pass)\n atlas_img : str\n full path and name of the atlas file\n atlas_lut : str\n full path and name to atlas lookup tsv with two columns\n (regions and index)\n confounds : list\n list of confounds to include in the model\n\n Inputs\n ------\n img : str\n full path and name of the bold file\n atlas_img : str\n full path and name of the atlas file\n atlas_lut : str\n full path and name to atlas lookup tsv with two columns\n (regions and index)\n\n Outputs\n -------\n dst : str\n full path and name of the correlation matrix\n \"\"\"\n connectivity_wf = pe.Workflow(name='connectivity_wf')\n connectivity_wf.base_dir = work_dir\n\n input_node = pe.MapNode(\n niu.IdentityInterface(\n fields=['img', 'atlas_img', 'atlas_lut']),\n iterfield=['img'],\n name='input_node')\n input_node.inputs.atlas_img = atlas_img\n input_node.inputs.atlas_lut = atlas_lut\n\n get_files_node = pe.MapNode(\n niu.Function(\n function=get_files,\n input_names=['img'],\n output_names=['confounds', 'brainmask']),\n iterfield=['img'],\n name='get_files_node')\n\n confounds2df_node = pe.MapNode(\n niu.Function(\n function=proc_confounds,\n input_names=['confounds', 'confound_file'],\n output_names=['confounds_df']),\n iterfield=['confound_file'],\n name='confounds2df_node')\n confounds2df_node.inputs.confounds = confounds\n\n extract_ts_node = pe.MapNode(\n niu.Function(\n function=extract_ts,\n input_names=['img',\n 'brainmask',\n 'atlas_img',\n 'confounds_df',\n 'hp',\n 'lp'],\n output_names=['ts_matrix']),\n iterfield=['img', 'confounds_df', 'brainmask'],\n name='extract_ts_node')\n\n # initialize highpass and lowpass\n extract_ts_node.inputs.lp = lp\n extract_ts_node.inputs.hp = hp\n\n make_corr_matrix_node = pe.MapNode(\n niu.Function(\n function=make_corr_matrix,\n input_names=['ts_matrix'],\n output_names=['zcorr_matrix']),\n iterfield=['ts_matrix'],\n name='make_corr_matrix_node')\n\n write_out_corr_matrix_node = pe.MapNode(\n niu.Function(\n function=write_out_corr_matrix,\n input_names=['corr_matrix', 'atlas_lut', 'img', 'output_dir'],\n output_names=['matrix_tsv']),\n iterfield=['corr_matrix', 'img'],\n name='write_out_corr_matrix_node')\n write_out_corr_matrix_node.inputs.output_dir = output_dir\n\n connectivity_wf.connect([\n (input_node, get_files_node,\n [('img', 'img')]),\n (get_files_node, confounds2df_node,\n [('confounds', 'confound_file')]),\n (get_files_node, extract_ts_node,\n [('brainmask', 'brainmask')]),\n (confounds2df_node, extract_ts_node,\n [('confounds_df', 'confounds_df')]),\n (input_node, extract_ts_node,\n [('atlas_img', 'atlas_img'),\n ('img', 'img')]),\n (extract_ts_node, make_corr_matrix_node,\n [('ts_matrix', 'ts_matrix')]),\n (make_corr_matrix_node, write_out_corr_matrix_node,\n [('zcorr_matrix', 'corr_matrix')]),\n (input_node, write_out_corr_matrix_node,\n [('atlas_lut', 'atlas_lut'),\n ('img', 'img')]),\n\n ])\n\n return connectivity_wf\n\n\ndef init_group_collection_wf(work_dir, outdir):\n \"\"\"\n 
Combines correlation matrices derived from the individual\n bold files.\n\n .. workflow::\n :graph2use: orig\n :simple_form: yes\n\n from atlascorr.atlas_correlations import init_group_collection_wf\n wf = init_group_collection_wf(\n work_dir='.',\n outdir='.',\n )\n\n Parameters\n ----------\n work_dir : str\n full path to directory where intermediate files will be written\n outdir : str\n full path to directory where the group tsv will be written\n\n Inputs\n ------\n matrix_tsv : str\n full path and name to correlation matrix\n \"\"\"\n group_collection_wf = pe.Workflow(name='group_collection_wf')\n group_collection_wf.base_dir = work_dir\n\n input_node = pe.MapNode(\n niu.IdentityInterface(\n fields=['matrix_tsv']),\n iterfield=['matrix_tsv'],\n name='input_node')\n\n matrix_proc_node = pe.MapNode(\n niu.Function(\n function=proc_matrix,\n input_names=['matrix_tsv'],\n output_names=['participant_df']),\n iterfield=['matrix_tsv'],\n name='matrix_proc_node')\n\n merge_dfs_node = pe.Node(\n niu.Function(\n function=merge_dfs,\n input_names=['dfs'],\n output_names=['df']),\n name='merge_dfs_node')\n\n write_out_group_tsv_node = pe.Node(\n niu.Function(\n function=write_out_group_tsv,\n input_names=['outdir', 'df'],\n output_names=['out_file']),\n name='write_out_group_tsv_node')\n write_out_group_tsv_node.inputs.outdir = outdir\n\n group_collection_wf.connect([\n (input_node, matrix_proc_node,\n [('matrix_tsv', 'matrix_tsv')]),\n (matrix_proc_node, merge_dfs_node,\n [('participant_df', 'dfs')]),\n (merge_dfs_node, write_out_group_tsv_node,\n [('df', 'df')]),\n ])\n\n return group_collection_wf\n\n\ndef get_files(img):\n \"\"\"\n Find the brainmask and confound files given the bold file.\n\n Parameters\n ----------\n img : str\n full path and name of the bold file\n\n Returns\n -------\n confound : str\n full path and name of the confounds file\n brainmask : str\n full path and name of the brainmask file\n \"\"\"\n import re\n import os\n PROC_EXPR = re.compile(\n r'^(?P.*/)?'\n r'(?Psub-[a-zA-Z0-9]+)'\n r'(_(?Pses-[a-zA-Z0-9]+))?'\n r'(_(?Ptask-[a-zA-Z0-9]+))?'\n r'(_(?Pacq-[a-zA-Z0-9]+))?'\n r'(_(?Prec-[a-zA-Z0-9]+))?'\n r'(_(?Prun-[a-zA-Z0-9]+))?'\n r'_bold'\n r'(_(?Pspace-[a-zA-Z0-9]+))?'\n r'(_(?Pvariant-[a-zA-Z0-9]+))?'\n r'_preproc.nii.gz')\n\n def get_confound(img):\n CONF_REPL = (r'\\g'\n r'\\g'\n r'_\\g'\n r'_\\g'\n r'_\\g'\n r'_bold_confounds.tsv')\n conf_tmp = PROC_EXPR.sub(CONF_REPL, img)\n conf = re.sub('_+', '_', conf_tmp)\n if os.path.isfile(conf):\n return conf\n else:\n raise IOError('cannot find {conf}'.format(conf=conf))\n\n def get_brainmask(img):\n MASK_REPL = (r'\\g'\n r'\\g'\n r'_\\g'\n r'_\\g'\n r'_\\g'\n r'_bold_\\g_brainmask.nii.gz')\n bmask = PROC_EXPR.sub(MASK_REPL, img)\n bmask = re.sub('_+', '_', bmask)\n if os.path.isfile(bmask):\n return bmask\n else:\n raise IOError('cannot find {bmask}'.format(bmask=bmask))\n confound = get_confound(img)\n brainmask = get_brainmask(img)\n return confound, brainmask\n\n\ndef proc_confounds(confounds, confound_file):\n \"\"\"\n Filter confounds file to selected confounds &\n replaces \"n/a\"s in confounds file with the mean.\n\n Parameters\n ----------\n confounds : list\n list of confounds to include in the model\n confounds_file : str\n full path and name of the confounds file\n\n Returns\n -------\n confounds_df : pandas.core.frame.DataFrame\n dataframe containing the selected confounds\n \"\"\"\n import pandas as pd\n import numpy as np\n confounds_df = pd.read_csv(confound_file, sep='\\t', na_values='n/a')\n if 
'FramewiseDisplacement' in confounds:\n confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna(\n np.mean(confounds_df['FramewiseDisplacement']))\n return confounds_df[confounds]\n\n\ndef extract_ts(img, brainmask, atlas_img, confounds_df, hp=None, lp=None):\n \"\"\"\n Extract timeseries from each region of interest described by an atlas.\n\n Parameters\n ----------\n img : str\n full path and name of the bold file\n brainmask : str\n full path and name of the brainmask file\n atlas_img : str\n full path and name of the atlas file\n confounds_df : pandas.core.frame.DataFrame\n dataframe containing confound measures\n hp : float or None\n high pass filter (frequencies higher than this pass)\n lp : float or None\n low pass filter (frequencies lower than this pass)\n\n Returns\n -------\n signals : numpy.ndarray\n 2D numpy array with each column representing an atlas region\n and each row representing a volume (time point)\n \"\"\"\n from nilearn.input_data import NiftiLabelsMasker\n if hp:\n hp = float(hp)\n if lp:\n lp = float(lp)\n masker = NiftiLabelsMasker(\n labels_img=atlas_img, standardize=True, mask_img=brainmask,\n low_pass=lp, high_pass=hp, t_r=2.0)\n return masker.fit_transform(img, confounds=confounds_df.values)\n\n\ndef make_corr_matrix(ts_matrix):\n \"\"\"\n Make a symmetric pearson's r->z transforme correlation matrix.\n\n Parameters\n ----------\n ts_matrix : numpy.ndarray\n 2D numpy array with each column representing an atlas region\n and each row representing a volume (time point)\n\n Returns\n -------\n zcorr_matrix : numpy.ndarray\n 2D symmetric matrix measuring region-region correlations\n main diagnal is all zeros\n \"\"\"\n from nilearn.connectome import ConnectivityMeasure\n import numpy as np\n\n def fisher_r_to_z(r):\n import math\n if r == 1.:\n return 0.\n else:\n return math.log((1. + r)/(1. 
- r))/2.\n correlation_measure = ConnectivityMeasure(kind='correlation')\n corr_matrix = correlation_measure.fit_transform([ts_matrix])[0]\n vfisher_r_to_z = np.vectorize(fisher_r_to_z)\n # fisher's r to z\n zcorr_matrix = vfisher_r_to_z(corr_matrix)\n return zcorr_matrix\n\n\ndef write_out_corr_matrix(corr_matrix, atlas_lut, img, output_dir):\n \"\"\"\n Write out a symmetric correlation matrix using BIDS naming conventions\n\n Parameters\n ----------\n corr_matrix : numpy.ndarray\n 2D symmetric matrix measuring region-region correlations\n main diagnal is all zeros\n atlas_lut : str\n full path and name to atlas lookup tsv with two columns\n (regions and index)\n img : str\n full path and name of the bold file\n output_dir : str\n full path to the base directory where all correlation matrices\n will be written out to.\n\n Returns\n -------\n dst : str\n full path and name of the correlation matrix\n \"\"\"\n import pandas as pd\n import os\n import re\n\n PROC_EXPR = re.compile(\n r'^(?P.*/)?'\n r'(?Psub-[a-zA-Z0-9]+)'\n r'(_(?Pses-[a-zA-Z0-9]+))?'\n r'(_(?Ptask-[a-zA-Z0-9]+))?'\n r'(_(?Pacq-[a-zA-Z0-9]+))?'\n r'(_(?Prec-[a-zA-Z0-9]+))?'\n r'(_(?Prun-[a-zA-Z0-9]+))?'\n r'_bold'\n r'(_(?Pspace-[a-zA-Z0-9]+))?'\n r'(_(?Pvariant-[a-zA-Z0-9]+))?'\n r'_preproc.nii.gz')\n\n name_dict = PROC_EXPR.search(img).groupdict()\n\n bids_output_dir = os.path.join(output_dir,\n name_dict['subject_id'],\n name_dict['session_id'],\n 'func')\n os.makedirs(bids_output_dir, exist_ok=True)\n\n fname = '_'.join([name_dict['subject_id'], name_dict['session_id']])\n\n key_order = ['task_id', 'acq_id', 'rec_id', 'run_id', 'space_id', 'variant_id']\n\n for key in key_order:\n if name_dict[key]:\n fname = '_'.join([fname, name_dict[key]])\n\n dst = os.path.join(bids_output_dir, fname + '_corrMatrix.tsv')\n\n atlas_lut_df = pd.read_csv(atlas_lut, sep='\\t')\n regions = atlas_lut_df['regions']\n corr_matrix_df = pd.DataFrame(corr_matrix, index=regions, columns=regions)\n corr_matrix_df.to_csv(dst, sep='\\t')\n return dst\n\n\ndef proc_matrix(matrix_tsv):\n \"\"\"\n Vectorize symmetric correlation matrix so that\n each unique region-region correlation gets a column.\n\n Parameters\n ----------\n matrix_tsv : str\n full path and name of the correlation matrix\n\n Returns\n -------\n flat_df : pandas.core.frame.DataFrame\n a flat dataframe that is one entry and has as many columns\n as there are unique region-region pairs\n \"\"\"\n import pandas as pd\n import numpy as np\n import re\n import os\n # process data:\n # read in tsv into a pandas dataframe\n tmp_df = pd.read_csv(matrix_tsv, sep='\\t', index_col=0)\n # make the dataframe into a numpy array\n tmp_arr = tmp_df.as_matrix()\n # get the upper triangle (excluding the diagonal\n upper_triangle_idx = np.triu_indices(len(tmp_df), 1)\n # extract the values from the symmetric 2D matrix into a 1D matrix\n flat_arr = tmp_arr[upper_triangle_idx]\n\n # collector for header names\n header_list = []\n # this has to mutable to not repeat combinations\n row_headers = list(tmp_df.index)\n for col_header in tmp_df.columns.values:\n row_headers.remove(col_header)\n for row_header in row_headers:\n header_list.append('-'.join([col_header, row_header]))\n\n # makes a wide dataframe with one entry\n data_df = pd.DataFrame(data=np.atleast_2d(flat_arr), columns=header_list)\n\n # process filename\n MAT_EXPR = re.compile(\n r'^(?P.*/)?'\n r'(?Psub-[a-zA-Z0-9]+)'\n r'(_(?Pses-[a-zA-Z0-9]+))?'\n r'(_(?Ptask-[a-zA-Z0-9]+))?'\n r'(_(?Pacq-[a-zA-Z0-9]+))?'\n r'(_(?Prec-[a-zA-Z0-9]+))?'\n 
r'(_(?Prun-[a-zA-Z0-9]+))?'\n r'(_(?Pspace-[a-zA-Z0-9]+))?'\n r'(_(?Pvariant-[a-zA-Z0-9]+))?'\n r'_corrMatrix.tsv')\n name_dict = MAT_EXPR.search(os.path.basename(matrix_tsv)).groupdict()\n info_dict = {k: v.split('-')[1] for k, v in name_dict.items() if v is not None}\n info_df = pd.DataFrame.from_records([info_dict])\n\n # returns a one row many column dataframe\n return pd.concat([info_df, data_df], axis=1)\n\n\ndef merge_dfs(dfs):\n \"\"\"\n Merge a list of dataframes where each contains one row\n showing all unique region-region pairs.\n\n Parameters\n ----------\n dfs : list\n list of dataframes where each contains one row\n showing all unique region-region pairs\n\n Returns\n -------\n out_df : pandas.core.frame.DataFrame\n merged dataframe where each row represents a unique scan\n \"\"\"\n import pandas as pd\n out_df = pd.concat(dfs, copy=False, ignore_index=True)\n headers = list(out_df.columns.values)\n # if any of these columns exist in the dataframe, move them to the front\n if 'variant_id' in headers:\n headers.insert(0, headers.pop(headers.index('variant_id')))\n if 'space_id' in headers:\n headers.insert(0, headers.pop(headers.index('space_id')))\n if 'run_id' in headers:\n headers.insert(0, headers.pop(headers.index('run_id')))\n if 'rec_id' in headers:\n headers.insert(0, headers.pop(headers.index('rec_id')))\n if 'acq_id' in headers:\n headers.insert(0, headers.pop(headers.index('acq_id')))\n if 'task_id' in headers:\n headers.insert(0, headers.pop(headers.index('task_id')))\n if 'session_id' in headers:\n headers.insert(0, headers.pop(headers.index('session_id')))\n if 'subject_id' in headers:\n headers.insert(0, headers.pop(headers.index('subject_id')))\n\n out_df = out_df[headers]\n\n return out_df\n\n\ndef write_out_group_tsv(outdir, df):\n \"\"\"\n outdir : str\n full path to the output directory for the group tsv\n df : pandas.core.frame.DataFrame\n dataframe where each row represents a unique scan\n and each column is a unique region-region pair\n \"\"\"\n import os\n out_file = os.path.join(outdir, 'group.tsv')\n df.to_csv(out_file, sep='\\t', index=False)\n return out_file\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HBClab/atlascorr","sub_path":"atlascorr/atlas_correlations.py","file_name":"atlas_correlations.py","file_ext":"py","file_size_in_byte":23489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38211856880","text":"#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\nsourceFolder = '/marconi/home/userexternal/bdoekeme/averagedVTKs'\nsourceFile = 'mergeVTKs_single.py'\njobSourceFile = 'hpc.jobVTK.PRACE'\n\ndef replaceLineInFile(filePath,strToReplace,string):\t\n\t# Replace in file\n\twith open(filePath) as f:\n\t\tnewText=f.read().replace(strToReplace, string)\n\twith open(filePath, \"w\") as f:\n\t\tf.write(newText)\n\n# Setup cases and submit jobs\nfor yaw1 in [220,230,240,250,260,270]:\n for yaw2 in [210,220,230,240,250,260,270]:\n destinationFolder = '/marconi_scratch/userexternal/bdoekeme/sediniCases/neutral_runs/runs/sdn_yaw'+str(yaw1)+'_yaw'+str(yaw2)+'/postProcessing'\n call(\"cp \" + sourceFolder + os.sep + sourceFile + \" \" + destinationFolder + '/.', shell=True)\n call(\"cp \" + sourceFolder + os.sep + jobSourceFile + \" \" + destinationFolder + '/.', shell=True)\n \n replaceLineInFile(destinationFolder+os.sep+sourceFile,'', str(yaw1)) # Yaw 1\n replaceLineInFile(destinationFolder+os.sep+sourceFile,'', str(yaw2)) # Yaw 2\n 
replaceLineInFile(destinationFolder+os.sep+jobSourceFile,'', 'VTK.py_yaw'+str(yaw1)+'yaw'+str(yaw2)) # Job\n        call(\"cd \" + destinationFolder + \" && sbatch \"+jobSourceFile, shell=True)","repo_name":"TUDelft-DataDrivenControl/FLORISSE_M","sub_path":"Examples/model_calibration/0_vtkPostprocessing/hpc.submitMergeJobs.py","file_name":"hpc.submitMergeJobs.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"}
+{"seq_id":"4009044963","text":"def giveUrozhay(a : list) -> int:\n    max = a[0]\n    for i in range(len(a) - 2):\n        count = a[i] + a[i+1] + a[i+2]\n        if max < count:\n            max = count\n    return max\n\ndef main() -> None:\n    try:\n        with open(\"GardenBed.txt\", 'r') as f:\n            line = [int(n) for n in f.read().split()]\n            print(line)\n        line.append(line[0])\n        line.append(line[1])\n        print(giveUrozhay(line))\n\n    except ValueError:\n        print(ValueError('Something went wrong with the input'))\n\nmain()\n","repo_name":"MathyWay/GeekBransPython","sub_path":"Homework070523/Task2/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29890355413","text":"import numpy\n\ndef DCM(rot_type,angles):\n    n = len(rot_type)\n    y = numpy.identity(3)\n    for k in reversed(range(n)):\n        a = angles[k]\n        M = numpy.zeros([3,3])\n        if rot_type[k] == 1:\n            M[0,0] = 1.0\n            M[1,1] = numpy.cos(a)\n            M[1,2] = numpy.sin(a)\n            M[2,1] = -numpy.sin(a)\n            M[2,2] = numpy.cos(a)\n        elif rot_type[k] == 2:\n            M[1,1] = 1.0\n            M[0,0] = numpy.cos(a)\n            M[0,2] = -numpy.sin(a)\n            M[2,0] = numpy.sin(a)\n            M[2,2] = numpy.cos(a)\n        elif rot_type[k] == 3:\n            M[2,2] = 1.0\n            M[0,0] = numpy.cos(a)\n            M[0,1] = numpy.sin(a)\n            M[1,0] = -numpy.sin(a)\n            M[1,1] = numpy.cos(a)\n        else:\n            raise Exception('Incorrect rotation rot_type. 
Must be a 1, 2, or 3\\n')\n y = numpy.matmul(M,y)\n return y\n\ndef BodyFrame(Wt,DEC,RA):\n\n if DEC == numpy.pi/2:\n BN = DCM([3,1,3],[Wt,(numpy.pi/2-DEC),RA])\n else:\n BN = DCM([3,1,3],[Wt,(numpy.pi/2-DEC),(numpy.pi/2+RA)])\n\n return BN\n","repo_name":"takahasy-co/ASEN6080-Sp2023-HW1-Debug-for-Students","sub_path":"lib/pylib/rot_lib.py","file_name":"rot_lib.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18451404712","text":"import mysql.connector\n\nconn = mysql.connector.connect(\n host=\"192.168.99.102\",\n user=\"root\",\n passwd=\"test\",\n database=\"user_db\",\n port=\"3308\"\n)\n\n\ndef find_all():\n query = \"SELECT * FROM users\"\n try:\n cursor = conn.cursor()\n rows = cursor.execute(query)\n cursor.close()\n return rows\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n\ndef find_one_by_id(user_id):\n query = \"SELECT * FROM users where id='%'\"\n try:\n cursor = conn.cursor()\n row = cursor.execute(query, user_id)\n cursor.close()\n return row\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n\ndef find_one_by_name(name):\n query = \"SELECT * FROM users where name='%'\"\n try:\n cursor = conn.cursor()\n row = cursor.execute(query, name)\n cursor.close()\n return row\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n\nusers = find_all()\nuser_1 = find_one_by_id(1)\nuser_pete = find_one_by_name('pete')\n\n\"\"\"\nSomething went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist\nSomething went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist\nSomething went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist\n\"\"\"\n","repo_name":"gridl/Writing-Clean-Python-Code","sub_path":"2_pythonic_code/decrorators/bad_example.py","file_name":"bad_example.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"40915895989","text":"import os\nimport unittest\nimport subprocess\n\nfrom sage_bootstrap.env import SAGE_DISTFILES\nfrom sage_bootstrap.download.mirror_list import MIRRORLIST_FILENAME\nfrom sage_bootstrap.util import is_url\nfrom sage_bootstrap.package import Package\nfrom test.capture import CapturedLog\nfrom test.config import NO_INTERNET\n\n\nEXECUTABLE = os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n 'bin',\n 'sage-package',\n)\n\n\nclass SagePackageTestCase(unittest.TestCase):\n\n def run_command(self, *args):\n proc = subprocess.Popen(\n args,\n stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n rc = proc.returncode\n return (rc, stdout, stderr)\n \n def test_config(self):\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'config')\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertTrue(stdout.startswith('Configuration:\\n'))\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n def test_list(self):\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'list')\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertTrue('configure' in stdout.splitlines())\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n def test_name(self):\n pkg = Package('configure')\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'name', 
pkg.tarball_filename)\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertEqual(stdout.rstrip(), 'configure')\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n def test_tarball(self):\n pkg = Package('configure')\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'tarball', pkg.name)\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertEqual(stdout.rstrip(), pkg.tarball_filename)\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n def test_apropos(self):\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'apropos', 'python')\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertTrue(stdout.startswith('Did you mean:'))\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n @unittest.skipIf(NO_INTERNET, 'requires internet access')\n def test_download(self):\n pkg = Package('configure')\n with CapturedLog() as log:\n pkg.tarball.download()\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'download', pkg.name)\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints filename to stdout\n self.assertEqual(stdout.rstrip(), pkg.tarball.upstream_fqn)\n # Prints info to stderr\n self.assertTrue(stderr.startswith('Using cached file'))\n\n @unittest.skipIf(NO_INTERNET, 'requires internet access')\n def test_update(self):\n pkg = Package('configure')\n # The confball never has a patchlevel since we are upstream...\n self.assertEqual(pkg.patchlevel, -1)\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'update', pkg.name, pkg.version)\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints nothing to stdout\n self.assertEqual(stdout, '')\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n\n @unittest.skipIf(NO_INTERNET, 'requires internet access')\n def test_fix_checksum(self):\n pkg = Package('configure')\n rc, stdout, stderr = self.run_command(EXECUTABLE, 'fix-checksum', 'configure')\n # returns successfully\n self.assertEqual(rc, 0)\n # Prints to stdout\n self.assertEqual(stdout.rstrip(), 'Checksum of {0} unchanged'.format(pkg.tarball_filename))\n # Prints nothing to stderr\n self.assertEqual(stderr, '')\n","repo_name":"vbraun/sage-package","sub_path":"test/test_package_cmdline.py","file_name":"test_package_cmdline.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1618151358","text":"from discord.ext import commands\nfrom discord import Message, Member, Embed, File\nfrom discord.ext.commands.context import Context\n\nfrom __main__ import HeeKyung\n\nimport os\nimport json\nimport pytz\nfrom typing import List\nfrom datetime import datetime\nfrom pydub import AudioSegment\nfrom asyncio import TimeoutError\nfrom EZPaginator import Paginator\n\n\ndef isGameDeveloper():\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n return commands.check(lambda ctx: managers.get(str(ctx.author.id)))\n\n\nclass Core(commands.Cog):\n def __init__(self, bot: HeeKyung):\n self.bot = bot\n self.meetings = {\"meetingWhether\": False, \"channel\": int(), \"messages\": []}\n\n @commands.command(name=\"도움말\", aliases=[\"도움\", \"help\"])\n async def help(self, ctx: Context):\n embeds: List[Embed] = list(map(lambda x: x.set_footer(icon_url=ctx.author.avatar_url), [\n Embed(\n title=\"희경 도움말\",\n description=\"**Page 1.** ILove103\\n**Page 2.** KyungheeGames 개발자 도움말\"\n ),\n Embed(\n title=\"ILove103 도움말\",\n 
description=\"1학년 3반을 위한 커맨드입니다.\"\n ).add_field(\n name=\"!내전 시작\",\n value=\"롤 5 vs 5 내전을 시작합니다.\",\n inline=False,\n ).add_field(\n name=\"!내전 종료\",\n value=\"롤 5 vs 5 내전을 종료합니다.\",\n inline=False,\n ),\n Embed(\n title=\"KyungheeGames 개발자 도움말\",\n description=\"게임 개발자만 사용할 수 있습니다.\"\n ).add_field(\n name=\"!회의 시작\",\n value=\"회의를 시작합니다.\"\n ).add_field(\n name=\"!회의 종료\",\n value=\"회의를 종료합니다.\"\n ).add_field(\n name=\"!회의록\",\n value=\"이전 회의록을 불러옵니다.\"\n ).add_field(\n name=\"!관리자 추가 @멘션\",\n value=\"관리자를 추가합니다.\"\n ).add_field(\n name=\"!관리자 삭제 @멘션\",\n value=\"관리자 권한을 박탈합니다.\"\n ).add_field(\n name=\"!관리자 변경 @멘션 이름\",\n value=\"관리자의 이름을 변경합니다.\"\n ).add_field(\n name=\"!관리자 목록\",\n value=\"관리자 목록을 불러옵니다.\"\n ).add_field(\n name=\"!오디오변환\",\n value=\"파일을 같이 보내주시면 mp3 파일을 ogg 파일로 변환해드려요!\"\n )\n ]))\n msg: Message = await ctx.send(embed=embeds[0])\n await Paginator(bot=self.bot, message=msg, embeds=embeds, use_extend=True).start()\n\n @commands.group(name=\"회의\")\n @isGameDeveloper()\n async def meeting(self, ctx: Context):\n if ctx.invoked_subcommand is None:\n return\n\n @meeting.command(name=\"시작\")\n @commands.guild_only()\n async def meetingStart(self, ctx: Context):\n if self.meetings[\"meetingWhether\"]:\n return await ctx.reply(\"회의가 이미 진행중입니다.\")\n self.meetings[\"meetingWhether\"] = True\n self.meetings[\"channel\"] = int(ctx.channel.id)\n return await ctx.reply(\"회의가 시작되었습니다.\")\n\n @meeting.command(name=\"종료\")\n @commands.guild_only()\n async def meetingEnd(self, ctx: Context):\n if not self.meetings[\"meetingWhether\"]:\n return await ctx.reply(\"회의가 진행중이 아닙니다.\")\n self.meetings[\"meetingWhether\"] = False\n self.meetings[\"channel\"] = int()\n with open(\n f'./database/meetings/{datetime.now(tz=pytz.timezone(\"Asia/Seoul\")).strftime(\"%Y%m%d%H%M\")}.txt',\n \"w\",\n encoding=\"utf8\",\n ) as meeting:\n meeting.write(\"\\n\".join(self.meetings[\"messages\"]))\n self.meetings[\"messages\"] = []\n return await ctx.reply(\"회의가 종료되었습니다.\")\n\n @commands.command(name=\"회의록\")\n @isGameDeveloper()\n async def exportMeeting(self, ctx: Context):\n meetings = sorted(os.listdir(\"./database/meetings\"), reverse=True)\n if len(meetings) == 0:\n return await ctx.reply(\"회의록이 없습니다.\")\n meetingLetters = []\n for meeting in meetings:\n with open(f\"./database/meetings/{meeting}\", \"r\", encoding=\"utf8\") as f:\n meetingLetters.append(len(f.read()))\n numberEmojis = [\n \"1️⃣\",\n \"2️⃣\",\n \"3️⃣\",\n \"4️⃣\",\n \"5️⃣\",\n ]\n embed = Embed(\n title=\"회의록 목록\",\n description=\"\\n\".join(\n [\n f\"{numberEmojis[meetings.index(_)]} - {_[0:4]}.{_[4:6]}.{_[6:8]} {_[8:10]}:{_[10:12]} ( {meetingLetters[meetings.index(_)]}자 )\"\n for _ in meetings[:5]\n ]\n ),\n ).set_footer(text=\"아래 버튼을 눌러 회의록을 다운로드하세요. 
(60초)\")\n msg: Message = await ctx.reply(embed=embed)\n for emoji in numberEmojis[: len(meetings)]:\n await msg.add_reaction(emoji)\n try:\n react = await self.bot.wait_for(\n \"reaction_add\",\n check=lambda reaction, user: user == ctx.author\n and str(reaction.emoji) in numberEmojis,\n timeout=60,\n )\n except TimeoutError:\n return await msg.clear_reactions()\n await msg.delete()\n await ctx.send(\n file=File(\n f\"./database/meetings/{meetings[numberEmojis.index(str(react[0].emoji))]}\",\n filename=meetings[numberEmojis.index(str(react[0].emoji))],\n )\n )\n\n @commands.Cog.listener()\n async def on_message(self, message: Message):\n if message.author.bot:\n return\n if message.content == \"!회의 시작\":\n return\n if self.meetings[\"meetingWhether\"]:\n if message.channel.id == self.meetings[\"channel\"]:\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n self.meetings[\"messages\"].append(\n f\"{message.author.name if managers.get(str(message.author.id)) is None else managers[str(message.author.id)]}\"\n f\" - {datetime.now(tz=pytz.timezone('Asia/Seoul')).strftime('%m-%d %H:%M')} - {message.content}\"\n )\n\n @commands.group(name=\"관리자\")\n @isGameDeveloper()\n async def admin(self, ctx: Context):\n if ctx.invoked_subcommand is None:\n return\n\n @admin.command(name=\"추가\")\n async def adminAdd(self, ctx: Context, user: Member, *, name: str):\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n if managers.get(str(user.id)) is None:\n managers[str(user.id)] = name\n with open(\"./database/managers.json\", \"w\", encoding=\"utf8\") as f:\n json.dump(managers, f, ensure_ascii=False, indent=4)\n return await ctx.reply(f\"{user} 님의 관리자명이 `{name}` 으로 변경되었습니다.\")\n return await ctx.reply(f\"{user}({managers[str(user.id)]}) 님은 이미 관리자입니다.\")\n\n @admin.command(name=\"삭제\")\n async def adminDelete(self, ctx: Context, user: Member):\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n if managers.get(str(user.id)) is not None:\n del managers[str(user.id)]\n with open(\"./database/managers.json\", \"w\", encoding=\"utf8\") as f:\n json.dump(managers, f, ensure_ascii=False, indent=4)\n return await ctx.reply(f\"{user} 님의 관리자명이 삭제되었습니다.\")\n return await ctx.reply(f\"{user} 님은 관리자가 아닙니다.\")\n\n @admin.command(name=\"변경\")\n async def adminChange(self, ctx: Context, user: Member, *, name: str):\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n if managers.get(str(user.id)) is not None:\n managers[str(user.id)] = name\n with open(\"./database/managers.json\", \"w\", encoding=\"utf8\") as f:\n json.dump(managers, f, ensure_ascii=False, indent=4)\n return await ctx.reply(f\"{user} 님의 관리자명이 `{name}` 으로 변경되었습니다.\")\n return await ctx.reply(f\"{user} 님은 관리자가 아닙니다.\")\n\n @admin.command(name=\"목록\")\n async def adminList(self, ctx: Context):\n with open(\"./database/managers.json\", \"r\", encoding=\"utf8\") as f:\n managers = json.load(f)\n if len(managers) == 0:\n return await ctx.reply(\"관리자가 없습니다.\")\n return await ctx.reply(\n embed=Embed(\n title=\"관리자 목록\",\n description=\"\\n\".join(\n [\n f\"{ctx.guild.get_member(int(_)).mention} : {managers[_]}\"\n for _ in managers\n ]\n ),\n )\n )\n\n @commands.command(name=\"오디오변환\")\n @isGameDeveloper()\n async def audioConvert(self, ctx: Context):\n if len(ctx.message.attachments) == 0:\n return await ctx.reply(\"첨부된 파일이 없습니다.\")\n if 
ctx.message.attachments[0].filename.split(\".\")[-1] not in [\"mp3\", \"wav\"]:\n return await ctx.reply(\"오디오 파일이 아닙니다.\")\n with open(\n f\"./database/audioDatas/input/{ctx.message.attachments[0].filename}\", \"wb\"\n ) as fp:\n await ctx.message.attachments[0].save(fp)\n songFile = AudioSegment.from_mp3(\n f\"./database/audioDatas/input/{ctx.message.attachments[0].filename}\"\n )\n songFile.export(\n f'./database/audioDatas/output/{ctx.message.attachments[0].filename.split(\".\")[0]}.ogg',\n format=\"ogg\",\n )\n await ctx.reply(\n f\"{ctx.message.attachments[0].filename} 파일이 오디오로 변환되었습니다.\",\n file=File(\n f'./database/audioDatas/output/{ctx.message.attachments[0].filename.split(\".\")[0]}.ogg'\n ),\n )\n os.remove(f\"./database/audioDatas/input/{ctx.message.attachments[0].filename}\")\n os.remove(\n f'./database/audioDatas/output/{ctx.message.attachments[0].filename.split(\".\")[0]}.ogg'\n )\n\n\ndef setup(bot: HeeKyung):\n bot.add_cog(Core(bot))\n","repo_name":"KyungheeGames/HeeKyung","sub_path":"cogs/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7939476414","text":"from observ import reactive\n\nimport collagraph as cg\n\n\ndef test_component_template_tag():\n from tests.data.template import Template\n\n gui = cg.Collagraph(cg.DictRenderer(), event_loop_type=cg.EventLoopType.SYNC)\n container = {\"type\": \"root\"}\n state = reactive({\"more\": False})\n element = cg.h(Template, state)\n\n gui.render(element, container)\n\n content = container[\"children\"][0]\n assert content[\"type\"] == \"content\"\n\n assert len(content[\"children\"]) == 2\n for child, name in zip(content[\"children\"], [\"a\", \"b\"]):\n assert child[\"type\"] == \"child\"\n assert child[\"attrs\"][\"name\"] == name\n\n state[\"more\"] = True\n\n assert len(content[\"children\"]) == 4\n for child, name in zip(content[\"children\"], [\"a\", \"b\", \"c\", \"d\"]):\n assert child[\"type\"] == \"child\"\n assert child[\"attrs\"][\"name\"] == name\n","repo_name":"fork-tongue/collagraph","sub_path":"tests/test_template_tags.py","file_name":"test_template_tags.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"32"} +{"seq_id":"20557677397","text":"# TODO : add back/return to options on every page\nclass UserProfilePage:\n def __init__(self, backend_api):\n self.backend_api = backend_api\n \n def show_exam_list(self):\n print('^^ Available Exams ^^')\n ex_li = self.backend_api.get_exam_list()\n for ex_l in ex_li:\n print(ex_l)\n exam_id = input('Please Enter Your Choice (Exam ID): ')\n if self.backend_api.set_curr_exam(exam_id):\n self.print_all_students()\n self.exam_attendance_check_options()\n else:\n print('Invalid exam number.')\n self.show_exam_list()\n\n def print_all_students(self):\n print('^^ Available Students ^^')\n ids = self.backend_api.get_student_ids()\n for i in ids:\n print(i)\n self.exam_attendance_check_options()\n \n def exam_attendance_check_options(self):\n print('(1) Search for Student')\n print('(2) Show Student List')\n print('(3) Confirm Attendance Check')\n print('(4) Home')\n c = input('Please Enter Your Choice (1/2/3/4): ')\n if c=='1':\n self.show_search_for_students_page()\n elif c=='2':\n self.print_all_students()\n elif c=='3':\n self.confirm_list_page()\n elif c=='4':\n self.backend_api.remove_curr_offering()\n self.show_choices()\n\n def show_prof_info(self):\n 
print(self.backend_api.get_offering_prof_info())\n \n def confirm_list_page(self):\n self.show_prof_info()\n if self.backend_api.are_all_studence_checked():\n c = input('Enter Proffesor ID to Confirm: ')\n self.backend_api.prof_confirm()\n self.backend_api.remove_curr_offering()\n self.show_choices()\n print('Successful:)')\n else:\n print('ERROR: there are more students to check before you could finalize list')\n self.exam_attendance_check_options()\n \n def show_search_for_students_page(self):\n student_id = input('Please Enter Your Choice (Student ID): ')\n if self.backend_api.is_student_available(student_id):\n print(self.backend_api.get_student_info(student_id))\n present_or_not = input('Is present? (y/n)')\n if present_or_not=='y':\n self.backend_api.set_present(student_id)\n elif present_or_not == 'n':\n pass\n else: \n print('Invalid choice.')\n self.exam_attendance_check_options()\n answer = input('Confirm attendance? (y/n)')\n if answer=='y':\n self.backend_api.confirm_attendance(student_id)\n elif answer == 'n':\n pass\n else: \n print('Invalid choice.')\n self.exam_attendance_check_options()\n else:\n print('Invalid student id.')\n self.exam_attendance_check_options()\n\n def show_choices(self):\n print('^^ User Profile ^^')\n print('(1) Get Data From Database')\n print('(2) Perform Attendance Check')\n print('(3) Submit Attendance Results')\n print('(4) Logout')\n c = input('Please Enter Your Choice (1/2/3/4): ')\n if c=='1':\n self.backend_api.get_data_from_db()\n self.show_choices()\n elif c=='2':\n self.show_exam_list()\n elif c=='3':\n self.backend_api.submit_results()\n self.show_choices()\n elif c=='4':\n self.backend_api.join_all_threads()\n print('^^ Bye ^^')\n exit()\n else:\n self.show_choices()\n","repo_name":"nazaninsbr/Attendance-Check","sub_path":"UI-Classes/user_profile_page.py","file_name":"user_profile_page.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28858864073","text":"from Pluglead_Plugboard import *\r\nfrom Rotors import *\r\nimport time\r\n\r\n##################################################################################################################################\r\n###\r\n### READ THIS: THE PARAMETERS THAT NEED TO BE SET UP ARE THE GIVEN CONDITIONS THAT THE INSTRUCTIONS MENTIONED PLEASE GO TO THE BOTTOM TO SEE\r\n### Rotors: V, III, IV and 0 value to not include a 4 rotor (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n### Reflector: A\r\n### Ring settings: 24 12 10 and 0 value to not include a 4 rotor (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n### Starting positions: S W U and 0 value to not include a 4 rotor (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n### Plugboard pairs: UNKNOWN, the list includes WP RJ A? VF I? 
HN CG BS (THIS PART IS ITERATED BY THE CODE)\r\n### The code introduced was: SDNTVTPHRBNWTLMZTQKZGADDQYPFNHBPNHCQGBGMZPZLUAVGDQVYRBFYYEIXQWVTHXGNW (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n### crib TUTOR (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n###\r\n### NOTE: IF THE INPUTS DO NOT CHANGE, JUST RUN A THE RESULT WILL BE PRINTED\r\n###\r\n###################################################################################################################################\r\n\r\n\r\n\r\n###################################################################################\r\n### DO NOT MODIFY\r\n###################################################################################\r\n\r\n#___________________________________________Enigma Machine set up (DO NOT MODIFY)___________________________________________________________________\r\n# Parameters: LHS: Left Hand side & RHS: Right HAnd Side.\r\n# RHS values are in 0 by default, that means the machine is configured to use 3 rotors and 1 reflector\r\n# If any RHS value is set in 0, the simulator will use only 3 rotors + 1 reflector\r\n# To switch to M4 enigma machine (4 rotors + 1 reflector), all RHS values must be filled\r\n# Rotors:\r\n# LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor\r\n# Reflector: Reflector\r\n# Ring settings:\r\n# LHS_ring_setting, Middle_ring1_setting, Middle_ring2_setting, RHS_ring_setting\r\n# Position settings:\r\n# LHS_position_setting, Middle_position1_setting, Middle_position2_setting, RHS_position_setting\r\n# letters: String to be encoded or decoded\r\n\r\ndef enigma_machine_set_up(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor = 0, Reflector = \"\", LHS_ring_setting = \"\", Middle_ring1_setting= \"\",\r\n Middle_ring2_setting=\"\", RHS_ring_setting = 0, LHS_position_setting = \"\", Middle_position1_setting = \"\",\r\n Middle_position2_setting = \"\", RHS_position_setting = 0, letters = \"\"):\r\n\r\n check = Checkers()\r\n check.no_rep_rotors(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor, Reflector)\r\n\r\n # Variables:\r\n # Message: List where the message will appear\r\n # RHS_Click: Represents the rotation for every keypress and it stars with 1\r\n # Flag: To know if there are 3 (True) or 4 (False) rotors\r\n RHS_click = 1\r\n Message = []\r\n Flag = False\r\n\r\n # Defining parameters for 3 rotors + 1 reflector\r\n if RHS_rotor == 0 or RHS_ring_setting == 0 or RHS_position_setting == 0:\r\n RHS_rotor = Middle_rotor2\r\n RHS_ring_setting = Middle_ring2_setting\r\n RHS_position_setting = Middle_position2_setting\r\n Flag = True\r\n\r\n # Declaring all objects with all positions (3 Rotors + 1 Reflector)\r\n # Referencing to [def __init__(self, name, ring_set=\"01\", position_set=0)]:\r\n rotor1 = rotor_from_name(RHS_rotor, RHS_ring_setting, RHS_position_setting)\r\n rotor25 = rotor_from_name(Middle_rotor2, Middle_ring2_setting, Middle_position2_setting)\r\n rotor3 = rotor_from_name(Middle_rotor1, Middle_ring1_setting, Middle_position1_setting)\r\n rotor4 = rotor_from_name(LHS_rotor, LHS_ring_setting, LHS_position_setting)\r\n reflector = rotor_from_name(Reflector)\r\n\r\n\r\n # Creating For loop to encrypt // decrypt the word(s)\r\n for i in range(len(letters)):\r\n\r\n # Swapping the letters according the plugboard_setup\r\n swap_init_letter = plugboard.encode(letters[i])\r\n\r\n # Referencing to method [encode_right_to_left(self, char, element_index = 0, tuple_rotation=0, init=True):]\r\n # Variables:\r\n # init: To know if it´s the first rotor\r\n # RHS_Click: Represents the rotation 
for every keypress\r\n rotor1.encode_right_to_left(swap_init_letter, 0, RHS_click)\r\n\r\n # Variables:\r\n # Middle_click1: To know if the notch passed in rotor 1 and \"True\" makes reference that the character has already passed\r\n # Middle_click2: To know if the notch passed in rotor 2 and \"False\" makes reference that the character is still at the same postion [0][0]\r\n # Last_click: To rotate the last rotor (3 rotors), 0 is only considering 4 rotors\r\n # Middle_click: Sum up previous variables to know if there was a double step process == 2 and rotate once the rotor 2\r\n Middle_click1 = rotor1.notches(True)\r\n\r\n # If 3 rotors were selected flag=True, then keep only one middle rotor instead of 2\r\n if Flag == True:\r\n\r\n Middle_click2 = rotor3.notches(False)\r\n Middle_click = Middle_click1 + Middle_click2\r\n if Middle_click >= 2:\r\n Middle_click = 1\r\n\r\n rotor3.encode_right_to_left(rotor1.transfer_character, rotor1.transfer_index, Middle_click, False)\r\n Last_click = Middle_click2\r\n\r\n else:\r\n\r\n Middle_click2 = rotor25.notches(False)\r\n Middle_click = Middle_click1 + Middle_click2\r\n if Middle_click >= 2:\r\n Middle_click = 1\r\n\r\n rotor25.encode_right_to_left(rotor1.transfer_character, rotor1.transfer_index, Middle_click, False)\r\n rotor3.encode_right_to_left(rotor25.transfer_character, rotor25.transfer_index, Middle_click2, False)\r\n Last_click = 0\r\n\r\n rotor4.encode_right_to_left(rotor3.transfer_character, rotor3.transfer_index, Last_click, False)\r\n\r\n reflector.encode_right_to_left(rotor4.transfer_character, rotor4.transfer_index, 0, False)\r\n\r\n # Referencing to method [def encode_left_to_right(self, char, element_index = 0, last_matrix = 0):]\r\n # Variables:\r\n # char: To know if it´s the first rotor\r\n # RHS_Click: Represents the rotation for every keypress\r\n rotor4.encode_left_to_right(reflector.transfer_character_reverse, reflector.transfer_index, rotor4.rot_elements)\r\n\r\n if Flag == True:\r\n\r\n rotor3.encode_left_to_right(rotor4.transfer_character, rotor4.transfer_index, rotor3.rot_elements)\r\n rotor1.encode_left_to_right(rotor3.transfer_character, rotor3.transfer_index, rotor1.rot_elements)\r\n\r\n else:\r\n\r\n rotor3.encode_left_to_right(rotor4.transfer_character, rotor4.transfer_index, rotor3.rot_elements)\r\n rotor25.encode_left_to_right(rotor3.transfer_character, rotor3.transfer_index, rotor25.rot_elements)\r\n rotor1.encode_left_to_right(rotor25.transfer_character, rotor25.transfer_index, rotor1.rot_elements)\r\n\r\n rotor1.encode_left_to_right(rotor1.transfer_character, rotor1.transfer_index)\r\n\r\n # Swapping the letters according the plugboard_list\r\n swap_end_letter = plugboard.encode(rotor1.transfer_character)\r\n\r\n Message.append(swap_end_letter)\r\n #print(Message)\r\n return Message\r\n\r\n\r\n###################################################################################\r\n### DO NOT MODIFY\r\n###################################################################################\r\n\r\n#___________________________________________Plugboard set up (DO NOT MODIFY)___________________________________________________________________\r\n# To create plugboard based on the input\r\ndef plugboard_set_up(plugboard_string):\r\n if plugboard_string == \"\":\r\n pass\r\n\r\n else:\r\n # Creating a list based in the input string\r\n plugboard_list = plugboard_string.rsplit(\" \")\r\n\r\n # Plugboard Set up\r\n for i in range(len(plugboard_list)):\r\n 
plugboard.add(PlugLead(plugboard_list[i]))\r\n\r\n\r\n#___________________________________________decrypting (DO NOT MODIFY)___________________________________________________________________\r\ndef decrypting(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor, Reflector_list, LHS_ring_setting, Middle_ring1_setting,\r\n Middle_ring2_setting, RHS_ring_setting, LHS_position_setting, Middle_position1_setting,\r\n Middle_position2_setting, RHS_position_setting, encoded_message, crib, plug_list):\r\n\r\n # Io iterate based on plugboad combinations\r\n for i in range(len(plug_list)):\r\n\r\n # Checking if \"?\" is in the plugboard_list\r\n if plug_list[i] == \"?\":\r\n\r\n # Inserting a character in the alphabet which is not in the plugboard_list\r\n for j in range(0, 26):\r\n\r\n # It begins with A\r\n char = chr(j+65)\r\n if char not in plug_list:\r\n\r\n # Replacing \"?\" values with a char (limited once)\r\n plug_list = plug_list.replace(plug_list[i], char, 1)\r\n\r\n # Recursive to introduce remaining characters in \"?\"\r\n decrypting(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor, Reflector_list, LHS_ring_setting, Middle_ring1_setting,\r\n Middle_ring2_setting, RHS_ring_setting, LHS_position_setting, Middle_position1_setting,\r\n Middle_position2_setting, RHS_position_setting, encoded_message, crib, plug_list)\r\n\r\n\r\n if \"?\" not in plug_list:\r\n\r\n # Adding the plugboard list\r\n plugboard_set_up(plug_list)\r\n\r\n # Running enigma machine\r\n # Saving the result in a list\r\n encoded_message_proposal = enigma_machine_set_up(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor, Reflector_list,\r\n LHS_ring_setting, Middle_ring1_setting, Middle_ring2_setting, RHS_ring_setting,\r\n LHS_position_setting, Middle_position1_setting, Middle_position2_setting, RHS_position_setting,\r\n encoded_message)\r\n # print(encoded_message_proposal)\r\n\r\n # Turning the encoded_message_proposal (list) into string to be compared with crib\r\n encoded_message_proposal_string = \"\".join(encoded_message_proposal)\r\n if crib in encoded_message_proposal_string:\r\n print(f\"The plugboard configuration is: {plug_list}\")\r\n print(f\"The original message is: {encoded_message_proposal_string}\")\r\n # return Middle_position2_setting, Middle_position1_setting, LHS_position_setting, encoded_message_proposal_string\r\n\r\n # Deleting current plugboard list\r\n plugboard.delete_all()\r\n\r\n\r\n\r\n#___________________________________________Main___________________________________________________________________\r\n# To measure the time in secs\r\nstart = time.time()\r\nplugboard = Plugboard()\r\n# Insert here the plugboard_____________(DO NOT MODIFY)\r\nplug_list = \"WP RJ A? VF I? 
HN CG BS\"\r\n\r\n#Insert here the encoded message_____________(DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\nencoded_message = \"SDNTVTPHRBNWTLMZTQKZGADDQYPFNHBPNHCQGBGMZPZLUAVGDQVYRBFYYEIXQWVTHXGNW\"\r\n\r\n# Do no change\r\nReflector_list = \"A\"\r\n\r\n# Insert here the crib _____________(DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\ncrib = \"TUTOR\"\r\ncrib = crib.upper()\r\n\r\n#Insert here enigma machine settings_____________(DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n\r\n#Rotors: (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\n# If the machine is M3 (3 rotors + 1 Reflector), set all RHS values in 0\r\n# \"V\", \"III\", \"IV\", 0, Reflector_list=A, \"24\", \"12\", \"10\", 0, \"S\", \"W\", \"U\", 0\r\nLHS_rotor = \"V\"\r\nMiddle_rotor1 = \"III\"\r\nMiddle_rotor2 = \"IV\"\r\nRHS_rotor = 0\r\n\r\n# Ring settings: (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\nLHS_ring_setting = \"24\"\r\nMiddle_ring1_setting = \"12\"\r\nMiddle_ring2_setting = \"10\"\r\nRHS_ring_setting = 0\r\n\r\n# Position settings: (DO NOT MODIFY ONLY IF THE SET UP IS DIFFERENT)\r\nLHS_position_setting = \"S\"\r\nMiddle_position1_setting = \"W\"\r\nMiddle_position2_setting = \"U\"\r\nRHS_position_setting = 0\r\n\r\n# Function to iterate and find the answers\r\ndecrypting(LHS_rotor, Middle_rotor1, Middle_rotor2, RHS_rotor, Reflector_list, LHS_ring_setting, Middle_ring1_setting,\r\n Middle_ring2_setting, RHS_ring_setting, LHS_position_setting, Middle_position1_setting,\r\n Middle_position2_setting, RHS_position_setting, encoded_message, crib, plug_list)\r\nend = time.time()\r\nprint(f\"The execution time was: {end-start} secs\")","repo_name":"Alex-Sanchez-coder/Virtual_Enigma_Machine","sub_path":"Code_Breaking_Code 4.py","file_name":"Code_Breaking_Code 4.py","file_ext":"py","file_size_in_byte":13044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2740257867","text":"import pytest\nimport io\n\nfrom mathy_pydoc.document import Section\n\n\n@pytest.fixture\ndef section():\n return Section(None)\n\n\ndef test_preprocess_section(section):\n section.depth = 1\n section.title = \"My Header\"\n section.content = \"content\"\n section.identifier = \"section-identifier\"\n\n html_header_buffer = io.StringIO()\n section.render(html_header_buffer)\n assert (\n html_header_buffer.getvalue()\n == '

My Header

\\n\\ncontent\\n'\n )\n\n section.header_type = \"markdown\"\n markdown_header_buffer = io.StringIO()\n section.render(markdown_header_buffer)\n assert markdown_header_buffer.getvalue() == \"# My Header\\ncontent\\n\"\n\n with pytest.raises(ValueError):\n section.header_type = \"invalid\"\n section.render(markdown_header_buffer)\n","repo_name":"mathy/mathy_pydoc","sub_path":"tests/test_markdown_headers.py","file_name":"test_markdown_headers.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25168692479","text":"#!/usr/bin/python3\n\"\"\"Documentation for a rectangle class\"\"\"\n\n\nclass Rectangle:\n \"\"\"Class for a Rectangle shape\"\"\"\n\n print_symbol = \"#\"\n number_of_instances = 0\n\n def __init__(self, width=0, height=0):\n \"\"\"Initialize a rectangle instance.\n\n Args:\n width (int, optional): The width of the rectangle.\n height (int, optional): The height of the rectangle.\n\n Raises:\n TypeError: If width or height is not an integer.\n ValueError: If width or height is negative.\n \"\"\"\n\n if type(width) is not int:\n raise TypeError(\"width must be an integer\")\n if width < 0:\n raise ValueError(\"width must be >= 0\")\n self.__width = width\n\n if type(height) is not int:\n raise TypeError(\"height must be an integer\")\n if height < 0:\n raise ValueError(\"height must be >= 0\")\n self.__height = height\n Rectangle.number_of_instances += 1\n\n @property\n def width(self):\n \"\"\"Get the width of the rectangle.\n\n Returns:\n int: The width of the rectangle instance.\n \"\"\"\n\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\"Set the width of the rectangle.\n\n Args:\n value (int): The width to set.\n\n Raises:\n TypeError: If value is not an integer.\n ValueError: If value is negative.\n \"\"\"\n\n if type(value) is not int:\n raise TypeError(\"width must be an integer\")\n if value < 0:\n raise ValueError(\"width must be >= 0\")\n self.__width = value\n\n @property\n def height(self):\n \"\"\"Get the height of the rectangle.\n\n Returns:\n int: The height of the rectangle instance.\n \"\"\"\n\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\"Set the height of the rectangle.\n\n Args:\n value (int): The height to set.\n\n Raises:\n TypeError: If value is not an integer.\n ValueError: If value is negative.\n \"\"\"\n\n if type(value) is not int:\n raise TypeError(\"height must be an integer\")\n if value < 0:\n raise ValueError(\"height must be >= 0\")\n self.__height = value\n\n def area(self):\n \"\"\"Returns the area of the instance\"\"\"\n\n return self.__height * self.__width\n\n def perimeter(self):\n \"\"\"Calculate and return the perimeter of the rectangle.\n\n Returns:\n int: The perimeter of the rectangle.\n \"\"\"\n if self.__height == 0 or self.__width == 0:\n return 0\n return 2 * self.__height + 2 * self.__width\n\n def __str__(self):\n \"\"\"Return a string representation of the rectangle.\n\n Returns:\n str: A string representing the rectangle using '#'.\n \"\"\"\n\n if self.__width == 0 or self.__height == 0:\n return \"\"\n rectangle = []\n for i in range(self.__height):\n for j in range(self.__width):\n rectangle += [str(self.print_symbol)]\n if i is not self.__height - 1:\n rectangle += ['\\n']\n return ''.join(rectangle)\n\n def __repr__(self):\n \"\"\"Return a string representation that can be used with eval().\n\n Returns:\n str: A string representing the constructor of the rectangle.\n 
\"\"\"\n\n eva_str = []\n eva_str += [\"Rectangle(\"]\n eva_str = [str(self.__width) + \", \" + str(self.__height) + ')']\n return ''.join(eva_str)\n\n def __del__(self):\n \"\"\"Functionality for when an instance is deleted\"\"\"\n\n print(\"Bye rectangle...\")\n Rectangle.number_of_instances -= 1\n\n @staticmethod\n def bigger_or_equal(rect_1, rect_2):\n \"\"\"Compares two rectangle areas\n Args:\n rect_1 (Rectangle): the first rectangular object\n rect_2 (Rectangle): the second rectangular object\n Raises:\n TypeError: if either rectangle are not instances of the\n Rectangle class\n Returns:\n rect_1 if rect_1's area is equal or greater than rect_2's\n rect_2 if rect_2's area is greater than rect_1's\n \"\"\"\n\n if type(rect_1) is not Rectangle:\n raise TypeError(\"rect_1 must be an instance of Rectangle\")\n if type(rect_2) is not Rectangle:\n raise TypeError(\"rect_2 must be an instance of Rectangle\")\n area1 = rect_1.__width * rect_1.__height\n area2 = rect_2.__width * rect_2.__height\n\n if area1 == area2:\n return rect_1\n elif area1 > area2:\n return rect_1\n else:\n return rect_2\n\n @classmethod\n def square(cls, size=0):\n \"\"\"Returns a Rectangle instance with same width and height\n Args:\n size (int, optional): the size of the square instance\n \"\"\"\n return cls(size, size)\n","repo_name":"MoedCode/alx-higher_level_programming","sub_path":"0x08-python-more_classes/9-rectangle.py","file_name":"9-rectangle.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12468693974","text":"# import socket\n# s_cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# s_cli.connect(('127.0.0.1', 8080))\n# 'Send a data string to the socket. For the optional flags'\n# '''\n# 当send一个空字符时,虽然客服端可以发送出去,但是服务端的receive没有收到字符\n# '''\n# while True:\n# char = input('>>:'.strip())\n# if not char:\n# continue\n# num = s_cli.send(char.encode('utf-8'))\n# print('num:', num)\n# data = s_cli.recv(1024)\n# print('data', data)\n# s_cli.close()\nimport socket\nphone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nphone.connect(('127.0.0.1', 6000))\nwhile True: # 可以循环接收发送消息\n char = input('>>').strip()\n if not char: # 防止发送空字符\n continue\n phone.send(char.encode('utf-8'))\n data = phone.recv(1024)\n print(data.decode('utf-8'))\nphone.close()","repo_name":"tsytsy/python-study","sub_path":"python_full_statck/socket_study/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17799752119","text":"import eval\n\nclass AlphaBeta():\n\n def __init__(self, color):\n self._mycolor = color\n self._nodes = 0\n self._maxscore = 100000\n self._eval = eval.Eval(color)\n\n def AlphaBetaCoup(self, b, depth, turn):\n \"\"\" Premier niveau de Minmax avec Alpha Beta \"\"\"\n if b.is_game_over() or depth == 0:\n return None\n\n v, coup = None, None\n self._nodes = 0\n for m in b.generate_legal_moves():\n # éviter les bordures prendant premiers tours du jeu.\n if ( turn < 7):\n x,y = b.unflatten(m)\n if ( x < 2 or y < 2 or x >= b._BOARDSIZE-2 or y >= b._BOARDSIZE-2):\n continue\n elif ( turn < 15):\n x,y = b.unflatten(m)\n if ( x < 1 or y < 1 or x >= b._BOARDSIZE-1 or y >= b._BOARDSIZE-1):\n continue\n\n b.push(m)\n\n ret = self.AlphaBeta(b, depth - 1, -self._maxscore, self._maxscore)\n\n if v is None or ret > v:\n coup = m\n v = ret\n b.pop()\n self._nodes += 1\n return (coup, v)\n\n def 
AlphaBeta(self, b, depth, alpha, beta):\n self._nodes += 1\n \n \"\"\" MinMax avec Alpha beta pruning\"\"\"\n if b.is_game_over():\n res = b.result()\n if res == \"1-0\":\n r = - ((-1)**self._mycolor) * self._maxscore\n elif res == \"0-1\":\n r = ((-1)**self._mycolor) * self._maxscore\n else:\n r = 0\n return r\n\n if depth == 0:\n e = self._eval.evaluate(b)\n return e\n\n v = None\n for move in b.generate_legal_moves():\n b.push(move)\n ret = self.BetaAlpha(b, depth-1, alpha, beta)\n b.pop()\n if v is None or ret < v:\n v = ret\n if beta > v:\n beta = v\n\n if alpha >= beta:\n return alpha\n\n return beta\n\n def BetaAlpha(self, b, depth, alpha, beta):\n self._nodes += 1\n \"\"\" MaxMin avec Alpha beta pruning\"\"\"\n if b.is_game_over():\n res = b.result()\n if res == \"1-0\":\n r = - ((-1)**self._mycolor) * self._maxscore\n elif res == \"0-1\":\n r = ((-1)**self._mycolor) * self._maxscore\n else:\n r = 0\n return r\n\n if depth == 0:\n e = self._eval.evaluate(b)\n return e\n\n v = None\n for m in b.generate_legal_moves():\n b.push(m)\n ret = self.AlphaBeta(b, depth - 1, alpha, beta)\n b.pop()\n if v is None or ret > v:\n v = ret\n if alpha < v:\n alpha = v\n if alpha >= beta:\n return beta\n return alpha\n\n","repo_name":"aliissaoui/Developping-Go-Agents-in-Python","sub_path":"GO/src/alphaBeta.py","file_name":"alphaBeta.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23931020874","text":"import time\nimport usb_midi\nimport adafruit_midi\nimport board\nimport digitalio\n\nfrom adafruit_midi.note_on import NoteOn\nfrom adafruit_midi.note_off import NoteOff\n\n\nclass BooleanObserver:\n def __init__(self):\n self.m_value = False\n self.m_firstSet = True\n\n def SetValueWithHasChangedValue(self, newValue):\n previous = self.m_value\n self.m_value = newValue\n\n if self.m_firstSet:\n self.m_firstSet = False\n return True\n else:\n return previous != newValue\n\n def GetPreviousValue(self):\n return not self.m_value\n\n def GetCurrentValue(self):\n return self.m_value\n\nmidi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)\n\n\nprint(\"Default output MIDI channel:\", midi.out_channel + 1)\n\nbutton_pins = [digitalio.DigitalInOut(getattr(board, f'D{i}')) for i in range(11)]\nbutton_state = [BooleanObserver() for i in range(11)]\nfor pin in button_pins:\n pin.direction = digitalio.Direction.INPUT\n pin.pull = digitalio.Pull.UP\n\nwhile True:\n time.sleep(0.001)\n\n for i, pin in enumerate(button_pins):\n hasPinChanged = button_state[i].SetValueWithHasChangedValue(pin.value)\n if hasPinChanged:\n# print(\"Has pin Changed \"+ str(i) +\" \"+ str(button_state[i].GetCurrentValue()))\n if not pin.value:\n midi.send(NoteOn(i+60, 120))\n else:\n midi.send(NoteOff(i+60, 120))\n\n","repo_name":"EloiStree/2023_05_01_SeeedXiaoRP2040ToMidi","sub_path":"V0/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9220968305","text":"def draw_bounding_box(img, left, top, right, bottom, class_color, label, confidence):\n overlay = img.copy()\n cv2.rectangle(img, (left, top), (right, bottom), color=class_color, thickness=-1)\n alpha = .8\n img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)\n cv2.rectangle(img, (left, top), (right, bottom), color=class_color, thickness=1)\n cv2.putText(img, \"{} - {:.1f}\".format(label, float(confidence)), (left, top - 5), 
cv2.FONT_HERSHEY_DUPLEX, 0.4, class_color, thickness=1)\n #cv2_imshow(image_new)\n return img\n\n\ndef getPrediction(img, width, height):\n darknet_image = make_image(width, height, 3)\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_resized = cv2.resize(img_rgb, (width, height), interpolation=cv2.INTER_LINEAR)\n\n img_height, img_width, _ = img.shape\n width_ratio = img_width/width\n height_ratio = img_height/height\n\n copy_image_from_bytes(darknet_image, img_resized.tobytes())\n detections = detect_image(network, class_names, darknet_image)\n free_image(darknet_image)\n return detections, width_ratio, height_ratio","repo_name":"sashanktalakola/licence-plate-detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3718046148","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom geometry_msgs.msg import TwistWithCovarianceStamped\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Point, Pose, Quaternion, Vector3\nfrom gazebo_msgs.msg import ModelStates\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom threading import Condition\n\nsend_another_pred_pose = False\nposition = [(0.0, 0.0, 0.0)]\np = PoseWithCovarianceStamped()\n\n\ndef __yaw_to_quat(yaw):\n \"\"\"\n Computing corresponding quaternion q to angle yaw [rad]\n :param yaw\n :return: q\n \"\"\"\n q = Quaternion(axis=[0, 0, 1], angle=yaw)\n return q.elements\n\n\ndef callback_Ground_Truth(data):\n global last_time, p\n idx = data.name.index(\"mobile_base\")\n\n bot_x = data.pose[idx].position.x\n bot_y = data.pose[idx].position.y\n orien = data.pose[idx].orientation\n _, _, yaw = R.from_quat([orien.x, orien.y, orien.z, orien.w]).as_rotvec()\n position[0] = (bot_x, bot_y, yaw)\n\n p.header.stamp = rospy.Time.now() # + rospy.Duration.from_sec(1.)\n\n # Ground_truth.append([x, y, theta, rospy.get_time() - initial_time])\n p.header.frame_id = 'map'\n p.pose.pose.position.x = bot_x\n p.pose.pose.position.y = bot_y\n\n p.pose.pose.orientation = orien\n\n\n# def __pub_position(x, y, theta):\n# \"\"\"\n# Publishing new initial position (x, y, theta) --> for localization\n# :param x x-position of the robot\n# :param y y-position of the robot\n# :param theta theta-position of the robot\n# \"\"\"\n# global Ground_truth\n\n\n# # add noise to the ground truth\n# mu, sigma = 0, 0.15 # mean and standard deviation\n# noise = np.random.normal(mu, sigma, 3)\n# p = PoseWithCovarianceStamped()\n# p.header.stamp = rospy.Time.now() # + rospy.Duration.from_sec(1.)\n\n# # Ground_truth.append([x, y, theta, rospy.get_time() - initial_time])\n# p.header.frame_id = 'map'\n# p.pose.pose.position.x = x\n# p.pose.pose.position.y = y\n# quaternion = __yaw_to_quat(theta)\n\n# p.pose.pose.orientation.w = quaternion[0]\n# p.pose.pose.orientation.x = quaternion[1]\n# p.pose.pose.orientation.y = quaternion[2]\n# p.pose.pose.orientation.z = quaternion[3]\n# print(f\"pose = {x:.3f}, {y:.3f}, {np.degrees(theta):.1f}\")\n# __pose_pub.publish(p)\n\n\ndef pose_update_callback(pose_update_msg):\n # Extract the pose and pose error from the message.\n position = pose_update_msg.pose.pose.position\n x, y, z = position.x, position.y, position.z\n orient = pose_update_msg.pose.pose.orientation\n roll, pitch, yaw = R.from_quat(\n [orient.x, orient.y, orient.z, orient.w]).as_rotvec()\n\n covariance = 
np.array(pose_update_msg.pose.covariance)\n output_str = '[ FAKE TARGET ] Received pose update from observer @ ' \\\n + f'time {pose_update_msg.header.stamp}:\\n' \\\n + f'- pose [xyz in m] ({x:.2f}, {y:.2f}, {z:.2f})\\n' \\\n + f'- orientation [rpy in degs] ({np.degrees(roll):.1f},' \\\n + f'{np.degrees(pitch):.1f}, {np.degrees(yaw):.1f})\\n' \\\n + f'- covariance:\\n{covariance.reshape(6, 6)}'\n rospy.loginfo(output_str)\n\n # Allow the main function to send another predicted pose to the observer.\n global send_another_pred_pose\n send_another_pred_pose = True\n\n\n# end def\n\n\ndef main():\n # Set up the ROS node.\n rospy.init_node('test_fake_target_node', log_level=rospy.DEBUG)\n rospy.Subscriber('/gazebo/model_states', ModelStates, callback_Ground_Truth)\n __pose_pub = rospy.Publisher('/gazebo_pose', PoseWithCovarianceStamped, queue_size=1)\n\n marker_position_list = list()\n marker_position_list.append((2.5, 1.3))\n marker_position_list.append((7.6, 5.2))\n marker_position_list.append((-2.5, -5.2))\n pred_pose_topic = '/target/target_future_pose'\n pose_update_topic = '/observer/target_pose_meas'\n\n pred_pose_pub = rospy.Publisher(pred_pose_topic, PoseWithCovarianceStamped,\n queue_size=5)\n pose_update_sub = rospy.Subscriber(pose_update_topic,\n PoseWithCovarianceStamped, pose_update_callback)\n PubTwist = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=1)\n\n cmd = Twist()\n global send_another_pred_pose, p\n for ri in range(8):\n cmd.linear.x = 0.2\n cmd.angular.z = 0\n\n for _ in range(1000):\n PubTwist.publish(cmd)\n rospy.sleep(0.01)\n global p\n rot = R.from_rotvec([0, 0, np.radians(ri * 25.)]).as_matrix()\n cov = np.eye(6)\n cov[[0, 1], [0, 1]] = [1.5, 3]\n cov[:3, :3] = np.matmul(rot, np.matmul(cov[:3, :3], rot.T))\n p.pose.covariance = cov.flatten().tolist()\n\n for _ in range(5):\n # PubTwist.publish(cmd)\n pred_pose_pub.publish(p)\n rospy.sleep(0.1)\n\n # Wait to send another predicted pose to the observer.\n send_another_pred_pose = False\n rospy.loginfo(\"[ FAKE TARGET ] Waiting for a pose update.\")\n while send_another_pred_pose == False:\n rospy.sleep(0.5)\n rospy.loginfo(\"[ FAKE TARGET ] Sleeping for a few seconds.\")\n\n # end for\n rospy.loginfo('Test complete!')\n rospy.spin()\n\n\n# end def\n\nif __name__ == \"__main__\":\n try:\n main()\n\n except:\n pass","repo_name":"balloon61/Multi-Robot-Navigation","sub_path":"turtlebot_gazebo/src/move_along_traj.py","file_name":"move_along_traj.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11523126699","text":"import numpy as np\nimport pandas as pd\nfrom woodwork.column_schema import ColumnSchema\nfrom woodwork.logical_types import Boolean, Datetime, Double\n\nfrom featuretools.primitives.base import TransformPrimitive\n\n\nclass CumulativeTimeSinceLastTrue(TransformPrimitive):\n \"\"\"Determines the time (in seconds) since the last boolean was `True`\n given a datetime index column and boolean column\n\n Examples:\n >>> from datetime import datetime\n >>> cumulative_time_since_last_true = CumulativeTimeSinceLastTrue()\n >>> booleans = [False, True, False, True]\n >>> datetimes = [\n ... datetime(2011, 4, 9, 10, 30, 0),\n ... datetime(2011, 4, 9, 10, 30, 10),\n ... datetime(2011, 4, 9, 10, 30, 15),\n ... datetime(2011, 4, 9, 10, 30, 30)\n ... 
]\n >>> cumulative_time_since_last_true(datetimes, booleans).tolist()\n [nan, 0.0, 5.0, 0.0]\n \"\"\"\n\n name = \"cumulative_time_since_last_true\"\n input_types = [\n ColumnSchema(logical_type=Datetime, semantic_tags={\"time_index\"}),\n ColumnSchema(logical_type=Boolean),\n ]\n return_type = ColumnSchema(logical_type=Double, semantic_tags={\"numeric\"})\n\n def get_function(self):\n def time_since_previous_true(datetime_col, bool_col):\n if bool_col.dropna().empty:\n return pd.Series([np.nan] * len(bool_col))\n df = pd.DataFrame(\n {\n \"datetime\": datetime_col,\n \"last_true_datetime\": datetime_col,\n \"bool\": bool_col,\n },\n )\n not_false_indices = df[\"bool\"]\n df.loc[~not_false_indices, \"last_true_datetime\"] = np.nan\n df[\"last_true_datetime\"] = df[\"last_true_datetime\"].fillna(method=\"ffill\")\n total_seconds = (\n df[\"datetime\"] - df[\"last_true_datetime\"]\n ).dt.total_seconds()\n return pd.Series(total_seconds)\n\n return time_since_previous_true\n","repo_name":"alteryx/featuretools","sub_path":"featuretools/primitives/standard/transform/cumulative/cumulative_time_since_last_true.py","file_name":"cumulative_time_since_last_true.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":6873,"dataset":"github-code","pt":"32"} +{"seq_id":"71222207131","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport re\n\ndomain_regex = r'\\b(http|https):\\/\\/(www.|ww2.)?([a-zA-Z0-9-]+(\\.[a-zA-Z0-9-]+)+)\\b'\ndomains = []\nn = int(input())\nfor i in range(n):\n line = input()\n matches = re.findall(domain_regex, line)\n\n if matches:\n for match in matches:\n if match[2] not in domains:\n domains.append(match[2])\n\nfor i, e in enumerate(sorted(domains)):\n print(e, end=';' if i < len(domains) - 1 else '')\n","repo_name":"yps1978/hackerrank","sub_path":"DetecttheDomainName.py","file_name":"DetecttheDomainName.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70752889691","text":"import random\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import font_manager\n\nmy_font = font_manager.FontProperties(fname='/System/Library/Fonts/PingFang.ttc', size=10)\n\nx = range(0, 120)\ny1 = [random.randint(20, 25) for i in range(120)]\ny2 = [random.randint(20, 25) for k in range(120)]\nx_lable = [\"{}:{}\".format(10 + int(i / 60), (i % 60 if i % 60 >= 10 else '0' + str(i % 60))) for i in x]\n# 大小\nplt.figure(figsize=(30, 8), dpi=100)\n# 设置x轴上显示的字符(可以把数字转换成字符串)\nplt.xticks(list(x)[::2], x_lable[::2], rotation=0, fontproperties=my_font)\n# 折线图\nplt.plot(x, y1, label='北京', color='r', linestyle='-.')\n# 散点图\nplt.scatter(x, y2, label='阳泉', color='y')\n# 条形图\nplt.bar(x, y1, color='g')\n# 横着的条形图\nplt.barh(x, y1, color='g', height=0.8)\nplt.xlabel('时间', fontproperties=my_font)\nplt.ylabel('温度 单位(℃)', fontproperties=my_font)\nplt.title('10点到12点每分钟的气温变化情况', fontproperties=my_font)\n# 网格\nplt.grid(alpha=0.4, color='g', linestyle=':')\n# 图例\nplt.legend(prop=my_font, loc='upper left')\n# plt.show()\nplt.savefig('./t2.png')\n","repo_name":"houxiubin/Data_Analysis","sub_path":"demo/2_matplotlib_test.py","file_name":"2_matplotlib_test.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33601487486","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 25 11:58:55 2019\n\n@author: nsde\n\"\"\"\n\n#%%\nimport 
pandas as pd\nfrom ..base import Dataset\nfrom ..utility import convert_to_numeric\n\n#%%\nclass forest_fire(Dataset):\n def _create_dataframe(self):\n for f in self.files:\n if 'forestfires.csv' in f:\n df = pd.read_csv(f)\n self.dataframe = df\n \n @property\n def data(self):\n data = self.dataframe.values[:,:-1]\n data[:,2] = convert_to_numeric(data[:,2])\n data[:,3] = convert_to_numeric(data[:,3])\n return data.astype('float32')\n \n \n","repo_name":"SkafteNicki/py_uci","sub_path":"py_uci/datasets/forest_fire.py","file_name":"forest_fire.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"72308389851","text":"# H = 층수, W = 방 개수, N = 몇번째 손님\n# 1층 elevator가 정문이라 칠때\n# 두 방과의 거리는 항상 1\n# YXX or YYXX 는 Y = 층수, X = 엘베로부터 시작되는 번호\nT = int(input())\n\nfor _ in range(T):\n H, W, N = map(int, input().split())\n a = N%H\n b = N//H + 1\n if a <= 0:\n a = H\n b -= 1\n print(a*100+b)\n","repo_name":"hyunsu4020/baekjoon-Algorithm-python-","sub_path":"10250.py","file_name":"10250.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39807521925","text":"import sys\nimport os\nimport re\nimport json\n# import numpy as np\nfrom warnings import warn\n\n\ndef readcif(filename=None, debug=False):\n \"\"\"\n Open a Crystallographic Information File (*.cif) file and store all entries in a key:value dictionary\n Looped values are stored as lists under a single key entry\n All values are stored as strings\n E.G.\n crys=readcif('somefile.cif')\n crys['_cell_length_a'] = '2.835(2)'\n crys[key] = value\n available keys are give by crys.keys()\n To debug the file with outputted messages, use:\n cif = readcif(file, debug=True)\n Some useful standard CIF keywords:\n _cell_length_a\n _cell_length_b\n _cell_length_c\n _cell_angle_alpha\n _cell_angle_beta\n _cell_angle_gamma\n _space_group_symop_operation_xyz\n _atom_site_label\n _atom_site_type_symbol\n _atom_site_occupancy\n _atom_site_U_iso_or_equiv\n _atom_site_fract_x\n _atom_site_fract_y\n _atom_site_fract_z\n \"\"\"\n\n # Get file name\n filename = os.path.abspath(os.path.expanduser(filename))\n (dirName, filetitle) = os.path.split(filename)\n (fname, Ext) = os.path.splitext(filetitle)\n\n # Open file\n file = open(filename)\n text = file.read()\n file.close()\n\n # Remove blank lines\n while \"\\n\\n\" in text:\n text = text.replace(\"\\n\\n\", \"\\n\")\n lines = text.splitlines()\n\n cifvals = {'Filename': filename, 'Directory': dirName, 'FileTitle': fname}\n\n # Read file line by line, converting the cif file values to a python dict\n n = 0\n while n < len(lines):\n # Convert line to columns\n vals = lines[n].strip().split()\n\n # skip empty lines\n if len(vals) == 0:\n n += 1\n continue\n\n # Search for stored value lines\n if vals[0][0] == '_':\n if len(vals) == 1:\n # Record next lines that are not keys as string\n if lines[n + 1][0] == ';':\n n += 1\n strarg = []\n while n + 1 < len(lines) and (len(lines[n + 1]) == 0 or lines[n + 1][0].strip() not in ['_', ';']):\n strarg += [lines[n + 1].strip('\\'\"')]\n n += 1\n cifvals[vals[0]] = '\\n'.join(strarg)\n chk = 'a'\n else:\n cifvals[vals[0]] = ' '.join(vals[1:]).strip(' \\'\"\\n')\n chk = 'b'\n n += 1\n if debug:\n print('%5d %s %s = %s' % (n, chk, vals[0], cifvals[vals[0]]))\n continue\n\n # Search for loops\n elif vals[0] == 'loop_':\n n += 1\n loopvals = []\n # Step 1: Assign loop columns\n # looped 
columns are given by \"_column_name\"\n while n < len(lines) and len(lines[n].strip()) > 0 and lines[n].strip()[0] == '_':\n loopvals += [lines[n].split()[0]]\n cifvals[loopvals[-1]] = []\n n += 1\n\n # Step 2: Assign data to columns\n # loops until line has less segments than columns\n while n < len(lines):\n # cols = lines[n].split()\n # this fixes error on symmetry arguments having spaces\n # this will only work if the last argument in the loop is split by spaces (in quotes)\n # cols = cols[:len(loopvals) - 1] + [''.join(cols[len(loopvals) - 1:])]\n cols = [col for col in re.split(\n \"( |\\\\\\\".*?\\\\\\\"|'.*?')\", lines[n]) if col.strip()]\n if len(cols) != len(loopvals):\n break\n if cols[0][0] == '_' or cols[0] == 'loop_':\n break # catches error if loop is only 1 iteration\n if cols[0][0] == '#':\n n += 1\n continue # catches comented out lines\n if len(loopvals) == 1:\n cifvals[loopvals[0]] += [lines[n].strip(' \\\"\\'\\n')]\n else:\n for c, ll in enumerate(loopvals):\n cifvals[ll] += [cols[c]]\n n += 1\n\n if debug:\n for ll in loopvals:\n print('%5d L %s = %s' % (n, ll, str(cifvals[ll])))\n continue\n\n else:\n # Skip anything else\n if debug:\n print('%5d SKIPPED: %s' % (n, lines[n]))\n n += 1\n\n # Replace '.' in keys - fix bug from isodistort cif files\n # e.g. '_space_group_symop_magn_operation.xyz'\n current_keys = list(cifvals.keys())\n for key in current_keys:\n if '.' in key:\n newkey = key.replace('.', '_')\n cifvals[newkey] = cifvals[key]\n return cifvals\n # load_json = json.dumps(cifvals, indent=4)\n\n # return json.dumps(cifvals, indent=4)\n\n # print(load_json)\n # cif_json =json.loads(load_json)\n # print(cif_json [\"_publ_section_title\"])\n\n # print(load_json)\n\n # with open('json_data.json', 'w') as outfile:\n # outfile.write(load_json)\n\n\n# readcif(r\"C:\\Users\\chandran.narendraraj\\Desktop\\dev\\calc_django\\calc\\media\\cif_database\\3000000.cif\", debug = False)\n","repo_name":"narendraraj/calc_django","sub_path":"calc/d_spacing/readcif.py","file_name":"readcif.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15328150524","text":"from typing import ClassVar\n\nfrom pydantic import AnyHttpUrl, SecretStr\nfrom pydantic_settings import BaseSettings\n\n\nclass Settings(BaseSettings):\n bot_token: SecretStr\n redis_host: str = \"localhost\"\n sentry_url: AnyHttpUrl | None = None\n sudoers: ClassVar[list[int]] = [918317361]\n logs_channel: int | None = None\n\n class Config:\n env_file = \"data/config.env\"\n env_file_encoding = \"utf-8\"\n\n\nconfig = Settings() # type: ignore[arg-type]\n","repo_name":"HitaloM/Gojira","sub_path":"gojira/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"21608819109","text":"\"\"\"Unit testing of the Library Book view\"\"\"\nfrom django.test import TestCase, tag\nfrom django.urls import reverse\nfrom BookClub.models import User, Book\n\n\n@tag(\"views\", \"books\", \"library_books\")\nclass LibraryBooksViewTestCase(TestCase):\n \"\"\"Tests of the Library Books view.\"\"\"\n\n fixtures = [\n 'BookClub/tests/fixtures/default_users.json',\n 'BookClub/tests/fixtures/default_books.json',\n ]\n\n def setUp(self):\n self.url = reverse('library_books')\n self.user = User.objects.get(username=\"johndoe\")\n\n def test_books_url(self):\n self.assertEqual(self.url, '/library/books/')\n\n def 
test_get_dashboard_not_logged_in(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'library/library_books.html')\n\n def test_get_dashboard_logged_in(self):\n self.client.login(username=self.user.username, password=\"Password123\")\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'library/library_books.html')\n\n def test_no_books(self):\n Book.objects.all().delete()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'library/library_books.html')\n books = list(response.context['books'])\n self.assertEqual(len(books), 0)\n self.assertContains(response, \"There are no books matching this search.\")\n\n def test_books_show(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'library/library_books.html')\n books = list(response.context['books'])\n self.assertEqual(len(books), 4)\n\n def test_book_details_show(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'library/library_books.html')\n self.assertContains(response, \"Classical Mythology\")\n self.assertContains(response, \"Mark P. O. Morford\")\n self.assertContains(response, \"Oxford University Press\")\n self.assertContains(response, \"http://images.amazon.com/images/P/0195153448.01.MZZZZZZZ.jpg\")","repo_name":"amir-rahim/BookClubSocialNetwork","sub_path":"BookClub/tests/views/book_views/test_library_books_view.py","file_name":"test_library_books_view.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"6137937782","text":"import sys\nimport math\n\nclass Cell:\n def __init__(self, richness, neighbors):\n self.richness = richness\n self.neighbors = neighbors\n\nclass Tree():\n growcost = -1\n seedtargets = -1\n def __init__(self, index, size, dormant, bonus):\n self.index = index\n self.size = size\n self.dormant = dormant\n self.bonus = bonus\n\nclass Game:\n def __init__(self):\n self.day = 0\n self.nutrients = 0\n self.sundirection = 0\n self.my_sun = 0\n self.my_score = 0\n self.opponents_sun = 0\n self.opponent_score = 0\n self.opponent_is_waiting = 0\n self.cells = []\n self.mytrees = []\n self.optrees = []\n self.alltreesindex = []\n\ndef CountTrees(s,trees):\n i = 0\n for tree in trees:\n if tree.size == s:\n i += 1\n return i\n\ndef SortBySize(tree):\n return tree.size\n\ndef FindBestSeedByRichness(i,mytrees):\n m = -1\n res = -1\n for j in mytrees[i].seedtargets:\n if game.cells[j].richness > m:\n m = game.cells[j].richness\n res = j\n return res\n\ndef AddSeedTargets(i,mytrees,trees):\n targets = set()\n targets.add(mytrees[i].index) \n for a in range(mytrees[i].size):\n newtargets = set()\n for j in targets:\n for k in game.cells[j].neighbors:\n if k >= 0:\n newtargets.add(k)\n targets.update(newtargets)\n targets.remove(mytrees[i].index)\n toremove = []\n for target in targets:\n if game.cells[target].richness == 0 or target in trees:\n toremove += [target]\n for j in toremove:\n targets.remove(j)\n return targets\n\ndef FindBestSeed(mytrees):\n pass\n\n\ngame = Game()\npreviousday = 0\nnumber_of_cells = int(input())\nfor i in range(number_of_cells):\n index, richness, neigh_0, neigh_1, neigh_2, neigh_3, neigh_4, neigh_5 = [int(j) for j in 
input().split()]\n neigh = [neigh_0, neigh_1, neigh_2, neigh_3, neigh_4, neigh_5]\n game.cells += [Cell(richness,neigh)]\n #print(index, richness, file=sys.stderr, flush=True)\n\nwhile True:\n day = int(input()) \n game.day = day\n if day > previousday:\n game.sundirection += 1\n game.sundirection %= 6\n previousday = day\n nutrients = int(input()) \n game.nutrients = nutrients\n check = 1\n sun, score = [int(i) for i in input().split()]\n game.my_sun = sun\n game.my_score = score\n inputs = input().split()\n opp_sun = int(inputs[0])\n opp_score = int(inputs[1])\n opp_is_waiting = inputs[2] != \"0\" \n game.opponent_sun = opp_sun\n game.opponent_score = opp_score\n game.opponent_is_waiting = opp_is_waiting\n game.mytrees.clear()\n game.optrees.clear()\n game.alltreesindex.clear()\n #print(game.day, game.sundirection,file=sys.stderr, flush=True)\n number_of_trees = int(input()) \n for i in range(number_of_trees):\n inputs = input().split()\n cell_index = int(inputs[0]) \n size = int(inputs[1]) \n is_mine = inputs[2] != \"0\"\n is_dormant = inputs[3] != \"0\"\n game.alltreesindex += [cell_index]\n if is_mine:\n game.mytrees += [Tree(cell_index,size,is_dormant,2**(game.cells[cell_index].richness - 1))]\n else:\n game.optrees += [Tree(cell_index,size,is_dormant,2**(game.cells[cell_index].richness - 1))]\n number_of_possible_moves = int(input())\n for i in range(number_of_possible_moves):\n possible_move = input()\n game.mytrees.sort(key=SortBySize,reverse=True)\n nseeds = CountTrees(0, game.mytrees)\n for i in range(len(game.mytrees)):\n m = CountTrees(3, game.mytrees)\n n = CountTrees(game.mytrees[i].size + 1, game.mytrees)\n game.mytrees[i].growcost = n + 2**(game.mytrees[i].size + 1) - 1\n game.mytrees[i].seedtargets = AddSeedTargets(i,game.mytrees,game.alltreesindex)\n seed = FindBestSeedByRichness(i, game.mytrees)\n #print(MyTrees[i].index, MyTrees[i].seedtargets, file=sys.stderr, flush=True)\n if check and not game.mytrees[i].dormant:\n if game.mytrees[i].size > 0 and sun >= nseeds and seed >= 0 and nseeds < 1 and game.day < 24 - m - 1:\n print(\"SEED\", game.mytrees[i].index, seed)\n check = 0\n elif game.mytrees[i].size != 3 and sun >= game.mytrees[i].growcost and game.day < 24 - m -1 :\n print(\"GROW\", game.mytrees[i].index)\n check = 0\n elif game.mytrees[i].size == 3 and sun >= 4 and game.day >= 24 - m - 1:\n print(\"COMPLETE\", game.mytrees[i].index)\n check = 0\n if check:\n print(\"WAIT\")\n","repo_name":"lainislain/codingame","sub_path":"SpringChallenge2021/bronze.py","file_name":"bronze.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40280994028","text":"import shutil\nimport json\nimport os\n\nfrom chronos.script import Script\nfrom chronos.metadata import Session, Log\n\n\ndef run(arguments, event):\n arguments = json.loads(arguments)\n uid = arguments[\"uid\"]\n\n script = Script(uid)\n\n session = Session()\n\n # Remove script folder\n shutil.rmtree(script.folder)\n\n # Remove all logs from script\n session.query(Log).filter(Log.script == script.uid).delete()\n\n # Delete metadata\n session.delete(script.db)\n session.commit()\n session.close()\n\n event.trigger(\"action_complete\", {\"action\": \"delete\", \"uid\": script.uid})\n event.trigger(\"script_deleted\", {\"uid\": script.uid})\n\n return 
uid\n","repo_name":"simse/chronos","sub_path":"chronos/tasks/delete_script.py","file_name":"delete_script.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"32"} +{"seq_id":"29798311602","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport cv2\nfrom pyzbar.pyzbar import decode\nfrom streamlit_webrtc import webrtc_streamer\nimport av\n\nfrom streamlit_webrtc import (\n AudioProcessorBase,\n RTCConfiguration,\n VideoProcessorBase,\n WebRtcMode,\n webrtc_streamer,\n)\n\nst.title('Barcode test app')\n\nclass VideoProcessor:\n def recv(self, image):\n img = image.to_ndarray(format=\"bgr24\")\n \n gray_img = cv2.cvtColor(img,0) \n barcode = decode(gray_img)\n\n for obj in barcode:\n points = obj.polygon\n (x,y,w,h) = obj.rect\n pts = np.array(points, np.int32)\n pts = pts.reshape((-1, 1, 2))\n cv2.polylines(img, [pts], True, (0, 255, 0), 3)\n\n barcodeData = obj.data.decode(\"utf-8\")\n barcodeType = obj.type\n string = \"Data: \" + str(barcodeData) + \" | Type: \" + str(barcodeType)\n \n cv2.putText(img, string, (x,y), cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,255), 2)\n\n print(\"Barcode: \"+barcodeData +\" | Type: \"+barcodeType)\n\n return av.VideoFrame.from_ndarray(img, format=\"bgr24\")\n \n\nwebrtc_streamer(\n key=\"example\", \n video_processor_factory=VideoProcessor,\n mode=WebRtcMode.SENDRECV,\n media_stream_constraints={\"video\": True, \"audio\": False},\n)","repo_name":"JonathanDeBelg/barcode-scanner-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25204426349","text":"import uuid, json\n\nfrom flask import Flask, jsonify, request, abort\nfrom flask_cors import CORS\nfrom utils import *\n\nTASKS = [\n {\n 'id': uuid.uuid4().hex,\n 'title': 'Hackerearth problem',\n },\n {\n 'id': uuid.uuid4().hex,\n 'title': 'Economics Maths'\n }\n]\n\nDEBUG = True\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nCORS(app, resources={r'/*': {'origins': '*'}})\n\n# sanity check route\n@app.route('/ping', methods=['GET'])\ndef ping_pong():\n return jsonify('pong!')\n\n@app.route('/tasks', methods=['GET', 'POST'])\ndef tasks():\n if request.method == 'POST':\n data = request.get_json()\n print(data)\n TASKS.append({\n 'id': uuid.uuid4().hex,\n 'title': data.get('task')\n })\n return { \"tasks\": TASKS}\n\n@app.route('/tasks/', methods=['DELETE'])\ndef remove_task(task_id):\n index = findIndexById(task_id, TASKS)\n if index >= 0:\n TASKS.pop(index)\n return {\"tasks\": TASKS}\n else:\n abort(404)\n\n@app.route('/tasks/update', methods=['PUT'])\ndef update_task():\n data = request.get_json()\n index = findIndexById(data.get('id'), TASKS)\n TASKS[index] = {\n 'id' : data.get('id'),\n 'title' : data.get('title')\n }\n return { \"tasks\": TASKS }\n\nif __name__ == '__main__':\n app.run()","repo_name":"shunya0/DOitApp","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18024552821","text":"from cs_utils.service_locations import get_service_endpoint\nfrom cs_utils import requests\nfrom django.conf import settings\nfrom requests_aws4auth import AWS4Auth\nfrom college_profile.models import (OWNERSHIP, MODE_OF_STUDY,\n COURSE_LEVEL,\n COURSE_STATUS,\n COLLEGE_TYPE, UNIVERSITY_TYPE)\nimport 
logging\nimport uuid\n\n\ndef handle_college_update(event, context):\n logger = logging.getLogger(__name__)\n logger.info(\"event : %s , \\n context: %s, \\n \"\n \" message: initail college event logging\",\n event, context\n )\n try:\n institution_info = event['Records'][0]['Sns']['Message']\n info_list = institution_info.split('|')\n institution_id = info_list[0]\n action_type = info_list[1]\n uuid.UUID(institution_id)\n except (TypeError, ValueError, KeyError, IndexError) as err:\n logger.error(\"Error: %s\",\n err, exc_info=True)\n else:\n end_point = '/_doc/{}'.format(institution_id)\n BASE_ELASTIC_URL = get_service_endpoint('cs-elasticsearch')\n aws_url = BASE_ELASTIC_URL + end_point\n ELASTIC_UPDATE_AUTH = settings.ELASTIC_UPDATE_AUTH\n awsauth = AWS4Auth(ELASTIC_UPDATE_AUTH['AWS_ACCESS_KEY'],\n ELASTIC_UPDATE_AUTH['AWS_SECRET_KEY'],\n ELASTIC_UPDATE_AUTH['region'],\n ELASTIC_UPDATE_AUTH['service'])\n BASE_URL = get_service_endpoint('cs-college-profile')\n url = BASE_URL + '/api/institution/{}/'.format(institution_id)\n if action_type == 'deleted':\n requests.delete(aws_url, auth=awsauth)\n logger.info(\"college id: %s , \\n\"\n \"message: elastic search data deleted\",\n institution_id,\n )\n else:\n r = requests.get(url)\n if r.status_code == 200:\n institution = r.json()\n payload = {\n \"name\": institution.get(\"name\"),\n \"slug\": institution.get(\"slug_name\"),\n \"type\": 'university'\n if 'university_type' in institution.keys()\n else 'college',\n \"url\": institution.get(\"url\"),\n \"rating\": institution.get(\"rating\"),\n \"override_rating\": institution.get(\"override_rating\"),\n \"total_rating_count\": institution.get(\n \"total_rating_count\"),\n \"ownership\": dict(OWNERSHIP)[institution.get('ownership')]\n if institution.get('ownership', None) else None,\n \"description\": institution.get('description'),\n \"univ_type\": dict(UNIVERSITY_TYPE)\n [institution.get('university_type')]\n if institution.get('university_type', None) else None,\n \"college_type\": dict(COLLEGE_TYPE)\n [institution.get('college_type')]\n if institution.get('college_type', None) else None,\n \"accreditations\": [\n {\n \"accreditation_body_name\": acc.get(\n 'accreditation_name'),\n \"accreditation_body_abbrv\": acc.get(\n 'abbreviation'),\n \"score\": acc.get('score')\n }\n for acc in institution.get('accreditation')\n ],\n \"contacts\": [\n {\n \"contact_no\": contact.get('contact_no'),\n \"alternate_contact_no\": contact.get(\n \"alternate_contact_no\"),\n \"name\": contact.get('name'),\n \"email\": contact.get('email'),\n \"alternate_email\": contact.get('alternate_email'),\n }\n for contact in institution.get('contacts')],\n \"courses\": [\n {\n \"cat\": course.get('grand_parent_name'),\n \"sub_cat\": course.get('parent_name'),\n \"programme\": course.get('name'),\n \"num_seats_intake\": course.get('course_intake'),\n \"num_seats_enrolled\": course.get(\n 'course_enrolled'),\n \"override_rating\": course.get('override_rating'),\n \"mode_of_study\": dict(MODE_OF_STUDY)\n [course.get('mode_of_study')],\n \"course_level\": dict(COURSE_LEVEL)\n [course.get('course_level')],\n \"course_status\": dict(COURSE_STATUS)\n [course.get('course_status')]\n }\n for course in institution.get('courses')],\n \"addresses\": [\n {\n \"street\": address.get('street'),\n \"city\": address.get('city'),\n \"district\": address.get('district'),\n \"state\": address.get('state'),\n \"location\": '{}, {}'.format(\n address.get('latitude') if address.get(\n 'latitude')\n else 0.0,\n address.get('longitude')\n 
if address.get('longitude')\n else 0.0)\n }\n for address in institution.get('addresses')\n ],\n \"facilities\": [\n {\n \"infra_type\": infra.get('infra_type'),\n \"infra_count\": infra.get('infra_entity_count')\n }\n for infra in institution.get('infrastructure')\n ],\n \"images\": [\n {\n \"description\": image.get('description'),\n \"media_label\": image.get('media_label'),\n \"media_type\": image.get('media_type'),\n \"url\": image.get('url'),\n }\n for image in institution.get('images')\n if image.get('media_label') in {3, 5}\n # 3 : srp image, 5: Brochure\n ]\n }\n requests.put(aws_url, auth=awsauth, json=payload)\n logger.info(\"college id: %s , \\n\"\n \"message: elastic search updated successfully\",\n institution_id,\n )\n else:\n logger.error(\"request failed and cant do the update\")\n","repo_name":"Shubratha/job_portal","sub_path":"Untitled Folder/cs-utils/cs_utils/elasticsearch_update/sns_consumer.py","file_name":"sns_consumer.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71739606170","text":"#ImportModules\nimport ShareYourSystem as SYS\n\n\n#Short expression and set in the appended manner\nMyParenter=SYS.ParenterClass(\n\t).__setitem__(\n\t\t'ChildParenter',\n\t\tSYS.ParenterClass(\n\t\t\t).__setitem__(\n\t\t\t\t'GrandChildParenter',\n\t\t\t\tSYS.ParenterClass(\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n#Parent for the children\nMyParenter[\n\t'ChildParenter'\n\t][\n\t'GrandChildParenter'\n\t].parent(\n\t\t['IdInt']\n\t)\n\n#Definition the AttestedStr\nSYS._attest(\n\t[\n\t\t'MyParenter is '+SYS._str(\n\t\tMyParenter,\n\t\t**{\n\t\t\t'RepresentingBaseKeyStrsListBool':False,\n\t\t\t'RepresentingAlineaIsBool':False\n\t\t}\n\t\t)\n\t]\n) \n\n#Print\n\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/draft/Noders/Parenter/03_ExampleDoc.py","file_name":"03_ExampleDoc.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25492017035","text":"# coding: utf-8\n\nclass FieldInfo(object):\n swagger_types = {\n 'access': 'FieldAccess',\n 'default': 'str'\n }\n\n attribute_map = {\n 'access': 'Access',\n 'default': 'Default'\n }\n\n def __init__(self, access=None, default=None): # noqa: E501\n \"\"\"FieldInfo - a model defined in Swagger\"\"\" # noqa: E501\n self._access = None\n self._default = None\n self.discriminator = None\n if access is not None:\n self.access = access\n if default is not None:\n self.default = default\n\n @property\n def access(self):\n return self._access\n\n @access.setter\n def access(self, access):\n self._access = access\n\n @property\n def default(self):\n return self._default\n\n @default.setter\n def default(self, default):\n self._default = default\n","repo_name":"wickedwick/basys-connect-api-documentation","sub_path":"CodeSamples/python/swagger_client/models/field_info.py","file_name":"field_info.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27889057287","text":"import requests\nimport json\n\ndef localiza_cep(numero_cep):\n cep_usuario = numero_cep\n api = 'https://cep.awesomeapi.com.br/:json/:'\n link = api + cep_usuario\n resposta = requests.get(link)\n endereco_dic = resposta.json()\n\n for k, v in endereco_dic.items():\n if k == 'status':\n if v == 400:\n print('CEP invalido')\n elif v == 404:\n print('Cep não encontrado')\n elif k 
== 'cep':\n print(f'{endereco_dic[\"address\"]}-{endereco_dic[\"city\"]}/{endereco_dic[\"state\"]}')\n","repo_name":"Joonas2/localizador_cep","sub_path":"buscador.py","file_name":"buscador.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35389350074","text":"import pytest\nfrom herokuApp.main_folder.page_objects.context_menu_page import context_menu_page\nfrom herokuApp.main_folder.page_objects.home_page import home_page\nfrom herokuApp.resources.TestData import browser\n\n\nclass Test_TC005_user_should_see_alert_on_the_page_after_right_clicking_on_specific_area:\n @pytest.fixture()\n def browser_init(self):\n self.driver = browser(\"chrome\")\n self.driver.implicitly_wait(5)\n self.driver.maximize_window()\n yield\n self.driver.close()\n self.driver.quit()\n\n def test_user_should_see_alert_on_the_page_after_right_clicking_on_specific_area(self, browser_init):\n self.home_page = home_page(self.driver)\n self.home_page.click_on_link(\"Context Menu\")\n\n self.context_menu_page = context_menu_page(self.driver)\n self.context_menu_page.verify_default_content()\n self.context_menu_page.left_click_on_box()\n self.context_menu_page.right_click_out_box()\n self.context_menu_page.right_click_on_box()\n","repo_name":"BilenkoVlad/PythonSeleniumTests","sub_path":"herokuApp/tests/TC005_user_should_see_alert_on_the_page_after_right_clicking_on_specific_area_test.py","file_name":"TC005_user_should_see_alert_on_the_page_after_right_clicking_on_specific_area_test.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15009495911","text":"import os.path\n\nimport baker\n\nfrom lfrbuilder.dir import change_dir\nfrom lfrbuilder.appserverfile import get_property\nfrom lfrbuilder.build import build as lfrbuild\n\n@baker.command\ndef build(repository='.', bundle=None, branch=None):\n repository=os.path.expanduser(repository)\n\n if bundle is None:\n bundle = get_property('app.server.parent.dir')\n else:\n bundle = os.path.expanduser(bundle)\n\n with change_dir(repository):\n lfrbuild(\n repository=repository,\n bundle=bundle,\n branch=branch\n )\n\n","repo_name":"brandizzi/lfr-builder","sub_path":"lfrbuilder/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71875088732","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport pytest\nimport math\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n@pytest.fixture(scope=\"function\")\ndef browser():\n print(\"\\nstart browser for test..\")\n browser = webdriver.Chrome()\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n\nclass TestAbs():\n @pytest.mark.parametrize('link', [\"https://stepik.org/lesson/236895/step/1\", \n \"https://stepik.org/lesson/236896/step/1\",\n \"https://stepik.org/lesson/236897/step/1\",\n \"https://stepik.org/lesson/236898/step/1\",\n \"https://stepik.org/lesson/236899/step/1\",\n \"https://stepik.org/lesson/236903/step/1\",\n \"https://stepik.org/lesson/236904/step/1\",\n \"https://stepik.org/lesson/236905/step/1\"\n ])\n def test_abs1(browser,link):\n try:\n now_link = link\n browser = webdriver.Chrome()\n browser.implicitly_wait(5)\n 
browser.get(now_link)\n\n # Ваш код, который заполняет обязательные поля\n element1 = browser.find_element(By.CSS_SELECTOR, \"#ember33\")\n element1.click()\n element2 = browser.find_element(By.CSS_SELECTOR, \"#id_login_email\")\n element3 = browser.find_element(By.CSS_SELECTOR, \"#id_login_password\")\n element4 = browser.find_element(By.CSS_SELECTOR, 'button[type=\"submit\"]')\n element2.send_keys(\"**\")\n element3.send_keys(\"**\")\n element4.click()\n button = WebDriverWait(browser, 5).until(\n EC.invisibility_of_element_located((By.CSS_SELECTOR, \"#ember33\"))\n )\n element5 = browser.find_element(By.CSS_SELECTOR, \".ember-text-area\")\n answer = math.log(int(time.time()))\n element5.send_keys(str(answer))\n element6 = browser.find_element(By.CSS_SELECTOR, \"button.submit-submission\")\n element6.click()\n answer_code = browser.find_element(By.CSS_SELECTOR, \"p.smart-hints__hint\").text\n print(answer_code)\n assert answer_code==\"Correct!\",f'При значении {answer} тест упал с кодовым ответом {answer_code}'\n\n finally:\n time.sleep(5)\n browser.quit()\n\n\n\n\n","repo_name":"eva-prus/stepik_auto_tests_course","sub_path":"lesson3_6_ex4.py","file_name":"lesson3_6_ex4.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41187381633","text":"import numpy as np\nimport cv2\nfrom PIL import Image\n\n\ndef read_image(filename, color_fmt='RGB'):\n assert color_fmt in Image.MODES\n img = Image.open(filename)\n if not img.mode == color_fmt:\n img = img.convert(color_fmt)\n return np.asarray(img)\n\n\ndef save_image(filename, img, color_fmt='RGB'):\n assert color_fmt in ['RGB', 'BGR']\n if color_fmt == 'BGR' and img.ndim == 3:\n img = img[..., ::-1]\n img = Image.fromarray(img)\n return img.save(filename)\n\n\ndef show_image(img, bboxes=None, bbox_fmt='ltrb', colors=None,\n thickness=3, fig=1, delay=1, max_size=640,\n visualize=True, cvt_code=cv2.COLOR_RGB2BGR):\n if cvt_code is not None:\n img = cv2.cvtColor(img, cvt_code)\n \n # resize img if necessary\n if max(img.shape[:2]) > max_size:\n scale = max_size / max(img.shape[:2])\n out_size = (\n int(img.shape[1] * scale),\n int(img.shape[0] * scale))\n img = cv2.resize(img, out_size)\n if bboxes is not None:\n bboxes = np.array(bboxes, dtype=np.float32) * scale\n \n if bboxes is not None:\n assert bbox_fmt in ['ltwh', 'ltrb']\n bboxes = np.array(bboxes, dtype=np.int32)\n if bboxes.ndim == 1:\n bboxes = np.expand_dims(bboxes, axis=0)\n if bboxes.shape[1] == 4 and bbox_fmt == 'ltwh':\n bboxes[:, 2:] = bboxes[:, :2] + bboxes[:, 2:] - 1\n \n # clip bounding boxes\n h, w = img.shape[:2]\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, w)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, h)\n \n if colors is None:\n colors = [\n (0, 0, 255),\n (0, 255, 0),\n (255, 0, 0),\n (0, 255, 255),\n (255, 0, 255),\n (255, 255, 0),\n (0, 0, 128),\n (0, 128, 0),\n (128, 0, 0),\n (0, 128, 128),\n (128, 0, 128),\n (128, 128, 0)]\n colors = np.array(colors, dtype=np.int32)\n if colors.ndim == 1:\n colors = np.expand_dims(colors, axis=0)\n \n for i, bbox in enumerate(bboxes):\n color = colors[i % len(colors)]\n if len(bbox) == 4:\n pt1 = (int(bbox[0]), int(bbox[1]))\n pt2 = (int(bbox[2]), int(bbox[3]))\n img = cv2.rectangle(img, pt1, pt2, color.tolist(), thickness)\n else:\n pts = bbox.reshape(-1, 2)\n img = cv2.polylines(img, [pts], True, color.tolist(), thickness)\n \n if visualize:\n if isinstance(fig, str):\n winname = fig\n else:\n winname = 
'window_{}'.format(fig)\n cv2.imshow(winname, img)\n cv2.waitKey(delay)\n \n if cvt_code in [cv2.COLOR_RGB2BGR, cv2.COLOR_BGR2RGB]:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n","repo_name":"Daikenan/LTMU","sub_path":"DiMP_LTMU/Global_Track/_submodules/neuron/neuron/ops/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"32"} +{"seq_id":"9827081699","text":"import sys\nimport cv2 as cv\nimport numpy\nimport numpy as np\n\nimport datetime\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom scipy.signal import savgol_filter\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import QFileDialog, QMainWindow\n\nfrom UI.untitledtest import Ui_MainWindow\n\n\n\nclass PyQtMainEntry(QMainWindow, Ui_MainWindow):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.camera = cv.VideoCapture(0)\n self.is_camera_opened = False # 摄像头有没有打开标记\n\n self.trainDate = []\n self.lable = []\n\n # 定时器:30ms捕获一帧\n self._timer = QtCore.QTimer(self)\n self._timer.timeout.connect(self._queryFrame)\n self._timer.setInterval(30)\n\n # 按钮操作函数 打开和关闭摄像头\n def btnOpenCamera_Clicked(self):\n '''\n 打开和关���摄像头\n '''\n self.is_camera_opened = ~self.is_camera_opened\n if self.is_camera_opened:\n self.btnOpenCamera.setText(\"关闭摄像头\")\n self._timer.start()\n else:\n self.btnOpenCamera.setText(\"打开摄像头\")\n self.labelCamera.clear()\n self.labelCamera.setText(\"摄像头\")\n self._timer.stop()\n\n # 按钮操作函数 捕获摄像头图片\n def btnCapture_Clicked(self):\n '''\n 捕获图片\n '''\n # 摄像头未打开,不执行任何操作\n if not self.is_camera_opened:\n return\n\n self.captured = self.frame\n\n self.Gray = cv.cvtColor(self.captured,cv.COLOR_RGB2GRAY)\n\n\n\n\n # 后面这几行代码几乎都一样,可以尝试封装成一个函数\n rows, cols, channels = self.captured.shape\n bytesPerLine = channels * cols\n # Qt显示图片时,需要先转换成QImgage类型\n QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)\n self.labelCapture.setPixmap(QPixmap.fromImage(QImg).scaled(\n self.labelCapture.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))\n\n # 按钮操作函数 从本地读取图片\n def btnReadImage_Clicked(self):\n '''\n 从本地读取图片\n '''\n # 打开文件选取对话框\n filename, _ = QFileDialog.getOpenFileName(self, '打开图片')\n if filename:\n self.captured = cv.imread(str(filename))\n # OpenCV图像以BGR通道存储,显示时需要从BGR转到RGB\n self.captured = cv.cvtColor(self.captured, cv.COLOR_BGR2RGB)\n\n self.Gray = cv.cvtColor(self.captured, cv.COLOR_RGB2GRAY)##\n\n rows, cols, channels = self.captured.shape\n bytesPerLine = channels * cols\n QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)\n self.labelCapture.setPixmap(QPixmap.fromImage(QImg).scaled(\n self.labelCapture.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))\n\n\n # # 肤色分割人脸\n # def crcb_range_sceening(self):\n # \"\"\"\n # # :param image: 图片路径\n # # :return: None\n # \"\"\"\n # # self.img = cv.imread(, cv.IMREAD_COLOR)\n # ycrcb = cv.cvtColor(self.captured, cv.COLOR_RGB2YCR_CB)\n # (y, cr, cb) = cv.split(ycrcb)\n # # print(cr,y,cb)\n # self.skin = np.zeros(cr.shape, dtype=np.uint8)\n # (x, y) = cr.shape\n # # print(cr.shape)\n # for i in range(0, x):\n # for j in range(0, y):\n # if (cr[i][j] > 140) and (cr[i][j]) < 175 and (cr[i][j] > 100) and (cb[i][j]) < 120:\n # self.skin[i][j] = 255\n # else:\n # self.skin[i][j] = 0\n #\n # # 形态学操作\n # def morphology(self):\n # k = np.ones((3, 3), 
np.uint8)\n # self.open = cv.morphologyEx(self.skin, cv.MORPH_OPEN, k) # 开运算\n # self.close = cv.morphologyEx(self.open, cv.MORPH_CLOSE, k) # 闭运算\n #\n # # 连通域分析\n # def region(self):\n # # 连通域分析\n # num_labels, labels, self.stats, centroids = cv.connectedComponentsWithStats(self.close, connectivity=8)\n #\n # # # 查看各个返回值\n # # # 连通域数量\n # # print('num_labels = ', num_labels)\n # # # 连通域的信息:对应各个轮廓的左上角坐标x、y、width、height和面积\n # # print('stats = ', stats)\n # # # 连通域的中心点\n # # print('centroids = ', centroids)\n # # # 每一个像素的标签1、2、3.。。,同一个连通域的标签是一致的\n # # print('labels = ', labels)\n #\n # # 不同的连通域赋予不同的颜色\n # # self.output = np.zeros((self.close.shape[0], self.close.shape[1], 3), np.uint8)\n # # for i in range(1, num_labels):\n # # mask = labels == i\n # # self.output[:, :, 0][mask] = np.random.randint(0, 255)\n # # self.output[:, :, 1][mask] = np.random.randint(0, 255)\n # # self.output[:, :, 2][mask] = np.random.randint(0, 255)\n # # cv2.namedWindow(\"region\", cv2.WINDOW_NORMAL)\n # # cv2.imshow('region', output)\n # # cv2.waitKey()\n # # cv2.destroyAllWindows()\n #\n # # 判断人脸连通域\n # def selectskin(self):\n # num = len(self.stats)\n # size_total = self.captured.shape[1] * self.captured.shape[0]\n # self.tips = []\n # for i in range(0, num):\n # long = max(self.stats[i][2], self.stats[i][3])\n # short = min(self.stats[i][2], self.stats[i][3])\n # # ratio = long / short\n # ratio = self.stats[i][3] / self.stats[i][2]\n # size = self.stats[i][4]\n # if 1.1 <= ratio <= 2 and size / size_total >= 0.001:\n # self.tips.append(i)\n # else:\n # continue\n #\n # # 对人脸连通域画框\n # def signskin(self):\n # if self.tips:\n # self.result = self.captured\n # for k in range(0, len(self.tips)):\n # i = self.tips[k]\n # pt1 = (self.stats[i][0], self.stats[i][1])\n # pt2 = (self.stats[i][0] + self.stats[i][2], self.stats[i][1] + self.stats[i][3])\n # self.result = cv.rectangle(self.result, pt1, pt2, (0, 255, 0), 2, 4)\n # else:\n # self.result = self.captured\n # box = QtWidgets.QMessageBox()\n # box.warning(self, \"提示\", \"Dont find face\")\n # print(\"Dont find face\")\n #\n #\n # def cutface(self):\n # self.saveface =\n #\n # # 定位人眼并画框\n # def findeyes(self):\n # count =0\n # eyes = cv.CascadeClassifier(\"haarcascades/haarcascade_eye.xml\")\n # self.xxx = eyes.detectMultiScale(self.Gray,1.1,5)\n # for (x,y,w,h) in self.xxx:\n # self.result = cv.rectangle(self.captured,(x,y),(x+w,y+h),(255,0,0),2)\n # count += 1\n # face_img = cv.resize(self.Gray[y:y + h, x:x + w], (200, 200))\n # face_filename = '%d.jpg' % (count)\n # cv.imwrite(face_filename, face_img)\n\n def FaceDetect(self):\n # 肤色阈值分割\n ycrcb = cv.cvtColor(self.captured, cv.COLOR_RGB2YCR_CB)\n (y, cr, cb) = cv.split(ycrcb)\n # print(cr,y,cb)\n self.skin = np.zeros(cr.shape, dtype=np.uint8)\n (x, y) = cr.shape\n # print(cr.shape)\n for i in range(0, x):\n for j in range(0, y):\n if (cr[i][j] > 140) and (cr[i][j]) < 175 and (cr[i][j] > 100) and (cb[i][j]) < 120:\n self.skin[i][j] = 255\n else:\n self.skin[i][j] = 0\n\n # 形态学操作\n k = np.ones((3, 3), np.uint8)\n self.open = cv.morphologyEx(self.skin, cv.MORPH_OPEN, k) # 开运算\n self.close = cv.morphologyEx(self.open, cv.MORPH_CLOSE, k) # 闭运算\n\n # 连通域分析\n num_labels, labels, self.allstats, centroids = cv.connectedComponentsWithStats(self.close, connectivity=8)\n\n # 判断人脸连通域\n num = len(self.allstats)\n size_total = self.captured.shape[1] * self.captured.shape[0]\n self.tips = []\n for i in range(0, num):\n long = max(self.allstats[i][2], self.allstats[i][3])\n short = min(self.allstats[i][2], 
self.allstats[i][3])\n # ratio = long / short\n ratio = self.allstats[i][3] / self.allstats[i][2]\n size = self.allstats[i][4]\n if 1.1 <= ratio <= 2 and size / size_total >= 0.001:\n self.tips.append(i)\n else:\n continue\n\n # 对人脸连通域画框\n if self.tips:\n self.result = self.captured\n for k in range(0, len(self.tips)):\n i = self.tips[k]\n pt1 = (self.allstats[i][0], self.allstats[i][1])\n pt2 = (self.allstats[i][0] + self.allstats[i][2], self.allstats[i][1] + self.allstats[i][3])\n self.result = cv.rectangle(self.result, pt1, pt2, (0, 255, 0), 2, 4)\n else:\n self.result = self.captured\n box = QtWidgets.QMessageBox()\n box.warning(self, \"提示\", \"Dont find face\")\n print(\"Dont find face\")\n\n def btnSignFace_Clicked(self):\n '''\n # ''\n # 灰度化\n # ''\n # # 如果没有捕获图片,则不执行操作\n # if not hasattr(self, \"captured\"):\n # return\n #\n # # self.cpatured = cv.cvtColor(self.captured, cv.COLOR_RGB2GRAY)\n # # 全局均衡化\n # # self.equalize = cv.equalizeHist(self.cpatured)\n # # 局部均衡化\n # ## createCLAHE(clipLimit=None, tileGridSize=None)\n # # clahe = cv.createCLAHE(tileGridSize=(5, 5))\n # # self.equalize = clahe.apply(self.cpatured)\n #\n # # ycrcb = cv.cvtColor(self.captured, cv.COLOR_BGR2RGB)\n # r, g, b = cv.split(self.captured)\n # # R,G,B = cv.split(ycrcb)\n #\n # # r =cv.equalizeHist(r)\n # # g = cv.equalizeHist(g)\n # # b =cv.equalizeHist(b)\n #\n # clahe = cv.createCLAHE(clipLimit=0.5, tileGridSize=(8, 8))\n # r = clahe.apply(r)\n # g = clahe.apply(g)\n # b = clahe.apply(b)\n #\n # self.equalize = cv.merge((r, g, b))\n # self.captured = self.equalize\n # # self.equalize = cv.cvtColor(aaa)\n '''\n self.FaceDetect()\n for i in self.tips:\n x,y,w,h,s = self.allstats[i]\n # 将灰度图人脸区域copy出来\n self.faceGray = self.Gray[y:y+h,x:x+w].copy()\n # 将原图彩色图人脸区域copy出来\n self.face = self.captured[y:y+h,x:x+w].copy()\n # loctime = datetime.datetime.now().strftime(\"%H_%M_%S.\")\n # name = loctime + str(i) + \".png\"\n # path = './Date/trainPhoto/'\n # cv.imwrite(path+name, self.retval, [cv.IMWRITE_PNG_COMPRESSION, 0])\n\n self.faceGray = cv.resize(self.faceGray,(256,384))\n self.face = cv.resize(self.face, (256, 384))\n # (y, cr, cb) = cv.split(faceycrcb)\n # # print(cr,y,cb)\n # self.mouth = np.zeros(cr.shape, dtype=np.uint8)\n # (x, y) = cr.shape\n # # print(cr.shape)\n # for i in range(0, x):\n # for j in range(0, y):\n # if (cr[i][j] > 160):\n # self.mouth[i][j] = 255\n # else:\n # self.mouth[i][j] = 0\n # # 形态学操作\n # k = np.ones((3, 3), np.uint8)\n # open = cv.morphologyEx(self.mouth, cv.MORPH_OPEN, k) # 开运算\n # close = cv.morphologyEx(open, cv.MORPH_CLOSE, k) # 闭运算\n #\n # num_labels, labels, stats, centroids = cv.connectedComponentsWithStats(close, connectivity=8)\n\n\n############################################\n\n\n m, n = self.faceGray.shape\n\n col = np.zeros(n)\n row = np.zeros(m)\n\n for i in range(1, m):\n r = 0\n for k in range(1, n):\n r += abs(int(self.faceGray[i, k]) - int(self.faceGray[i, k - 1]))\n row[i] = r\n\n for i in range(1, n):\n r = 0\n for k in range(1, m):\n r += abs(int(self.faceGray[k, i]) - int(self.faceGray[k - 1, i]))\n col[i] = r\n\n # 平滑行\n rrow = savgol_filter(row, 51, 3)\n # 平滑列\n rcol = savgol_filter(col, 51, 3)\n\n # 眼睛位置\n hang = int(np.where(rrow == np.max(rrow))[0])\n hang1 = hang - int(m / 12)\n hang2 = hang + int(m / 12)\n # 横向彩色眼睛\n hengxiang = self.face[hang1:hang2, 0:n]\n # 横向灰度眼睛\n hengxianghui = self.faceGray[hang1:hang2, 0:n]\n\n # 绘制表格\n # plt.figure(1)\n # plt.plot(list(range(0, m)), rrow)\n # plt.show()\n # plt.figure(2)\n # plt.plot(list(range(0, n)), 
rcol)\n # plt.show()\n\n # otsu\n _, eyesThreshold = cv.threshold(hengxianghui, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n cv.bitwise_not(eyesThreshold, eyesThreshold)\n\n # cv.imshow('Eyes', hengxiang)\n # cv.imshow('EyesThreshold', eyesThreshold)\n # cv.waitKey(0)\n # cv.destroyAllWindows()\n\n # ycrcb = cv.cvtColor(, cv2.COLOR_BGR2YCR_CB)\n faceycrcb = cv.cvtColor(self.face, cv.COLOR_RGB2YCR_CB)\n (Fy, Fcr, Fcb) = cv.split(faceycrcb)\n # print(cr,y,cb)\n mouth = np.zeros(Fcr.shape, dtype=np.uint8)\n (x, y) = Fcr.shape\n # print(cr.shape)\n for i in range(0, x):\n for j in range(0, y):\n if Fcr[i][j] > 160:\n mouth[i][j] = 255\n else:\n mouth[i][j] = 0\n self.Signresultold = mouth\n self.Signresultold[hang1:hang2, 0:n] = eyesThreshold\n # cv.imwrite('aaa.jpg',self.Signresult)\n self.Signresult = self.Signresultold.reshape(1,-1)\n self.Signresultlist = self.Signresult.tolist()\n\n self.trainDate.append(self.Signresultlist[0])\n\n lab = str(self.lineEdit.text())\n self.lable.append(lab)\n # print(self.lable)\n\n rows, columns = self.Signresultold.shape\n # rows, columns, channels = self.retval.shape\n bytesPerLine = columns\n # 灰度图是单通道,所以需要用Format_Indexed8\n QImg = QImage(self.Signresultold.data, columns, rows, bytesPerLine, QImage.Format_Indexed8)\n self.labelResult.setPixmap(QPixmap.fromImage(QImg).scaled(\n self.labelResult.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))\n\n\n\n\n \n\n\n\n # def train_Clicked(self):\n # recognizer = cv.face.LBPHFaceRecognizer_create()\n # recognizer.train(faces, np.array(ids))\n # recognizer.write('trainer/trainer.yml')\n\n\n\n @QtCore.pyqtSlot()\n def _queryFrame(self):\n '''\n 循环捕获图片\n '''\n ret, self.frame = self.camera.read()\n\n img_rows, img_cols, channels = self.frame.shape\n bytesPerLine = channels * img_cols\n\n cv.cvtColor(self.frame, cv.COLOR_BGR2RGB, self.frame)\n QImg = QImage(self.frame.data, img_cols, img_rows, bytesPerLine, QImage.Format_RGB888)\n self.labelCamera.setPixmap(QPixmap.fromImage(QImg).scaled(\n self.labelCamera.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))\n\n # def lunkuo(self):\n # k = cv.imread(\"./1.jpg\")\n # image,contours,hierarchy = cv.findContours(k, 2, 1)\n # cv.imwrite(\"test.jpg\", contours)\n\n def btnRecognize_Clicked(self):\n '''\n 执行程序\n '''\n if not hasattr(self, \"captured\"):\n return\n\n # _, self.cpatured = cv.threshold(\n # self.cpatured, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n\n # self.crcb_range_sceening()\n # self.morphology()\n # self.region()\n # self.selectskin()\n # self.signskin()\n # self.findeyes()\n # self.lunkuo()\n\n self.FaceDetect()\n for i in self.tips:\n x, y, w, h, s = self.allstats[i]\n # 将灰度图人脸区域copy出来\n self.faceGray = self.Gray[y:y + h, x:x + w].copy()\n # 将原图彩色图人脸区域copy出来\n self.face = self.captured[y:y + h, x:x + w].copy()\n # loctime = datetime.datetime.now().strftime(\"%H_%M_%S.\")\n # name = loctime + str(i) + \".png\"\n # path = './Date/trainPhoto/'\n # cv.imwrite(path+name, self.retval, [cv.IMWRITE_PNG_COMPRESSION, 0])\n\n self.faceGray = cv.resize(self.faceGray, (256, 384))\n self.face = cv.resize(self.face, (256, 384))\n # (y, cr, cb) = cv.split(faceycrcb)\n # # print(cr,y,cb)\n # self.mouth = np.zeros(cr.shape, dtype=np.uint8)\n # (x, y) = cr.shape\n # # print(cr.shape)\n # for i in range(0, x):\n # for j in range(0, y):\n # if (cr[i][j] > 160):\n # self.mouth[i][j] = 255\n # else:\n # self.mouth[i][j] = 0\n # # 形态学操作\n # k = np.ones((3, 3), np.uint8)\n # open = cv.morphologyEx(self.mouth, cv.MORPH_OPEN, k) # 开运算\n # close = cv.morphologyEx(open, 
cv.MORPH_CLOSE, k) # 闭运算\n #\n # num_labels, labels, stats, centroids = cv.connectedComponentsWithStats(close, connectivity=8)\n\n ############################################\n\n m, n = self.faceGray.shape\n\n col = np.zeros(n)\n row = np.zeros(m)\n\n for i in range(1, m):\n r = 0\n for k in range(1, n):\n r += abs(int(self.faceGray[i, k]) - int(self.faceGray[i, k - 1]))\n row[i] = r\n\n for i in range(1, n):\n r = 0\n for k in range(1, m):\n r += abs(int(self.faceGray[k, i]) - int(self.faceGray[k - 1, i]))\n col[i] = r\n\n # 平滑行\n rrow = savgol_filter(row, 51, 3)\n # 平滑列\n rcol = savgol_filter(col, 51, 3)\n\n # 眼睛位置\n hang = int(np.where(rrow == np.max(rrow))[0])\n hang1 = hang - int(m / 12)\n hang2 = hang + int(m / 12)\n # 横向彩色眼睛\n hengxiang = self.face[hang1:hang2, 0:n]\n # 横向灰度眼睛\n hengxianghui = self.faceGray[hang1:hang2, 0:n]\n\n # 绘制表格\n # plt.figure(1)\n # plt.plot(list(range(0, m)), rrow)\n # plt.show()\n # plt.figure(2)\n # plt.plot(list(range(0, n)), rcol)\n # plt.show()\n\n # otsu\n _, eyesThreshold = cv.threshold(hengxianghui, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n cv.bitwise_not(eyesThreshold, eyesThreshold)\n\n # cv.imshow('Eyes', hengxiang)\n # cv.imshow('EyesThreshold', eyesThreshold)\n # cv.waitKey(0)\n # cv.destroyAllWindows()\n\n # ycrcb = cv.cvtColor(, cv2.COLOR_BGR2YCR_CB)\n faceycrcb = cv.cvtColor(self.face, cv.COLOR_RGB2YCR_CB)\n (Fy, Fcr, Fcb) = cv.split(faceycrcb)\n # print(cr,y,cb)\n mouth = np.zeros(Fcr.shape, dtype=np.uint8)\n (x, y) = Fcr.shape\n # print(cr.shape)\n for i in range(0, x):\n for j in range(0, y):\n if Fcr[i][j] > 160:\n mouth[i][j] = 255\n else:\n mouth[i][j] = 0\n self.waitResult = mouth\n self.waitResult[hang1:hang2, 0:n] = eyesThreshold\n self.waitResult = self.waitResult.reshape(1, -1)\n\n knn = KNeighborsClassifier(n_neighbors=1)\n knn.fit(self.trainDate,self.lable)\n pRe = knn.predict(self.waitResult)\n # print(knn.predict(self.waitResult))\n box = QtWidgets.QMessageBox()\n msg = \"该人脸是\" + pRe[0] + \"的人脸\"\n box.warning(self, \"提示\", msg)\n\n # 下面的是图像显示在GUI中代码\n rows, cols, channels = self.result.shape # 改\n bytesPerLine = channels * cols\n QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)\n self.labelResult.setPixmap(QPixmap.fromImage(QImg).scaled(\n self.labelResult.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))\n\n","repo_name":"TrisAXJ/face","sub_path":"testQT.py","file_name":"testQT.py","file_ext":"py","file_size_in_byte":20436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12728026294","text":"\"\"\"\nAcreage is the area of a land in acres. 
To calculate the acreage,\nthe length and width of the land, which is usually given in feet,\nis multiplied to get the area in square feet.\nThen, this area in square feet is converted to acres by using the\nconversion factor of 43560.\nAccordingly, the calculated area is divided by the conversion factor\nbecause 1 acre = 43,560 square feet.\n\"\"\"\n\n# Variables and Input\nwidth = int(input(\"Please enter the width of the field in feet: \"))\nlength = int(input(\"Please enter the length of the field in feet: \"))\n\n# Area of a rectangle\narea = width * length\n\n# 1 acre = 43,560 square feet\nsqFeetPerAcre = 43560\n\n# Acreage formula\nacreage = area / sqFeetPerAcre\n\n# Display acres output\nprint(\"The area of the field in acres is\", acreage, 'acres.')\n","repo_name":"ralphcajipe/python_cs","sub_path":"machine_problems/mp1/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9267849617","text":"# write your code here\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# import seaborn as sns\n\npd.set_option('display.max_columns', 8)\n\ngeneral = pd.read_csv('test/general.csv')\nprenatal = pd.read_csv('test/prenatal.csv')\nsports = pd.read_csv('test/sports.csv')\n\nprenatal.rename(columns={'Sex': 'gender', 'HOSPITAL': 'hospital'}, inplace=True)\nsports.rename(columns={'Male/female': 'gender', 'Hospital': 'hospital'}, inplace=True)\n\ndf = pd.concat([general, prenatal, sports], ignore_index=True)\ndf.drop(columns=['Unnamed: 0'], inplace=True)\n\ndf.dropna(how='all', inplace=True)\n\ndf.loc[df['gender'] == 'male', 'gender'] = 'm'\ndf.loc[df['gender'] == 'man', 'gender'] = 'm'\ndf.loc[df['gender'] == 'female', 'gender'] = 'f'\ndf.loc[df['gender'] == 'woman', 'gender'] = 'f'\n\ndf['gender'].fillna('f', inplace=True)\ndf['bmi'].fillna('0', inplace=True)\ndf['diagnosis'].fillna('0', inplace=True)\ndf['blood_test'].fillna('0', inplace=True)\ndf['ecg'].fillna('0', inplace=True)\ndf['ultrasound'].fillna('0', inplace=True)\ndf['mri'].fillna('0', inplace=True)\ndf['xray'].fillna('0', inplace=True)\ndf['children'].fillna('0', inplace=True)\ndf['months'].fillna('0', inplace=True)\n\n# Stage 4/5\n# print('The answer to the 1st question is {}'.format(df.pivot_table(index='hospital', aggfunc='count').index[0]))\n#\n# diagnosis = df.pivot_table(index='hospital', columns='diagnosis', values='age', aggfunc='count')\n# print('The answer to the 2nd question is {}'.format(round(diagnosis.loc['df', 'stomach'] / diagnosis.loc['df'].sum(), 3)))\n# print('The answer to the 3rd question is {}'.format(round(diagnosis.loc['sports', 'dislocation'] / diagnosis.loc['sports'].sum(), 3)))\n#\n# median_age = df.groupby('hospital')[['hospital', 'age']].agg('median')\n# print('The answer to the 4th question is {}'.format(int(median_age.loc['df'] - median_age.loc['sports'])))\n#\n# max_tests = df.pivot_table(index='hospital', columns='blood_test', values='age', aggfunc='count')\n# print('The answer to the 5th question is {}, {} blood tests'.format(max_tests.t.idxmax(), int(max_tests.t.max())))\n\n# Stage 5/5\ndf.plot(y='age', kind='hist', bins=6)\nplt.show()\nprint('The answer to the 1st question: 15-35')\ndf['diagnosis'].value_counts().plot(kind='pie')\nplt.show()\nprint('The answer to the 2nd question: pregnancy')\nfig, axes = plt.subplots()\ndf['diagnosis'].value_counts().plot(kind='pie')\nprint(\"The answer to the 3rd question: It's because the plot wont show 
up\")\n\nplt.show()\n","repo_name":"pkk111/Hyperskill-data-analysis-for-hospitals","sub_path":"Data Analysis for Hospitals/task/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5240066973","text":"\nclass stack:\n def __init__(self):\n self.store = []\n\n def push(self, item):\n self.store.append(item)\n\n def pop(self):\n self.store.pop()\n\n def top(self):\n return self.store[-1]\n\n def isEmpty(self):\n if self.store:\n return False\n else:\n return True\n\ndef max_elem(arr):\n s = stack()\n aux = stack()\n for elem in arr:\n if elem[0] == 1:\n # push to stack \n val = elem[1]\n s.push(val)\n if aux.isEmpty():\n aux.push([val, 1])\n else:\n top_elem = aux.top()\n if val > top_elem[0]:\n aux.push([val, 1])\n\n if val == top_elem[0]:\n val_count = top_elem[1]\n val_count += 1\n aux.pop()\n aux.push([val, val_count])\n if elem[0] == 2:\n s.pop()\n \n if elem[0] == 3:\n top_elem = aux.top()\n print(top_elem[0])\n\n return 0\n\nif __name__ == \"__main__\":\n n = input()\n input_data = []\n for i in range(n):\n line = raw_input()\n line = str(line)\n line = line.split(\" \")\n line = [int(elem) for elem in line]\n input_data.append(line)\n max_elem(input_data)\n","repo_name":"mohitsh/cpp","sub_path":"stack/maxElement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33327879338","text":"import json\nfrom pathlib import Path\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\ndef init_app(setting_name: str) -> Flask:\n # app = Flask(__name__)\n app.logger.debug(\"Init Flask app config: %s\", setting_name)\n\n config_json_path = Path(__file__).parent / \"config\" / \"json-schemas\"\n for p in config_json_path.glob(\"*.json\"):\n with open(p) as f:\n json_name = p.stem\n schema = json.load(f)\n app.config[json_name] = schema\n app.logger.debug(\"Init json-schema config: %s\", setting_name)\n return app\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef root():\n return {\"message\": \"Hello!\"}\n","repo_name":"peacock0803sb/aws-sam-sandbox","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42228470729","text":"maze = [x for x in \".###.....#..#S.##..##.S#.#SS.##..#.#..##.S.#.#.SS..###.#.S.....#.#...S##.#.####..S.S.#..S...S..#LS..\"]\n\nplayerX = 0\nplayerY = 0\n\nwidth = 10\nheight = len(maze) // width\ndirections = [\"V\", \"<\", \"^\", \">\"]\n\ndef printMaze(moves = 0, doDirections = True):\n global playerX, playerY\n phase = moves % 4\n indicator = directions[phase]\n if not doDirections:\n indicator = 'S'\n copy = list(maze)\n for i, c in enumerate(maze):\n if c == \"S\":\n copy[i] = indicator\n copy[playerY * width + playerX] = \"@\"\n m = \"\".join(copy)\n for i in range(height):\n print(m[i * width: (i + 1) * width])\n\n\ndef shift(moves = 0):\n global maze\n phase = moves % 4\n #shift down\n if phase == 0:\n for y in range(9, -1, -1):\n for x in range(10):\n targetIndex = y * height + x\n target = maze[targetIndex]\n if target == 'S':\n if (0 <= y + 1 < height):\n moveIndex = (y + 1) * width + x\n if maze[moveIndex] == '.':\n maze[targetIndex] = '.'\n maze[moveIndex] = 'S'\n #shift left\n if phase == 1:\n for x in range(10):\n for y in range(10):\n targetIndex = y * 
height + x\n target = maze[targetIndex]\n if target == 'S':\n if (0 <= x - 1 < width):\n moveIndex = y * width + (x - 1)\n if maze[moveIndex] == '.':\n maze[targetIndex] = '.'\n maze[moveIndex]='S'\n #shift up\n if phase == 2:\n for y in range(10):\n for x in range(10):\n targetIndex = y * height + x\n target = maze[targetIndex]\n if target == 'S':\n if (0 <= y - 1 < height):\n moveIndex = (y - 1) * width + x\n if maze[moveIndex] == '.':\n maze[targetIndex] = '.'\n maze[moveIndex] = 'S'\n #shift right\n if phase == 3:\n for x in range(9, -1, -1):\n for y in range(10):\n targetIndex = y * height + x\n target = maze[targetIndex]\n if target == 'S':\n if (0 <= x + 1 < width):\n moveIndex = y * width + (x + 1)\n if maze[moveIndex] == '.':\n maze[targetIndex] = '.'\n maze[moveIndex]='S'\nmoves = 0\nwinSeq = \"\"\n#printMaze(0, False)\nwhile (moves <= 0x31):\n printMaze(moves)\n shift(moves)\n moves += 1\n c = ''\n c = input('>')[0]\n oldx, oldy = playerX, playerY\n if c == \"w\":\n playerY -= 1\n if c == \"a\":\n playerX -= 1\n if c == \"s\":\n playerY += 1\n if c == \"d\":\n playerX += 1\n winSeq += c\n if c == \"p\":\n print(winSeq)\n tile = maze[playerY * width + playerX]\n if (tile == 'S') or (tile =='#'):\n playerX, playerY = oldx, oldy\n\n if maze[playerY * width + playerX] == 'S':\n break\n #printMaze(moves)\n\nprint(\"sssss\")\nprint(winSeq)\nprintMaze(moves)\n","repo_name":"username123115/ctfWriteups","sub_path":"nbctf/shiftySands/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24312820","text":"import random\n\nPlay_again = True\n\n#Create the menu of the game\ndef menu():\n while Play_again:\n print('[1] for even \\n[2] for odd')\n player1_choose = int(input('Make your play: '))\n \n player1_number = int(input('Choose a number from 1 to 9: '))\n player2_number = random.randint(1,9)\n \n if player1_choose == 1 or player1_choose == 2:\n \n sum = player1_number + player2_number\n\n win_lose(player1_choose,sum)\n\n play_again()\n \n \n \n#Win and lose condition\ndef win_lose(player1_choose, sum):\n if player1_choose == 1 and (sum % 2) == 0 or player1_choose == 2 and (sum % 2) == 1:\n print(f'Player 1 win, because choose even and the number was {sum}') \n else:\n print(f'Player 2 win, because choose even and the number was {sum}') \n\n\n\ndef play_again():\n global Play_again\n again = int(input('[1]y [2]n Play again? 
'))\n if again == 1:\n Play_again = True\n else:\n Play_again = False\n \n return Play_again\n \n\nmenu()\n","repo_name":"AndreLucenaJr/Simple_projects","sub_path":"Even_or_odd.py","file_name":"Even_or_odd.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20621287834","text":"import copy\nimport json\nimport os.path\nimport queue\nimport random\n\nimport arcade\n\nMAX_TILE_COUNT = 256\nMIN_ENTRY_COUNT = 3\nMAX_ENTRY_COUNT = 6\n\nMAP_UNIT_RESOURCES_PATH = \"assets/map/\"\nTILE_SCALE = 3\nMAP_MODELS = {}\nMAP_UNITS = {}\nUNITS_WEIGHT = {}\n\ngenerate_step_count = 0\n\n\ndef load_resources():\n f = open(MAP_UNIT_RESOURCES_PATH + \"Tiles.json\")\n json_obj = json.load(f)\n f.close()\n\n for model in json_obj['models']:\n MAP_MODELS[model['id']] = model\n\n for unit in json_obj['tiles']:\n uid = unit['id']\n model_id = unit['model_id']\n MAP_UNITS[uid] = MapUnit(MAP_UNIT_RESOURCES_PATH + MAP_MODELS[model_id]['path'], TILE_SCALE, uid,\n unit['rotation'], unit['connect'], unit['entry'], unit['wall'])\n UNITS_WEIGHT[uid] = unit['weight']\n\n\nclass MapUnit(arcade.Sprite):\n def __init__(self, file_path: str, scale: float, uid: int, angle: int, connect: list, entry: list, wall: bool):\n super().__init__(file_path, scale)\n self.id = uid\n self.connect = connect\n self.angle = angle\n self.entry = entry\n self.is_wall = wall\n self.passable = False\n self.is_portal = False\n self.portal_id = None\n self.portal_des = [-1, -1]\n self.direction = None\n\n def placeable(self, connect_info) -> bool:\n \"\"\"如果四个方向中有一个方向的可选单元里,没有可与当前单元邻接的,则无法放置\"\"\"\n for i in range(4):\n if connect_info[i].isdisjoint(self.connect[i]):\n return False\n return True\n\n def set_portal(self, pos, pid):\n self.is_portal = True\n self.portal_des = pos\n self.portal_id = pid\n\n def copy_sprite(self) -> arcade.Sprite:\n return copy.copy(self)\n\n\ndef get_from_center(map_info_2d: list[list[MapUnit]], get_func) -> list[int, int] | None:\n dir_vec = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n row_cot = len(map_info_2d)\n col_cot = len(map_info_2d[0])\n vis = get_vis_matrix(map_info_2d)\n\n center = [row_cot // 2, col_cot // 2]\n vis[center[0]][center[1]] = True\n q = queue.Queue()\n q.put(center)\n\n while q.not_empty:\n pos = q.get()\n cell = map_info_2d[pos[0]][pos[1]]\n if get_func(cell):\n return pos\n\n for i in range(4):\n next_pos = [pos[0] + dir_vec[i][0], pos[1] + dir_vec[i][1]]\n if not in_bound(next_pos, row_cot, col_cot) or vis[next_pos[0]][next_pos[1]]:\n continue\n vis[next_pos[0]][next_pos[1]] = True\n q.put(next_pos)\n return None\n\n\ndef set_passable(map_info_2d: list[list[MapUnit]]):\n dir_vec = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n row_cot = len(map_info_2d)\n col_cot = len(map_info_2d[0])\n vis = get_vis_matrix(map_info_2d)\n\n start_pos = get_from_center(map_info_2d, lambda x: not x.is_wall)\n vis[start_pos[0]][start_pos[1]] = True\n q = queue.Queue()\n q.put(start_pos)\n\n while not q.empty():\n pos = q.get()\n cell = map_info_2d[pos[0]][pos[1]]\n cell.passable = True\n\n for i in range(4):\n next_pos = [pos[0] + dir_vec[i][0], pos[1] + dir_vec[i][1]]\n if not in_bound(next_pos, row_cot, col_cot) or vis[next_pos[0]][next_pos[1]] \\\n or map_info_2d[next_pos[0]][next_pos[1]].is_wall:\n continue\n vis[next_pos[0]][next_pos[1]] = True\n q.put(next_pos)\n\n\ndef distance(pos1, pos2):\n return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n\n\ndef choose_portal(map_info_2d: list[list[MapUnit]]) -> list[int, 
list]:\n edges = get_edge(map_info_2d)\n entry_list = list(filter(lambda x: map_info_2d[x[0]][x[1]].passable, edges))\n random.shuffle(entry_list)\n portal_list = []\n min_dis = None\n\n for i in range(1, len(entry_list) // 2 * 2, 2):\n portal_list.append([entry_list[i], entry_list[i-1]])\n dis = distance(entry_list[i], entry_list[i-1])\n min_dis = min(min_dis, dis) if min_dis is not None else dis\n return [min_dis, portal_list]\n\n\ndef set_portal(map_info_2d: list[list[MapUnit]]):\n min_dis = -1\n portal_list = []\n for _ in range(8000):\n portal = choose_portal(map_info_2d)\n if portal[0] is None:\n continue\n if min_dis < portal[0]:\n min_dis = portal[0]\n portal_list = portal[1]\n\n if not portal_list:\n # 没有找到portal则直接不设置\n return\n for i in range(len(portal_list)):\n pair = portal_list[i]\n map_info_2d[pair[0][0]][pair[0][1]].set_portal(pair[1], i)\n map_info_2d[pair[1][0]][pair[1][1]].set_portal(pair[0], i)\n\n\ndef copy_2d_list(x):\n return [copy.copy(ele) for ele in x]\n\n\ndef in_bound(next_pos, row: int, col: int):\n return 0 <= next_pos[0] < row and 0 <= next_pos[1] < col\n\n\ndef get_neighbour_pos(map_info, pos) -> list:\n \"\"\"获取周围的格子坐标\"\"\"\n dir_vec = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n neighbour = [[pos[0] + dir_vec[i][0], pos[1] + dir_vec[i][1]] for i in range(4)\n if in_bound([pos[0] + dir_vec[i][0], pos[1] + dir_vec[i][1]], len(map_info), len(map_info[0]))]\n return neighbour\n\n\ndef get_connect_info(map_info, pos) -> list:\n dir_vec = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n ans = []\n wall = get_wall()\n for i in range(4):\n next_pos = [pos[0] + dir_vec[i][0], pos[1] + dir_vec[i][1]]\n if in_bound(next_pos, len(map_info), len(map_info[0])):\n ans.append(map_info[next_pos[0]][next_pos[1]])\n else:\n ans.append(set(MAP_UNITS.keys()))\n return ans\n\n\ndef get_vis_matrix(map_info):\n row = len(map_info)\n col = len(map_info[0])\n return [[False for _ in range(col)] for _ in range(row)]\n\n\ndef collapse(map_info, x: int, y: int, num: int, force=False) -> list:\n num_set = {num}\n if not force and map_info[x][y].isdisjoint(num_set):\n return [] # 原先该格就不能放这个num\n\n map_info = copy_2d_list(map_info)\n map_info[x][y] = num_set\n vis = get_vis_matrix(map_info)\n vis[x][y] = True\n q = queue.Queue()\n q.put([x, y])\n while not q.empty():\n pos = q.get()\n next_pos = get_neighbour_pos(map_info, pos)\n for npos in next_pos:\n \"\"\"对于每一个有效位置,更新候选集合的可选情况\"\"\"\n if vis[npos[0]][npos[1]]:\n continue\n\n unit_set = map_info[npos[0]][npos[1]]\n new_set = set()\n connect_info = get_connect_info(map_info, npos)\n\n for _id in unit_set:\n if MAP_UNITS[_id].placeable(connect_info):\n # 可放置则继续留在候选集合内\n new_set.add(_id)\n\n if len(new_set) == 0:\n # 有一个格子无法放任何单元则说明坍缩失败,需要回溯\n return []\n\n # 可坍缩,将该位置入队,如果前后一样则不需要入队\n if unit_set != new_set:\n vis[npos[0]][npos[1]] = True\n map_info[npos[0]][npos[1]] = new_set\n q.put(npos)\n return map_info\n\n\ndef get_direction(pos, row_cot, col_cot) -> int:\n \"\"\"获取一个坐标所在的边缘方向,0、1、2、3、4分别代表上、右、下、左、中,四个角的情况会被算作左右方向\"\"\"\n if pos[1] == 0:\n return 3\n if pos[1] == col_cot - 1:\n return 1\n if pos[0] == 0:\n return 0\n if pos[0] == row_cot - 1:\n return 2\n return 4\n\n\ndef get_entry(direction, shuffle=False, seed=0) -> list:\n ls = []\n for tile in MAP_UNITS.values():\n if tile.entry[direction]:\n ls.append(tile.id)\n if shuffle:\n # random.seed = seed\n random.shuffle(ls)\n return ls\n\n\ndef get_wall():\n ls = list(filter(lambda x: x.is_wall, MAP_UNITS.values()))\n return random.choice(ls).id\n\n\ndef get_edge(map_info: list[list]):\n max_col = 
len(map_info[0]) - 1\n max_row = len(map_info) - 1\n range_horizontal = range(1, max_col) # 地图宽度必须大于3\n range_vertical = range(1, max_row)\n edges = list(map(lambda x: [0, x], range_horizontal)) \\\n + list(map(lambda x: [max_row, x], range_horizontal)) \\\n + list(map(lambda x: [x, 0], range_vertical)) \\\n + list(map(lambda x: [x, max_col], range_vertical))\n return edges\n\n\ndef set_entry(seed, map_info, min_entry, max_entry) -> list:\n \"\"\"在地图边缘随机添加出入口,数量一定是偶数\"\"\"\n max_col = len(map_info[0]) - 1\n max_row = len(map_info) - 1\n edges = get_edge(map_info)\n\n new_map = []\n while not new_map:\n \"\"\"不断生成entry然后collapse直到合法为止\"\"\"\n new_map = copy_2d_list(map_info)\n wall = get_wall()\n # random.seed = seed\n cot = random.randint(min_entry, max_entry)\n cot = cot // 2 * 2\n random.shuffle(edges)\n\n for pos in edges[0:cot]:\n dir_num = get_direction(pos, max_row + 1, max_col + 1)\n entry = get_entry(dir_num, shuffle=True, seed=seed)[0]\n new_map = collapse(new_map, pos[0], pos[1], entry, force=True)\n if not new_map:\n break\n\n if not new_map:\n continue\n\n for pos in edges[cot:] + [[0, max_col], [max_row, max_col], [0, 0], [max_row, 0]]:\n new_map = collapse(new_map, pos[0], pos[1], wall, force=True)\n if not new_map:\n break\n\n return new_map\n\n\ndef random_select(set_ele: set, seed):\n # random.seed = seed\n cot = len(set_ele)\n return list(set_ele)[random.randint(0, cot - 1)]\n\n\ndef shuffle_by_weight(ele_list: list, weight_list: list, seed: int):\n # random.seed = seed\n rand_list = []\n for i in range(len(ele_list)):\n num = ele_list[i]\n rand_list += [num] * weight_list[i]\n for i in range(len(ele_list) - 1):\n index = random.randint(0, len(rand_list) - 1)\n num = rand_list[index]\n index = ele_list.index(num)\n ele_list[i], ele_list[index] = ele_list[index], ele_list[i]\n rand_list = list(filter(lambda x: x != num, rand_list))\n\n\ndef dfs_gen(map_info, row_cot, col_cot, seed):\n min_entropy = MAX_TILE_COUNT + 1\n min_cell = []\n sorted_cell = []\n for i in range(row_cot):\n for j in range(col_cot):\n \"\"\"找到Entropy最小的进行Collapse,虽说最小但也一定要大于1\"\"\"\n cell = map_info[i][j]\n entropy = len(cell)\n if entropy <= 1:\n continue\n if min_entropy > entropy:\n min_entropy = entropy\n min_cell = [[i, j, cell]]\n continue\n if min_entropy == entropy:\n min_cell.append([i, j, cell])\n sorted_cell.append([i, j, cell])\n\n \"\"\"每次collapse都保证了没有为0的,所以当没找到大于1的最小时,则说明地图生成完毕\"\"\"\n if min_entropy > MAX_TILE_COUNT:\n return map_info\n\n \"\"\"先把每个cell打乱,然后再把每个cell的可选tiles按权重打乱,然后循环尝试collapse,成功一个就结束,进入下一次迭代\"\"\"\n random.shuffle(sorted_cell)\n sorted_cell.sort(key=lambda x: len(x[2]))\n random.shuffle(min_cell)\n for cell in sorted_cell:\n global generate_step_count\n if generate_step_count > 200:\n return []\n generate_step_count += 1\n\n ls = list(cell[2])\n shuffle_by_weight(ls, list(map(lambda x: UNITS_WEIGHT[x], ls)), seed)\n for uid in ls:\n new_map = collapse(map_info, cell[0], cell[1], uid)\n new_map = dfs_gen(new_map, row_cot, col_cot, seed) if new_map else new_map\n if new_map:\n return new_map # 后续递归的所有坍缩均成功,返回生成好的地图\n # 否则继续循环\n # 没有成功生成,换下一个格子\n return [] # 依然没有成功,返回[]\n\n\ndef to_sprite_map_info(map_info: list):\n info = []\n for i in range(len(map_info)):\n row = map_info[i]\n row_info = []\n info.append(row_info)\n for j in range(len(row)):\n cell = row[j]\n if len(cell) > 1:\n raise KeyError('Multiple unit')\n uid = list(cell)[0]\n unit = MAP_UNITS[uid].copy_sprite()\n d = get_direction([i, j], len(map_info), len(map_info[0]))\n unit.direction = d if d < 4 else 
None\n row_info.append(unit)\n return info\n\n\ndef try_generate_map(seed, row_cot, col_cot):\n global generate_step_count\n generate_step_count = 0\n random.seed(seed)\n map_info = [[set(MAP_UNITS.keys()) for _ in range(col_cot)] for _ in range(row_cot)]\n map_info = set_entry(seed, map_info, MIN_ENTRY_COUNT, MAX_ENTRY_COUNT)\n map_info = dfs_gen(map_info, row_cot, col_cot, seed)\n\n print(f\"Map generate step count: {generate_step_count}\")\n return map_info\n\n\ndef generate_map(seed, row_cot, col_cot):\n map_info = []\n while not map_info:\n map_info = try_generate_map(seed, row_cot, col_cot)\n\n map_info = to_sprite_map_info(map_info)\n set_passable(map_info)\n set_portal(map_info)\n map_info = get_map_sprites(map_info)\n return map_info\n\n\ndef get_map_sprites(map_info: list) -> list:\n map_seq = [cell for row in map_info for cell in row]\n return map_seq\n\n\ndef save_map(map_info: list):\n map_obj = []\n for row in map_info:\n r = []\n map_obj.append(r)\n for cell in row:\n r.append(list(cell))\n\n if not os.path.exists(\"cache/map\"):\n os.makedirs(\"cache/map\")\n with open(f\"cache/map/{random.randint(100000000, 999999999)}.json\", mode='w') as f:\n json.dump(map_obj, f)\n\n\ndef in_bound_2d(pos: list, center: list, width: float, height: float) -> bool:\n left = center[0] - width / 2\n right = center[0] + width / 2\n top = center[1] - height / 2\n bottom = center[1] + height / 2\n return left <= pos[0] < right and top <= pos[1] < bottom\n\n\nload_resources()\n","repo_name":"Wshine233/Not-a-Pac-Man","sub_path":"mapgen.py","file_name":"mapgen.py","file_ext":"py","file_size_in_byte":14064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4708810803","text":"# WaveShapePlay\n# Find a detailed youtube tutorial for the Arduino Com Connection Code at: https://youtu.be/DJD28uK5qIk\nimport serial.tools.list_ports\ndef get_ports():\n ports = serial.tools.list_ports.comports(include_links=False)\n for p in ports:\n print(p.device)\n print(len(ports), 'ports found')\n return ports\n\ndef findArduino(portsFound):\n \n commPort = 'None'\n numConnection = len(portsFound)\n print(numConnection)\n for i in range (0, numConnection):\n port = foundPorts[i]\n strPort = str(port)\n print(strPort)\n if 'SERIAL' in strPort: \n splitPort = strPort.split(' ')\n commPort = (splitPort[0])\n print(commPort)\n return commPort\n \n \nfoundPorts = get_ports() \nconnectPort = findArduino(foundPorts)\n\nif connectPort != 'None':\n print('Connected to ' + connectPort)\n\nelse:\n print('Connection Issue!')\n\nprint('DONE')\n\n\n\n\n\n \n","repo_name":"Camandaroba06/Cronometro-Linus-PySimpleGUI","sub_path":"autoconect.py","file_name":"autoconect.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23607628899","text":"# Define a dictionary that maps each codon to its corresponding amino acid\r\ncodon_table = {\r\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\r\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\r\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\r\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\r\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\r\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\r\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\r\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\r\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\r\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\r\n 'GAC': 'D', 'GAT': 'D', 
'GAA': 'E', 'GAG': 'E',\r\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\r\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\r\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\r\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\r\n 'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',\r\n}\r\n\r\n# Define a function to decode a DNA sequence into its corresponding nitrogenous bases\r\ndef decode_dna(dna_sequence):\r\n codons = [dna_sequence[i:i+3] for i in range(0, len(dna_sequence), 3)]\r\n amino_acids = [codon_table.get(codon, 'X') for codon in codons]\r\n return ''.join(amino_acids)\r\n\r\n# Example usage:\r\ndna_sequence = 'GGTCTTTGAAGATGCTTTTGAAACTCCGAGGAAATAGCTGATCTTGTTCATCCAAATTTTGAGGAGGAGGCTGTTGTTGGGAGTTGTACCCACAGATACCTCTCTTCTACTTGGGGAGATGCTTGATGAAGTTTTTCTACTTTGAGAAGAAGAAATGCTTTGCAAGGAAATGAGATGATGACTGATCCAGGACTACACCCACCTTACATGTCTG...'\r\n\r\ndecoded_sequence = decode_dna(dna_sequence)\r\nprint(decoded_sequence)\r\n","repo_name":"shahzaibkhan2/Python-Advanced-Projects","sub_path":"code-for-decoding-dna-nitrogenous-bases/decoding_nitrogenous_bases.py","file_name":"decoding_nitrogenous_bases.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"24686751989","text":"#!/usr/bin/env python\n\nimport os\nfrom fastai.vision import *\nfrom PIL import Image as PILImage\nfrom pathlib import Path\nimport warnings\nimport argparse\nwarnings.filterwarnings(\"ignore\")\n\n\ndef walk_directory_to_list(input_directory, val_fxn=lambda x: x.endswith('.png')):\n \"\"\" Walks input directory to file list. Takes a filepath validation function. \"\"\"\n file_list = []\n for root, dirs, files in os.walk(input_directory):\n for file in files:\n filepath = os.path.join(root, file)\n if val_fxn(filepath):\n file_list.append(filepath)\n return file_list\n\n\ndef predict_fastai(input_directory, output_directory, model_directory, chunk_size=50):\n \"\"\" Predict PIL readable images with fastai model. Model is average of models in model directory. 
\"\"\"\n\n # Retrieve learners for prediction\n learners = walk_directory_to_list(model_directory, lambda x: x.endswith('.pkl'))\n\n # Retrieve dataset for prediction\n data = SegmentationItemList.from_folder(input_directory)\n\n print({\"log_type\": \"info\", \"message\": f\"Starting fastai prediction, {len(data)} files found.\"}, flush=True)\n print({\"log_type\": \"status\", \"message\": \"0\"}, flush=True)\n\n for k in range(0, len(data), chunk_size):\n\n data_chunk = data[k:k+chunk_size]\n all_preds = []\n\n\n for l in learners:\n learner_path = Path(l)\n learn = load_learner(learner_path.parent, file=learner_path.name)\n preds = [learn.predict(d)[2].numpy() for d in data_chunk]\n all_preds.append(np.stack(preds))\n\n all_preds = np.stack(all_preds)\n mean_preds = np.mean(all_preds, axis=0)\n all_labels = np.argmax(mean_preds, axis=1)\n\n for i in range(len(data_chunk)):\n print({\"log_type\": \"status\", \"message\":f\"{round((k+i+1)/len(data),4)}\"}, flush=True)\n\n name, labels = data_chunk.items[i].name, all_labels[i, ...].astype('uint8')\n msk = PILImage.fromarray(labels)\n Path(output_directory).mkdir(parents=True, exist_ok=True)\n msk.save(os.path.join(output_directory, name), optimize=True)\n\n print({\"log_type\": \"info\", \"message\":f\"Stopping fastai prediction.\"}, flush=True)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_folder', required=True)\n parser.add_argument('-o', '--output_folder', required=True)\n parser.add_argument('-m', '--model_folder', required=True)\n parser.add_argument('-c', '--chunk_size', required=False, default=50)\n args = parser.parse_args()\n\n predict_fastai(args.input_folder, args.output_folder, args.model_folder, args.chunk_size)","repo_name":"jacobmatthewmurray/vesseg","sub_path":"app/fastai_predictor/fastai_predictor.py","file_name":"fastai_predictor.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"30147400877","text":"\"\"\"\r\n\n\nAnd who cursed the most in the fight between you and your spouse?\n\nGiven a **dict** with three rounds, with nested **dict** s as your score per\nround, return who cursed the most based on the following:\n\n * If you, return \"ME!\"\n * If your spouse, return \"SPOUSE!\"\n * If a draw, return \"DRAW!\"\n\n### Examples\n\n determine_who_cursed_the_most({\n \"round1\": { \"me\": 10, \"spouse\": 5 },\n \"round2\": { \"me\": 5, \"spouse\": 10 },\n \"round3\": { \"me\": 10, \"spouse\": 10 }}) ➞ \"DRAW!\"\n \n determine_who_cursed_the_most({\n \"round1\": { \"me\": 40, \"spouse\": 5 },\n \"round2\": { \"me\": 9, \"spouse\": 10 },\n \"round3\": { \"me\": 9, \"spouse\": 10 }}) ➞ \"ME!\"\n \n determine_who_cursed_the_most({\n \"round1\": { \"me\": 10, \"spouse\": 5 },\n \"round2\": { \"me\": 9, \"spouse\": 44 },\n \"round3\": { \"me\": 10, \"spouse\": 55 }}) ➞ \"SPOUSE!\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef determine_who_cursed_the_most(d):\n \n My_Swearing_Numbers = []\n Spouse_Swearing_Numbers = []\n \n Counter = 0\n Length = len(d)\n \n Item = d[\"round1\"]\n My_Swearing_Numbers.append(Item[\"me\"])\n Spouse_Swearing_Numbers.append(Item[\"spouse\"])\n \n Item = d[\"round2\"]\n My_Swearing_Numbers.append(Item[\"me\"])\n Spouse_Swearing_Numbers.append(Item[\"spouse\"])\n \n Item = d[\"round3\"]\n My_Swearing_Numbers.append(Item[\"me\"])\n Spouse_Swearing_Numbers.append(Item[\"spouse\"])\n \n My_Total = 0\n \n for x in My_Swearing_Numbers:\n My_Total 
+= x\n \n Spouse_Total = 0\n \n for x in Spouse_Swearing_Numbers:\n Spouse_Total += x\n \n if (My_Total > Spouse_Total):\n return \"ME!\"\n elif (Spouse_Total > My_Total):\n return \"SPOUSE!\"\n else:\n return \"DRAW!\"\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"tPNqhkqeQaCZGcBLo_17.py","file_name":"tPNqhkqeQaCZGcBLo_17.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13369139200","text":"from django.urls import path,include\r\nfrom . import views\r\nurlpatterns = [\r\n\tpath('',views.home,name='home'),\r\n\tpath('login/',views.login,name='login'),\r\n\tpath('register/',views.register,name='register'),\r\n\tpath('registerAction/',views.registerAction,name='registerAction'),\r\n\tpath('loginAction/',views.loginAction,name='loginAction'),\r\n\tpath('getbranch/',views.getbranch,name='getbranch'),\r\n\tpath('userHome/',views.userHome,name='userHome'),\r\n\tpath('formAction/',views.formAction,name='formAction'),\r\n\tpath('logout/',views.logout,name='logout')\r\n\r\n]","repo_name":"mithunprem99/BankApp","sub_path":"Bank/MyBank/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69892035612","text":"from rest_framework.test import APIRequestFactory\n\nfrom ..models import CMHotelConnectorAPIKey\nfrom ..permissions import HasCMHotelConnectorAPIKey\n\n\ndef test_has_cm_hotel_api_key_permission(\n mocked_channex_validation, cm_hotel_connector_factory\n):\n permission = HasCMHotelConnectorAPIKey()\n\n request = APIRequestFactory().get(\"/\", HTTP_AUTHORIZATION=\"Api-Key 123\")\n assert not permission.has_permission(request, None)\n\n cm_hotel = cm_hotel_connector_factory(channex=True)\n _, api_key = CMHotelConnectorAPIKey.objects.create_key(\n name=\"API Key\",\n cm_hotel_connector=cm_hotel,\n )\n request = APIRequestFactory().get(\"/\", HTTP_AUTHORIZATION=f\"Api-Key {api_key}\")\n assert permission.has_permission(request, None)\n","repo_name":"xhoantran/HotelMS-server","sub_path":"backend/cm/tests/test_permissions.py","file_name":"test_permissions.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13982020905","text":"import pandas as pd\nimport nltk\nimport numphy as np\nimport re\nfrom nltk.stem import wordnet #to perform lemmitization\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk import pos_tag #for parts of speech\nfrom sklearn.metrics import pairwise_distances # to perform cosine similarity\nfrom nltk import word_tokenize #to create tokens\nfrom nltk.corpus import stopwords #for stop words\nimport matplotlib.pyplot as plt \n\nfrom algorithms import *\nfrom textProcessing import * \n\ndataFile = pd.read_excel(\"Q_and_A.xlsx\") \n\ndataFile.ffill(axis=0, inplace = True)\n\n### Applying to dataset\ndataFile['lemmatized_text'] = dataFile['Questions'].apply(text_normalizer)\n\nmenu = {}\nmenu['1']=\"Test the algorithms\" \nmenu['2']=\"Exit\"\n\nalgo = ['TFIDF','BOW']\n\ncounter_A = 0\ncounter_B = 0\ncounter_Equal = 0\nnumberOfQuestions = 0\n\nwhile True: \n print('\\n--------------------- ')\n options=menu.keys()\n sorted(options)\n for entry in options: \n print (entry, menu[entry])\n selection = input(\"Please select: \") \n print('--------------------- ')\n \n\n 
if selection =='1': \n print (' TEST ITERATION')\n\n while numberOfQuestions < 10:\n print('--------------------- ')\n Question = input('\\nAsk a question: ')\n\n numberOfQuestions += 1\n print(numberOfQuestions)\n\n print('(A) TFIDF: ' + dataFile['Questions'].loc[tfidf(Question,dataFile['lemmatized_text'])])\n\n print('(B) BOW: ' + dataFile['Questions'].loc[bagOfWords(Question,dataFile['lemmatized_text'])])\n\n best_algo = input('\\nWhich algorithm gave the best answer? A / B / E (Equal) \\nAnswer: ' )\n print('You choose ' + best_algo)\n \n \n if best_algo=='A' or best_algo =='a':\n counter_A += 1\n print(counter_A)\n elif best_algo == 'B' or best_algo == 'b': \n counter_B += 1\n print(counter_B)\n elif best_algo == 'E' or best_algo == 'e' :\n counter_Equal += 1\n print(counter_Equal)\n else: \n break\n\n\n print('\\n--------------------- ')\n print (' RESULT')\n print('--------------------- ')\n print(\"\\nTFIDF was best \" + str(counter_A) + \" times\")\n print(\"BOW was best \" + str(counter_B) + \" times\")\n print(\"They were equal in performance \" + str(counter_Equal) + \" times\")\n\n \n # defining labels\n activities = ['TFIDF', 'BOW', 'EQUAL',]\n \n # portion covered by each label\n slices = [counter_A, counter_B, counter_Equal]\n \n # color for each label\n colors = ['#ff9999', '#99ff99', '#99e6ff']\n \n # plotting the pie chart\n plt.pie(slices, labels = activities, colors=colors,\n startangle=90, shadow = False, explode = (0, 0, 0),\n radius = 1.2, autopct = '%1.1f%%', labeldistance=1.2)\n\n plt.title('RESULT', fontdict={'fontsize': 17}) \n \n # plotting legend\n plt.legend()\n \n # showing the plot\n plt.show()\n \n break \n\n elif selection == '2':\n break\n elif selection == '': \n break\n else: \n print ('Unknown Option Selected')\n\n\n\n","repo_name":"JohnstoneJ/TNM108","sub_path":"TNM108-project_testIteration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15387874089","text":"from django.contrib import admin\nfrom image_cropping import ImageCroppingMixin\nfrom ceilings.models import *\n\n\nclass CeilingImageInline(ImageCroppingMixin, admin.StackedInline):\n\tmodel = CeilingImage\n\textra = 1\n\nclass CeilingAdmin(admin.ModelAdmin):\n\tmodel = Ceiling\n\tfilter_horizontal = ('filter',)\n\tprepopulated_fields = {'slug': ('name',), }\n\tinlines = [CeilingImageInline, ]\n\nclass FilterTypeAdmin(admin.ModelAdmin):\n\tmodel = FilterType\n\tprepopulated_fields = {'slug': ('name', ), }\n\nclass FilterAdvantagesInline(admin.StackedInline):\n\tmodel = FilterAdvantages\n\textra = 1\n\nclass FilterAdvantagesAdmin(admin.ModelAdmin):\n\tmodel = Filter\n\tprepopulated_fields = {'slug': ('name', ), }\n\nclass FilterAdmin(admin.ModelAdmin):\n\tmodel = Filter\n\tprepopulated_fields = {'slug': ('name', ), }\n\tinlines = [FilterAdvantagesInline, ]\n\n\nadmin.site.register(Ceiling, CeilingAdmin)\nadmin.site.register(FilterType, FilterTypeAdmin)\nadmin.site.register(Filter, FilterAdmin)\n# admin.site.register(FilterAdvantages, FilterAdvantagesAdmin)\n","repo_name":"greenteamer/ceiling-django","sub_path":"ceilings/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2263692180","text":"import sys\n\nif (len(sys.argv) < 6):\n\traise ValueError('python arff2libsvm.py [inputfile] [outputfile] [num_nominal] [num_numeric] 
[num_labels]')\n\t\n\nfin = open(sys.argv[1], 'r')\n\nfout = open(sys.argv[2], 'a')\n\nenum = int(sys.argv[3])\nnumeric = int(sys.argv[4])\nnum_fea = enum + numeric\nnum_labels = int(sys.argv[5])\n\nreading_data = False\n\nfeatures = []\nlabels = []\n\nline_num = 0\nmeet_first_label = False\nfor line in fin.readlines():\n\tline_num += 1\n\t#print line\n\tif len(line) < 2:\n\t\tcontinue\n\tif not reading_data:\n\t\tif line.startswith('@relation'):\n\t\t\tcontinue\n\t\t\n\t\tif line.startswith('@data'):\n\t\t\treading_data = True\n\t\t\tif len(features) != num_fea:\n\t\t\t\traise ValueError('incompatible feature size, get ' + str(len(features)) + ', should be ' + str(num_fea) )\n\t\t\tif len(labels) != num_labels:\n\t\t\t\traise ValueError('incompatible label size, get ' + str(len(labels)) + ', should be ' + str(num_labels) )\n\t\t\tcontinue\n\t\t\n\t\tif line.startswith('@attribute'):\n\t\t\ttokens = line.split(' ')\n\t\t\tif len(tokens) != 3:\n\t\t\t\traise ValueError('Wrong number of attribute at line '+str(line_num))\t\t\n\t\t\tif tokens[1].startswith('tag_') or tokens[1].startswith('TAG_'):\n\t\t\t\t#should be a label\n\t\t\t\tmeet_first_label = True\n\t\t\t\tif (enum != 0) or (numeric != 0):\n\t\t\t\t\traise ValueError('need ' + str(enum) + 'more nominal and ' + str(numeric) + ' numeric features')\n\t\t\t\tif len(features) != num_fea:\n\t\t\t\t\traise ValueError('incompatible feature size, get ' + str(len(features)) + ', should be ' + str(num_fea) )\n\t\t\t\tlabels.append(tokens[1])\n\t\t\telse:\n\t\t\t\tif len(features) < num_fea:\n\t\t\t\t\t#should be a feature\n\t\t\t\t\tif (meet_first_label):\n\t\t\t\t\t\traise ValueError('Feature after label at line '+str(line_num))\t\n\t\t\t\t\tif tokens[2].startswith('{0,1}'):\n\t\t\t\t\t\tenum -= 1\n\t\t\t\t\telif tokens[2].startswith('numeric'):\n\t\t\t\t\t\tnumeric -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('Feature of unknown type at line '+str(line_num))\n\t\t\t\t\tfeatures.append(tokens[1])\n\t\t\t\telse:\n\t\t\t\t\t#should be a label\n\t\t\t\t\tif tokens[1].startswith('{0,1}'):\n\t\t\t\t\t\tmeet_first_label = True\n\t\t\t\t\t\tif (enum != 0) or (numeric != 0):\n\t\t\t\t\t\t\traise ValueError('need ' + str(enum) + 'more nominal and ' + str(numeric) + ' numeric features')\n\t\t\t\t\t\tlabels.append(tokens[1])\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('Unknown label type at line ' + str(line_num))\n\t\telse:\n\t\t\traise ValueError('Wrong Input Format at line '+str(line_num))\n\telse:\n\t\t#should be data\n\t\tline = line.replace('{', '')\n\t\tline = line.replace('}', '')\n\t\tline = line.replace('\\n', '')\n\t\ttokens = line.split(',')\n\t\toutput_line = ''\n\t\tfound_label=False\n\t\tfound_feature=False\n\t\tfor token in tokens:\n\t\t\tcoordinate_value = token.split(' ')\n\t\t\tif len(coordinate_value) != 2:\n\t\t\t\traise ValueError('Wrong Data Format at line '+str(line_num))\n\t\t\tif ('.' 
in coordinate_value[0]):\n\t\t\t\traise ValueError('Coorindates shouldn\\'t be numerical at line '+str(line_num))\n\t\t\tcoor = int(coordinate_value[0])\n\t\t\tval = float(coordinate_value[1])\n\t\t\tif coor < num_fea:\n\t\t\t\tfound_feature = True\n\t\t\t\t# this is a feature\n\t\t\t\toutput_line = output_line + ' ' + features[coor] + ':' + str(val)\n\t\t\telif coor < num_fea + num_labels:\n\t\t\t\t# this is a label\n\t\t\t\tif found_label:\n\t\t\t\t\toutput_line = ',' + output_line\n\t\t\t\toutput_line = labels[coor - num_fea] + output_line\n\t\t\t\tfound_label = True\n\t\t\telse:\n\t\t\t\traise ValueError('Coorindates out of range at line '+str(line_num))\n\t\tif (not found_label) and (not found_feature):\n\t\t\traise ValueError('Empty line at line '+str(line_num))\n\t\tif not found_label:\n\t\t\toutput_line = ' ' + output_line\n\t\tif not found_feature:\n\t\t\traise ValueError('No feature at line '+str(line_num))\n\t\tfout.write(output_line+'\\n')\n\nfin.close()\nfout.close()\n\t\n","repo_name":"xiangruhuang/arff_to_libsvm","sub_path":"arff2libsvm.py","file_name":"arff2libsvm.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33119915434","text":"import re\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.addons.sale.controllers.portal import CustomerPortal\n\n\nclass customSaleQuote(CustomerPortal):\n\n @http.route(['/my/orders/'], type='http', auth=\"public\",\n website=True)\n def portal_order_page(self, order_id, report_type=None, access_token=None,\n message=False, download=False, **kw):\n try:\n order_sudo = self._document_check_access('sale.order', order_id,\n access_token=access_token)\n except (AccessError, MissingError):\n return request.redirect('/my')\n if report_type in ('html', 'pdf', 'text'):\n return self._show_report(model=order_sudo, report_type='pdf',\n report_ref='quote_print.report_web_quotation_custom',\n download=download)\n result = super(customSaleQuote, self).portal_order_page(order_id,\n report_type=report_type,access_token=access_token,\n message=message, download=download, **kw)\n if access_token:\n order = request.env['sale.order'].sudo().search(\n [('id', '=', order_id), ('access_token', '=', access_token)])\n else:\n order = request.env['sale.order'].search([('id', '=', order_id)])\n\n if report_type == 'pdf':\n return result\n if not order:\n return result\n if hasattr(result, 'render'):\n renderedResult = result.render()\n elif hasattr(result, 'replace'):\n renderedResult = result\n else:\n return result\n # need to check\n variables = re.findall(r'\\${custom:.*?}', renderedResult)\n if not variables:\n return result\n for variable in variables:\n value = eval(variable[9:-1])\n if isinstance(value, (int, float, list, tuple, dict)):\n try:\n '''There are uncertain possible data. 
So making generic\n and ignore issue.'''\n try:\n value = str(value).encode(\"utf-8\").decode(\"utf-8\")\n except:\n value = str(value).decode(\"utf-8\")\n except:\n print('Invalid Data')\n value = u''\n renderedResult = renderedResult.replace(variable,\n value.encode('utf-8'))\n return renderedResult\n","repo_name":"temp-aardug/temp","sub_path":"quote_print/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21119290310","text":"import collections\nimport sys\ninput = sys.stdin.readline\n\n# 첫번째 풀이\n# N, K = map(int, input().strip().split(' '))\n# appliance = list(map(int, input().strip().split(' ')))\n#\n# multitab = []\n# count = 0\n#\n# for i in range(K):\n# temp = appliance[i]\n# if temp in multitab:\n# continue\n# if len(multitab) < N:\n# multitab.append(temp)\n# continue\n# far = 0\n# check = 0\n# for plug in multitab:\n# if plug not in appliance[i:]:\n# check = plug\n# break\n# elif appliance[i:].index(plug) > far:\n# far = appliance[i:].index(plug)\n# check = plug\n# multitab[multitab.index(check)] = temp\n# count += 1\n# print(count)\n\n\nn, k = map(int, input().strip().split())\nuse = list(map(int, input().strip().split()))\n\nresult = 0\n\ncandidate = [collections.deque() for _ in range(101)]\nfor i in range(k):\n candidate[use[i]].appendleft(i)\n\nmultiTab = collections.deque()\n\nfor i in use:\n if len(multiTab) == 0:\n multiTab.append(i)\n candidate[i].pop()\n else:\n if i in multiTab:\n candidate[i].pop()\n elif len(multiTab) < n:\n multiTab.append(i)\n candidate[i].pop()\n else:\n far = -1\n index = -1\n for j in range(n):\n if len(candidate[multiTab[j]]) == 0:\n index = j\n break\n elif far < candidate[multiTab[j]][-1]:\n far = candidate[multiTab[j]][-1]\n index = j\n multiTab[index] = i\n candidate[i].pop()\n result += 1\n\nprint(result)\n\n\n\n","repo_name":"GyuJeGal/Algorithm-Study","sub_path":"BaekJoon/Problem1700.py","file_name":"Problem1700.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"73881254170","text":"from copy import copy\r\n\r\nfrom pydmrs.core import Link, LinkLabel\r\nfrom pydmrs.components import Pred, RealPred, GPred\r\nfrom pydmrs.simplification.gpred_filtering import gpred_filtering, DEFAULT_FILTER\r\n#from pydmrs.mapping.mapping import dmrs_mapping\r\nfrom pydmrs.graphlang.graphlang import parse_graphlang\r\n\r\n# Also remove pronouns\r\nextended_filter = DEFAULT_FILTER | {GPred('pron')}\r\n\r\n# Replace the first pred with the second:\r\nrename = [(RealPred('forwards','p'), RealPred('forward','p','dir'))]\r\n\r\n# Replace a pair of nodes with a single node\r\n# (the first pred linked to the second pred, is replaced by the third pred)\r\nshrink = [('_left_a_1', 'ARG1/EQ', 'place_n', '_left_n_1'),\r\n ('_right_a_1', 'ARG1/EQ', 'place_n', '_right_n_1'),\r\n ('loc_nonsp', 'ARG2/NEQ', '_left_n_1', '_left_p_dir'),\r\n ('loc_nonsp', 'ARG2/NEQ', '_right_n_1', '_right_p_dir'),\r\n ('_to_p', 'ARG2/NEQ', '_left_n_1', '_left_p_dir'),\r\n ('_to_p', 'ARG2/NEQ', '_right_n_1', '_right_p_dir')]\r\n\r\nshrink = [(Pred.from_string(a),\r\n LinkLabel.from_string(b),\r\n Pred.from_string(c),\r\n Pred.from_string(d)) for a,b,c,d in shrink]\r\n\r\ndef simplify(dmrs):\r\n \"\"\"\r\n Simplify an input DMRS to a form that can be converted to robot commands\r\n \"\"\"\r\n # Remove unnecessary GPreds (defaults, plus pronouns)\r\n 
gpred_filtering(dmrs, extended_filter)\r\n \r\n # Remove quantifiers\r\n for node in copy(dmrs.nodes):\r\n if dmrs.is_quantifier(node.nodeid):\r\n dmrs.remove_node(node.nodeid)\r\n \r\n # Apply mapping rules\r\n for before, after in rename:\r\n for node in dmrs.iter_nodes():\r\n if node.pred == before:\r\n node.pred = after\r\n \r\n for first, label, second, new in shrink:\r\n for node in copy(dmrs.nodes):\r\n if node.pred == first:\r\n nid = node.nodeid\r\n for link in dmrs.get_out(nid, rargname=label.rargname, post=label.post):\r\n if dmrs[link.end].pred == second:\r\n # We've found a match \r\n endid = link.end\r\n dmrs.remove_link(link)\r\n # Copy links from second node to first\r\n for old_link in dmrs.get_out(endid):\r\n dmrs.add_link(Link(nid, old_link.end, old_link.rargname, old_link.post))\r\n for old_link in dmrs.get_in(endid):\r\n dmrs.add_link(Link(old_link.start, nid, old_link.rargname, old_link.post))\r\n # Remove the second node and update the first\r\n dmrs.remove_node(link.end)\r\n dmrs[nid].pred = new\r\n \r\n return dmrs\r\n\r\n\r\ndmrsstring = '''\r\n_then_c -L-HNDL/H-> _drive_v_1 <-L-INDEX/NEQ- :_then_c -R-HNDL/H-> _turn_v_1 <-R-INDEX/NEQ- :_then_c;\r\npronoun_q -RSTR/H-> pron <-1- :_drive_v_1 <=1= _forwards_p;\r\npronoun_q -RSTR/H-> pron <-1- :_turn_v_1 <=1= loc_nonsp -2-> place_n <-RSTR/H- def_implicit_q;\r\n_left_a_1 =1=> :place_n\r\n'''\r\ndmrs = parse_graphlang(dmrsstring)\r\ndmrs.surface = 'Drive forwards then turn left'\r\n\r\nprint([(n.nodeid, n.pred) for n in dmrs.nodes])\r\nprint(dmrs.links)\r\n\r\nsimplify(dmrs)\r\n\r\nprint()\r\nprint([(n.nodeid, n.pred) for n in dmrs.nodes])\r\nprint(dmrs.links)\r\n\r\n'Go forward and then turn to the left'\r\n'Turn left at a yellow line'\r\n'On a yellow line, turn to the left'","repo_name":"delph-in/pydmrs","sub_path":"examples/examples_toy_robot.py","file_name":"examples_toy_robot.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"6508576745","text":"class Pflanze:\n name = \"\"\n farbe = \"\"\n groesse = 0\n anzahlblatt = \"\"\n wasser = 0\n mengeWasserZumWachsen = 0\n \n def __init__(self, name, farbe, groesse, anzahlblatt):\n self.name = name\n self.farbe = farbe\n self.groesse = groesse\n self.anzahlblatt = anzahlblatt\n \n def hatGenugWasser(self):\n if self.wasser >= self.mengeWasserZumWachsen:\n return True\n else:\n return False\n \n def wasserGeben(self, menge):\n self.wasser = self.wasser + menge\n \n def wachsen(self):\n possible = self.hatGenugWasser()\n if possible == True:\n self.wasser = self.wasser - self.mengeWasserZumWachsen\n self.groesse = self.groesse + 1\n print(\"Deine Pflanze ist Gewachsen\")\n self.getInfos()\n else:\n print (\"Deine Pflanze hat nicht genug Wasser zum Wachsen\")\n \n \n def getInfos(self):\n water = str(self.wasser)\n print (\"Name:\",self.name)\n print (\"Farbe:\",self.farbe)\n print (\"Grösse:\",self.groesse)\n print (\"Anzahl der Blütenblätter:\",self.anzahlblatt)\n print (\"Die Pflanze hat \" + water + \" Wasser\")\n print (\"\")","repo_name":"Jocomol/random_python_games","sub_path":"Gentechlabor/Pflanse.py","file_name":"Pflanse.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4854407058","text":"from collections import defaultdict\n\ndef gen_wordcount_dict(corpus, word_filter):\n dic = defaultdict(int)\n for text in corpus:\n for word in text:\n if word_filter is 
None:\n dic[word] = dic[word] + 1\n else:\n if word in word_filter:\n dic[word] = dic[word] + 1 \n return dic\n\ndef top_words(corpus, num_words = 10, word_filter = None):\n dic = gen_wordcount_dict(corpus, word_filter)\n top = sorted(dic.items(), key= lambda x:x[1], reverse=True)[0:num_words]\n\n return top ","repo_name":"MHDLab/nlp_utils","sub_path":"nlp_utils/text_analysis.py","file_name":"text_analysis.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72355574172","text":"from utils import cfed_options\r\nfrom models.Update import LocalUpdate\r\nfrom utils.options import args_parser\r\nimport copy\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nif __name__ == \"__main__\":\r\n args = args_parser()\r\n loss_train = []\r\n acc_test = []\r\n cv_loss, cv_acc = [], []\r\n val_loss_pre, counter = 0, 0\r\n net_best = None\r\n best_loss = None\r\n val_acc_list, net_list = [], []\r\n # ________model_preparing________\r\n # dict_users, 每个用户所持有的数据集,这里实际上是做了一个数据划分的list\r\n fed = cfed_options.Fed(args)\r\n dataset_trains = []\r\n dataset_tests = []\r\n for i in range(args.num_users):\r\n dataset_train, dataset_test, dict_users = fed.load_data_fl(i*0)\r\n dataset_trains.append(dataset_train)\r\n dataset_tests.append(dataset_test)\r\n net_glob = fed.build_model()\r\n net_local = net_glob\r\n w_glob = net_glob.state_dict()\r\n # __________training____________\r\n m = max(int(args.frac * args.num_users), 1)\r\n # idxs_users = np.random.choice(range(args.num_users), m, replace=False)\r\n w_locals = [w_glob for i in range(args.num_users)]\r\n for iter in range(args.epochs):\r\n loss_locals = []\r\n # 随机选取一部分clients进行aggregate\r\n for idx in range(args.num_users):\r\n net_glob.load_state_dict(w_locals[idx])\r\n # 每个迭代轮次本地更新\r\n local = LocalUpdate(\r\n args=args, dataset=dataset_trains[idx], idxs=dict_users[idx])\r\n\r\n w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\r\n # 复制参与本轮更新的users的所有权重 w_locals\r\n\r\n w_locals[i] = (copy.deepcopy(w))\r\n loss_locals.append(copy.deepcopy(loss))\r\n\r\n # ___________Weight update__________\r\n\r\n w_glob = cfed_options.sfedAvg(w_locals)\r\n # 把权重更新到global_model\r\n for i in range(args.num_users):\r\n w_locals = cfed_options.sfed(w_glob, w_locals)\r\n\r\n\r\n # ___________print loss_____________\r\n loss_avg = sum(loss_locals) / len(loss_locals)\r\n print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))\r\n loss_train.append(loss_avg)\r\n\r\n\r\n print(\"Training finished\")\r\n fed.plot(loss_train)\r\n loss_locals = pd.DataFrame(loss_train)\r\n loss_locals.to_csv('myfile2.csv')\r\n loss_locals = pd.read_csv('myfile2.csv')\r\n print(loss_locals)\r\n fed.testing(net_glob, dataset_trains[1], dataset_tests[1])","repo_name":"hanchzh/hcz","sub_path":"federated-learning - cnn - 副本/main_cfed.py","file_name":"main_cfed.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18279863493","text":"import pymysql\nimport json\nimport sqlalchemy as db\nimport configparser\nimport sys\n\ndef get_data(name, prod=True):\n\n config = configparser.ConfigParser()\n\n if prod == True:\n config.read('./resources/python/general.cfg')\n else:\n config.read('../general.cfg')\n\n db_host = config.get('DATABASE', 'db_id')\n db_user = config.get('DATABASE', 'db_user')\n db_pw = config.get('DATABASE', 'db_password')\n db_name = config.get('DATABASE', 
'db_name')\n\n engine = db.create_engine('mysql+pymysql://{}:{}@{}/{}'.format(db_user, db_pw, db_host, db_name))\n connection = engine.connect()\n metadata = db.MetaData()\n\n match_data_table = db.Table('match_data', metadata, autoload=True, autoload_with=engine)\n\t# select where player is name\n select_query = match_data_table.select().where(match_data_table.c.player == name).order_by(match_data_table.c.match_id.desc())\n results = connection.execute(select_query).fetchall()\n\n row_headers = match_data_table.c.keys()\n\n json_data=[]\n for result in results:\n json_data.append(dict(zip(row_headers,result)))\n\n if prod == True:\n return json.dumps(json_data, indent=4, default=str)\n else:\n print(json_data) # maybe we want to ?\n\ndef main():\n\n\treturn get_data(sys.argv[1], sys.argv[2])\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Spaynkee/lol-data-py","sub_path":"resources/python/apis/get_user_data.py","file_name":"get_user_data.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4949401635","text":"#site = \"www.milevni.com\"\r\n#print(site[4:11])\r\n\r\nsite1 = \"www.milevni.com\"\r\nsite2 = \"www.atolyemde.com\"\r\nsite3 = \"www.google.com\"\r\nsite4 = \"www.gnu.org\"\r\n\r\nfor isim in site1, site2, site3, site4:\r\n print(\"site: \", isim[4:-4])\r\n\r\nprint(\"-\"*len(site2))\r\n\r\n\r\n#atasözlerindeki ünlem işareti kaldır ve nokta ekle\r\n\r\nata1 = \"Akıllı bizi arayıp sormaz deli bacadan akar!\"\r\nata2 = \"Ağa güçlü olunca kul suçlu olur!\"\r\nata3 = \"Avcı ne kadar hile bilirse ayı da o kadar yol bilir!\"\r\nata4 = \"Lafla pilav pişse deniz kadar yağ benden!\"\r\nata5 = \"Zenginin gönlü oluncaya kadar fukaranın canı çıkar!\"\r\n\r\nfor ata in ata1, ata2, ata3, ata4, ata5:\r\n print(ata[0:-1] + \".\")\r\n","repo_name":"bgr8/Python-Projeleri","sub_path":"Karakter Dizileri/karakterDilimle.py","file_name":"karakterDilimle.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24228072198","text":"import pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('D:\\Documents\\misc\\python_practice\\Tree_Data.csv', sep=\",\")\n\ndf_head = df.head()\n\n#egyediseg tesztelese\ndf['tree_id'].unique().size == df.shape[0]\n\ndf[\"status\"].unique()\ndf[\"health\"].unique()\ndf[df[\"status\"]==\"Alive\"][\"health\"].unique()\n\n#aranyok\n((df[\"status\"]==\"Stump\").sum() + (df[\"status\"]==\"Dead\").sum()) / (df[\"status\"]==\"Alive\").sum()\n\n#szuresek\ndf_missing = df[df[\"status\"] == 'Alive']\ndf_dead = df[(df[\"status\"] == \"Stump\") | (df[\"status\"] == \"Dead\")]\ndf_filtered = df_fintered[~df_fintered[\"tree_id\"].isin(df_dead[\"tree_id\"])]\n\ndf_filtered[(df_filtered[\"tree_dbh\"] > 60) | (df_filtered[\"tree_dbh\"] == 60)]\n\n\n#hianyzo adatok szurese\ndf_missing = df_missing[df_missing[\"health\"] != df_missing[\"health\"]]\ndf_fintered = df[~df.index.isin(df_missing.index)]\n\n\n#describe\nmean_by_spc = df_filtered.groupby(\"spc_common\").mean()\nmean_by_spc[\"tree_dbh\"].hist()\nmean_by_spc[mean_by_spc[\"tree_dbh\"] > 18]\n\n\n#vizualizacio\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf_filt_3 = df_filtered.loc[(df_filtered['spc_common']==\"London planetree\") | (df_filtered['spc_common']==\"eastern cottonwood\") | (df_filtered['spc_common']==\"silver maple\")]\n\n\nax = sns.violinplot(x=\"spc_common\", y=\"tree_dbh\", data=df_filt_3)\nax2 = 
sns.boxplot(x=\"spc_common\", y=\"tree_dbh\", data=df_filt_3)\n\ndf_filt_3[[\"spc_common\", \"health\", \"tree_dbh\"]].describe()\n\ndf_filt_3[ \"tree_dbh\"].mean()\ndf_filt_3[ \"tree_dbh\"].quantile(0.99)\ndf_filt_3[ \"tree_dbh\"].max()\n\n\n#75 percentilis feletti atmeroju fak kiszurese\nmask = df.groupby(\"spc_common\")[\"tree_dbh\"].describe()\ndf2 = df.merge(mask[\"75%\"], how=\"left\", on=\"spc_common\")\ndf2.loc[df2[\"tree_dbh\"] < df2['75%']]\n\nax3 = sns.lmplot(x=\"latitude\", y=\"longitude\", fit_reg=False, hue=\"health\", data=df_filt_3)\nax4 = sns.heatmap(df_filt_3, annot=True, annot_kws={\"size\": 7})\n#jarda helyzete es az egeszsegi allapot\ndf_health_con = df_filtered.groupby(\"health\")[\"tree_dbh\"].describe()\n\n\n((df[\"status\"]==\"Stump\").sum() + (df[\"status\"]==\"Dead\").sum()) / (df[\"status\"]==\"Alive\").sum()\n\n\n\n#apply function\ndf_head[\"test_length\"] = df_head[\"spc_common\"].apply(len_calc)\n\ndef len_calc(x):\n return(len(str(x)))\n\ndf2.loc[df2[\"tree_id\"]==180683]\n\n#egyszeru szuresek\ndf_mask = df[(df[\"spc_common\"]!=df[\"spc_common\"]) & (df[\"status\"]==\"Alive\")]\nmask = ((df[\"spc_common\"]!=df[\"spc_common\"]) & (df[\"status\"]==\"Alive\"))\n\ndf.loc[mask, \"spc_common\"] = df.loc[mask, \"spc_common\"].fillna(\"undefined\")\ndf.loc[mask, \"spc_common\"] = \"test\"\n\n#dealing with loc\nnew_date = df_complete[\"Completed Date\"].apply(lambda x: pd.to_datetime(x))\ndf_complete.loc[\"Completed Date\"] = new_date\n","repo_name":"abigel87/python_cheatsheet","sub_path":"tree_health.py","file_name":"tree_health.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24186178051","text":"from PageObjects.BasePage import BasePage\nfrom PageObjects.LoginPage import LoginPage\nfrom PageObjects.MainPage import MainPage\nimport time\n\ndef start_watcher():\n base_page = BasePage()\n browser = base_page.browser\n\n login_page = LoginPage(browser)\n\n if login_page.check_if_is_not_logged_in():\n login_page.login({ 'phone_country': 55, 'phone_number': 31975325480 })\n\n main_page = MainPage(browser)\n main_page.watch_channel('testedotelegra')\n\ntry:\n start_watcher()\n time.sleep(520)\nexcept KeyboardInterrupt:\n print('Watcher interrompido pelo usuario')","repo_name":"BrenoAlberto/telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4478527896","text":"\nfrom flask import request\nfrom flask.views import MethodView\nfrom flask_smorest import Blueprint,abort\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom flask_jwt_extended import jwt_required\nfrom db import db\nimport uuid\nfrom models import PostModel, post\nfrom cache import cache\nfrom schemas import PostSchema, PostUpdateSchema\nfrom pagination import pagination\nfrom flask import current_app as app\n\nblp = Blueprint(\"posts\",__name__, description = \"Operations on posts\")\n\n\n@blp.route(\"/post\")\nclass PostList(MethodView):\n\n \n @jwt_required()\n @cache.cached(timeout=200, query_string=True)\n @blp.response(200,PostSchema(many=True))\n def get(self):\n # return pagination.paginate(PostModel.query.all(),PostSchema)\n return PostModel.query.all()\n\n @jwt_required()\n @blp.arguments(PostSchema)\n @blp.response(201,PostSchema)\n def post(self,post_data):\n post = PostModel(**post_data)\n try:\n db.session.add(post)\n db.session.commit()\n 
print(db)\n except SQLAlchemyError:\n abort(500, message=\"An error occured while inserting the item\")\n return post\n\n\n@blp.route(\"/post/\")\nclass Post(MethodView):\n\n @jwt_required()\n @blp.response(200,PostSchema) \n def get(self,post_id):\n app.logger.info('Info level log')\n app.logger.warning('Warning level log')\n post = PostModel.query.get_or_404(post_id)\n return post\n\n @jwt_required()\n def delete(self,post_id):\n post = PostModel.query.get_or_404(post_id)\n db.session.delete(post)\n db.session.commit()\n return {\"message\":\"Post deleted.\"}\n\n\n @jwt_required()\n @blp.arguments(PostUpdateSchema)\n @blp.response(200,PostSchema)\n def put(self,post_data,post_id):\n post = PostModel.query.get(post_id)\n if post:\n post.title = post_data[\"title\"]\n post.body = post_data[\"body\"]\n # else:\n # post =PostModel(id = post_id,**post_data)\n db.session.add(post)\n db.session.commit()\n return post\n\n","repo_name":"ShubhamSoitkarNeosoft/blog-rest-api","sub_path":"resources/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42888158262","text":"from array import array\n\ncases = int(input())\n\nfor _ in range(cases):\n\tn = int(input())\n\ttemp = array('b', [0])\n\tresult = 0\n\tfor i in range(n):\n\t\toperation = input()\n\t\tif operation[0] == 'L':\n\t\t\ttemp.append(-1)\n\t\t\tresult -= 1\n\t\telif operation[0] == 'R':\n\t\t\ttemp.append(1)\n\t\t\tresult += 1\n\t\telse:\n\t\t\tstep = int(operation.split(' ')[-1])\n\t\t\ttemp.append(temp[step])\n\t\t\tresult += temp[step]\n\tprint(result)\n","repo_name":"shakib609/competitive-programming","sub_path":"UVA/12503.py","file_name":"12503.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2793904628","text":"import sys\r\nimport subprocess\r\nimport os\r\nimport shutil\r\n\r\ntry:\r\n fileName = sys.argv[1]\r\n fileName = fileName[:-4]\r\n\r\nexcept IndexError:\r\n print(\"Arguments not supplied\")\r\n\r\nwith open(fileName + \".pyb\", \"r\") as pybFile, open(fileName + \".py\", \"w\") as outputFile:\r\n pybContent = pybFile.readlines()\r\n\r\n for line in pybContent:\r\n canWriteLine = True\r\n try:\r\n if line.strip() == \"}\":\r\n canWriteLine = False\r\n elif line.strip().endswith(\" {\"):\r\n line = line[:-3]\r\n line = line + \":\\n\"\r\n\r\n if canWriteLine:\r\n outputFile.write(line)\r\n except IndexError:\r\n pass\r\n \r\nprint(\"Successfully Converted\")\r\nprint(\"Running file...\\n\")\r\nprint(\"Output:\")\r\ncmdInstance = subprocess.Popen(f'py \"{fileName}.py\"', stdout=subprocess.PIPE)\r\noutput, errors = cmdInstance.communicate()\r\nprint(output.decode())\r\nos.remove(f\"{fileName}.py\")\r\n","repo_name":"DoubleF3lix/py-bracket","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"30694606057","text":"class node(object):\n def __init__(self,value):\n self.value = value\n self.nextNode = None\n self.prevNode = None\n\ndef constructList(A,N):\n head = node(None)\n currNode = head\n for i in range(N):\n temp = node(A[i])\n currNode.nextNode = temp\n temp.prevNode = currNode\n currNode = temp\n tail = node(None)\n currNode.nextNode = tail\n tail.prevNode = currNode\n return head\n\ndef deleteFirst(head):\n delNode = head.nextNode\n postDelNode 
= delNode.nextNode\n head.nextNode = postDelNode\n postDelNode.prevNode = head\n return\n\ndef printForward(head):\n currNode = head.nextNode\n while currNode.nextNode != None:\n print(currNode.value,end=\" \")\n currNode = currNode.nextNode\n return\n\ndef printBackward(head):\n currNode = head.nextNode\n while currNode.nextNode != None:\n currNode = currNode.nextNode\n tail = currNode\n currNode = tail.prevNode\n while currNode.prevNode != None:\n print(currNode.value,end=\" \")\n currNode = currNode.prevNode\n return\n\ndef solution(N,A):\n # Write your code here...\n if N==0 or N==1:\n print(\"No element present\")\n return\n head = constructList(A,N)\n deleteFirst(head)\n printForward(head)\n print(\"\")\n printBackward(head)\n return\n\nN = int(input())\nA = list(map(int,input().split()))\nsolution(N,A)","repo_name":"Sayantan-world/Algorithms-and-Data-Structures-Skilling","sub_path":"002_LinkedList/2.3/2.3.3/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17274801486","text":"import logging\n\nfrom aiogram import Dispatcher\nfrom data.config import version\nfrom data.dev.data import get_sa\n\n\nasync def on_startup_notify(dp: Dispatcher):\n await send_msg_to_admin(dp, f\"Бот Запущен и готов к работе v{version}!\")\n\n\nasync def on_shutdown_notify(dp: Dispatcher):\n await send_msg_to_admin(dp, \"Работа бота завершена!\")\n\n\nasync def send_msg_to_admin(dp: Dispatcher, msg: str):\n for admin in get_sa():\n try:\n await dp.bot.send_message(admin, msg)\n except Exception as err:\n logging.exception(err)\n","repo_name":"Astler/assistant_bot","sub_path":"utils/notify_admins.py","file_name":"notify_admins.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"73817824411","text":"import math\nimport random\nlist = [0,1,2,3,4,5,6,7,8,9]\nmath.pi\nprint(math.pi)\na = random.choice(list)\n\nprint(a)\n\n\ndef get_web(url):\n \"\"\"url을 넣으면 페이지 내용을 돌려주는 함수 \"\"\"\n import urllib.request\n response = urllib.request.urlopen(url)\n data = response.read()\n decode = data.decode('utf-8')\n return decode\n\nurl = input('웹 페이지 주소? ')\ncontent = get_web(url)\nprint(content)\n","repo_name":"ldm0408/Python_Start","sub_path":"module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19155471763","text":"# Realizar un algoritmo para determinar cuánto ahorrará una persona en un año, \n# si al final de cada mes deposita cantidades variables de dinero; además, \n# se quiere saber cuánto lleva ahorrado cada mes. 
\n\nahorroMes=0\n\n\nfor mes in range(1,13):\n cantidad=float(input(\"Introduzca la cantidad del mes %d : \" % mes))\n porcentaje=cantidad*0.10\n ahorroMes+=porcentaje\n print(\"Va a ahorrar este mes \",mes,\" %.2f €\" % porcentaje)\n print(\"Ha conseguido ahorrar hasta hoy %.2f \"% ahorroMes)\n\n ","repo_name":"gguillamon/PYTHON-BASICOS","sub_path":"python_ejercicios basicos_III/bucles/ejercicio12.py","file_name":"ejercicio12.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5664226096","text":"import Meteor as mt\n\n\nclass MeteorCollection:\n def __init__(self, meteors):\n self.meteor_collection = meteors\n\n def extract_meteor_attributes(self):\n longitude_, latitude_, mass_ = [],[],[]\n for a_meteor in self.meteor_collection:\n longitude_.append(a_meteor.longitude)\n latitude_.append(a_meteor.latitude)\n mass_.append(float(a_meteor.mass)/1000)\n\n return longitude_, latitude_, mass_\n\n def extract_hover_text(self):\n hover_text = []\n for a_meteor in self.meteor_collection:\n hover_text.append(f'{a_meteor.year}\\nName: {a_meteor.name}')\n\n return hover_text\n","repo_name":"Greedo1977/NASA","sub_path":"MeteoriteLandings/MeteorCollection.py","file_name":"MeteorCollection.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23287575386","text":"# -*- coding: utf-8 -*-\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Library General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see .\n\nimport argparse\nimport logging\nimport sys\n\nfrom pungi_utils import patch_iso\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Print debugging information\"\n )\n parser.add_argument(\n \"--supported\",\n choices=(\"true\", \"false\"),\n help=\"Override supported bit on the ISO\",\n )\n parser.add_argument(\"--volume-id\", help=\"Override volume ID on the ISO\")\n parser.add_argument(\n \"--force-arch\", help=\"Treat the ISO as bootable on given architecture\"\n )\n parser.add_argument(\n \"--work-dir\", help=\"Set custom working directory. 
Default: /tmp/\", default=None\n )\n parser.add_argument(\n \"target\", metavar=\"TARGET_ISO\", help=\"which file to write the result to\"\n )\n parser.add_argument(\"source\", metavar=\"SOURCE_ISO\", help=\"source ISO to work with\")\n parser.add_argument(\n \"dirs\",\n nargs=\"+\",\n metavar=\"GRAFT_DIR\",\n help=\"extra directories to graft on the ISO\",\n )\n opts = parser.parse_args(args)\n\n level = logging.DEBUG if opts.verbose else logging.INFO\n format = \"%(levelname)s: %(message)s\"\n logging.basicConfig(level=level, format=format)\n log = logging.getLogger()\n\n patch_iso.run(log, opts)\n\n\ndef cli_main():\n if main():\n sys.exit(1)\n","repo_name":"yifengyou/pungi","sub_path":"BUILD/pungi-4.3.6/pungi/scripts/patch_iso.py","file_name":"patch_iso.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"24453378530","text":"from untitled1 import NewsKeyword, db, UserKeyword, Keyword\n\n\ndef entity_extract(Id, text, news):\n from monkeylearn import MonkeyLearn\n\n ml = MonkeyLearn('f61694907b120433ddc66da1880d537c5f9d8f1e')\n text_list = [text]\n module_id = 'ex_isnnZRbS'\n res = ml.extractors.extract(module_id, text_list)\n for row in res.result[0]:\n if not db.session.query(Keyword).filter(Keyword.key_name == row['entity']).count():\n key = Keyword(key_name=row[\"entity\"])\n db.session.add(key)\n db.session.commit()\n else:\n key = Keyword.query.filter_by(key_name=row[\"entity\"]).first()\n\n if news:\n nk = NewsKeyword(news_id=Id, key_id=key.id)\n db.session.add(nk)\n db.session.commit()\n else:\n # if not UserKeyword.query.filter_by(key_id=key.id, user_id=Id).count(): #may not be needed\n uk = UserKeyword(user_id=Id, key_id=key.id, priority=1)\n db.session.add(uk)\n db.session.commit()\n\n # else:\n # uk = UserKeyword.query.filter_by(key_id=key.id).first()\n # uk.priority += 1\n # db.session.commit()","repo_name":"jsanyam/My-Duniya","sub_path":"entity_api.py","file_name":"entity_api.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7330503550","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport os\nimport matplotlib.pyplot as plt\nimport logging\nimport sys\nimport numpy as np\n\n\ndef set_soft_gpu(soft_gpu):\n if soft_gpu:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n\n\ndef save_gan(model, path):\n global z1, z2\n n = 7\n if \"z1\" not in globals():\n z1 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))\n if \"z2\" not in globals():\n z2 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))\n n_z1 = 3\n assert n_z1 < model.n_style_block - 1\n noise = np.random.normal(0, 1, [len(z1), model.img_shape[0], model.img_shape[1]])\n inputs = [\n np.ones((len(z1)*n, 1)),\n np.concatenate(\n (z1.repeat(n, axis=0).repeat(n_z1, axis=1),\n np.repeat(np.concatenate([z2 for _ in range(n)], axis=0), model.n_style_block - n_z1, axis=1)),\n axis=1\n ),\n noise.repeat(n, axis=0),\n ]\n z1_inputs = [np.ones((len(z1), 1)), z1.repeat(model.n_style_block, axis=1), noise]\n z2_inputs = [np.ones((len(z2), 1)), z2.repeat(model.n_style_block, axis=1), noise]\n\n imgs = 
model.predict(inputs)\n z1_imgs = model.predict(z1_inputs)\n z2_imgs = model.predict(z2_inputs)\n imgs = np.concatenate([z2_imgs, imgs], axis=0)\n rest_imgs = np.concatenate([np.ones([1, model.img_shape[0], model.img_shape[1], model.img_shape[2]], dtype=np.float32), z1_imgs], axis=0)\n for i in range(len(rest_imgs)):\n imgs = np.concatenate([imgs[:i * (n+1)], rest_imgs[i:i + 1], imgs[i * (n+1):]], axis=0)\n imgs = (imgs + 1) / 2\n\n nc, nr = n+1, n+1\n f = plt.figure(0, (nc*2, nr*2))\n for c in range(nc):\n for r in range(nr):\n i = r * nc + c\n plt.subplot(nr, nc, i + 1)\n plt.imshow(imgs[i])\n plt.axis(\"off\")\n\n plt.tight_layout()\n os.makedirs(os.path.dirname(path), exist_ok=True)\n plt.savefig(path)\n f.clear()\n plt.close(f)\n\n\ndef get_logger(date_str):\n log_fmt = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\")\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n log_path = \"visual/{}/train.log\".format(date_str)\n os.makedirs(os.path.dirname(log_path), exist_ok=True)\n fh = logging.FileHandler(log_path)\n fh.setFormatter(log_fmt)\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_fmt)\n ch.setLevel(logging.INFO)\n logger.addHandler(ch)\n return logger\n\n\nclass InstanceNormalization(keras.layers.Layer):\n def __init__(self, axis=(1, 2), epsilon=1e-6):\n super().__init__()\n self.epsilon = epsilon\n self.axis = axis\n self.beta, self.gamma = None, None\n\n def build(self, input_shape):\n shape = [1 for _ in range(len(input_shape))]\n shape[-1] = input_shape[-1]\n self.gamma = self.add_weight(\n name='gamma',\n shape=shape,\n initializer='ones')\n\n self.beta = self.add_weight(\n name='beta',\n shape=shape,\n initializer='zeros')\n\n def call(self, x, *args, **kwargs):\n mean = tf.math.reduce_mean(x, axis=self.axis, keepdims=True)\n x -= mean\n variance = tf.reduce_mean(tf.math.square(x), axis=self.axis, keepdims=True)\n x *= tf.math.rsqrt(variance + self.epsilon)\n return x * self.gamma + self.beta","repo_name":"MorvanZhou/anime-StyleGAN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"32"} +{"seq_id":"30561806137","text":"#!/usr/bin/env python3\nfrom datetime import time, date, datetime\nfrom ipwhois import IPWhois\nimport ipwhois\nimport json\nimport netmiko\nimport re\nimport sqlite3\n\nMONTHS = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6,\n \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10, \"Nov\": 11, \"Dec\": 12}\nSSH_FAILED_LOGIN_REGEX_JUNIPER = \"(?P\\w+)\\s+(?P\\d+)\\s(?P